From 6fb7a61e98ac311a65bc652a12611d9899994f49 Mon Sep 17 00:00:00 2001
From: Ryusuke Konishi
Date: Thu, 16 Apr 2015 12:46:25 -0700
Subject: nilfs2: do not use async write flag for segment summary buffers

The async write flag was introduced to nilfs2 in commit 7f42ec394156
("nilfs2: fix issue with race condition of competition between segments
for dirty blocks"), but the flag only makes sense for data buffers and
btree node buffers.  It is not needed for segment summary buffers.

This gets rid of those uses as part of refactoring the atomic bit
operations on the buffer state bitmap.

Signed-off-by: Ryusuke Konishi
Cc: Vyacheslav Dubeyko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/nilfs2/segment.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'fs/nilfs2/segment.c')

diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0c3f303baf32..c9a4e6013445 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1588,7 +1588,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
-			set_buffer_async_write(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page) {
 					lock_page(bd_page);
@@ -1688,7 +1687,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 	list_for_each_entry(segbuf, logs, sb_list) {
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
-			clear_buffer_async_write(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page)
 					end_page_writeback(bd_page);
@@ -1768,7 +1766,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 				     b_assoc_buffers) {
 			set_buffer_uptodate(bh);
 			clear_buffer_dirty(bh);
-			clear_buffer_async_write(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page)
 					end_page_writeback(bd_page);
--
cgit

From ead8ecffa3e180202c1096a39f14bbecffb139a1 Mon Sep 17 00:00:00 2001
From: Ryusuke Konishi
Date: Thu, 16 Apr 2015 12:46:28 -0700
Subject: nilfs2: use set_mask_bits() for operations on buffer state bitmap

nilfs_forget_buffer(), nilfs_clear_dirty_page(), and
nilfs_segctor_complete_write() use a number of separate atomic bit
operations on the buffer state bitmap.  This patch reduces their number
by using the set_mask_bits() macro.

Signed-off-by: Ryusuke Konishi
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/nilfs2/page.c    | 24 ++++++++++--------------
 fs/nilfs2/segment.c | 14 ++++++++------
 2 files changed, 18 insertions(+), 20 deletions(-)

(limited to 'fs/nilfs2/segment.c')

diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 700ecbcca55d..45d650addd56 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -89,18 +89,16 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 void nilfs_forget_buffer(struct buffer_head *bh)
 {
 	struct page *page = bh->b_page;
+	const unsigned long clear_bits =
+		(1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
+		 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
+		 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
 
 	lock_buffer(bh);
-	clear_buffer_nilfs_volatile(bh);
-	clear_buffer_nilfs_checked(bh);
-	clear_buffer_nilfs_redirected(bh);
-	clear_buffer_async_write(bh);
-	clear_buffer_dirty(bh);
+	set_mask_bits(&bh->b_state, clear_bits, 0);
 	if (nilfs_page_buffers_clean(page))
 		__nilfs_clear_page_dirty(page);
 
-	clear_buffer_uptodate(bh);
-	clear_buffer_mapped(bh);
 	bh->b_blocknr = -1;
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
@@ -421,6 +419,10 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
 
 	if (page_has_buffers(page)) {
 		struct buffer_head *bh, *head;
+		const unsigned long clear_bits =
+			(1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
+			 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
+			 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
 
 		bh = head = page_buffers(page);
 		do {
@@ -430,13 +432,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
 					"discard block %llu, size %zu",
 					(u64)bh->b_blocknr, bh->b_size);
 			}
-			clear_buffer_async_write(bh);
-			clear_buffer_dirty(bh);
-			clear_buffer_nilfs_volatile(bh);
-			clear_buffer_nilfs_checked(bh);
-			clear_buffer_nilfs_redirected(bh);
-			clear_buffer_uptodate(bh);
-			clear_buffer_mapped(bh);
+			set_mask_bits(&bh->b_state, clear_bits, 0);
 			unlock_buffer(bh);
 		} while (bh = bh->b_this_page, bh != head);
 	}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index c9a4e6013445..c6abbad9b8e3 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/bitops.h>
 #include <linux/bio.h>
 #include <linux/completion.h>
 #include <linux/blkdev.h>
@@ -1785,12 +1786,13 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 		 */
 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 				    b_assoc_buffers) {
-			set_buffer_uptodate(bh);
-			clear_buffer_dirty(bh);
-			clear_buffer_async_write(bh);
-			clear_buffer_delay(bh);
-			clear_buffer_nilfs_volatile(bh);
-			clear_buffer_nilfs_redirected(bh);
+			const unsigned long set_bits = (1 << BH_Uptodate);
+			const unsigned long clear_bits =
+				(1 << BH_Dirty | 1 << BH_Async_Write |
+				 1 << BH_Delay | 1 << BH_NILFS_Volatile |
+				 1 << BH_NILFS_Redirected);
+
+			set_mask_bits(&bh->b_state, clear_bits, set_bits);
 			if (bh == segbuf->sb_super_root) {
 				if (bh->b_page != bd_page) {
 					end_page_writeback(bd_page);
--
cgit
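
A note on set_mask_bits(), the helper the second patch relies on: it collapses a
series of per-flag atomic operations into a single atomic read-modify-write of
bh->b_state. The standalone C program below is only a rough userspace sketch of
that idea (a compare-and-swap loop that clears a mask and sets bits in one step);
it is not the kernel's implementation, and the flag positions used in main() are
placeholders, not the real BH_* enum values.

/*
 * Illustrative userspace sketch of the idea behind set_mask_bits():
 * replace the bits selected by 'mask' with 'bits' in one atomic
 * compare-and-swap loop instead of one atomic RMW per flag.
 * NOT the kernel implementation, which lives in <linux/bitops.h>
 * and operates on bh->b_state with cmpxchg().
 */
#include <stdio.h>
#include <stdatomic.h>

static unsigned long set_mask_bits_sketch(_Atomic unsigned long *word,
					  unsigned long mask,
					  unsigned long bits)
{
	unsigned long old = atomic_load(word);
	unsigned long new;

	do {
		/* Clear the masked bits, then set the requested bits. */
		new = (old & ~mask) | bits;
		/* On failure, 'old' is reloaded with the current value. */
	} while (!atomic_compare_exchange_weak(word, &old, new));

	return new;
}

int main(void)
{
	/* Placeholder flag positions, not the kernel's BH_* enum values. */
	enum { UPTODATE = 0, DIRTY = 1, ASYNC_WRITE = 2 };
	_Atomic unsigned long b_state =
		(1UL << DIRTY) | (1UL << ASYNC_WRITE);

	/* One atomic update: clear Dirty and Async_Write, set Uptodate. */
	set_mask_bits_sketch(&b_state,
			     (1UL << DIRTY) | (1UL << ASYNC_WRITE),
			     1UL << UPTODATE);

	printf("b_state = %#lx\n", (unsigned long)b_state);
	return 0;
}

The point of the conversion is visible in the sketch: a single compare-and-swap
replaces what used to be, for example, the seven separate atomic clear_buffer_*()
calls that nilfs_forget_buffer() issued for each buffer head.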