[PATCH] __block_write_full_page speedup

Remove all those get_bh()'s and put_bh()'s by extending lock_page() to cover
the troublesome regions.

(get_bh() and put_bh() happen every time, whereas contention on the page's lock
in this path basically never happens).
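
For illustration only, a condensed sketch of the before/after submission loop
(not the literal fs/buffer.c function; declarations, last_bh handling and error
paths are omitted):

	/*
	 * Before: the page lock is dropped before I/O submission, so each
	 * buffer marked for async write is pinned with get_bh() to keep the
	 * b_this_page ring safe to walk, and released with put_bh() after
	 * submit_bh().
	 */
	unlock_page(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			put_bh(bh);	/* drop the get_bh() taken at mark time */
		}
		bh = next;
	} while (bh != head);

	/*
	 * After: the page stays locked across submission, so the locked page
	 * itself keeps the buffer ring stable and the per-buffer
	 * get_bh()/put_bh() pair disappears from this path.
	 */
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh))
			submit_bh(WRITE, bh);
		bh = next;
	} while (bh != head);
	unlock_page(page);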

Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 05937baae9
parent ad576e63e0
Author:    Andrew Morton
Date:      2005-05-05 16:15:47 -07:00
Committer: Linus Torvalds

@@ -1826,7 +1826,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		}
 		if (test_clear_buffer_dirty(bh)) {
 			mark_buffer_async_write(bh);
-			get_bh(bh);
 			last_bh = bh;
 		} else {
 			unlock_buffer(bh);
@@ -1839,20 +1838,19 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	 */
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
-	unlock_page(page);
 
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			submit_bh(WRITE, bh);
 			nr_underway++;
-			put_bh(bh);
 			if (bh == last_bh)
 				break;
 		}
 		bh = next;
 	} while (bh != head);
 	bh = head;
+	unlock_page(page);
 
 	err = 0;
 done:
@@ -1894,7 +1892,6 @@ recover:
 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
-			get_bh(bh);
 			last_bh = bh;
 		} else {
 			/*
@@ -1914,7 +1911,6 @@ recover:
 			clear_buffer_dirty(bh);
 			submit_bh(WRITE, bh);
 			nr_underway++;
-			put_bh(bh);
 			if (bh == last_bh)
 				break;
 		}