Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 0736a6a2e2f0..f3491074b035 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -53,13 +53,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
-void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
-{
-	bh->b_end_io = handler;
-	bh->b_private = private;
-}
-EXPORT_SYMBOL(init_buffer);
-
 inline void touch_buffer(struct buffer_head *bh)
 {
 	trace_block_touch_buffer(bh);
@@ -192,10 +185,9 @@ EXPORT_SYMBOL(end_buffer_write_sync);
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
  * private_lock.
  *
- * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
+ * Hack idea: for the blockdev mapping, private_lock contention
  * may be quite high. This code could TryLock the page, and if that
- * succeeds, there is no need to take private_lock. (But if
- * private_lock is contended then so is mapping->tree_lock).
+ * succeeds, there is no need to take private_lock.
  */
 static struct buffer_head *
 __find_get_block_slow(struct block_device *bdev, sector_t block)
@@ -601,20 +593,21 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  *
  * The caller must hold lock_page_memcg().
  */
-static void __set_page_dirty(struct page *page, struct address_space *mapping,
+void __set_page_dirty(struct page *page, struct address_space *mapping,
 			     int warn)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&mapping->tree_lock, flags);
+	xa_lock_irqsave(&mapping->i_pages, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
 		account_page_dirtied(page, mapping);
-		radix_tree_tag_set(&mapping->page_tree,
+		radix_tree_tag_set(&mapping->i_pages,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
+EXPORT_SYMBOL_GPL(__set_page_dirty);
 
 /*
  * Add a page to the dirty page list.
@@ -922,7 +915,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
 
 	do {
 		if (!buffer_mapped(bh)) {
-			init_buffer(bh, NULL, NULL);
+			bh->b_end_io = NULL;
+			bh->b_private = NULL;
 			bh->b_bdev = bdev;
 			bh->b_blocknr = block;
 			if (uptodate)
@@ -1101,7 +1095,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * inode list.
  *
  * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and mapping->host->i_lock.
+ * i_pages lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
@@ -1517,7 +1511,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
 	 * The get_block cached value has been unconditionally invalidated,
 	 * so real IO is not possible anymore.
 	 */
-	if (offset == 0)
+	if (length == PAGE_SIZE)
 		try_to_release_page(page, 0);
 out:
 	return;
@@ -3014,7 +3008,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
 void guard_bio_eod(int op, struct bio *bio)
 {
 	sector_t maxsector;
-	struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+	struct bio_vec *bvec = bio_last_bvec_all(bio);
 	unsigned truncated_bytes;
 	struct hd_struct *part;
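A note on the locking change in the __set_page_dirty() hunk: mapping->page_tree and mapping->tree_lock were renamed to mapping->i_pages and its embedded lock as part of the page-cache XArray preparation work. At this point i_pages is still backed by a radix tree, which is why radix_tree_tag_set() keeps operating on &mapping->i_pages; only the spelling of the lock changes. As a hedged sketch of include/linux/xarray.h from that era, the wrappers are thin macros over the embedded spinlock:

/* Sketch of the xa_lock wrappers; verify against include/linux/xarray.h. */
#define xa_lock_irqsave(xa, flags) \
	spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
	spin_unlock_irqrestore(&(xa)->xa_lock, flags)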
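With __set_page_dirty() made non-static and EXPORT_SYMBOL_GPL'd, GPL modules can call it from their own set_page_dirty address_space operations. A minimal sketch of such a caller (example_set_page_dirty is hypothetical, not part of this diff; the pattern mirrors __set_page_dirty_buffers() in this same file):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/memcontrol.h>

/* Hypothetical caller sketch -- not part of this diff. */
static int example_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/* __set_page_dirty() requires lock_page_memcg() to be held. */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}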
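The block_invalidatepage() change tightens the release condition: the old test (offset == 0) tried to drop the page's buffers whenever the invalidated range merely started at the beginning of the page, even if the tail of the page remained valid; the new test (length == PAGE_SIZE) only attempts the release when the entire page is invalidated.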
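Finally, the guard_bio_eod() hunk swaps the open-coded last-bvec access for the bio_last_bvec_all() accessor. To the best of my reading of include/linux/bio.h at the time, the helper is behaviorally identical:

/* Sketch of the accessor; verify against include/linux/bio.h. */
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

The point of the accessor is to keep callers working if the bvec layout changes later (e.g. multi-page bvecs).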