Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 141
 1 file changed, 68 insertions(+), 73 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 46bc589b7a03..ed26cb1d381d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
  * address_space though.
  */
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	int newly_dirty;
-	struct address_space *mapping = page_mapping(page);
-
-	if (unlikely(!mapping))
-		return !TestSetPageDirty(page);
+	struct buffer_head *head;
+	bool newly_dirty;
 
 	spin_lock(&mapping->private_lock);
-	if (page_has_buffers(page)) {
-		struct buffer_head *head = page_buffers(page);
+	head = folio_buffers(folio);
+	if (head) {
 		struct buffer_head *bh = head;
 
 		do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
 	 * Lock out page's memcg migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	lock_page_memcg(page);
-	newly_dirty = !TestSetPageDirty(page);
+	folio_memcg_lock(folio);
+	newly_dirty = !folio_test_set_dirty(folio);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
-		__set_page_dirty(page, mapping, 1);
+		__folio_mark_dirty(folio, mapping, 1);
 
-	unlock_page_memcg(page);
+	folio_memcg_unlock(folio);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	return newly_dirty;
 }
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
 
 /*
  * Write out and wait upon a list of buffers.
@@ -1235,16 +1232,18 @@ static void bh_lru_install(struct buffer_head *bh)
 	int i;
 
 	check_irqs_on();
+	bh_lru_lock();
+
 	/*
 	 * the refcount of buffer_head in bh_lru prevents dropping the
 	 * attached page(i.e., try_to_free_buffers) so it could cause
 	 * failing page migration.
 	 * Skip putting upcoming bh into bh_lru until migration is done.
 	 */
-	if (lru_cache_disabled())
+	if (lru_cache_disabled()) {
+		bh_lru_unlock();
 		return;
-
-	bh_lru_lock();
+	}
 
 	b = this_cpu_ptr(&bh_lrus);
 	for (i = 0; i < BH_LRU_SIZE; i++) {
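The bh_lru_install() hunk takes bh_lru_lock() before testing lru_cache_disabled(), so the check can no longer race with a concurrent lru_cache_disable() on another CPU. The block_dirty_folio() conversion above also changes the calling convention: the address_space is passed in by the caller instead of being re-derived with page_mapping(), which is why the unlikely(!mapping) check from __set_page_dirty_buffers() is gone. A simplified sketch of the caller side, modelled loosely on folio_mark_dirty() (not a verbatim copy of that function):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static bool folio_mark_dirty_sketch(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	/* The VM resolves the mapping once and hands it down, so
	 * ->dirty_folio() implementations such as block_dirty_folio()
	 * may assume it is non-NULL. */
	if (likely(mapping))
		return mapping->a_ops->dirty_folio(mapping, folio);

	/* No mapping: just set the dirty flag on the folio itself. */
	return !folio_test_set_dirty(folio);
}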
@@ -1482,41 +1481,40 @@ static void discard_buffer(struct buffer_head * bh)
 }
 
 /**
- * block_invalidatepage - invalidate part or all of a buffer-backed page
- *
- * @page: the page which is affected
+ * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
+ * @folio: The folio which is affected.
  * @offset: start of the range to invalidate
  * @length: length of the range to invalidate
  *
- * block_invalidatepage() is called when all or part of the page has become
+ * block_invalidate_folio() is called when all or part of the folio has been
  * invalidated by a truncate operation.
  *
- * block_invalidatepage() does not have to release all buffers, but it must
+ * block_invalidate_folio() does not have to release all buffers, but it must
  * ensure that no dirty buffer is left outside @offset and that no I/O
  * is underway against any of the blocks which are outside the truncation
  * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
-void block_invalidatepage(struct page *page, unsigned int offset,
-			  unsigned int length)
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
 	struct buffer_head *head, *bh, *next;
-	unsigned int curr_off = 0;
-	unsigned int stop = length + offset;
+	size_t curr_off = 0;
+	size_t stop = length + offset;
 
-	BUG_ON(!PageLocked(page));
-	if (!page_has_buffers(page))
-		goto out;
+	BUG_ON(!folio_test_locked(folio));
 
 	/*
 	 * Check for overflow
 	 */
-	BUG_ON(stop > PAGE_SIZE || stop < length);
+	BUG_ON(stop > folio_size(folio) || stop < length);
+
+	head = folio_buffers(folio);
+	if (!head)
+		return;
 
-	head = page_buffers(page);
 	bh = head;
 	do {
-		unsigned int next_off = curr_off + bh->b_size;
+		size_t next_off = curr_off + bh->b_size;
 		next = bh->b_this_page;
 
 		/*
@@ -1535,21 +1533,21 @@ void block_invalidatepage(struct page *page, unsigned int offset,
 	} while (bh != head);
 
 	/*
-	 * We release buffers only if the entire page is being invalidated.
+	 * We release buffers only if the entire folio is being invalidated.
 	 * The get_block cached value has been unconditionally invalidated,
 	 * so real IO is not possible anymore.
 	 */
-	if (length == PAGE_SIZE)
-		try_to_release_page(page, 0);
+	if (length == folio_size(folio))
+		filemap_release_folio(folio, 0);
 out:
 	return;
 }
-EXPORT_SYMBOL(block_invalidatepage);
+EXPORT_SYMBOL(block_invalidate_folio);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
+ * block_dirty_folio() via private_lock.  try_to_free_buffers
  * is already excluded via the page lock.
 */
 void create_empty_buffers(struct page *page,
@@ -1724,12 +1722,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
 
 	/*
-	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+	 * Be very careful.  We have no exclusion from block_dirty_folio
 	 * here, and the (potentially unmapped) buffers may become dirty at
 	 * any time.  If a buffer becomes dirty here after we've inspected it
 	 * then we just miss that fact, and the page stays dirty.
 	 *
-	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+	 * Buffers outside i_size may be dirtied by block_dirty_folio;
 	 * handle that here by just cleaning them.
 	 */
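With both conversions in place, a block-based filesystem wires the new hooks into its address_space_operations; the rest of this patch series switches the in-tree callers in exactly this way. A minimal sketch with a hypothetical "myfs" (the remaining methods are unchanged and elided):

#include <linux/fs.h>
#include <linux/buffer_head.h>

static const struct address_space_operations myfs_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	/* .readpage, .writepage, .write_begin, ... as before */
};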
@@ -1969,34 +1967,34 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
 	}
 }
 
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		get_block_t *get_block, const struct iomap *iomap)
 {
 	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned block_start, block_end;
 	sector_t block;
 	int err = 0;
 	unsigned blocksize, bbits;
 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
 
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 	BUG_ON(from > PAGE_SIZE);
 	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
-	head = create_page_buffers(page, inode, 0);
+	head = create_page_buffers(&folio->page, inode, 0);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
 
 	for(bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start=block_end, bh = bh->b_this_page) {
 		block_end = block_start + blocksize;
 		if (block_end <= from || block_start >= to) {
-			if (PageUptodate(page)) {
+			if (folio_test_uptodate(folio)) {
 				if (!buffer_uptodate(bh))
 					set_buffer_uptodate(bh);
 			}
@@ -2016,20 +2014,20 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 			if (buffer_new(bh)) {
 				clean_bdev_bh_alias(bh);
-				if (PageUptodate(page)) {
+				if (folio_test_uptodate(folio)) {
 					clear_buffer_new(bh);
 					set_buffer_uptodate(bh);
 					mark_buffer_dirty(bh);
 					continue;
 				}
 				if (block_end > to || block_start < from)
-					zero_user_segments(page,
+					folio_zero_segments(folio,
 						to, block_end,
 						block_start, from);
 				continue;
 			}
 		}
-		if (PageUptodate(page)) {
+		if (folio_test_uptodate(folio)) {
 			if (!buffer_uptodate(bh))
 				set_buffer_uptodate(bh);
 			continue;
@@ -2050,14 +2048,15 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 			err = -EIO;
 	}
 	if (unlikely(err))
-		page_zero_new_buffers(page, from, to);
+		page_zero_new_buffers(&folio->page, from, to);
 	return err;
 }
 
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block)
 {
-	return __block_write_begin_int(page, pos, len, get_block, NULL);
+	return __block_write_begin_int(page_folio(page), pos, len, get_block,
+				       NULL);
 }
 EXPORT_SYMBOL(__block_write_begin);
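Note that __block_write_begin() keeps its struct page signature and simply wraps the folio-based __block_write_begin_int(), so existing callers compile unchanged. A sketch of such a caller, where myfs_get_block is a hypothetical get_block_t callback and the page is assumed locked and in the page cache:

#include <linux/buffer_head.h>

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_prepare_write(struct page *page, loff_t pos, unsigned len)
{
	/* Map (and read in, if needed) the buffers covering
	 * [pos, pos + len); internally this now operates on
	 * page_folio(page). */
	return __block_write_begin(page, pos, len, myfs_get_block);
}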
@@ -2205,29 +2204,27 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 EXPORT_SYMBOL(generic_write_end);
 
 /*
- * block_is_partially_uptodate checks whether buffers within a page are
+ * block_is_partially_uptodate checks whether buffers within a folio are
  * uptodate or not.
  *
- * Returns true if all buffers which correspond to a file portion
- * we want to read are uptodate.
+ * Returns true if all buffers which correspond to the specified part
+ * of the folio are uptodate.
 */
-int block_is_partially_uptodate(struct page *page, unsigned long from,
-					unsigned long count)
+bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 {
 	unsigned block_start, block_end, blocksize;
 	unsigned to;
 	struct buffer_head *bh, *head;
-	int ret = 1;
-
-	if (!page_has_buffers(page))
-		return 0;
+	bool ret = true;
 
-	head = page_buffers(page);
+	head = folio_buffers(folio);
+	if (!head)
+		return false;
 	blocksize = head->b_size;
-	to = min_t(unsigned, PAGE_SIZE - from, count);
+	to = min_t(unsigned, folio_size(folio) - from, count);
 	to = from + to;
-	if (from < blocksize && to > PAGE_SIZE - blocksize)
-		return 0;
+	if (from < blocksize && to > folio_size(folio) - blocksize)
+		return false;
 
 	bh = head;
 	block_start = 0;
@@ -2235,7 +2232,7 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
 		block_end = block_start + blocksize;
 		if (block_end > from && block_start < to) {
 			if (!buffer_uptodate(bh)) {
-				ret = 0;
+				ret = false;
 				break;
 			}
 			if (block_end >= to)
@@ -3023,12 +3020,16 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
 		clear_buffer_write_io_error(bh);
 
-	bio = bio_alloc(GFP_NOIO, 1);
+	if (buffer_meta(bh))
+		op_flags |= REQ_META;
+	if (buffer_prio(bh))
+		op_flags |= REQ_PRIO;
+
+	bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO);
 
 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-	bio_set_dev(bio, bh->b_bdev);
 	bio->bi_write_hint = write_hint;
 
 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
@@ -3037,12 +3038,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
 
-	if (buffer_meta(bh))
-		op_flags |= REQ_META;
-	if (buffer_prio(bh))
-		op_flags |= REQ_PRIO;
-	bio_set_op_attrs(bio, op, op_flags);
-
 	/* Take care of bh's that straddle the end of the device */
 	guard_bio_eod(bio);
@@ -3184,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
- * total exclusion from __set_page_dirty_buffers().  That is obtained with
+ * total exclusion from block_dirty_folio().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
@@ -3251,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
 	 * the page also.
 	 *
 	 * private_lock must be held over this entire operation in order
-	 * to synchronise against __set_page_dirty_buffers and prevent the
+	 * to synchronise against block_dirty_folio and prevent the
 	 * dirty bit from being lost.
 	 */
 	if (ret)
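The submit_bh_wbc() hunks adopt the newer bio_alloc() calling convention: the target block device, the request op and the flags are all supplied at allocation time, which is why the separate bio_set_dev() and bio_set_op_attrs() calls disappear and the REQ_META/REQ_PRIO flags must now be computed before the allocation. A standalone sketch of the same idiom (the function and parameter names are hypothetical):

#include <linux/bio.h>
#include <linux/gfp.h>

static void myfs_submit_meta_write(struct block_device *bdev,
				   struct page *page, sector_t sector,
				   bio_end_io_t *end_io)
{
	/* Device, op and flags go straight into bio_alloc();
	 * GFP_NOIO allocations here will not fail. */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_META,
				    GFP_NOIO);

	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = end_io;
	submit_bio(bio);
}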