From cd1067beeebfe23fc8cab071790fefb273962d51 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:26 +0000 Subject: buffer: Add folio_buffers() While there is no intent to use large folios in filesystems using buffer heads, converting the filesystems to use single-page folios is still worth doing to remove legacy infrastructure and hidden calls to compound_head(). These helper functions are needed for that conversion to take place. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- include/linux/buffer_head.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux/buffer_head.h') diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 36f33685c8c0..3451f1fcda12 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -144,6 +144,7 @@ BUFFER_FNS(Defer_Completion, defer_completion) ((struct buffer_head *)page_private(page)); \ }) #define page_has_buffers(page) PagePrivate(page) +#define folio_buffers(folio) folio_get_private(folio) void buffer_check_dirty_writeback(struct page *page, bool *dirty, bool *writeback); -- cgit From 2e7e80f7e7e9dbbb3c2a85ee923ca32826052816 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:27 +0000 Subject: fs: Convert is_partially_uptodate to folios Since the uptodate property is maintained on a per-folio basis, the is_partially_uptodate method should also take a folio. Fix the types at the same time so it's clear that it returns true/false and takes the count in bytes, not blocks. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- Documentation/filesystems/locking.rst | 2 +- Documentation/filesystems/vfs.rst | 10 +++++----- fs/buffer.c | 26 ++++++++++++------------- fs/iomap/buffered-io.c | 36 ++++++++++++++++------------------- include/linux/buffer_head.h | 3 +-- include/linux/fs.h | 4 ++-- include/linux/iomap.h | 3 +-- mm/filemap.c | 4 ++-- 8 files changed, 40 insertions(+), 48 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 3f9b1497ebb8..88b33524687f 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -258,7 +258,7 @@ prototypes:: int (*migratepage)(struct address_space *, struct page *, struct page *); void (*putback_page) (struct page *); int (*launder_page)(struct page *); - int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); + bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct file *); int (*swap_deactivate)(struct file *); diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index bf5c48066fac..da3e7b470f0a 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -747,8 +747,8 @@ cache in your filesystem. 
The following members are defined: void (*putback_page) (struct page *); int (*launder_page) (struct page *); - int (*is_partially_uptodate) (struct page *, unsigned long, - unsigned long); + bool (*is_partially_uptodate) (struct folio *, size_t from, + size_t count); void (*is_dirty_writeback) (struct page *, bool *, bool *); int (*error_remove_page) (struct mapping *mapping, struct page *page); int (*swap_activate)(struct file *); @@ -937,9 +937,9 @@ cache in your filesystem. The following members are defined: ``is_partially_uptodate`` Called by the VM when reading a file through the pagecache when - the underlying blocksize != pagesize. If the required block is - up to date then the read can complete without needing the IO to - bring the whole page up to date. + the underlying blocksize is smaller than the size of the folio. + If the required block is up to date then the read can complete + without needing I/O to bring the whole page up to date. ``is_dirty_writeback`` Called by the VM when attempting to reclaim a page. The VM uses diff --git a/fs/buffer.c b/fs/buffer.c index 8e112b6bd371..929061995cf8 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2206,29 +2206,27 @@ int generic_write_end(struct file *file, struct address_space *mapping, EXPORT_SYMBOL(generic_write_end); /* - * block_is_partially_uptodate checks whether buffers within a page are + * block_is_partially_uptodate checks whether buffers within a folio are * uptodate or not. * - * Returns true if all buffers which correspond to a file portion - * we want to read are uptodate. + * Returns true if all buffers which correspond to the specified part + * of the folio are uptodate. */ -int block_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count) +bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { unsigned block_start, block_end, blocksize; unsigned to; struct buffer_head *bh, *head; - int ret = 1; - - if (!page_has_buffers(page)) - return 0; + bool ret = true; - head = page_buffers(page); + head = folio_buffers(folio); + if (!head) + return false; blocksize = head->b_size; - to = min_t(unsigned, PAGE_SIZE - from, count); + to = min_t(unsigned, folio_size(folio) - from, count); to = from + to; - if (from < blocksize && to > PAGE_SIZE - blocksize) - return 0; + if (from < blocksize && to > folio_size(folio) - blocksize) + return false; bh = head; block_start = 0; @@ -2236,7 +2234,7 @@ int block_is_partially_uptodate(struct page *page, unsigned long from, block_end = block_start + blocksize; if (block_end > from && block_start < to) { if (!buffer_uptodate(bh)) { - ret = 0; + ret = false; break; } if (block_end >= to) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index d020a2e81a24..da0a7b15a64e 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -424,37 +424,33 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) EXPORT_SYMBOL_GPL(iomap_readahead); /* - * iomap_is_partially_uptodate checks whether blocks within a page are + * iomap_is_partially_uptodate checks whether blocks within a folio are * uptodate or not. * - * Returns true if all blocks which correspond to a file portion - * we want to read within the page are uptodate. + * Returns true if all blocks which correspond to the specified part + * of the folio are uptodate. 
*/ -int -iomap_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count) +bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { - struct folio *folio = page_folio(page); struct iomap_page *iop = to_iomap_page(folio); - struct inode *inode = page->mapping->host; - unsigned len, first, last; - unsigned i; + struct inode *inode = folio->mapping->host; + size_t len; + unsigned first, last, i; - /* Limit range to one page */ - len = min_t(unsigned, PAGE_SIZE - from, count); + if (!iop) + return false; + + /* Limit range to this folio */ + len = min(folio_size(folio) - from, count); /* First and last blocks in range within page */ first = from >> inode->i_blkbits; last = (from + len - 1) >> inode->i_blkbits; - if (iop) { - for (i = first; i <= last; i++) - if (!test_bit(i, iop->uptodate)) - return 0; - return 1; - } - - return 0; + for (i = first; i <= last; i++) + if (!test_bit(i, iop->uptodate)) + return false; + return true; } EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 3451f1fcda12..79d465057889 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -225,8 +225,7 @@ int __block_write_full_page(struct inode *inode, struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); -int block_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count); +bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, get_block_t *get_block); int __block_write_begin(struct page *page, loff_t pos, unsigned len, diff --git a/include/linux/fs.h b/include/linux/fs.h index e2d892b201b0..5939e6694ada 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -400,8 +400,8 @@ struct address_space_operations { bool (*isolate_page)(struct page *, isolate_mode_t); void (*putback_page)(struct page *); int (*launder_page) (struct page *); - int (*is_partially_uptodate) (struct page *, unsigned long, - unsigned long); + bool (*is_partially_uptodate) (struct folio *, size_t from, + size_t count); void (*is_dirty_writeback) (struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 97a3a2edb585..3bcbb264f83f 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -227,8 +227,7 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); -int iomap_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count); +bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); void iomap_invalidatepage(struct page *page, unsigned int offset, diff --git a/mm/filemap.c b/mm/filemap.c index ad8c39d90bf9..9639b844dd31 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2452,7 +2452,7 @@ static bool filemap_range_uptodate(struct address_space *mapping, pos -= folio_pos(folio); } - return mapping->a_ops->is_partially_uptodate(&folio->page, pos, count); + return 
mapping->a_ops->is_partially_uptodate(folio, pos, count); } static int filemap_update_page(struct kiocb *iocb, @@ -2844,7 +2844,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas, offset = offset_in_folio(folio, start) & ~(bsz - 1); do { - if (ops->is_partially_uptodate(&folio->page, offset, bsz) == + if (ops->is_partially_uptodate(folio, offset, bsz) == seek_data) break; start = (start + bsz) & ~(bsz - 1); -- cgit From 7ba13abbd31ee9265e88d7dc029c0f786e665192 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:34 +0000 Subject: fs: Turn block_invalidatepage into block_invalidate_folio Remove special-casing of a NULL invalidatepage, since there is no more block_invalidatepage. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- block/fops.c | 1 + fs/adfs/inode.c | 1 + fs/affs/file.c | 2 ++ fs/bfs/file.c | 1 + fs/buffer.c | 37 ++++++++++++++++++------------------- fs/ecryptfs/mmap.c | 1 + fs/exfat/inode.c | 1 + fs/ext2/inode.c | 2 ++ fs/ext4/inode.c | 32 ++++++++++++++++---------------- fs/fat/inode.c | 1 + fs/gfs2/meta_io.c | 2 ++ fs/hfs/inode.c | 2 ++ fs/hfsplus/inode.c | 2 ++ fs/hpfs/file.c | 1 + fs/jfs/inode.c | 1 + fs/minix/inode.c | 1 + fs/nilfs2/inode.c | 2 +- fs/nilfs2/mdt.c | 1 + fs/ntfs/aops.c | 5 +++-- fs/ocfs2/aops.c | 2 +- fs/omfs/file.c | 1 + fs/sysv/itree.c | 1 + fs/udf/file.c | 1 + fs/udf/inode.c | 1 + fs/ufs/inode.c | 1 + include/linux/buffer_head.h | 3 +-- mm/truncate.c | 4 ---- 27 files changed, 65 insertions(+), 45 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/block/fops.c b/block/fops.c index 4f59e0f5bf30..8ce1dccd15b9 100644 --- a/block/fops.c +++ b/block/fops.c @@ -430,6 +430,7 @@ static int blkdev_writepages(struct address_space *mapping, const struct address_space_operations def_blk_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = blkdev_readpage, .readahead = blkdev_readahead, .writepage = blkdev_writepage, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 5156821bfe6a..5c423254895a 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -74,6 +74,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations adfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = adfs_readpage, .writepage = adfs_writepage, .write_begin = adfs_write_begin, diff --git a/fs/affs/file.c b/fs/affs/file.c index 75ebd2b576ca..6d4921f97162 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -454,6 +454,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations affs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, @@ -835,6 +836,7 @@ err_bh: const struct address_space_operations affs_aops_ofs = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage_ofs, //.writepage = affs_writepage_ofs, .write_begin = affs_write_begin_ofs, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 7f8544abf636..2e42b82edb58 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -189,6 +189,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations 
bfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = bfs_readpage, .writepage = bfs_writepage, .write_begin = bfs_write_begin, diff --git a/fs/buffer.c b/fs/buffer.c index 929061995cf8..5fe02e5a9807 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1482,41 +1482,40 @@ static void discard_buffer(struct buffer_head * bh) } /** - * block_invalidatepage - invalidate part or all of a buffer-backed page - * - * @page: the page which is affected + * block_invalidate_folio - Invalidate part or all of a buffer-backed folio. + * @folio: The folio which is affected. * @offset: start of the range to invalidate * @length: length of the range to invalidate * - * block_invalidatepage() is called when all or part of the page has become + * block_invalidate_folio() is called when all or part of the folio has been * invalidated by a truncate operation. * - * block_invalidatepage() does not have to release all buffers, but it must + * block_invalidate_folio() does not have to release all buffers, but it must * ensure that no dirty buffer is left outside @offset and that no I/O * is underway against any of the blocks which are outside the truncation * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -void block_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct buffer_head *head, *bh, *next; - unsigned int curr_off = 0; - unsigned int stop = length + offset; + size_t curr_off = 0; + size_t stop = length + offset; - BUG_ON(!PageLocked(page)); - if (!page_has_buffers(page)) - goto out; + BUG_ON(!folio_test_locked(folio)); /* * Check for overflow */ - BUG_ON(stop > PAGE_SIZE || stop < length); + BUG_ON(stop > folio_size(folio) || stop < length); + + head = folio_buffers(folio); + if (!head) + return; - head = page_buffers(page); bh = head; do { - unsigned int next_off = curr_off + bh->b_size; + size_t next_off = curr_off + bh->b_size; next = bh->b_this_page; /* @@ -1535,16 +1534,16 @@ void block_invalidatepage(struct page *page, unsigned int offset, } while (bh != head); /* - * We release buffers only if the entire page is being invalidated. + * We release buffers only if the entire folio is being invalidated. * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. 
*/ - if (length == PAGE_SIZE) - try_to_release_page(page, 0); + if (length == folio_size(folio)) + filemap_release_folio(folio, 0); out: return; } -EXPORT_SYMBOL(block_invalidatepage); +EXPORT_SYMBOL(block_invalidate_folio); /* diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 7d85e64ea62f..bf7f35b375b7 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -546,6 +546,7 @@ const struct address_space_operations ecryptfs_aops = { */ #ifdef CONFIG_BLOCK .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, #endif .writepage = ecryptfs_writepage, .readpage = ecryptfs_readpage, diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index df805bd05508..5ed471eb973b 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -491,6 +491,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations exfat_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = exfat_readpage, .readahead = exfat_readahead, .writepage = exfat_writepage, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 602578b72d8c..1e14777c3ca6 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -968,6 +968,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc const struct address_space_operations ext2_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_writepage, @@ -983,6 +984,7 @@ const struct address_space_operations ext2_aops = { const struct address_space_operations ext2_nobh_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_nobh_writepage, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 57800ecbe466..07ef3f84db9e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -137,8 +137,6 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode, new_size); } -static void ext4_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents); @@ -1571,16 +1569,18 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; + struct folio *folio = page_folio(page); - BUG_ON(!PageLocked(page)); - BUG_ON(PageWriteback(page)); + BUG_ON(!folio_test_locked(folio)); + BUG_ON(folio_test_writeback(folio)); if (invalidate) { - if (page_mapped(page)) - clear_page_dirty_for_io(page); - block_invalidatepage(page, 0, PAGE_SIZE); - ClearPageUptodate(page); + if (folio_mapped(folio)) + folio_clear_dirty_for_io(folio); + block_invalidate_folio(folio, 0, + folio_size(folio)); + folio_clear_uptodate(folio); } - unlock_page(page); + folio_unlock(folio); } pagevec_release(&pvec); } @@ -3183,15 +3183,15 @@ static void ext4_readahead(struct readahead_control *rac) ext4_mpage_readpages(inode, rac, NULL); } -static void ext4_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ext4_invalidate_folio(struct folio *folio, size_t offset, + size_t length) { - trace_ext4_invalidatepage(page, offset, length); + trace_ext4_invalidatepage(&folio->page, offset, length); /* No journalling happens on data buffers when this function is used */ - 
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); + WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio))); - block_invalidatepage(page, offset, length); + block_invalidate_folio(folio, offset, length); } static int __ext4_journalled_invalidatepage(struct page *page, @@ -3583,7 +3583,7 @@ static const struct address_space_operations ext4_aops = { .write_end = ext4_write_end, .set_page_dirty = ext4_set_page_dirty, .bmap = ext4_bmap, - .invalidatepage = ext4_invalidatepage, + .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, @@ -3618,7 +3618,7 @@ static const struct address_space_operations ext4_da_aops = { .write_end = ext4_da_write_end, .set_page_dirty = ext4_set_page_dirty, .bmap = ext4_bmap, - .invalidatepage = ext4_invalidatepage, + .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index a6f1c6d426d1..1e2f1e24a073 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -343,6 +343,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations fat_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = fat_readpage, .readahead = fat_readahead, .writepage = fat_writepage, diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 72d30a682ece..d23c8b035447 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -90,12 +90,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb const struct address_space_operations gfs2_meta_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, }; const struct address_space_operations gfs2_rgrp_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, }; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 2a5143246282..029d1869a224 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -160,6 +160,7 @@ static int hfs_writepages(struct address_space *mapping, const struct address_space_operations hfs_btree_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, .write_begin = hfs_write_begin, @@ -170,6 +171,7 @@ const struct address_space_operations hfs_btree_aops = { const struct address_space_operations hfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, .write_begin = hfs_write_begin, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index d08a8d1d40a4..a91b9b5e92a8 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -157,6 +157,7 @@ static int hfsplus_writepages(struct address_space *mapping, const struct address_space_operations hfsplus_btree_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, @@ -167,6 +168,7 @@ const struct address_space_operations hfsplus_btree_aops = { const struct address_space_operations hfsplus_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = 
block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index fb37f57130aa..cf68f5e76ddd 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -246,6 +246,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, const struct address_space_operations hpfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hpfs_readpage, .writepage = hpfs_writepage, .readahead = hpfs_readahead, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 57ab424c05ff..3950b3d610a0 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -358,6 +358,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) const struct address_space_operations jfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = jfs_readpage, .readahead = jfs_readahead, .writepage = jfs_writepage, diff --git a/fs/minix/inode.c b/fs/minix/inode.c index a71f1cf894b9..2295804d1893 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -443,6 +443,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations minix_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = minix_readpage, .writepage = minix_writepage, .write_begin = minix_write_begin, diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index e3d807d5b83a..153f0569dcf2 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -304,7 +304,7 @@ const struct address_space_operations nilfs_aops = { .write_begin = nilfs_write_begin, .write_end = nilfs_write_end, /* .releasepage = nilfs_releasepage, */ - .invalidatepage = block_invalidatepage, + .invalidate_folio = block_invalidate_folio, .direct_IO = nilfs_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, }; diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 4b3d33cf0041..72adca629bc9 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -435,6 +435,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) static const struct address_space_operations def_mdt_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .writepage = nilfs_mdt_write_page, }; diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index bb0a43860ad2..6858bf6df49a 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -1350,12 +1350,13 @@ retry_writepage: /* Is the page fully outside i_size? (truncate in progress) */ if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT)) { + struct folio *folio = page_folio(page); /* * The page may have dirty, unmapped buffers. Make them * freeable here, so the page does not leak. 
*/ - block_invalidatepage(page, 0, PAGE_SIZE); - unlock_page(page); + block_invalidate_folio(folio, 0, folio_size(folio)); + folio_unlock(folio); ntfs_debug("Write outside i_size - truncated?"); return 0; } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 498da317580a..b274061e22a7 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2461,7 +2461,7 @@ const struct address_space_operations ocfs2_aops = { .write_end = ocfs2_write_end, .bmap = ocfs2_bmap, .direct_IO = ocfs2_direct_IO, - .invalidatepage = block_invalidatepage, + .invalidate_folio = block_invalidate_folio, .releasepage = ocfs2_releasepage, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 89725b15a64b..139d6a21dca1 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -373,6 +373,7 @@ const struct inode_operations omfs_file_inops = { const struct address_space_operations omfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = omfs_readpage, .readahead = omfs_readahead, .writepage = omfs_writepage, diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 749385015a8d..d39984a1d4d3 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -496,6 +496,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations sysv_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = sysv_readpage, .writepage = sysv_writepage, .write_begin = sysv_write_begin, diff --git a/fs/udf/file.c b/fs/udf/file.c index 1baff8ddb754..a91011a7bb88 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -126,6 +126,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin const struct address_space_operations udf_adinicb_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, .write_begin = udf_adinicb_write_begin, diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ea8f6cd01f50..ab98c7aaf9f9 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -236,6 +236,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations udf_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = udf_readpage, .readahead = udf_readahead, .writepage = udf_writepage, diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index ac628de69601..2d005788c24d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -527,6 +527,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations ufs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = ufs_readpage, .writepage = ufs_writepage, .write_begin = ufs_write_begin, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 79d465057889..9ee9d003d736 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -217,8 +217,7 @@ extern int buffer_heads_over_limit; * Generic address_space_operations implementations for buffer_head-backed * address_spaces. 
*/ -void block_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); +void block_invalidate_folio(struct folio *folio, size_t offset, size_t length); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int __block_write_full_page(struct inode *inode, struct page *page, diff --git a/mm/truncate.c b/mm/truncate.c index b9ad298e6ce7..28650151091a 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -163,10 +163,6 @@ void folio_invalidate(struct folio *folio, size_t offset, size_t length) } invalidatepage = aops->invalidatepage; -#ifdef CONFIG_BLOCK - if (!invalidatepage) - invalidatepage = block_invalidatepage; -#endif if (invalidatepage) (*invalidatepage)(&folio->page, offset, length); } -- cgit From e621900ad28b748e058b81d6078a5d5eb37b3973 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:22:12 +0000 Subject: fs: Convert __set_page_dirty_buffers to block_dirty_folio Convert all callers; mostly this is just changing the aops to point at it, but a few implementations need a little more work. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- block/fops.c | 2 +- fs/adfs/inode.c | 2 +- fs/affs/file.c | 4 ++-- fs/bfs/file.c | 2 +- fs/buffer.c | 33 +++++++++++++++------------------ fs/ecryptfs/mmap.c | 2 +- fs/exfat/inode.c | 2 +- fs/ext2/inode.c | 8 ++++---- fs/ext4/inode.c | 12 ++++++------ fs/fat/inode.c | 2 +- fs/gfs2/aops.c | 16 +++++----------- fs/gfs2/meta_io.c | 4 ++-- fs/hfs/inode.c | 4 ++-- fs/hfsplus/inode.c | 4 ++-- fs/hpfs/file.c | 2 +- fs/jfs/inode.c | 2 +- fs/minix/inode.c | 2 +- fs/mpage.c | 2 +- fs/nilfs2/mdt.c | 4 ++-- fs/ntfs/aops.c | 12 ++++++------ fs/ntfs3/inode.c | 2 +- fs/ocfs2/aops.c | 2 +- fs/omfs/file.c | 2 +- fs/reiserfs/inode.c | 14 +++++++------- fs/sysv/itree.c | 2 +- fs/udf/file.c | 2 +- fs/udf/inode.c | 2 +- fs/ufs/inode.c | 2 +- include/linux/buffer_head.h | 2 +- mm/filemap.c | 4 ++-- mm/page-writeback.c | 2 +- mm/rmap.c | 4 ++-- 32 files changed, 76 insertions(+), 85 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/block/fops.c b/block/fops.c index 8ce1dccd15b9..796a78fd1583 100644 --- a/block/fops.c +++ b/block/fops.c @@ -429,7 +429,7 @@ static int blkdev_writepages(struct address_space *mapping, } const struct address_space_operations def_blk_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = blkdev_readpage, .readahead = blkdev_readahead, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 5c423254895a..561bc748c04a 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -73,7 +73,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) } static const struct address_space_operations adfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = adfs_readpage, .writepage = adfs_writepage, diff --git a/fs/affs/file.c b/fs/affs/file.c index 6d4921f97162..b3f81d84ff4c 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -453,7 +453,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations affs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage, .writepage = 
affs_writepage, @@ -835,7 +835,7 @@ err_bh: } const struct address_space_operations affs_aops_ofs = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage_ofs, //.writepage = affs_writepage_ofs, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 2e42b82edb58..03139344568f 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -188,7 +188,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations bfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = bfs_readpage, .writepage = bfs_writepage, diff --git a/fs/buffer.c b/fs/buffer.c index 5fe02e5a9807..28b9739b719b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode); * FIXME: may need to call ->reservepage here as well. That's rather up to the * address_space though. */ -int __set_page_dirty_buffers(struct page *page) +bool block_dirty_folio(struct address_space *mapping, struct folio *folio) { - int newly_dirty; - struct address_space *mapping = page_mapping(page); - - if (unlikely(!mapping)) - return !TestSetPageDirty(page); + struct buffer_head *head; + bool newly_dirty; spin_lock(&mapping->private_lock); - if (page_has_buffers(page)) { - struct buffer_head *head = page_buffers(page); + head = folio_buffers(folio); + if (head) { struct buffer_head *bh = head; do { @@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page) * Lock out page's memcg migration to keep PageDirty * synchronized with per-memcg dirty page counters. */ - lock_page_memcg(page); - newly_dirty = !TestSetPageDirty(page); + folio_memcg_lock(folio); + newly_dirty = !folio_test_set_dirty(folio); spin_unlock(&mapping->private_lock); if (newly_dirty) - __set_page_dirty(page, mapping, 1); + __folio_mark_dirty(folio, mapping, 1); - unlock_page_memcg(page); + folio_memcg_unlock(folio); if (newly_dirty) __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); return newly_dirty; } -EXPORT_SYMBOL(__set_page_dirty_buffers); +EXPORT_SYMBOL(block_dirty_folio); /* * Write out and wait upon a list of buffers. @@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio); /* * We attach and possibly dirty the buffers atomically wrt - * __set_page_dirty_buffers() via private_lock. try_to_free_buffers + * block_dirty_folio() via private_lock. try_to_free_buffers * is already excluded via the page lock. */ void create_empty_buffers(struct page *page, @@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page, (1 << BH_Dirty)|(1 << BH_Uptodate)); /* - * Be very careful. We have no exclusion from __set_page_dirty_buffers + * Be very careful. We have no exclusion from block_dirty_folio * here, and the (potentially unmapped) buffers may become dirty at * any time. If a buffer becomes dirty here after we've inspected it * then we just miss that fact, and the page stays dirty. * - * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; + * Buffers outside i_size may be dirtied by block_dirty_folio; * handle that here by just cleaning them. */ @@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer); * * The same applies to regular filesystem pages: if all the buffers are * clean then we set the page clean and proceed. To do that, we require - * total exclusion from __set_page_dirty_buffers(). That is obtained with + * total exclusion from block_dirty_folio(). 
That is obtained with * private_lock. * * try_to_free_buffers() is non-blocking. @@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page) * the page also. * * private_lock must be held over this entire operation in order - * to synchronise against __set_page_dirty_buffers and prevent the + * to synchronise against block_dirty_folio and prevent the * dirty bit from being lost. */ if (ret) diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index bf7f35b375b7..9aabcb2f52e9 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -545,7 +545,7 @@ const struct address_space_operations ecryptfs_aops = { * feedback. */ #ifdef CONFIG_BLOCK - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, #endif .writepage = ecryptfs_writepage, diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 5ed471eb973b..fc0ea1684880 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -490,7 +490,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from) } static const struct address_space_operations exfat_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = exfat_readpage, .readahead = exfat_readahead, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 9b579ee56eaf..d9452a051198 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -967,8 +967,8 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc } const struct address_space_operations ext2_aops = { - .set_page_dirty = __set_page_dirty_buffers, - .invalidate_folio = block_invalidate_folio, + .dirty_folio = block_dirty_folio, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_writepage, @@ -983,8 +983,8 @@ const struct address_space_operations ext2_aops = { }; const struct address_space_operations ext2_nobh_aops = { - .set_page_dirty = __set_page_dirty_buffers, - .invalidate_folio = block_invalidate_folio, + .dirty_folio = block_dirty_folio, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_nobh_writepage, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c48dbbf0e9b2..4c34104a94f0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3560,11 +3560,11 @@ static bool ext4_journalled_dirty_folio(struct address_space *mapping, return filemap_dirty_folio(mapping, folio); } -static int ext4_set_page_dirty(struct page *page) +static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio) { - WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); - WARN_ON_ONCE(!page_has_buffers(page)); - return __set_page_dirty_buffers(page); + WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio)); + WARN_ON_ONCE(!folio_buffers(folio)); + return block_dirty_folio(mapping, folio); } static int ext4_iomap_swap_activate(struct swap_info_struct *sis, @@ -3581,7 +3581,7 @@ static const struct address_space_operations ext4_aops = { .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_write_end, - .set_page_dirty = ext4_set_page_dirty, + .dirty_folio = ext4_dirty_folio, .bmap = ext4_bmap, .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, @@ -3616,7 +3616,7 @@ static const struct address_space_operations ext4_da_aops = { .writepages = ext4_writepages, .write_begin = ext4_da_write_begin, .write_end = ext4_da_write_end, - .set_page_dirty = 
ext4_set_page_dirty, + .dirty_folio = ext4_dirty_folio, .bmap = ext4_bmap, .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 1e2f1e24a073..86957dd07bda 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -342,7 +342,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from) } static const struct address_space_operations fat_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = fat_readpage, .readahead = fat_readahead, diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 7c096a75d703..72c9f31ce724 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -606,18 +606,12 @@ out: gfs2_trans_end(sdp); } -/** - * jdata_set_page_dirty - Page dirtying function - * @page: The page to dirty - * - * Returns: 1 if it dirtyed the page, or 0 otherwise - */ - -static int jdata_set_page_dirty(struct page *page) +static bool jdata_dirty_folio(struct address_space *mapping, + struct folio *folio) { if (current->journal_info) - SetPageChecked(page); - return __set_page_dirty_buffers(page); + folio_set_checked(folio); + return block_dirty_folio(mapping, folio); } /** @@ -795,7 +789,7 @@ static const struct address_space_operations gfs2_jdata_aops = { .writepages = gfs2_jdata_writepages, .readpage = gfs2_readpage, .readahead = gfs2_readahead, - .set_page_dirty = jdata_set_page_dirty, + .dirty_folio = jdata_dirty_folio, .bmap = gfs2_bmap, .invalidate_folio = gfs2_invalidate_folio, .releasepage = gfs2_releasepage, diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index d23c8b035447..ac4d27ccd87d 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -89,14 +89,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb } const struct address_space_operations gfs2_meta_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, }; const struct address_space_operations gfs2_rgrp_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 029d1869a224..55f45e9b4930 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -159,7 +159,7 @@ static int hfs_writepages(struct address_space *mapping, } const struct address_space_operations hfs_btree_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, @@ -170,7 +170,7 @@ const struct address_space_operations hfs_btree_aops = { }; const struct address_space_operations hfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index a91b9b5e92a8..446a816aa8e1 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -156,7 +156,7 @@ static int hfsplus_writepages(struct address_space *mapping, } const struct address_space_operations hfsplus_btree_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = 
hfsplus_writepage, @@ -167,7 +167,7 @@ const struct address_space_operations hfsplus_btree_aops = { }; const struct address_space_operations hfsplus_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = hfsplus_writepage, diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index cf68f5e76ddd..99493a23c5d0 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -245,7 +245,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } const struct address_space_operations hpfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hpfs_readpage, .writepage = hpfs_writepage, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 3950b3d610a0..27be2e8ba237 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -357,7 +357,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } const struct address_space_operations jfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = jfs_readpage, .readahead = jfs_readahead, diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 2295804d1893..1e41fba68dcf 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -442,7 +442,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block) } static const struct address_space_operations minix_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = minix_readpage, .writepage = minix_writepage, diff --git a/fs/mpage.c b/fs/mpage.c index 87f5cfef6caa..571862da9f56 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -504,7 +504,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, if (!buffer_mapped(bh)) { /* * unmapped dirty buffers are created by - * __set_page_dirty_buffers -> mmapped data + * block_dirty_folio -> mmapped data */ if (buffer_dirty(bh)) goto confused; diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 72adca629bc9..78db33decd72 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -434,8 +434,8 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) static const struct address_space_operations def_mdt_aops = { - .set_page_dirty = __set_page_dirty_buffers, - .invalidate_folio = block_invalidate_folio, + .dirty_folio = block_dirty_folio, + .invalidate_folio = block_invalidate_folio, .writepage = nilfs_mdt_write_page, }; diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index dd71f6ac0272..d154dcfe06af 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -593,12 +593,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) iblock = initialized_size >> blocksize_bits; /* - * Be very careful. We have no exclusion from __set_page_dirty_buffers + * Be very careful. We have no exclusion from block_dirty_folio * here, and the (potentially unmapped) buffers may become dirty at * any time. If a buffer becomes dirty here after we've inspected it * then we just miss that fact, and the page stays dirty. * - * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; + * Buffers outside i_size may be dirtied by block_dirty_folio; * handle that here by just cleaning them. 
*/ @@ -653,7 +653,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) // Update initialized size in the attribute and // in the inode. // Again, for each page do: - // __set_page_dirty_buffers(); + // block_dirty_folio(); // put_page() // We don't need to wait on the writes. // Update iblock. @@ -1654,7 +1654,7 @@ const struct address_space_operations ntfs_normal_aops = { .readpage = ntfs_readpage, #ifdef NTFS_RW .writepage = ntfs_writepage, - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, #endif /* NTFS_RW */ .bmap = ntfs_bmap, .migratepage = buffer_migrate_page, @@ -1669,7 +1669,7 @@ const struct address_space_operations ntfs_compressed_aops = { .readpage = ntfs_readpage, #ifdef NTFS_RW .writepage = ntfs_writepage, - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, #endif /* NTFS_RW */ .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, @@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { set_buffer_dirty(bh); } while ((bh = bh->b_this_page) != head); spin_unlock(&mapping->private_lock); - __set_page_dirty_nobuffers(page); + block_dirty_folio(mapping, page_folio(page)); if (unlikely(buffers_to_free)) { do { bh = buffers_to_free->b_this_page; diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index a87ab3ad3cd3..9eab11e3b034 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -1950,7 +1950,7 @@ const struct address_space_operations ntfs_aops = { .write_end = ntfs_write_end, .direct_IO = ntfs_direct_IO, .bmap = ntfs_bmap, - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, }; const struct address_space_operations ntfs_aops_cmpr = { diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index b274061e22a7..fc890ca2e17e 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2453,7 +2453,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } const struct address_space_operations ocfs2_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .readpage = ocfs2_readpage, .readahead = ocfs2_readahead, .writepage = ocfs2_writepage, diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 139d6a21dca1..3f297b541713 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -372,7 +372,7 @@ const struct inode_operations omfs_file_inops = { }; const struct address_space_operations omfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = omfs_readpage, .readahead = omfs_readahead, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index f7fa70b419d2..e4221fa85ea2 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3201,14 +3201,14 @@ out: return; } -static int reiserfs_set_page_dirty(struct page *page) +static bool reiserfs_dirty_folio(struct address_space *mapping, + struct folio *folio) { - struct inode *inode = page->mapping->host; - if (reiserfs_file_data_log(inode)) { - SetPageChecked(page); - return __set_page_dirty_nobuffers(page); + if (reiserfs_file_data_log(mapping->host)) { + folio_set_checked(folio); + return filemap_dirty_folio(mapping, folio); } - return __set_page_dirty_buffers(page); + return block_dirty_folio(mapping, folio); } /* @@ -3435,5 +3435,5 @@ const struct address_space_operations reiserfs_address_space_operations = { .write_end = reiserfs_write_end, .bmap = reiserfs_aop_bmap, .direct_IO = reiserfs_direct_IO, - .set_page_dirty 
= reiserfs_set_page_dirty, + .dirty_folio = reiserfs_dirty_folio, }; diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index d39984a1d4d3..409ab5e17803 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -495,7 +495,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations sysv_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = sysv_readpage, .writepage = sysv_writepage, diff --git a/fs/udf/file.c b/fs/udf/file.c index a91011a7bb88..0f6bf2504437 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -125,7 +125,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin } const struct address_space_operations udf_adinicb_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ab98c7aaf9f9..ca4fa710e562 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -235,7 +235,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations udf_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = udf_readpage, .readahead = udf_readahead, diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 2d005788c24d..d0dda01620f0 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -526,7 +526,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations ufs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = ufs_readpage, .writepage = ufs_writepage, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 9ee9d003d736..bcb4fe9b8575 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -397,7 +397,7 @@ __bread(struct block_device *bdev, sector_t block, unsigned size) return __bread_gfp(bdev, block, size, __GFP_MOVABLE); } -extern int __set_page_dirty_buffers(struct page *page); +bool block_dirty_folio(struct address_space *mapping, struct folio *folio); #else /* CONFIG_BLOCK */ diff --git a/mm/filemap.c b/mm/filemap.c index 9639b844dd31..bb4e91bf5492 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -72,7 +72,7 @@ * Lock ordering: * * ->i_mmap_rwsem (truncate_pagecache) - * ->private_lock (__free_pte->__set_page_dirty_buffers) + * ->private_lock (__free_pte->block_dirty_folio) * ->swap_lock (exclusive_swap_page, others) * ->i_pages lock * @@ -115,7 +115,7 @@ * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) - * ->private_lock (zap_pte_range->__set_page_dirty_buffers) + * ->private_lock (zap_pte_range->block_dirty_folio) * * ->i_mmap_rwsem * ->tasklist_lock (memory_failure, collect_procs_ao) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 27a87ae4502c..e890db239fae 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2530,7 +2530,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping, * This is also sometimes used by filesystems which use buffer_heads when * a single buffer is being dirtied: we want to set the folio dirty in * that case, but not all the buffers. 
This is a "bottom-up" dirtying, - * whereas __set_page_dirty_buffers() is a "top-down" dirtying. + * whereas block_dirty_folio() is a "top-down" dirtying. * * The caller must ensure this doesn't race with truncation. Most will * simply hold the folio lock, but e.g. zap_pte_range() calls with the diff --git a/mm/rmap.c b/mm/rmap.c index 6a1e8c7f6213..4f3391fa4ca9 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -31,8 +31,8 @@ * mm->page_table_lock or pte_lock * swap_lock (in swap_duplicate, swap_info_get) * mmlist_lock (in mmput, drain_mmlist and others) - * mapping->private_lock (in __set_page_dirty_buffers) - * lock_page_memcg move_lock (in __set_page_dirty_buffers) + * mapping->private_lock (in block_dirty_folio) + * folio_lock_memcg move_lock (in block_dirty_folio) * i_pages lock (widely used) * lruvec->lru_lock (in folio_lruvec_lock_irq) * inode->i_lock (in set_page_dirty's __mark_inode_dirty) -- cgit From b3992d1e2ebcd478e0614494a6abd95e902a029b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 11:25:12 -0500 Subject: fs: Remove aop flags parameter from block_write_begin() There are no more aop flags left, so remove the parameter. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- block/fops.c | 3 +-- fs/bfs/file.c | 3 +-- fs/buffer.c | 6 +++--- fs/ext2/inode.c | 3 +-- fs/minix/inode.c | 3 +-- fs/nilfs2/inode.c | 3 +-- fs/nilfs2/recovery.c | 2 +- fs/ntfs3/inode.c | 4 ++-- fs/omfs/file.c | 3 +-- fs/sysv/itree.c | 2 +- fs/udf/inode.c | 2 +- fs/ufs/inode.c | 3 +-- include/linux/buffer_head.h | 2 +- 13 files changed, 16 insertions(+), 23 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/block/fops.c b/block/fops.c index 9f2ecec406b0..b432756570c6 100644 --- a/block/fops.c +++ b/block/fops.c @@ -401,8 +401,7 @@ static int blkdev_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - return block_write_begin(mapping, pos, len, flags, pagep, - blkdev_get_block); + return block_write_begin(mapping, pos, len, pagep, blkdev_get_block); } static int blkdev_write_end(struct file *file, struct address_space *mapping, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 03139344568f..9408f45225cb 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -174,8 +174,7 @@ static int bfs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - bfs_get_block); + ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block); if (unlikely(ret)) bfs_write_failed(mapping, pos + len); diff --git a/fs/buffer.c b/fs/buffer.c index 2b5561ae5d0b..4ec6eb03c0eb 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2104,13 +2104,13 @@ static int __block_commit_write(struct inode *inode, struct page *page, * The filesystem needs to handle block truncation upon failure. 
*/ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, - unsigned flags, struct page **pagep, get_block_t *get_block) + struct page **pagep, get_block_t *get_block) { pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int status; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index, 0); if (!page) return -ENOMEM; @@ -2460,7 +2460,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping, (*bytes)++; } - return block_write_begin(mapping, pos, len, flags, pagep, get_block); + return block_write_begin(mapping, pos, len, pagep, get_block); } EXPORT_SYMBOL(cont_write_begin); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 52377a0ee735..97192932ea56 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -892,8 +892,7 @@ ext2_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - ext2_get_block); + ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block); if (ret < 0) ext2_write_failed(mapping, pos + len); return ret; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index f1a6610e4ee6..5e8d7ba661cf 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -428,8 +428,7 @@ static int minix_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - minix_get_block); + ret = block_write_begin(mapping, pos, len, pagep, minix_get_block); if (unlikely(ret)) minix_write_failed(mapping, pos + len); diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 6045cea21f52..be09a0d10f04 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -258,8 +258,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping, if (unlikely(err)) return err; - err = block_write_begin(mapping, pos, len, flags, pagep, - nilfs_get_block); + err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block); if (unlikely(err)) { nilfs_write_failed(mapping, pos + len); nilfs_transaction_abort(inode->i_sb); diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 9e2ed76c0f25..0955b657938f 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -511,7 +511,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, pos = rb->blkoff << inode->i_blkbits; err = block_write_begin(inode->i_mapping, pos, blocksize, - 0, &page, nilfs_get_block); + &page, nilfs_get_block); if (unlikely(err)) { loff_t isize = inode->i_size; diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 9eab11e3b034..3914138fd8ba 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -894,7 +894,7 @@ static int ntfs_write_begin(struct file *file, struct address_space *mapping, goto out; } - err = block_write_begin(mapping, pos, len, flags, pagep, + err = block_write_begin(mapping, pos, len, pagep, ntfs_get_block_write_begin); out: @@ -975,7 +975,7 @@ int reset_log_file(struct inode *inode) len = pos + PAGE_SIZE > log_size ? 
(log_size - pos) : PAGE_SIZE; - err = block_write_begin(mapping, pos, len, 0, &page, + err = block_write_begin(mapping, pos, len, &page, ntfs_get_block_write_begin); if (err) goto out; diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 3f297b541713..349b96d89c44 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -321,8 +321,7 @@ static int omfs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - omfs_get_block); + ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block); if (unlikely(ret)) omfs_write_failed(mapping, pos + len); diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 409ab5e17803..96b7fd4facf3 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -482,7 +482,7 @@ static int sysv_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, get_block); + ret = block_write_begin(mapping, pos, len, pagep, get_block); if (unlikely(ret)) sysv_write_failed(mapping, pos + len); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ca4fa710e562..88a95886ce8a 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -209,7 +209,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block); + ret = block_write_begin(mapping, pos, len, pagep, udf_get_block); if (unlikely(ret)) udf_write_failed(mapping, pos + len); return ret; diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index d0dda01620f0..bd0e0c66f93d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -500,8 +500,7 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - ufs_getfrag_block); + ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block); if (unlikely(ret)) ufs_write_failed(mapping, pos + len); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index bcb4fe9b8575..63e49dfa7738 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -226,7 +226,7 @@ int __block_write_full_page(struct inode *inode, struct page *page, int block_read_full_page(struct page*, get_block_t*); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, - unsigned flags, struct page **pagep, get_block_t *get_block); + struct page **pagep, get_block_t *get_block); int __block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block); int block_write_end(struct file *, struct address_space *, -- cgit From be3bbbc588118bdc10e21fdd7bfa6ee6b8c2555d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 11:25:12 -0500 Subject: fs: Remove aop flags parameter from cont_write_begin() There are no more aop flags left, so remove the parameter. 
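As an illustrative sketch only (myfs_get_block, MYFS_I and myfs_write_failed are hypothetical stand-ins, not part of this series), a typical caller after this change looks as follows; the ->write_begin method's own flags argument still exists at this point in the series and is simply no longer forwarded:

/* Hypothetical caller; mirrors the adfs/affs/hfs conversions below. */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	/* The aop flags are no longer passed down to cont_write_begin(). */
	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
			       myfs_get_block,
			       &MYFS_I(mapping->host)->mmu_private);
	if (unlikely(ret))
		myfs_write_failed(mapping, pos + len);

	return ret;
}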
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- fs/adfs/inode.c | 2 +- fs/affs/file.c | 2 +- fs/buffer.c | 2 +- fs/exfat/inode.c | 2 +- fs/fat/inode.c | 2 +- fs/hfs/inode.c | 2 +- fs/hfsplus/inode.c | 2 +- fs/hpfs/file.c | 2 +- include/linux/buffer_head.h | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 561bc748c04a..b6912496bb19 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -58,7 +58,7 @@ static int adfs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, adfs_get_block, &ADFS_I(mapping->host)->mmu_private); if (unlikely(ret)) diff --git a/fs/affs/file.c b/fs/affs/file.c index b3f81d84ff4c..704911d6aeba 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -420,7 +420,7 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, affs_get_block, &AFFS_I(mapping->host)->mmu_private); if (unlikely(ret)) diff --git a/fs/buffer.c b/fs/buffer.c index 4ec6eb03c0eb..fb97646d1977 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2441,7 +2441,7 @@ out: * We may have to extend the file. */ int cont_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata, get_block_t *get_block, loff_t *bytes) { diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index fc0ea1684880..8ed3c4b700cd 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -395,7 +395,7 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, exfat_get_block, &EXFAT_I(mapping->host)->i_size_ondisk); diff --git a/fs/fat/inode.c b/fs/fat/inode.c index bf6051bdf1d1..9b34ccef2501 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -232,7 +232,7 @@ static int fat_write_begin(struct file *file, struct address_space *mapping, int err; *pagep = NULL; - err = cont_write_begin(file, mapping, pos, len, flags, + err = cont_write_begin(file, mapping, pos, len, pagep, fsdata, fat_get_block, &MSDOS_I(mapping->host)->mmu_private); if (err < 0) diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 55f45e9b4930..396735dd3407 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -56,7 +56,7 @@ static int hfs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hfs_get_block, &HFS_I(mapping->host)->phys_size); if (unlikely(ret)) diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 446a816aa8e1..435b6202532a 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -50,7 +50,7 @@ static int hfsplus_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hfsplus_get_block, &HFSPLUS_I(mapping->host)->phys_size); if (unlikely(ret)) diff --git a/fs/hpfs/file.c 
b/fs/hpfs/file.c index 99493a23c5d0..8740b4ea0b52 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -200,7 +200,7 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hpfs_get_block, &hpfs_i(mapping->host)->mmu_private); if (unlikely(ret)) diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 63e49dfa7738..127b60fad77e 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -238,7 +238,7 @@ int generic_write_end(struct file *, struct address_space *, void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); void clean_page_buffers(struct page *page); int cont_write_begin(struct file *, struct address_space *, loff_t, - unsigned, unsigned, struct page **, void **, + unsigned, struct page **, void **, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t size); int block_commit_write(struct page *page, unsigned from, unsigned to); -- cgit From 8371f30cf774a20fd627a0f7b1ecf00e8257f3bc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 11:54:56 -0500 Subject: fs: Remove aop flags parameter from nobh_write_begin() There are no more aop flags left, so remove the parameter. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- fs/buffer.c | 3 +-- fs/ext2/inode.c | 2 +- fs/jfs/inode.c | 3 +-- include/linux/buffer_head.h | 2 +- 4 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/fs/buffer.c b/fs/buffer.c index 01630218c75f..02b50e3e4fbb 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2568,8 +2568,7 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head) * On exit the page is fully uptodate in the areas outside (from,to) * The filesystem needs to handle block truncation upon failure. 
*/ -int nobh_write_begin(struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, +int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata, get_block_t *get_block) { diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 97192932ea56..bfa69c52ce2c 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -917,7 +917,7 @@ ext2_nobh_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, + ret = nobh_write_begin(mapping, pos, len, pagep, fsdata, ext2_get_block); if (ret < 0) ext2_write_failed(mapping, pos + len); diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index d1943a7b4b04..e16f77b4e84c 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -319,8 +319,7 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, - jfs_get_block); + ret = nobh_write_begin(mapping, pos, len, pagep, fsdata, jfs_get_block); if (unlikely(ret)) jfs_write_failed(mapping, pos + len); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 127b60fad77e..6e5a64005fef 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -258,7 +258,7 @@ static inline vm_fault_t block_page_mkwrite_return(int err) } sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); int block_truncate_page(struct address_space *, loff_t, get_block_t *); -int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, +int nobh_write_begin(struct address_space *, loff_t, unsigned len, struct page **, void **, get_block_t*); int nobh_write_end(struct file *, struct address_space *, loff_t, unsigned, unsigned, -- cgit From 520f301c54faa3484e820b80d4505d48ee587163 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 17 Jan 2022 14:35:22 -0500 Subject: fs: Convert is_dirty_writeback() to take a folio Pass a folio instead of a page to aops->is_dirty_writeback(). Convert both implementations and the caller. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- Documentation/filesystems/vfs.rst | 10 +++++----- fs/buffer.c | 16 ++++++++-------- fs/nfs/file.c | 21 +++++++++------------ include/linux/buffer_head.h | 2 +- include/linux/fs.h | 2 +- mm/vmscan.c | 2 +- 6 files changed, 25 insertions(+), 28 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 30f303180a7d..469882f72fc1 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -747,7 +747,7 @@ cache in your filesystem. The following members are defined: bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); - void (*is_dirty_writeback) (struct page *, bool *, bool *); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); int (*error_remove_page) (struct mapping *mapping, struct page *page); int (*swap_activate)(struct file *); int (*swap_deactivate)(struct file *); @@ -932,14 +932,14 @@ cache in your filesystem. The following members are defined: without needing I/O to bring the whole page up to date. ``is_dirty_writeback`` - Called by the VM when attempting to reclaim a page. The VM uses + Called by the VM when attempting to reclaim a folio. The VM uses dirty and writeback information to determine if it needs to stall to allow flushers a chance to complete some IO. 
- Ordinarily it can use PageDirty and PageWriteback but some - filesystems have more complex state (unstable pages in NFS + Ordinarily it can use folio_test_dirty and folio_test_writeback but + some filesystems have more complex state (unstable folios in NFS prevent reclaim) or do not set those flags due to locking problems. This callback allows a filesystem to indicate to the - VM if a page should be treated as dirty or writeback for the + VM if a folio should be treated as dirty or writeback for the purposes of stalling. ``error_remove_page`` diff --git a/fs/buffer.c b/fs/buffer.c index d538495a0553..fb4df259c92d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -79,26 +79,26 @@ void unlock_buffer(struct buffer_head *bh) EXPORT_SYMBOL(unlock_buffer); /* - * Returns if the page has dirty or writeback buffers. If all the buffers - * are unlocked and clean then the PageDirty information is stale. If - * any of the pages are locked, it is assumed they are locked for IO. + * Returns if the folio has dirty or writeback buffers. If all the buffers + * are unlocked and clean then the folio_test_dirty information is stale. If + * any of the buffers are locked, it is assumed they are locked for IO. */ -void buffer_check_dirty_writeback(struct page *page, +void buffer_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback) { struct buffer_head *head, *bh; *dirty = false; *writeback = false; - BUG_ON(!PageLocked(page)); + BUG_ON(!folio_test_locked(folio)); - if (!page_has_buffers(page)) + head = folio_buffers(folio); + if (!head) return; - if (PageWriteback(page)) + if (folio_test_writeback(folio)) *writeback = true; - head = page_buffers(page); bh = head; do { if (buffer_locked(bh)) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 314d2d7ba84a..f05c4b18b681 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -430,19 +430,16 @@ static int nfs_release_page(struct page *page, gfp_t gfp) return nfs_fscache_release_page(page, gfp); } -static void nfs_check_dirty_writeback(struct page *page, +static void nfs_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback) { struct nfs_inode *nfsi; - struct address_space *mapping = page_file_mapping(page); - - if (!mapping || PageSwapCache(page)) - return; + struct address_space *mapping = folio->mapping; /* - * Check if an unstable page is currently being committed and - * if so, have the VM treat it as if the page is under writeback - * so it will not block due to pages that will shortly be freeable. + * Check if an unstable folio is currently being committed and + * if so, have the VM treat it as if the folio is under writeback + * so it will not block due to folios that will shortly be freeable. 
*/ nfsi = NFS_I(mapping->host); if (atomic_read(&nfsi->commit_info.rpcs_out)) { @@ -451,11 +448,11 @@ static void nfs_check_dirty_writeback(struct page *page, } /* - * If PagePrivate() is set, then the page is not freeable and as the - * inode is not being committed, it's not going to be cleaned in the - * near future so treat it as dirty + * If the private flag is set, then the folio is not freeable + * and as the inode is not being committed, it's not going to + * be cleaned in the near future so treat it as dirty */ - if (PagePrivate(page)) + if (folio_test_private(folio)) *dirty = true; } diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 6e5a64005fef..805c4e12700a 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -146,7 +146,7 @@ BUFFER_FNS(Defer_Completion, defer_completion) #define page_has_buffers(page) PagePrivate(page) #define folio_buffers(folio) folio_get_private(folio) -void buffer_check_dirty_writeback(struct page *page, +void buffer_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback); /* diff --git a/include/linux/fs.h b/include/linux/fs.h index b35ce086a7a1..2be852661a29 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -369,7 +369,7 @@ struct address_space_operations { int (*launder_folio)(struct folio *); bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); - void (*is_dirty_writeback) (struct page *, bool *, bool *); + void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb); int (*error_remove_page)(struct address_space *, struct page *); /* swapfile support */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 1678802e03e7..27851232e00c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1451,7 +1451,7 @@ static void folio_check_dirty_writeback(struct folio *folio, mapping = folio_mapping(folio); if (mapping && mapping->a_ops->is_dirty_writeback) - mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback); + mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); } static struct page *alloc_demote_page(struct page *page, unsigned long node) -- cgit From 2c69e2057962b6bd76d72446453862eb59325b49 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 10:40:40 -0400 Subject: fs: Convert block_read_full_page() to block_read_full_folio() This function is NOT converted to handle large folios, so include an assert that the filesystem isn't passing one in. Otherwise, use the folio functions instead of the page functions, where they exist. Convert all filesystems which use block_read_full_page(). 
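The per-filesystem conversion is mechanical; a minimal sketch, with myfs_get_block and the function names as hypothetical placeholders (the real conversions follow below):

/* Hypothetical ->read_folio implementation wrapping the new helper. */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	/*
	 * Only single-page folios are handled here; a large folio would
	 * trip the VM_BUG_ON_FOLIO() assert in block_read_full_folio().
	 */
	return block_read_full_folio(folio, myfs_get_block);
}

and the address_space_operations entry changes from .readpage = myfs_readpage to .read_folio = myfs_read_folio.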
Signed-off-by: Matthew Wilcox (Oracle) --- block/fops.c | 6 ++--- fs/adfs/inode.c | 6 ++--- fs/affs/file.c | 6 ++--- fs/befs/linuxvfs.c | 10 ++++----- fs/bfs/file.c | 6 ++--- fs/buffer.c | 53 ++++++++++++++++++++++++--------------------- fs/efs/inode.c | 8 ++++--- fs/ext4/readpage.c | 4 ++-- fs/freevxfs/vxfs_subr.c | 17 +++++++-------- fs/hfs/inode.c | 8 +++---- fs/hfsplus/inode.c | 8 +++---- fs/iomap/buffered-io.c | 2 +- fs/minix/inode.c | 6 ++--- fs/mpage.c | 10 ++++----- fs/ntfs/compress.c | 4 ++-- fs/ocfs2/aops.c | 6 ++--- fs/ocfs2/refcounttree.c | 6 +++-- fs/omfs/file.c | 6 ++--- fs/qnx4/inode.c | 7 +++--- fs/reiserfs/file.c | 2 +- fs/reiserfs/inode.c | 12 +++++----- fs/sysv/itree.c | 6 ++--- fs/ufs/inode.c | 8 +++---- include/linux/buffer_head.h | 2 +- 24 files changed, 108 insertions(+), 101 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/block/fops.c b/block/fops.c index 712affe56e29..06feb41d798b 100644 --- a/block/fops.c +++ b/block/fops.c @@ -387,9 +387,9 @@ static int blkdev_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, blkdev_get_block, wbc); } -static int blkdev_readpage(struct file * file, struct page * page) +static int blkdev_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, blkdev_get_block); + return block_read_full_folio(folio, blkdev_get_block); } static void blkdev_readahead(struct readahead_control *rac) @@ -425,7 +425,7 @@ static int blkdev_writepages(struct address_space *mapping, const struct address_space_operations def_blk_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = blkdev_readpage, + .read_folio = blkdev_read_folio, .readahead = blkdev_readahead, .writepage = blkdev_writepage, .write_begin = blkdev_write_begin, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index f7959b1a2d52..ee22278b0cfc 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -38,9 +38,9 @@ static int adfs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, adfs_get_block, wbc); } -static int adfs_readpage(struct file *file, struct page *page) +static int adfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, adfs_get_block); + return block_read_full_folio(folio, adfs_get_block); } static void adfs_write_failed(struct address_space *mapping, loff_t to) @@ -75,7 +75,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations adfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = adfs_readpage, + .read_folio = adfs_read_folio, .writepage = adfs_writepage, .write_begin = adfs_write_begin, .write_end = generic_write_end, diff --git a/fs/affs/file.c b/fs/affs/file.c index b952f65c3f06..5da562cc7fb7 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -375,9 +375,9 @@ static int affs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, affs_get_block, wbc); } -static int affs_readpage(struct file *file, struct page *page) +static int affs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, affs_get_block); + return block_read_full_folio(folio, affs_get_block); } static void affs_write_failed(struct address_space *mapping, loff_t to) @@ -455,7 +455,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations 
affs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = affs_readpage, + .read_folio = affs_read_folio, .writepage = affs_writepage, .write_begin = affs_write_begin, .write_end = affs_write_end, diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index b4b3567ac655..25350dd22cda 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -40,7 +40,7 @@ MODULE_LICENSE("GPL"); static int befs_readdir(struct file *, struct dir_context *); static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); -static int befs_readpage(struct file *file, struct page *page); +static int befs_read_folio(struct file *file, struct folio *folio); static sector_t befs_bmap(struct address_space *mapping, sector_t block); static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int); @@ -87,7 +87,7 @@ static const struct inode_operations befs_dir_inode_operations = { }; static const struct address_space_operations befs_aops = { - .readpage = befs_readpage, + .read_folio = befs_read_folio, .bmap = befs_bmap, }; @@ -102,16 +102,16 @@ static const struct export_operations befs_export_operations = { }; /* - * Called by generic_file_read() to read a page of data + * Called by generic_file_read() to read a folio of data * * In turn, simply calls a generic block read function and * passes it the address of befs_get_block, for mapping file * positions to disk blocks. */ static int -befs_readpage(struct file *file, struct page *page) +befs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, befs_get_block); + return block_read_full_folio(folio, befs_get_block); } static sector_t diff --git a/fs/bfs/file.c b/fs/bfs/file.c index dc97c9b8f23b..57ae5ee6deec 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -155,9 +155,9 @@ static int bfs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, bfs_get_block, wbc); } -static int bfs_readpage(struct file *file, struct page *page) +static int bfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, bfs_get_block); + return block_read_full_folio(folio, bfs_get_block); } static void bfs_write_failed(struct address_space *mapping, loff_t to) @@ -189,7 +189,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations bfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = bfs_readpage, + .read_folio = bfs_read_folio, .writepage = bfs_writepage, .write_begin = bfs_write_begin, .write_end = generic_write_end, diff --git a/fs/buffer.c b/fs/buffer.c index 225d03cd622d..ec0c52c8848e 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -314,7 +314,7 @@ static void decrypt_bh(struct work_struct *work) } /* - * I/O completion handler for block_read_full_page() - pages + * I/O completion handler for block_read_full_folio() - pages * which come unlocked at the end of I/O. */ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) @@ -1060,8 +1060,8 @@ __getblk_slow(struct block_device *bdev, sector_t block, * Also. When blockdev buffers are explicitly read with bread(), they * individually become uptodate. But their backing page remains not * uptodate - even if all of its buffers are uptodate. A subsequent - * block_read_full_page() against that page will discover all the uptodate - * buffers, will set the page uptodate and will perform no I/O. 
+ * block_read_full_folio() against that folio will discover all the uptodate + * buffers, will set the folio uptodate and will perform no I/O. */ /** @@ -2088,7 +2088,7 @@ static int __block_commit_write(struct inode *inode, struct page *page, /* * If this is a partial write which happened to make all buffers - * uptodate then we can optimize away a bogus readpage() for + * uptodate then we can optimize away a bogus read_folio() for * the next read(). Here we 'discover' whether the page went * uptodate as a result of this (potentially partial) write. */ @@ -2137,12 +2137,12 @@ int block_write_end(struct file *file, struct address_space *mapping, if (unlikely(copied < len)) { /* - * The buffers that were written will now be uptodate, so we - * don't have to worry about a readpage reading them and - * overwriting a partial write. However if we have encountered - * a short write and only partially written into a buffer, it - * will not be marked uptodate, so a readpage might come in and - * destroy our partial write. + * The buffers that were written will now be uptodate, so + * we don't have to worry about a read_folio reading them + * and overwriting a partial write. However if we have + * encountered a short write and only partially written + * into a buffer, it will not be marked uptodate, so a + * read_folio might come in and destroy our partial write. * * Do the simplest thing, and just treat any short write to a * non uptodate page as a zero-length write, and force the @@ -2245,26 +2245,28 @@ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) EXPORT_SYMBOL(block_is_partially_uptodate); /* - * Generic "read page" function for block devices that have the normal + * Generic "read_folio" function for block devices that have the normal * get_block functionality. This is most of the block device filesystems. - * Reads the page asynchronously --- the unlock_buffer() and + * Reads the folio asynchronously --- the unlock_buffer() and * set/clear_buffer_uptodate() functions propagate buffer state into the - * page struct once IO has completed. + * folio once IO has completed. 
*/ -int block_read_full_page(struct page *page, get_block_t *get_block) +int block_read_full_folio(struct folio *folio, get_block_t *get_block) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; sector_t iblock, lblock; struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; unsigned int blocksize, bbits; int nr, i; int fully_mapped = 1; - head = create_page_buffers(page, inode, 0); + VM_BUG_ON_FOLIO(folio_test_large(folio), folio); + + head = create_page_buffers(&folio->page, inode, 0); blocksize = head->b_size; bbits = block_size_bits(blocksize); - iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); + iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits); lblock = (i_size_read(inode)+blocksize-1) >> bbits; bh = head; nr = 0; @@ -2282,10 +2284,11 @@ int block_read_full_page(struct page *page, get_block_t *get_block) WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) - SetPageError(page); + folio_set_error(folio); } if (!buffer_mapped(bh)) { - zero_user(page, i * blocksize, blocksize); + folio_zero_range(folio, i * blocksize, + blocksize); if (!err) set_buffer_uptodate(bh); continue; @@ -2301,16 +2304,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block) } while (i++, iblock++, (bh = bh->b_this_page) != head); if (fully_mapped) - SetPageMappedToDisk(page); + folio_set_mappedtodisk(folio); if (!nr) { /* - * All buffers are uptodate - we can set the page uptodate + * All buffers are uptodate - we can set the folio uptodate * as well. But not if get_block() returned an error. */ - if (!PageError(page)) - SetPageUptodate(page); - unlock_page(page); + if (!folio_test_error(folio)) + folio_mark_uptodate(folio); + folio_unlock(folio); return 0; } @@ -2335,7 +2338,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) } return 0; } -EXPORT_SYMBOL(block_read_full_page); +EXPORT_SYMBOL(block_read_full_folio); /* utility function for filesystems that need to do work on expanding * truncates. Uses filesystem pagecache writes to allow the filesystem to diff --git a/fs/efs/inode.c b/fs/efs/inode.c index 89e73a6f0d36..3ba94bb005a6 100644 --- a/fs/efs/inode.c +++ b/fs/efs/inode.c @@ -14,16 +14,18 @@ #include "efs.h" #include -static int efs_readpage(struct file *file, struct page *page) +static int efs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,efs_get_block); + return block_read_full_folio(folio, efs_get_block); } + static sector_t _efs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,efs_get_block); } + static const struct address_space_operations efs_aops = { - .readpage = efs_readpage, + .read_folio = efs_read_folio, .bmap = _efs_bmap }; diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index af491e170c4a..e02a5f14e021 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -163,7 +163,7 @@ static bool bio_post_read_required(struct bio *bio) * * The mpage code never puts partial pages into a BIO (except for end-of-file). * If a page does not map to a contiguous run of blocks then it simply falls - * back to block_read_full_page(). + * back to block_read_full_folio(). * * Why is this? 
If a page's completion depends on a number of different BIOs * which can complete in any order (or at the same time) then determining the @@ -394,7 +394,7 @@ int ext4_mpage_readpages(struct inode *inode, bio = NULL; } if (!PageUptodate(page)) - block_read_full_page(page, ext4_get_block); + block_read_full_folio(page_folio(page), ext4_get_block); else unlock_page(page); next_page: diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c index e806694d4145..6143ebab940d 100644 --- a/fs/freevxfs/vxfs_subr.c +++ b/fs/freevxfs/vxfs_subr.c @@ -38,11 +38,11 @@ #include "vxfs_extern.h" -static int vxfs_readpage(struct file *, struct page *); +static int vxfs_read_folio(struct file *, struct folio *); static sector_t vxfs_bmap(struct address_space *, sector_t); const struct address_space_operations vxfs_aops = { - .readpage = vxfs_readpage, + .read_folio = vxfs_read_folio, .bmap = vxfs_bmap, }; @@ -141,24 +141,23 @@ vxfs_getblk(struct inode *ip, sector_t iblock, } /** - * vxfs_readpage - read one page synchronously into the pagecache + * vxfs_read_folio - read one page synchronously into the pagecache * @file: file context (unused) - * @page: page frame to fill in. + * @folio: folio to fill in. * * Description: - * The vxfs_readpage routine reads @page synchronously into the + * The vxfs_read_folio routine reads @folio synchronously into the * pagecache. * * Returns: * Zero on success, else a negative error code. * * Locking status: - * @page is locked and will be unlocked. + * @folio is locked and will be unlocked. */ -static int -vxfs_readpage(struct file *file, struct page *page) +static int vxfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, vxfs_getblk); + return block_read_full_folio(folio, vxfs_getblk); } /** diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 9a26b9510da0..ba3ff9cd7cfc 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -34,9 +34,9 @@ static int hfs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, hfs_get_block, wbc); } -static int hfs_readpage(struct file *file, struct page *page) +static int hfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, hfs_get_block); + return block_read_full_folio(folio, hfs_get_block); } static void hfs_write_failed(struct address_space *mapping, loff_t to) @@ -160,7 +160,7 @@ static int hfs_writepages(struct address_space *mapping, const struct address_space_operations hfs_btree_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfs_readpage, + .read_folio = hfs_read_folio, .writepage = hfs_writepage, .write_begin = hfs_write_begin, .write_end = generic_write_end, @@ -171,7 +171,7 @@ const struct address_space_operations hfs_btree_aops = { const struct address_space_operations hfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfs_readpage, + .read_folio = hfs_read_folio, .writepage = hfs_writepage, .write_begin = hfs_write_begin, .write_end = generic_write_end, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 905ae3660315..982b34eefec7 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -23,9 +23,9 @@ #include "hfsplus_raw.h" #include "xattr.h" -static int hfsplus_readpage(struct file *file, struct page *page) +static int hfsplus_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, hfsplus_get_block); + return block_read_full_folio(folio, hfsplus_get_block); 
} static int hfsplus_writepage(struct page *page, struct writeback_control *wbc) @@ -157,7 +157,7 @@ static int hfsplus_writepages(struct address_space *mapping, const struct address_space_operations hfsplus_btree_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfsplus_readpage, + .read_folio = hfsplus_read_folio, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, .write_end = generic_write_end, @@ -168,7 +168,7 @@ const struct address_space_operations hfsplus_btree_aops = { const struct address_space_operations hfsplus_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfsplus_readpage, + .read_folio = hfsplus_read_folio, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, .write_end = generic_write_end, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 72f63d719c7c..75eb0c27a0e8 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -349,7 +349,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) } /* - * Just like mpage_readahead and block_read_full_page, we always + * Just like mpage_readahead and block_read_full_folio, we always * return 0 and just set the folio error flag on errors. This * should be cleaned up throughout the stack eventually. */ diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 3add78bccedc..da8bdd1712a7 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -402,9 +402,9 @@ static int minix_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, minix_get_block, wbc); } -static int minix_readpage(struct file *file, struct page *page) +static int minix_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,minix_get_block); + return block_read_full_folio(folio, minix_get_block); } int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len) @@ -443,7 +443,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations minix_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = minix_readpage, + .read_folio = minix_read_folio, .writepage = minix_writepage, .write_begin = minix_write_begin, .write_end = generic_write_end, diff --git a/fs/mpage.c b/fs/mpage.c index 1fe56f8c495f..a04439b84ae2 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -36,7 +36,7 @@ * * The mpage code never puts partial pages into a BIO (except for end-of-file). * If a page does not map to a contiguous run of blocks then it simply falls - * back to block_read_full_page(). + * back to block_read_full_folio(). * * Why is this? If a page's completion depends on a number of different BIOs * which can complete in any order (or at the same time) then determining the @@ -68,7 +68,7 @@ static struct bio *mpage_bio_submit(struct bio *bio) /* * support function for mpage_readahead. The fs supplied get_block might * return an up to date buffer. This is used to map that buffer into - * the page, which allows readpage to avoid triggering a duplicate call + * the page, which allows read_folio to avoid triggering a duplicate call * to get_block. 
* * The idea is to avoid adding buffers to pages that don't already have @@ -296,7 +296,7 @@ confused: if (args->bio) args->bio = mpage_bio_submit(args->bio); if (!PageUptodate(page)) - block_read_full_page(page, args->get_block); + block_read_full_folio(page_folio(page), args->get_block); else unlock_page(page); goto out; @@ -425,7 +425,7 @@ static void clean_buffers(struct page *page, unsigned first_unmapped) /* * we cannot drop the bh if the page is not uptodate or a concurrent - * readpage would fail to serialize with the bh and it would read from + * read_folio would fail to serialize with the bh and it would read from * disk before we reach the platter. */ if (buffer_heads_over_limit && PageUptodate(page)) @@ -510,7 +510,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, /* * Page has buffers, but they are all unmapped. The page was * created by pagein or read over a hole which was handled by - * block_read_full_page(). If this address_space is also + * block_read_full_folio(). If this address_space is also * using mpage_readahead then this can rarely happen. */ goto confused; diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index d2f9d6a0ee32..a60f543e7557 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c @@ -780,12 +780,12 @@ lock_retry_remap: /* Uncompressed cb, copy it to the destination pages. */ /* * TODO: As a big optimization, we could detect this case - * before we read all the pages and use block_read_full_page() + * before we read all the pages and use block_read_full_folio() * on all full pages instead (we still have to treat partial * pages especially but at least we are getting rid of the * synchronous io for the majority of pages. * Or if we choose not to do the read-ahead/-behind stuff, we - * could just return block_read_full_page(pages[xpage]) as long + * could just return block_read_full_folio(pages[xpage]) as long * as PAGE_SIZE <= cb_size. */ if (cb_max_ofs) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 7cffe9dcad17..7bf4b6fd93bf 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -309,7 +309,7 @@ static int ocfs2_readpage(struct file *file, struct page *page) /* * i_size might have just been updated as we grabed the meta lock. We * might now be discovering a truncate that hit on another node. - * block_read_full_page->get_block freaks out if it is asked to read + * block_read_full_folio->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. Callers * (generic_file_read, vm_ops->fault) are clever enough to check i_size * and notice that the page they just read isn't needed. @@ -326,7 +326,7 @@ static int ocfs2_readpage(struct file *file, struct page *page) if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) ret = ocfs2_readpage_inline(inode, page); else - ret = block_read_full_page(page, ocfs2_get_block); + ret = block_read_full_folio(page_folio(page), ocfs2_get_block); unlock = 0; out_alloc: @@ -1897,7 +1897,7 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping, /* * Take alloc sem here to prevent concurrent lookups. That way * the mapping, zeroing and tree manipulation within - * ocfs2_write() will be safe against ->readpage(). This + * ocfs2_write() will be safe against ->read_folio(). This * should also serve to lock out allocation from a shared * writeable region. 
*/ diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 7f6355cbb587..e04358a46b68 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2961,12 +2961,14 @@ retry: } if (!PageUptodate(page)) { - ret = block_read_full_page(page, ocfs2_get_block); + struct folio *folio = page_folio(page); + + ret = block_read_full_folio(folio, ocfs2_get_block); if (ret) { mlog_errno(ret); goto unlock; } - lock_page(page); + folio_lock(folio); } if (page_has_buffers(page)) { diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 980b0a72c172..fa7fe2393ff6 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -284,9 +284,9 @@ out: return ret; } -static int omfs_readpage(struct file *file, struct page *page) +static int omfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, omfs_get_block); + return block_read_full_folio(folio, omfs_get_block); } static void omfs_readahead(struct readahead_control *rac) @@ -373,7 +373,7 @@ const struct inode_operations omfs_file_inops = { const struct address_space_operations omfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = omfs_readpage, + .read_folio = omfs_read_folio, .readahead = omfs_readahead, .writepage = omfs_writepage, .writepages = omfs_writepages, diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index a635bb6615e9..391ea402920d 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -245,17 +245,18 @@ static void qnx4_kill_sb(struct super_block *sb) } } -static int qnx4_readpage(struct file *file, struct page *page) +static int qnx4_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,qnx4_get_block); + return block_read_full_folio(folio, qnx4_get_block); } static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,qnx4_get_block); } + static const struct address_space_operations qnx4_aops = { - .readpage = qnx4_readpage, + .read_folio = qnx4_read_folio, .bmap = qnx4_bmap }; diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 203a47232707..6e228bfbe7ef 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -227,7 +227,7 @@ drop_write_lock: } /* * If this is a partial write which happened to make all buffers - * uptodate then we can optimize away a bogus readpage() for + * uptodate then we can optimize away a bogus read_folio() for * the next read(). Here we 'discover' whether the page went * uptodate as a result of this (potentially partial) write. */ diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 46ba4892030a..33a9555f77b9 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -167,10 +167,10 @@ inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key, * cutting the code is fine, since it really isn't in use yet and is easy * to add back in. But, Vladimir has a really good idea here. Think * about what happens for reading a file. For each page, - * The VFS layer calls reiserfs_readpage, who searches the tree to find + * The VFS layer calls reiserfs_read_folio, who searches the tree to find * an indirect item. This indirect item has X number of pointers, where * X is a big number if we've done the block allocation right. But, - * we only use one or two of these pointers during each call to readpage, + * we only use one or two of these pointers during each call to read_folio, * needlessly researching again later on. * * The size of the cache could be dynamic based on the size of the file. 
@@ -966,7 +966,7 @@ research: * it is important the set_buffer_uptodate is done * after the direct2indirect. The buffer might * contain valid data newer than the data on disk - * (read by readpage, changed, and then sent here by + * (read by read_folio, changed, and then sent here by * writepage). direct2indirect needs to know if unbh * was already up to date, so it can decide if the * data in unbh needs to be replaced with data from @@ -2733,9 +2733,9 @@ fail: goto done; } -static int reiserfs_readpage(struct file *f, struct page *page) +static int reiserfs_read_folio(struct file *f, struct folio *folio) { - return block_read_full_page(page, reiserfs_get_block); + return block_read_full_folio(folio, reiserfs_get_block); } static int reiserfs_writepage(struct page *page, struct writeback_control *wbc) @@ -3421,7 +3421,7 @@ out: const struct address_space_operations reiserfs_address_space_operations = { .writepage = reiserfs_writepage, - .readpage = reiserfs_readpage, + .read_folio = reiserfs_read_folio, .readahead = reiserfs_readahead, .releasepage = reiserfs_releasepage, .invalidate_folio = reiserfs_invalidate_folio, diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 96ad24fe0ffb..d4ec9bb97de9 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -456,9 +456,9 @@ static int sysv_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page,get_block,wbc); } -static int sysv_readpage(struct file *file, struct page *page) +static int sysv_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,get_block); + return block_read_full_folio(folio, get_block); } int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len) @@ -497,7 +497,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations sysv_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = sysv_readpage, + .read_folio = sysv_read_folio, .writepage = sysv_writepage, .write_begin = sysv_write_begin, .write_end = generic_write_end, diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 6c973b71cab2..a873de7dec1c 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -390,7 +390,7 @@ out: /** * ufs_getfrag_block() - `get_block_t' function, interface between UFS and - * readpage, writepage and so on + * read_folio, writepage and so on */ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) @@ -472,9 +472,9 @@ static int ufs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page,ufs_getfrag_block,wbc); } -static int ufs_readpage(struct file *file, struct page *page) +static int ufs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,ufs_getfrag_block); + return block_read_full_folio(folio, ufs_getfrag_block); } int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len) @@ -527,7 +527,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations ufs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = ufs_readpage, + .read_folio = ufs_read_folio, .writepage = ufs_writepage, .write_begin = ufs_write_begin, .write_end = ufs_write_end, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 805c4e12700a..31d82fd9abe8 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -223,7 +223,7 @@ int 
block_write_full_page(struct page *page, get_block_t *get_block, int __block_write_full_page(struct inode *inode, struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler); -int block_read_full_page(struct page*, get_block_t*); +int block_read_full_folio(struct folio *, get_block_t *); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, get_block_t *get_block); -- cgit From 68189fef88c7d02eb92e038be3d6428ebd0d2945 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 1 May 2022 01:08:08 -0400 Subject: fs: Change try_to_free_buffers() to take a folio All but two of the callers already have a folio; pass a folio into try_to_free_buffers(). This removes the last user of cancel_dirty_page() so remove that wrapper function too. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jeff Layton --- fs/buffer.c | 42 +++++++++++++++++++++--------------------- fs/ext4/inode.c | 2 +- fs/gfs2/aops.c | 2 +- fs/hfs/inode.c | 2 +- fs/hfsplus/inode.c | 2 +- fs/jbd2/commit.c | 2 +- fs/jbd2/transaction.c | 4 ++-- fs/mpage.c | 2 +- fs/ocfs2/aops.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/reiserfs/journal.c | 2 +- include/linux/buffer_head.h | 4 ++-- include/linux/pagemap.h | 4 ---- mm/filemap.c | 2 +- mm/migrate.c | 2 +- mm/vmscan.c | 2 +- 16 files changed, 37 insertions(+), 41 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/fs/buffer.c b/fs/buffer.c index 786ef5b98c80..701af0035802 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -955,7 +955,7 @@ grow_dev_page(struct block_device *bdev, sector_t block, size); goto done; } - if (!try_to_free_buffers(page)) + if (!try_to_free_buffers(page_folio(page))) goto failed; } @@ -3155,20 +3155,20 @@ int sync_dirty_buffer(struct buffer_head *bh) EXPORT_SYMBOL(sync_dirty_buffer); /* - * try_to_free_buffers() checks if all the buffers on this particular page + * try_to_free_buffers() checks if all the buffers on this particular folio * are unused, and releases them if so. * * Exclusion against try_to_free_buffers may be obtained by either - * locking the page or by holding its mapping's private_lock. + * locking the folio or by holding its mapping's private_lock. * - * If the page is dirty but all the buffers are clean then we need to - * be sure to mark the page clean as well. This is because the page + * If the folio is dirty but all the buffers are clean then we need to + * be sure to mark the folio clean as well. This is because the folio * may be against a block device, and a later reattachment of buffers - * to a dirty page will set *all* buffers dirty. Which would corrupt + * to a dirty folio will set *all* buffers dirty. Which would corrupt * filesystem data on the same device. * - * The same applies to regular filesystem pages: if all the buffers are - * clean then we set the page clean and proceed. To do that, we require + * The same applies to regular filesystem folios: if all the buffers are + * clean then we set the folio clean and proceed. To do that, we require * total exclusion from block_dirty_folio(). That is obtained with * private_lock. 
* @@ -3207,40 +3207,40 @@ failed: return 0; } -int try_to_free_buffers(struct page *page) +bool try_to_free_buffers(struct folio *folio) { - struct address_space * const mapping = page->mapping; + struct address_space * const mapping = folio->mapping; struct buffer_head *buffers_to_free = NULL; - int ret = 0; + bool ret = 0; - BUG_ON(!PageLocked(page)); - if (PageWriteback(page)) - return 0; + BUG_ON(!folio_test_locked(folio)); + if (folio_test_writeback(folio)) + return false; if (mapping == NULL) { /* can this still happen? */ - ret = drop_buffers(page, &buffers_to_free); + ret = drop_buffers(&folio->page, &buffers_to_free); goto out; } spin_lock(&mapping->private_lock); - ret = drop_buffers(page, &buffers_to_free); + ret = drop_buffers(&folio->page, &buffers_to_free); /* * If the filesystem writes its buffers by hand (eg ext3) - * then we can have clean buffers against a dirty page. We - * clean the page here; otherwise the VM will never notice + * then we can have clean buffers against a dirty folio. We + * clean the folio here; otherwise the VM will never notice * that the filesystem did any IO at all. * * Also, during truncate, discard_buffer will have marked all - * the page's buffers clean. We discover that here and clean - * the page also. + * the folio's buffers clean. We discover that here and clean + * the folio also. * * private_lock must be held over this entire operation in order * to synchronise against block_dirty_folio and prevent the * dirty bit from being lost. */ if (ret) - cancel_dirty_page(page); + folio_cancel_dirty(folio); spin_unlock(&mapping->private_lock); out: if (buffers_to_free) { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 943937cb5302..987ea77e672d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3255,7 +3255,7 @@ static bool ext4_release_folio(struct folio *folio, gfp_t wait) if (journal) return jbd2_journal_try_to_free_buffers(journal, folio); else - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); } static bool ext4_inode_datasync_dirty(struct inode *inode) diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 95a674d70c04..106e90a36583 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -757,7 +757,7 @@ bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask) } while (bh != head); gfs2_log_unlock(sdp); - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); cannot_release: gfs2_log_unlock(sdp); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 86fd50e5fccb..c4526f16355d 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -124,7 +124,7 @@ static bool hfs_release_folio(struct folio *folio, gfp_t mask) } while (--i && nidx < tree->node_count); spin_unlock(&tree->hash_lock); } - return res ? try_to_free_buffers(&folio->page) : false; + return res ? try_to_free_buffers(folio) : false; } static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index f723e0e91d51..aeab83ed1c9c 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -121,7 +121,7 @@ static bool hfsplus_release_folio(struct folio *folio, gfp_t mask) } while (--i && nidx < tree->node_count); spin_unlock(&tree->hash_lock); } - return res ? try_to_free_buffers(&folio->page) : false; + return res ? 
try_to_free_buffers(folio) : false; } static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter) diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 2f37108da0ec..eb315e81f1a6 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -82,7 +82,7 @@ static void release_buffer_page(struct buffer_head *bh) folio_get(folio); __brelse(bh); - try_to_free_buffers(&folio->page); + try_to_free_buffers(folio); folio_unlock(folio); folio_put(folio); return; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index ee33d277d51e..e49bb0938376 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2175,7 +2175,7 @@ bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio) goto busy; } while ((bh = bh->b_this_page) != head); - ret = try_to_free_buffers(&folio->page); + ret = try_to_free_buffers(folio); busy: return ret; } @@ -2482,7 +2482,7 @@ int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio, } while (bh != head); if (!partial_page) { - if (may_free && try_to_free_buffers(&folio->page)) + if (may_free && try_to_free_buffers(folio)) J_ASSERT(!folio_buffers(folio)); } return 0; diff --git a/fs/mpage.c b/fs/mpage.c index 6df9c3aa5728..0d25f44f5707 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -431,7 +431,7 @@ static void clean_buffers(struct page *page, unsigned first_unmapped) * disk before we reach the platter. */ if (buffer_heads_over_limit && PageUptodate(page)) - try_to_free_buffers(page); + try_to_free_buffers(page_folio(page)); } /* diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 7d7b86ca078f..35d40a67204c 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -502,7 +502,7 @@ static bool ocfs2_release_folio(struct folio *folio, gfp_t wait) { if (!folio_buffers(folio)) return false; - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); } static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 9cf2e1420a74..0cffe054b78e 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3234,7 +3234,7 @@ static bool reiserfs_release_folio(struct folio *folio, gfp_t unused_gfp_flags) bh = bh->b_this_page; } while (bh != head); if (ret) - ret = try_to_free_buffers(&folio->page); + ret = try_to_free_buffers(folio); spin_unlock(&j->j_dirty_buffers_lock); return ret; } diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 99ba495b0f28..d8cc9a366124 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -606,7 +606,7 @@ static void release_buffer_page(struct buffer_head *bh) folio_get(folio); put_bh(bh); if (!folio->mapping) - try_to_free_buffers(&folio->page); + try_to_free_buffers(folio); folio_unlock(folio); folio_put(folio); } else { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 31d82fd9abe8..c9d1463bb20f 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -158,7 +158,7 @@ void mark_buffer_write_io_error(struct buffer_head *bh); void touch_buffer(struct buffer_head *bh); void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset); -int try_to_free_buffers(struct page *); +bool try_to_free_buffers(struct folio *); struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, bool retry); void create_empty_buffers(struct page *, unsigned long, @@ -402,7 +402,7 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio); #else /* CONFIG_BLOCK */ static inline void buffer_init(void) {} -static 
inline int try_to_free_buffers(struct page *page) { return 1; } +static inline bool try_to_free_buffers(struct folio *folio) { return true; } static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 831b28dab01a..82dfb279e0c4 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1067,10 +1067,6 @@ static inline void folio_cancel_dirty(struct folio *folio) if (folio_test_dirty(folio)) __folio_cancel_dirty(folio); } -static inline void cancel_dirty_page(struct page *page) -{ - folio_cancel_dirty(page_folio(page)); -} bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); void folio_invalidate(struct folio *folio, size_t offset, size_t length); diff --git a/mm/filemap.c b/mm/filemap.c index ee892853a214..d335a154a0d9 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3957,6 +3957,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp) if (mapping && mapping->a_ops->release_folio) return mapping->a_ops->release_folio(folio, gfp); - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); } EXPORT_SYMBOL(filemap_release_folio); diff --git a/mm/migrate.c b/mm/migrate.c index 6c31ee1e1c9b..21d82636c291 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1013,7 +1013,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, if (!page->mapping) { VM_BUG_ON_PAGE(PageAnon(page), page); if (page_has_private(page)) { - try_to_free_buffers(page); + try_to_free_buffers(folio); goto out_unlock_both; } } else if (page_mapped(page)) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 27851232e00c..f3f7ce2c4068 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1181,7 +1181,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping) * folio->mapping == NULL while being dirty with clean buffers. */ if (folio_test_private(folio)) { - if (try_to_free_buffers(&folio->page)) { + if (try_to_free_buffers(folio)) { folio_clear_dirty(folio); pr_info("%s: orphaned folio\n", __func__); return PAGE_CLEAN; -- cgit
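Taken together with the folio_buffers() helper, a buffer_head-based ->release_folio after these conversions looks roughly like the following sketch (myfs_release_folio is a hypothetical name; the shape follows the ocfs2 conversion above):

/* Hypothetical ->release_folio built on the folio-based API. */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	/* No buffer_heads attached, nothing for us to release. */
	if (!folio_buffers(folio))
		return false;
	/* Frees clean, unused buffers and returns true on success. */
	return try_to_free_buffers(folio);
}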