author | Christoph Hellwig <hch@lst.de> | 2023-06-28 17:31:40 +0200 |
---|---|---|
committer | David Sterba <dsterba@suse.com> | 2023-08-21 14:52:15 +0200 |
commit | 44962ca37c8cf882ba72e4cd67f30eaecb920a52 (patch) | |
tree | 7ba42738bbb7cd9e43d05b262c8a71c2ac0b2f2b /fs/btrfs/inode.c | |
parent | f778b6b8e0136c0da9ffcf560c8cd8619b1cabf4 (diff) |
btrfs: don't redirty pages in compress_file_range
compress_file_range needs to clear the dirty bit before handing off work
to the compression worker threads to prevent processes coming in through
mmap and changing the file contents while the compression is accessing
the data (see commit 4adaa611020f ("Btrfs: fix race between mmap writes
and compression")).
But when compress_file_range decides to not compress the data, it falls
back to submit_uncompressed_range which uses extent_write_locked_range
to write the uncompressed data. extent_write_locked_range currently
expects all pages to be marked dirty so that it can clear the dirty
bit itself, and thus compress_file_range has to redirty the page range.
Redirtying the page range is rather inefficient and also pointless,
so instead pass a pages_dirty parameter to extent_write_locked_range
and skip the redirty game entirely.
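A minimal sketch of the pages_dirty idea, with a made-up function name and a much-simplified body rather than the real extent_io.c loop: the writeback path clears the dirty bit itself only when the caller left the pages dirty.

/* Simplified sketch of the pages_dirty parameter; not the extent_io.c code. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/*
 * Only clear the dirty bit here when the caller left the pages dirty
 * (pages_dirty == true).  compress_file_range clears it up front and
 * passes false, so it no longer has to redirty the range just so this
 * path can clear it again.
 */
static void write_locked_range_sketch(struct inode *inode, u64 start, u64 end,
				      struct writeback_control *wbc,
				      bool pages_dirty)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;

	while (index <= end_index) {
		struct page *page = find_get_page(inode->i_mapping, index);

		if (page) {
			if (pages_dirty)
				clear_page_dirty_for_io(page);
			/* ...submit the page for writeback via wbc here... */
			put_page(page);
		}
		index++;
	}
}

In the patch itself the flag is simply threaded through extent_write_locked_range(): submit_uncompressed_range() passes false because compress_file_range() already cleared the dirty bits, while run_delalloc_zoned() passes true, as the hunks below show.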
Note that compress_file_range was even redirtying the locked_page twice
given that extent_range_redirty_for_io already redirties all pages
in the range, which must include locked_page if there is one.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r-- | fs/btrfs/inode.c | 43 |
1 file changed, 9 insertions, 34 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index cda511d76084..7447f3b44cd2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -846,11 +846,17 @@ static void compress_file_range(struct btrfs_work *work)
 	unsigned int poff;
 	int i;
 	int compress_type = fs_info->compress_type;
-	int redirty = 0;
 
 	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
 
 	/*
+	 * We need to call clear_page_dirty_for_io on each page in the range.
+	 * Otherwise applications with the file mmap'd can wander in and change
+	 * the page contents while we are compressing them.
+	 */
+	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+
+	/*
 	 * We need to save i_size before now because it could change in between
 	 * us evaluating the size and assigning it. This is because we lock and
 	 * unlock the page in truncate and fallocate, and then modify the i_size
@@ -929,22 +935,6 @@ again:
 	else if (inode->prop_compress)
 		compress_type = inode->prop_compress;
 
-	/*
-	 * We need to call clear_page_dirty_for_io on each page in the range.
-	 * Otherwise applications with the file mmap'd can wander in and change
-	 * the page contents while we are compressing them.
-	 *
-	 * If the compression fails for any reason, we set the pages dirty again
-	 * later on.
-	 *
-	 * Note that the remaining part is redirtied, the start pointer has
-	 * moved, the end is the original one.
-	 */
-	if (!redirty) {
-		extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
-		redirty = 1;
-	}
-
 	/* Compression level is applied here. */
 	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
 				   mapping, start, pages, &nr_pages, &total_in,
@@ -1040,21 +1030,6 @@ mark_incompressible:
 	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
 cleanup_and_bail_uncompressed:
-	/*
-	 * No compression, but we still need to write the pages in the file
-	 * we've been given so far. redirty the locked page if it corresponds
-	 * to our extent and set things up for the async work queue to run
-	 * cow_file_range to do the normal delalloc dance.
-	 */
-	if (async_chunk->locked_page &&
-	    (page_offset(async_chunk->locked_page) >= start &&
-	     page_offset(async_chunk->locked_page)) <= end) {
-		__set_page_dirty_nobuffers(async_chunk->locked_page);
-		/* unlocked later on in the async handlers */
-	}
-
-	if (redirty)
-		extent_range_redirty_for_io(&inode->vfs_inode, start, end);
 	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
 			 BTRFS_COMPRESS_NONE);
 free_pages:
@@ -1130,7 +1105,7 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
 
 	/* All pages will be unlocked, including @locked_page */
 	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
-	extent_write_locked_range(&inode->vfs_inode, start, end, &wbc);
+	extent_write_locked_range(&inode->vfs_inode, start, end, &wbc, false);
 	wbc_detach_inode(&wbc);
 }
 
@@ -1754,7 +1729,7 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
 		}
 		locked_page_done = true;
 		extent_write_locked_range(&inode->vfs_inode, start, done_offset,
-					  wbc);
+					  wbc, true);
 		start = done_offset + 1;
 	}