Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c  76
1 files changed, 27 insertions, 49 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3e5bb0cdd3cd..16045ea86fc1 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/bio.h>
@@ -109,7 +110,6 @@ struct extent_page_data {
 	struct bio *bio;
 	struct extent_io_tree *tree;
 	get_extent_t *get_extent;
-	unsigned long bio_flags;
 
 	/* tells writepage not to lock the state bits for this range
 	 * it still does the unlocking
@@ -2761,8 +2761,8 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page,
  */
 static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
 			      struct writeback_control *wbc,
-			      struct page *page, sector_t sector,
-			      size_t size, unsigned long offset,
+			      struct page *page, u64 offset,
+			      size_t size, unsigned long pg_offset,
 			      struct block_device *bdev,
 			      struct bio **bio_ret,
 			      bio_end_io_t end_io_func,
@@ -2776,6 +2776,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
 	int contig = 0;
 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
 	size_t page_size = min_t(size_t, size, PAGE_SIZE);
+	sector_t sector = offset >> 9;
 
 	if (bio_ret && *bio_ret) {
 		bio = *bio_ret;
@@ -2786,8 +2787,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
 		if (prev_bio_flags != bio_flags || !contig ||
 		    force_bio_submit ||
-		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
-		    bio_add_page(bio, page, page_size, offset) < page_size) {
+		    merge_bio(tree, page, pg_offset, page_size, bio, bio_flags) ||
+		    bio_add_page(bio, page, page_size, pg_offset) < page_size) {
 			ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
 			if (ret < 0) {
 				*bio_ret = NULL;
@@ -2801,8 +2802,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
 		}
 	}
 
-	bio = btrfs_bio_alloc(bdev, sector << 9);
-	bio_add_page(bio, page, page_size, offset);
+	bio = btrfs_bio_alloc(bdev, offset);
+	bio_add_page(bio, page, page_size, pg_offset);
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = tree;
 	bio->bi_write_hint = page->mapping->host->i_write_hint;
@@ -2892,7 +2893,6 @@ static int __do_readpage(struct extent_io_tree *tree,
 	u64 last_byte = i_size_read(inode);
 	u64 block_start;
 	u64 cur_end;
-	sector_t sector;
 	struct extent_map *em;
 	struct block_device *bdev;
 	int ret = 0;
@@ -2928,6 +2928,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 	}
 	while (cur <= end) {
 		bool force_bio_submit = false;
+		u64 offset;
 
 		if (cur >= last_byte) {
 			char *userpage;
@@ -2967,9 +2968,9 @@ static int __do_readpage(struct extent_io_tree *tree,
 		iosize = ALIGN(iosize, blocksize);
 		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
 			disk_io_size = em->block_len;
-			sector = em->block_start >> 9;
+			offset = em->block_start;
 		} else {
-			sector = (em->block_start + extent_offset) >> 9;
+			offset = em->block_start + extent_offset;
 			disk_io_size = iosize;
 		}
 		bdev = em->bdev;
@@ -3062,8 +3063,8 @@ static int __do_readpage(struct extent_io_tree *tree,
 		}
 
 		ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
-					 page, sector, disk_io_size, pg_offset,
-					 bdev, bio,
+					 page, offset, disk_io_size,
+					 pg_offset, bdev, bio,
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
 					 this_bio_flag,
@@ -3324,7 +3325,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 	u64 extent_offset;
 	u64 block_start;
 	u64 iosize;
-	sector_t sector;
 	struct extent_map *em;
 	struct block_device *bdev;
 	size_t pg_offset = 0;
@@ -3367,6 +3367,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 	while (cur <= end) {
 		u64 em_end;
+		u64 offset;
 
 		if (cur >= i_size) {
 			if (tree->ops && tree->ops->writepage_end_io_hook)
@@ -3388,7 +3389,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 		BUG_ON(end < cur);
 		iosize = min(em_end - cur, end - cur + 1);
 		iosize = ALIGN(iosize, blocksize);
-		sector = (em->block_start + extent_offset) >> 9;
+		offset = em->block_start + extent_offset;
 		bdev = em->bdev;
 		block_start = em->block_start;
 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
@@ -3431,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 		}
 
 		ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
-					 page, sector, iosize, pg_offset,
+					 page, offset, iosize, pg_offset,
 					 bdev, &epd->bio,
 					 end_bio_extent_writepage,
 					 0, 0, 0, false);
@@ -3471,8 +3472,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned int write_flags = 0;
 	unsigned long nr_written = 0;
 
-	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = REQ_SYNC;
+	write_flags = wbc_to_write_flags(wbc);
 
 	trace___extent_writepage(page, inode, wbc);
@@ -3716,16 +3716,13 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	u64 offset = eb->start;
 	u32 nritems;
 	unsigned long i, num_pages;
-	unsigned long bio_flags = 0;
 	unsigned long start, end;
-	unsigned int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
+	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
 	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
 	num_pages = num_extent_pages(eb->start, eb->len);
 	atomic_set(&eb->io_pages, num_pages);
-	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
-		bio_flags = EXTENT_BIO_TREE_LOG;
 
 	/* set btree blocks beyond nritems with 0 to avoid stale content. */
 	nritems = btrfs_header_nritems(eb);
@@ -3749,11 +3746,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
 		ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
-					 p, offset >> 9, PAGE_SIZE, 0, bdev,
+					 p, offset, PAGE_SIZE, 0, bdev,
 					 &epd->bio,
 					 end_bio_extent_buffer_writepage,
-					 0, epd->bio_flags, bio_flags, false);
-		epd->bio_flags = bio_flags;
+					 0, 0, 0, false);
 		if (ret) {
 			set_btree_ioerr(p);
 			if (PageWriteback(p))
@@ -3790,7 +3786,6 @@ int btree_write_cache_pages(struct address_space *mapping,
 		.tree = tree,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 	int ret = 0;
 	int done = 0;
@@ -3802,7 +3797,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 	int scanned = 0;
 	int tag;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
@@ -3819,8 +3814,8 @@ retry:
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		tag_pages_for_writeback(mapping, index, end);
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+			tag))) {
 		unsigned i;
 
 		scanned = 1;
@@ -3830,11 +3825,6 @@ retry:
 			if (!PagePrivate(page))
 				continue;
 
-			if (!wbc->range_cyclic && page->index > end) {
-				done = 1;
-				break;
-			}
-
 			spin_lock(&mapping->private_lock);
 			if (!PagePrivate(page)) {
 				spin_unlock(&mapping->private_lock);
@@ -3946,7 +3936,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
 	if (!igrab(inode))
 		return 0;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
@@ -3966,8 +3956,8 @@ retry:
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
+			&index, end, tag))) {
 		unsigned i;
 
 		scanned = 1;
@@ -3992,12 +3982,6 @@ retry:
 				continue;
 			}
 
-			if (!wbc->range_cyclic && page->index > end) {
-				done = 1;
-				unlock_page(page);
-				continue;
-			}
-
 			if (wbc->sync_mode != WB_SYNC_NONE) {
 				if (PageWriteback(page))
 					flush_fn(data);
@@ -4063,10 +4047,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
 	if (epd->bio) {
 		int ret;
 
-		bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-				 epd->sync_io ? REQ_SYNC : 0);
-
-		ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
+		ret = submit_one_bio(epd->bio, 0, 0);
 		BUG_ON(ret < 0); /* -ENOMEM */
 		epd->bio = NULL;
 	}
@@ -4089,7 +4070,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.get_extent = get_extent,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 
 	ret = __extent_writepage(page, wbc, &epd);
@@ -4114,7 +4094,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		.get_extent = get_extent,
 		.extent_locked = 1,
 		.sync_io = mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 	struct writeback_control wbc_writepages = {
 		.sync_mode = mode,
@@ -4154,7 +4133,6 @@ int extent_writepages(struct extent_io_tree *tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 
 	ret = extent_write_cache_pages(mapping, wbc, __extent_writepage, &epd,