author    | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2023-08-30 16:06:38 -0700
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2023-08-30 16:06:38 -0700
commit    | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch)
tree      | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /fs/btrfs/compression.c
parent    | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff)
parent    | 54116d442e001e1b6bd482122043b1870998a1f3 (diff)
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r-- | fs/btrfs/compression.c | 299
1 file changed, 103 insertions, 196 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index f42f31f22d13..2d0493f0a184 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -37,6 +37,8 @@
 #include "file-item.h"
 #include "super.h"
 
+struct bio_set btrfs_compressed_bioset;
+
 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
 
 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
@@ -54,6 +56,25 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
 	return NULL;
 }
 
+static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
+{
+	return container_of(bbio, struct compressed_bio, bbio);
+}
+
+static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
+						   u64 start, blk_opf_t op,
+						   btrfs_bio_end_io_t end_io)
+{
+	struct btrfs_bio *bbio;
+
+	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
+					  GFP_NOFS, &btrfs_compressed_bioset));
+	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
+	bbio->inode = inode;
+	bbio->file_offset = start;
+	return to_compressed_bio(bbio);
+}
+
 bool btrfs_compress_is_valid_type(const char *str, size_t len)
 {
 	int i;
@@ -139,32 +160,25 @@ static int compression_decompress(int type, struct list_head *ws,
 	}
 }
 
+static void btrfs_free_compressed_pages(struct compressed_bio *cb)
+{
+	for (unsigned int i = 0; i < cb->nr_pages; i++)
+		put_page(cb->compressed_pages[i]);
+	kfree(cb->compressed_pages);
+}
+
 static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 static void end_compressed_bio_read(struct btrfs_bio *bbio)
 {
-	struct compressed_bio *cb = bbio->private;
-	unsigned int index;
-	struct page *page;
+	struct compressed_bio *cb = to_compressed_bio(bbio);
+	blk_status_t status = bbio->bio.bi_status;
 
-	if (bbio->bio.bi_status)
-		cb->status = bbio->bio.bi_status;
-	else
-		cb->status = errno_to_blk_status(btrfs_decompress_bio(cb));
-
-	/* Release the compressed pages */
-	for (index = 0; index < cb->nr_pages; index++) {
-		page = cb->compressed_pages[index];
-		page->mapping = NULL;
-		put_page(page);
-	}
-
-	/* Do io completion on the original bio */
-	btrfs_bio_end_io(btrfs_bio(cb->orig_bio), cb->status);
+	if (!status)
+		status = errno_to_blk_status(btrfs_decompress_bio(cb));
 
-	/* Finally free the cb struct */
-	kfree(cb->compressed_pages);
-	kfree(cb);
+	btrfs_free_compressed_pages(cb);
+	btrfs_bio_end_io(cb->orig_bbio, status);
 	bio_put(&bbio->bio);
 }
 
@@ -172,14 +186,14 @@ static void end_compressed_bio_read(struct btrfs_bio *bbio)
  * Clear the writeback bits on all of the file
  * pages for a compressed write
  */
-static noinline void end_compressed_writeback(struct inode *inode,
-					      const struct compressed_bio *cb)
+static noinline void end_compressed_writeback(const struct compressed_bio *cb)
 {
+	struct inode *inode = &cb->bbio.inode->vfs_inode;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	unsigned long index = cb->start >> PAGE_SHIFT;
 	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 	struct folio_batch fbatch;
-	const int errno = blk_status_to_errno(cb->status);
+	const int errno = blk_status_to_errno(cb->bbio.bio.bi_status);
 	int i;
 	int ret;
 
@@ -207,45 +221,25 @@ static noinline void end_compressed_writeback(struct inode *inode,
 	/* the inode may be gone now */
 }
 
-static void finish_compressed_bio_write(struct compressed_bio *cb)
+static void btrfs_finish_compressed_write_work(struct work_struct *work)
 {
-	struct inode *inode = cb->inode;
-	unsigned int index;
+	struct compressed_bio *cb =
+		container_of(work, struct compressed_bio, write_end_work);
 
 	/*
	 * Ok, we're the last bio for this extent, step one is to call back
	 * into the FS and do all the end_io operations.
	 */
-	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
+	btrfs_writepage_endio_finish_ordered(cb->bbio.inode, NULL,
 			cb->start, cb->start + cb->len - 1,
-			cb->status == BLK_STS_OK);
+			cb->bbio.bio.bi_status == BLK_STS_OK);
 
 	if (cb->writeback)
-		end_compressed_writeback(inode, cb);
+		end_compressed_writeback(cb);
 	/* Note, our inode could be gone now */
 
-	/*
-	 * Release the compressed pages, these came from alloc_page and
-	 * are not attached to the inode at all
-	 */
-	for (index = 0; index < cb->nr_pages; index++) {
-		struct page *page = cb->compressed_pages[index];
-
-		page->mapping = NULL;
-		put_page(page);
-	}
-
-	/* Finally free the cb struct */
-	kfree(cb->compressed_pages);
-	kfree(cb);
-}
-
-static void btrfs_finish_compressed_write_work(struct work_struct *work)
-{
-	struct compressed_bio *cb =
-		container_of(work, struct compressed_bio, write_end_work);
-
-	finish_compressed_bio_write(cb);
+	btrfs_free_compressed_pages(cb);
+	bio_put(&cb->bbio.bio);
 }
 
 /*
@@ -257,13 +251,25 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
  */
 static void end_compressed_bio_write(struct btrfs_bio *bbio)
 {
-	struct compressed_bio *cb = bbio->private;
-	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
+	struct compressed_bio *cb = to_compressed_bio(bbio);
+	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
 
-	cb->status = bbio->bio.bi_status;
 	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
+}
 
-	bio_put(&bbio->bio);
+static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
+{
+	struct bio *bio = &cb->bbio.bio;
+	u32 offset = 0;
+
+	while (offset < cb->compressed_len) {
+		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
+
+		/* Maximum compressed extent is smaller than bio size limit. */
+		__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
+			       len, 0);
+		offset += len;
+	}
 }
 
 /*
@@ -275,28 +281,24 @@ static void end_compressed_bio_write(struct btrfs_bio *bbio)
  * This also checksums the file bytes and gets things ready for
  * the end io hooks.
  */
-blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
+void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 				 unsigned int len, u64 disk_start,
 				 unsigned int compressed_len,
 				 struct page **compressed_pages,
 				 unsigned int nr_pages,
 				 blk_opf_t write_flags,
-				 struct cgroup_subsys_state *blkcg_css,
 				 bool writeback)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
-	struct bio *bio = NULL;
 	struct compressed_bio *cb;
-	u64 cur_disk_bytenr = disk_start;
-	blk_status_t ret = BLK_STS_OK;
 
 	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
 	       IS_ALIGNED(len, fs_info->sectorsize));
-	cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
-	if (!cb)
-		return BLK_STS_RESOURCE;
-	cb->status = BLK_STS_OK;
-	cb->inode = &inode->vfs_inode;
+
+	write_flags |= REQ_BTRFS_ONE_ORDERED;
+
+	cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags,
+				  end_compressed_bio_write);
 	cb->start = start;
 	cb->len = len;
 	cb->compressed_pages = compressed_pages;
@@ -304,56 +306,10 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	cb->writeback = writeback;
 	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
 	cb->nr_pages = nr_pages;
+	cb->bbio.bio.bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
+	btrfs_add_compressed_bio_pages(cb);
 
-	if (blkcg_css) {
-		kthread_associate_blkcg(blkcg_css);
-		write_flags |= REQ_CGROUP_PUNT;
-	}
-
-	write_flags |= REQ_BTRFS_ONE_ORDERED;
-	bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_WRITE | write_flags,
-			      BTRFS_I(cb->inode), end_compressed_bio_write, cb);
-	bio->bi_iter.bi_sector = cur_disk_bytenr >> SECTOR_SHIFT;
-	btrfs_bio(bio)->file_offset = start;
-
-	while (cur_disk_bytenr < disk_start + compressed_len) {
-		u64 offset = cur_disk_bytenr - disk_start;
-		unsigned int index = offset >> PAGE_SHIFT;
-		unsigned int real_size;
-		unsigned int added;
-		struct page *page = compressed_pages[index];
-
-		/*
-		 * We have various limits on the real read size:
-		 * - page boundary
-		 * - compressed length boundary
-		 */
-		real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset));
-		real_size = min_t(u64, real_size, compressed_len - offset);
-		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
-
-		added = bio_add_page(bio, page, real_size, offset_in_page(offset));
-		/*
-		 * Maximum compressed extent is smaller than bio size limit,
-		 * thus bio_add_page() should always success.
-		 */
-		ASSERT(added == real_size);
-		cur_disk_bytenr += added;
-	}
-
-	/* Finished the range. */
-	ASSERT(bio->bi_iter.bi_size);
-	btrfs_submit_bio(bio, 0);
-	if (blkcg_css)
-		kthread_associate_blkcg(NULL);
-	return ret;
-}
-
-static u64 bio_end_offset(struct bio *bio)
-{
-	struct bio_vec *last = bio_last_bvec_all(bio);
-
-	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
+	btrfs_submit_bio(&cb->bbio, 0);
 }
 
 /*
@@ -374,7 +330,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	unsigned long end_index;
-	u64 cur = bio_end_offset(cb->orig_bio);
+	struct bio *orig_bio = &cb->orig_bbio->bio;
+	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
 	u64 isize = i_size_read(inode);
 	int ret;
 	struct page *page;
@@ -464,7 +421,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 */
 		if (!em || cur < em->start ||
 		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
-		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
+		    (em->block_start >> 9) != orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
 			unlock_extent(tree, cur, page_end, NULL);
 			unlock_page(page);
@@ -484,7 +441,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		}
 
 		add_size = min(em->start + em->len, page_end + 1) - cur;
-		ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
+		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
 		if (ret != add_size) {
 			unlock_extent(tree, cur, page_end, NULL);
 			unlock_page(page);
@@ -515,17 +472,14 @@
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
 */
-void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
-				  int mirror_num)
+void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct extent_map_tree *em_tree;
+	struct btrfs_inode *inode = bbio->inode;
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+	struct extent_map_tree *em_tree = &inode->extent_tree;
 	struct compressed_bio *cb;
 	unsigned int compressed_len;
-	struct bio *comp_bio;
-	const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
-	u64 cur_disk_byte = disk_bytenr;
-	u64 file_offset;
+	u64 file_offset = bbio->file_offset;
 	u64 em_len;
 	u64 em_start;
 	struct extent_map *em;
@@ -533,12 +487,6 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	int memstall = 0;
 	blk_status_t ret;
 	int ret2;
-	int i;
-
-	em_tree = &BTRFS_I(inode)->extent_tree;
-
-	file_offset = bio_first_bvec_all(bio)->bv_offset +
-		      page_offset(bio_first_page_all(bio));
 
 	/* we need the actual starting offset of this extent in the file */
 	read_lock(&em_tree->lock);
@@ -551,102 +499,54 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
 	compressed_len = em->block_len;
 
-	cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
-	if (!cb) {
-		ret = BLK_STS_RESOURCE;
-		goto out;
-	}
-	cb->status = BLK_STS_OK;
-	cb->inode = inode;
+	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
+				  end_compressed_bio_read);
 	cb->start = em->orig_start;
 	em_len = em->len;
 	em_start = em->start;
-	cb->len = bio->bi_iter.bi_size;
+	cb->len = bbio->bio.bi_iter.bi_size;
 	cb->compressed_len = compressed_len;
 	cb->compress_type = em->compress_type;
-	cb->orig_bio = bio;
+	cb->orig_bbio = bbio;
 	free_extent_map(em);
-	em = NULL;
 
 	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
 	if (!cb->compressed_pages) {
 		ret = BLK_STS_RESOURCE;
-		goto fail;
+		goto out_free_bio;
 	}
 
 	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
 	if (ret2) {
 		ret = BLK_STS_RESOURCE;
-		goto fail;
+		goto out_free_compressed_pages;
 	}
 
-	add_ra_bio_pages(inode, em_start + em_len, cb, &memstall, &pflags);
+	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
+			 &pflags);
 
 	/* include any pages we added in add_ra-bio_pages */
-	cb->len = bio->bi_iter.bi_size;
-
-	comp_bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, BTRFS_I(cb->inode),
-				   end_compressed_bio_read, cb);
-	comp_bio->bi_iter.bi_sector = (cur_disk_byte >> SECTOR_SHIFT);
-
-	while (cur_disk_byte < disk_bytenr + compressed_len) {
-		u64 offset = cur_disk_byte - disk_bytenr;
-		unsigned int index = offset >> PAGE_SHIFT;
-		unsigned int real_size;
-		unsigned int added;
-		struct page *page = cb->compressed_pages[index];
-
-		/*
-		 * We have various limit on the real read size:
-		 * - page boundary
-		 * - compressed length boundary
-		 */
-		real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset));
-		real_size = min_t(u64, real_size, compressed_len - offset);
-		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
-
-		added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
-		/*
-		 * Maximum compressed extent is smaller than bio size limit,
-		 * thus bio_add_page() should always success.
-		 */
-		ASSERT(added == real_size);
-		cur_disk_byte += added;
-	}
+	cb->len = bbio->bio.bi_iter.bi_size;
+	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
+	btrfs_add_compressed_bio_pages(cb);
 
 	if (memstall)
 		psi_memstall_leave(&pflags);
 
-	/*
-	 * Stash the initial offset of this chunk, as there is no direct
-	 * correlation between compressed pages and the original file offset.
-	 * The field is only used for printing error messages anyway.
-	 */
-	btrfs_bio(comp_bio)->file_offset = file_offset;
-
-	ASSERT(comp_bio->bi_iter.bi_size);
-	btrfs_submit_bio(comp_bio, mirror_num);
+	btrfs_submit_bio(&cb->bbio, mirror_num);
 	return;
 
-fail:
-	if (cb->compressed_pages) {
-		for (i = 0; i < cb->nr_pages; i++) {
-			if (cb->compressed_pages[i])
-				__free_page(cb->compressed_pages[i]);
-		}
-	}
-
+out_free_compressed_pages:
 	kfree(cb->compressed_pages);
-	kfree(cb);
+out_free_bio:
+	bio_put(&cb->bbio.bio);
 out:
-	free_extent_map(em);
-	btrfs_bio_end_io(btrfs_bio(bio), ret);
-	return;
+	btrfs_bio_end_io(bbio, ret);
 }
 
/*
@@ -1038,6 +938,8 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
 	ret = compression_decompress_bio(workspace, cb);
 	put_workspace(type, workspace);
 
+	if (!ret)
+		zero_fill_bio(&cb->orig_bbio->bio);
 	return ret;
 }
 
@@ -1062,6 +964,10 @@ int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
 
 int __init btrfs_init_compress(void)
 {
+	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
+			offsetof(struct compressed_bio, bbio.bio),
+			BIOSET_NEED_BVECS))
+		return -ENOMEM;
 	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
 	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
 	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
@@ -1075,6 +981,7 @@ void __cold btrfs_exit_compress(void)
 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
 	zstd_cleanup_workspace_manager();
+	bioset_exit(&btrfs_compressed_bioset);
 }
 
/*
@@ -1110,7 +1017,7 @@ void __cold btrfs_exit_compress(void)
 int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
 			      struct compressed_bio *cb, u32 decompressed)
 {
-	struct bio *orig_bio = cb->orig_bio;
+	struct bio *orig_bio = &cb->orig_bbio->bio;
 	/* Offset inside the full decompressed extent */
 	u32 cur_offset;
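The core of this diff is that struct compressed_bio now embeds its struct btrfs_bio (the new bbio member) and is carved out of a dedicated bio_set, so the end_io handlers recover the containing structure with container_of() instead of going through bbio->private, and the separate kmalloc()/kfree() of the cb disappears. Below is a minimal, self-contained userspace sketch of that embedding pattern, not kernel code: the demo_* types and functions are invented for illustration only.

```c
/*
 * Sketch of the "embedded I/O object" pattern: the outer structure embeds
 * the object handed to the I/O layer, the completion callback only sees the
 * inner object, and container_of() recovers the outer one.  All names here
 * are hypothetical stand-ins for compressed_bio/btrfs_bio.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_bio {
	int status;				/* stands in for bio.bi_status */
	void (*end_io)(struct demo_bio *bio);	/* completion callback */
};

struct demo_compressed_bio {
	unsigned int nr_pages;	/* payload owned by the outer object */
	struct demo_bio bio;	/* embedded I/O object, like compressed_bio.bbio */
};

/* Completion handler: gets only the inner bio, recovers the outer struct. */
static void demo_end_io(struct demo_bio *bio)
{
	struct demo_compressed_bio *cb =
		container_of(bio, struct demo_compressed_bio, bio);

	printf("completed: status=%d nr_pages=%u\n", bio->status, cb->nr_pages);
	free(cb);	/* one allocation covers both objects */
}

/* Allocate the outer object and hand back the embedded bio. */
static struct demo_bio *demo_alloc(unsigned int nr_pages)
{
	struct demo_compressed_bio *cb = calloc(1, sizeof(*cb));

	if (!cb)
		return NULL;
	cb->nr_pages = nr_pages;
	cb->bio.end_io = demo_end_io;
	return &cb->bio;
}

int main(void)
{
	struct demo_bio *bio = demo_alloc(8);

	if (!bio)
		return 1;
	bio->status = 0;	/* pretend the I/O finished successfully */
	bio->end_io(bio);	/* "completion" invokes the handler */
	return 0;
}
```

Sizing the bio_set with offsetof(struct compressed_bio, bbio.bio), as the hunk in btrfs_init_compress() does, is what lets a single allocation serve both the bio and its surrounding compressed_bio, which is why the old kmalloc/kfree pair and the private pointer are gone.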