diff --stat
 fs/btrfs/extent_io.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++---
 fs/btrfs/subpage.c   |   6 ++
 2 files changed, 104 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 643243a0a010..cc4c55c0733c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1226,13 +1226,23 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages, static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, struct page *page, struct writeback_control *wbc) { + struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode); + struct folio *folio = page_folio(page); + const bool is_subpage = btrfs_is_subpage(fs_info, page->mapping); const u64 page_start = page_offset(page); const u64 page_end = page_start + PAGE_SIZE - 1; + /* + * Save the last found delalloc end. As the delalloc end can go beyond + * page boundary, thus we cannot rely on subpage bitmap to locate the + * last delalloc end. + */ + u64 last_delalloc_end = 0; u64 delalloc_start = page_start; u64 delalloc_end = page_end; u64 delalloc_to_write = 0; int ret = 0; + /* Lock all (subpage) delalloc ranges inside the page first. */ while (delalloc_start < page_end) { delalloc_end = page_end; if (!find_lock_delalloc_range(&inode->vfs_inode, page, @@ -1240,15 +1250,95 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, delalloc_start = delalloc_end + 1; continue; } - - ret = btrfs_run_delalloc_range(inode, page, delalloc_start, - delalloc_end, wbc); - if (ret < 0) - return ret; - + btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start, + min(delalloc_end, page_end) + 1 - + delalloc_start); + last_delalloc_end = delalloc_end; delalloc_start = delalloc_end + 1; } + delalloc_start = page_start; + + if (!last_delalloc_end) + goto out; + + /* Run the delalloc ranges for the above locked ranges. */ + while (delalloc_start < page_end) { + u64 found_start; + u32 found_len; + bool found; + + if (!is_subpage) { + /* + * For non-subpage case, the found delalloc range must + * cover this page and there must be only one locked + * delalloc range. 
+ */ + found_start = page_start; + found_len = last_delalloc_end + 1 - found_start; + found = true; + } else { + found = btrfs_subpage_find_writer_locked(fs_info, folio, + delalloc_start, &found_start, &found_len); + } + if (!found) + break; + /* + * The subpage range covers the last sector, the delalloc range may + * end beyond the page boundary, use the saved delalloc_end + * instead. + */ + if (found_start + found_len >= page_end) + found_len = last_delalloc_end + 1 - found_start; + + if (ret >= 0) { + /* No errors hit so far, run the current delalloc range. */ + ret = btrfs_run_delalloc_range(inode, page, found_start, + found_start + found_len - 1, + wbc); + } else { + /* + * We've hit an error during previous delalloc range, + * have to cleanup the remaining locked ranges. + */ + unlock_extent(&inode->io_tree, found_start, + found_start + found_len - 1, NULL); + __unlock_for_delalloc(&inode->vfs_inode, page, found_start, + found_start + found_len - 1); + } + + /* + * We can hit btrfs_run_delalloc_range() with >0 return value. + * + * This happens when either the IO is already done and page + * unlocked (inline) or the IO submission and page unlock would + * be handled as async (compression). + * + * Inline is only possible for regular sectorsize for now. + * + * Compression is possible for both subpage and regular cases, + * but even for subpage compression only happens for page aligned + * range, thus the found delalloc range must go beyond current + * page. + */ + if (ret > 0) + ASSERT(!is_subpage || found_start + found_len >= page_end); + + /* + * Above btrfs_run_delalloc_range() may have unlocked the page, + * thus for the last range, we cannot touch the page anymore. 
+ */ + if (found_start + found_len >= last_delalloc_end + 1) + break; + delalloc_start = found_start + found_len; + } + if (ret < 0) + return ret; +out: + if (last_delalloc_end) + delalloc_end = last_delalloc_end; + else + delalloc_end = page_end; /* * delalloc_end is already one less than the total length, so * we don't subtract one from PAGE_SIZE @@ -1520,7 +1610,8 @@ done: PAGE_SIZE, !ret); mapping_set_error(page->mapping, ret); } - unlock_page(page); + + btrfs_folio_end_all_writers(inode_to_fs_info(inode), folio); ASSERT(ret <= 0); return ret; } diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index 93ae7b7367d5..fc7db52e8f58 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -864,6 +864,7 @@ out: */ void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio) { + struct btrfs_subpage *subpage = folio_get_private(folio); u64 folio_start = folio_pos(folio); u64 cur = folio_start; @@ -873,6 +874,11 @@ void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct fol return; } + /* The page has no new delalloc range locked on it. Just plain unlock. */ + if (atomic_read(&subpage->writers) == 0) { + folio_unlock(folio); + return; + } while (cur < folio_start + PAGE_SIZE) { u64 found_start; u32 found_len; |