Diffstat (limited to 'fs/btrfs/transaction.c')
 -rw-r--r--  fs/btrfs/transaction.c | 286
 1 file changed, 147 insertions(+), 139 deletions(-)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 52ada47aff50..acff6bb49a97 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -16,12 +16,12 @@
 #include "transaction.h"
 #include "locking.h"
 #include "tree-log.h"
-#include "inode-map.h"
 #include "volumes.h"
 #include "dev-replace.h"
 #include "qgroup.h"
 #include "block-group.h"
 #include "space-info.h"
+#include "zoned.h"
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
@@ -108,6 +108,11 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
                                            __TRANS_JOIN |
                                            __TRANS_JOIN_NOLOCK |
                                            __TRANS_JOIN_NOSTART),
+       [TRANS_STATE_SUPER_COMMITTED] = (__TRANS_START |
+                                        __TRANS_ATTACH |
+                                        __TRANS_JOIN |
+                                        __TRANS_JOIN_NOLOCK |
+                                        __TRANS_JOIN_NOSTART),
        [TRANS_STATE_COMPLETED] = (__TRANS_START |
                                   __TRANS_ATTACH |
                                   __TRANS_JOIN |
@@ -155,6 +160,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root, *tmp;
+       struct btrfs_caching_control *caching_ctl, *next;
 
        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
@@ -162,8 +168,6 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
-               if (is_fstree(root->root_key.objectid))
-                       btrfs_unpin_free_ino(root);
                extent_io_tree_release(&root->dirty_log_pages);
                btrfs_qgroup_clean_swapped_blocks(root);
        }
@@ -180,6 +184,47 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
                spin_lock(&cur_trans->dropped_roots_lock);
        }
        spin_unlock(&cur_trans->dropped_roots_lock);
+
+       /*
+        * We have to update the last_byte_to_unpin under the commit_root_sem,
+        * at the same time we swap out the commit roots.
+        *
+        * This is because we must have a real view of the last spot the caching
+        * kthreads were while caching.  Consider the following views of the
+        * extent tree for a block group:
+        *
+        * commit root
+        * +----+----+----+----+----+----+----+
+        * |\\\\|    |\\\\|\\\\|    |\\\\|\\\\|
+        * +----+----+----+----+----+----+----+
+        * 0    1    2    3    4    5    6    7
+        *
+        * new commit root
+        * +----+----+----+----+----+----+----+
+        * |    |    |    |\\\\|    |    |\\\\|
+        * +----+----+----+----+----+----+----+
+        * 0    1    2    3    4    5    6    7
+        *
+        * If the cache_ctl->progress was at 3, then we are only allowed to
+        * unpin [0,1) and [2,3], because the caching thread has already
+        * processed those extents.  We are not allowed to unpin [5,6), because
+        * the caching thread will re-start its search from 3, and thus find
+        * the hole from [4,6) to add to the free space cache.
+        */
+       spin_lock(&fs_info->block_group_cache_lock);
+       list_for_each_entry_safe(caching_ctl, next,
+                                &fs_info->caching_block_groups, list) {
+               struct btrfs_block_group *cache = caching_ctl->block_group;
+
+               if (btrfs_block_group_done(cache)) {
+                       cache->last_byte_to_unpin = (u64)-1;
+                       list_del_init(&caching_ctl->list);
+                       btrfs_put_caching_control(caching_ctl);
+               } else {
+                       cache->last_byte_to_unpin = caching_ctl->progress;
+               }
+       }
+       spin_unlock(&fs_info->block_group_cache_lock);
        up_write(&fs_info->commit_root_sem);
 }
 
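The rule in the comment above can be captured in a few lines of ordinary C. The following is an illustrative, standalone userspace sketch (hypothetical names, not the kernel implementation): an unpin request is clamped to the caching thread's recorded progress, which is exactly why [0,1) and [2,3) may be unpinned in the diagram while [5,6) must stay pinned.

#include <stdint.h>
#include <stdio.h>

struct block_group {
        uint64_t last_byte_to_unpin;    /* snapshot of caching progress */
};

/* Clamp an unpin request [start, end) to what the cacher has scanned. */
static uint64_t unpin_len(const struct block_group *bg,
                          uint64_t start, uint64_t end)
{
        if (start >= bg->last_byte_to_unpin)
                return 0;                       /* cacher has not reached it yet */
        if (end > bg->last_byte_to_unpin)
                end = bg->last_byte_to_unpin;   /* only the scanned prefix */
        return end - start;
}

int main(void)
{
        struct block_group bg = { .last_byte_to_unpin = 3 };

        /* With progress at 3, [0,1) and [2,3) may be unpinned in full... */
        printf("[0,1): %llu\n", (unsigned long long)unpin_len(&bg, 0, 1));
        printf("[2,3): %llu\n", (unsigned long long)unpin_len(&bg, 2, 3));
        /* ...but [5,6) must stay pinned until the cacher passes it. */
        printf("[5,6): %llu\n", (unsigned long long)unpin_len(&bg, 5, 6));
        return 0;
}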
@@ -336,6 +381,8 @@ loop:
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        INIT_LIST_HEAD(&cur_trans->deleted_bgs);
        spin_lock_init(&cur_trans->dropped_roots_lock);
+       INIT_LIST_HEAD(&cur_trans->releasing_ebs);
+       spin_lock_init(&cur_trans->releasing_ebs_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
                        IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
@@ -787,10 +834,11 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
        return trans;
 }
 
-/* wait for a transaction commit to be fully complete */
-static noinline void wait_for_commit(struct btrfs_transaction *commit)
+/* Wait for a transaction commit to reach at least the given state. */
+static noinline void wait_for_commit(struct btrfs_transaction *commit,
+                                    const enum btrfs_trans_state min_state)
 {
-       wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
+       wait_event(commit->commit_wait, commit->state >= min_state);
 }
 
 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
@@ -845,7 +893,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
                        goto out;  /* nothing committing|committed */
        }
 
-       wait_for_commit(cur_trans);
+       wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
        btrfs_put_transaction(cur_trans);
 out:
        return ret;
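The reworked wait_for_commit() above leans on two facts: btrfs_trans_state is an ordered enum, and a transaction's state only ever moves forward. "Wait until at least state X" is therefore a simple monotonic predicate. A minimal userspace analogue with POSIX threads (hypothetical names; the kernel uses wait_event() rather than condition variables) might look like the sketch below. The matching publisher side appears after the TRANS_STATE_SUPER_COMMITTED hunk further down.

#include <pthread.h>

enum trans_state {
        TRANS_STATE_RUNNING,
        TRANS_STATE_COMMIT_START,
        TRANS_STATE_SUPER_COMMITTED,    /* superblock is on disk */
        TRANS_STATE_COMPLETED,
};

struct transaction {
        enum trans_state state;         /* only ever increases */
        pthread_mutex_t lock;
        pthread_cond_t commit_wait;
};

/* Block until the commit has reached at least min_state. */
static void wait_for_commit(struct transaction *t, enum trans_state min_state)
{
        pthread_mutex_lock(&t->lock);
        while (t->state < min_state)
                pthread_cond_wait(&t->commit_wait, &t->lock);
        pthread_mutex_unlock(&t->lock);
}

With this shape, btrfs_wait_for_commit() simply passes TRANS_STATE_COMPLETED, while an fsync-driven wait can stop earlier at TRANS_STATE_SUPER_COMMITTED.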
@@ -856,24 +904,23 @@ void btrfs_throttle(struct btrfs_fs_info *fs_info)
        wait_current_trans(fs_info);
 }
 
-static int should_end_transaction(struct btrfs_trans_handle *trans)
+static bool should_end_transaction(struct btrfs_trans_handle *trans)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
 
        if (btrfs_check_space_for_delayed_refs(fs_info))
-               return 1;
+               return true;
 
        return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
 }
 
-int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
+bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
 
-       smp_mb();
        if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
-           cur_trans->delayed_refs.flushing)
-               return 1;
+           test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
+               return true;
 
        return should_end_transaction(trans);
 }
@@ -1191,10 +1238,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
        if (ret)
                return ret;
 
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               return ret;
-
        ret = btrfs_run_dev_stats(trans);
        if (ret)
                return ret;
@@ -1209,10 +1252,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
        if (ret)
                return ret;
 
-       /* run_qgroups might have added some more refs */
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               return ret;
 again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                struct btrfs_root *root;
@@ -1227,15 +1266,24 @@ again:
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
-               ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-               if (ret)
-                       return ret;
        }
 
+       /* Now flush any delayed refs generated by updating all of the roots */
+       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+       if (ret)
+               return ret;
+
        while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans);
                if (ret)
                        return ret;
+
+               /*
+                * We're writing the dirty block groups, which could generate
+                * delayed refs, which could generate more dirty block groups,
+                * so we want to keep this flushing in this loop to make sure
+                * everything gets run.
+                */
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
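The comment added in the loop above describes a fixpoint iteration: writing dirty block groups can queue delayed refs, and running delayed refs can dirty more block groups, so both steps must repeat until neither produces new work. A toy model of that shape (plain C with made-up bookkeeping, not btrfs code) terminates once the two work queues stop feeding each other:

#include <stdio.h>

/* Toy counters: dirty_bgs and pending_refs feed each other until drained. */
static int dirty_bgs = 3;
static int pending_refs;

static int write_dirty_block_groups(void)
{
        pending_refs += dirty_bgs;      /* writing block groups queues refs */
        dirty_bgs = 0;
        return 0;
}

static int run_delayed_refs(void)
{
        dirty_bgs = pending_refs / 2;   /* running refs may re-dirty groups */
        pending_refs = 0;
        return 0;
}

static int flush_to_fixpoint(void)
{
        while (dirty_bgs || pending_refs) {
                int ret = write_dirty_block_groups();

                if (ret)
                        return ret;
                /* Flush inside the loop: the write above may have added work. */
                ret = run_delayed_refs();
                if (ret)
                        return ret;
        }
        printf("fixpoint reached\n");
        return 0;
}

int main(void)
{
        return flush_to_fixpoint();
}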
@@ -1280,7 +1328,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
        struct btrfs_root *gang[8];
        int i;
        int ret;
-       int err = 0;
 
        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
@@ -1292,6 +1339,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
                        break;
                for (i = 0; i < ret; i++) {
                        struct btrfs_root *root = gang[i];
+                       int ret2;
+
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
@@ -1300,8 +1349,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
 
-                       btrfs_save_ino_cache(root, trans);
-
                        /* see comments in should_cow_block() */
                        clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
                        smp_mb__after_atomic();
@@ -1313,17 +1360,17 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
                                                root->node);
                        }
 
-                       err = btrfs_update_root(trans, fs_info->tree_root,
+                       ret2 = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
+                       if (ret2)
+                               return ret2;
                        spin_lock(&fs_info->fs_roots_radix_lock);
-                       if (err)
-                               break;
                        btrfs_qgroup_free_meta_all_pertrans(root);
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
-       return err;
+       return 0;
 }
 
 /*
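Note the error-handling change in commit_fs_roots() above: the old code parked the first btrfs_update_root() failure in err and only broke out of the loop, while the new code returns ret2 immediately. The batched "gang" lookup shape it preserves is roughly the following (self-contained toy; the radix-tree lookup and root update are invented stand-ins):

#include <stddef.h>

struct root { int id; };

static struct root roots[3] = { { 1 }, { 2 }, { 3 } };
static size_t next_root;

/* Toy stand-in for the tagged radix-tree gang lookup. */
static size_t gang_lookup(struct root **gang, size_t max)
{
        size_t n = 0;

        while (next_root < 3 && n < max)
                gang[n++] = &roots[next_root++];
        return n;
}

static int update_root(struct root *r)
{
        (void)r;
        return 0;       /* pretend the root item update succeeded */
}

static int commit_all_roots(void)
{
        struct root *gang[8];
        size_t n;

        while ((n = gang_lookup(gang, 8)) != 0) {
                for (size_t i = 0; i < n; i++) {
                        int ret = update_root(gang[i]);

                        /* Fail fast: first error wins, no 'err' accumulator. */
                        if (ret)
                                return ret;
                }
        }
        return 0;
}

int main(void)
{
        return commit_all_roots();
}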
@@ -1396,6 +1443,23 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
        record_root_in_trans(trans, src, 1);
 
        /*
+        * btrfs_qgroup_inherit relies on a consistent view of the usage for the
+        * src root, so we must run the delayed refs here.
+        *
+        * However this isn't particularly fool proof, because there's no
+        * synchronization keeping us from changing the tree after this point
+        * before we do the qgroup_inherit, or even from making changes while
+        * we're doing the qgroup_inherit.  But that's a problem for the future,
+        * for now flush the delayed refs to narrow the race window where the
+        * qgroup counters could end up wrong.
+        */
+       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               goto out;
+       }
+
+       /*
         * We are going to commit transaction, see btrfs_commit_transaction()
         * comment for reason locking tree_log_mutex
         */
@@ -1488,7 +1552,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        ASSERT(pending->root_item);
        new_root_item = pending->root_item;
 
-       pending->error = btrfs_find_free_objectid(tree_root, &objectid);
+       pending->error = btrfs_get_free_objectid(tree_root, &objectid);
        if (pending->error)
                goto no_free_objectid;
 
@@ -1598,8 +1662,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                goto fail;
        }
 
-       btrfs_set_lock_blocking_write(old);
-
        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
@@ -1650,12 +1712,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                goto fail;
        }
 
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret) {
-               btrfs_abort_transaction(trans, ret);
-               goto fail;
-       }
-
        /*
         * Do special qgroup accounting for snapshot, as we do some qgroup
         * snapshot hack to do fast snapshot.
@@ -1681,7 +1737,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime =
                current_time(parent_inode);
-       ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
+       ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
@@ -1703,12 +1759,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                }
        }
 
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret) {
-               btrfs_abort_transaction(trans, ret);
-               goto fail;
-       }
-
 fail:
        pending->error = ret;
 dir_item_existed:
@@ -1761,6 +1811,8 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
        super->root_level = root_item->level;
        if (btrfs_test_opt(fs_info, SPACE_CACHE))
                super->cache_generation = root_item->generation;
+       else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
+               super->cache_generation = 0;
        if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
                super->uuid_tree_generation = root_item->generation;
 }
@@ -1956,10 +2008,8 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
        }
 }
 
-static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
+static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
-       struct btrfs_fs_info *fs_info = trans->fs_info;
-
        /*
        * We use writeback_inodes_sb here because if we used
        * btrfs_start_delalloc_roots we would deadlock with fs freeze.
@@ -1969,50 +2019,15 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
        * from already being in a transaction and our join_transaction doesn't
        * have to re-take the fs freeze lock.
        */
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
                writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
-       } else {
-               struct btrfs_pending_snapshot *pending;
-               struct list_head *head = &trans->transaction->pending_snapshots;
-
-               /*
-                * Flush dellaloc for any root that is going to be snapshotted.
-                * This is done to avoid a corrupted version of files, in the
-                * snapshots, that had both buffered and direct IO writes (even
-                * if they were done sequentially) due to an unordered update of
-                * the inode's size on disk.
-                */
-               list_for_each_entry(pending, head, list) {
-                       int ret;
-
-                       ret = btrfs_start_delalloc_snapshot(pending->root);
-                       if (ret)
-                               return ret;
-               }
-       }
        return 0;
 }
 
-static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
+static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
-       struct btrfs_fs_info *fs_info = trans->fs_info;
-
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
                btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
-       } else {
-               struct btrfs_pending_snapshot *pending;
-               struct list_head *head = &trans->transaction->pending_snapshots;
-
-               /*
-                * Wait for any dellaloc that we started previously for the roots
-                * that are going to be snapshotted. This is to avoid a corrupted
-                * version of files in the snapshots that had both buffered and
-                * direct IO writes (even if they were done sequentially).
-                */
-               list_for_each_entry(pending, head, list)
-                       btrfs_wait_ordered_extents(pending->root,
-                                                  U64_MAX, 0, U64_MAX);
-       }
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
@@ -2042,32 +2057,25 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        btrfs_trans_release_metadata(trans);
        trans->block_rsv = NULL;
 
-       /* make a pass through all the delayed refs we have so far
-        * any runnings procs may add more while we are here
-        */
-       ret = btrfs_run_delayed_refs(trans, 0);
-       if (ret) {
-               btrfs_end_transaction(trans);
-               return ret;
-       }
-
-       cur_trans = trans->transaction;
-
        /*
-        * set the flushing flag so procs in this transaction have to
-        * start sending their work down.
+        * We only want one transaction commit doing the flushing so we do not
+        * waste a bunch of time on lock contention on the extent root node.
        */
-       cur_trans->delayed_refs.flushing = 1;
-       smp_wmb();
+       if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
+                             &cur_trans->delayed_refs.flags)) {
+               /*
+                * Make a pass through all the delayed refs we have so far.
+                * Any running threads may add more while we are here.
+                */
+               ret = btrfs_run_delayed_refs(trans, 0);
+               if (ret) {
+                       btrfs_end_transaction(trans);
+                       return ret;
+               }
+       }
 
        btrfs_create_pending_block_groups(trans);
 
-       ret = btrfs_run_delayed_refs(trans, 0);
-       if (ret) {
-               btrfs_end_transaction(trans);
-               return ret;
-       }
-
        if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
                int run_it = 0;
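The BTRFS_DELAYED_REFS_FLUSHING bit introduced above replaces the plain flushing flag and its smp_wmb()/smp_mb() pairing: test_and_set_bit() is an atomic read-modify-write, so exactly one committer elects itself the flusher and everyone else skips ahead instead of contending on the extent root. A userspace analogue with C11 atomics (hypothetical names):

#include <stdatomic.h>

static atomic_flag refs_flushing = ATOMIC_FLAG_INIT;

static int run_delayed_refs(void)
{
        return 0;       /* stand-in for the real flushing work */
}

static int maybe_flush_delayed_refs(void)
{
        /*
         * atomic_flag_test_and_set() returns the previous value, so only
         * the caller that observed "clear" becomes the flusher: the same
         * contract as !test_and_set_bit() in the kernel.
         */
        if (!atomic_flag_test_and_set(&refs_flushing))
                return run_delayed_refs();
        return 0;
}

int main(void)
{
        return maybe_flush_delayed_refs();
}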
@@ -2101,11 +2109,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 
        spin_lock(&fs_info->trans_lock);
        if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+               enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+
                spin_unlock(&fs_info->trans_lock);
                refcount_inc(&cur_trans->use_count);
 
-               ret = btrfs_end_transaction(trans);
-               wait_for_commit(cur_trans);
+               if (trans->in_fsync)
+                       want_state = TRANS_STATE_SUPER_COMMITTED;
+               ret = btrfs_end_transaction(trans);
+               wait_for_commit(cur_trans, want_state);
 
                if (TRANS_ABORTED(cur_trans))
                        ret = cur_trans->aborted;
@@ -2119,13 +2131,19 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        wake_up(&fs_info->transaction_blocked_wait);
 
        if (cur_trans->list.prev != &fs_info->trans_list) {
+               enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+
+               if (trans->in_fsync)
+                       want_state = TRANS_STATE_SUPER_COMMITTED;
+
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
-               if (prev_trans->state != TRANS_STATE_COMPLETED) {
+               if (prev_trans->state < want_state) {
                        refcount_inc(&prev_trans->use_count);
                        spin_unlock(&fs_info->trans_lock);
 
-                       wait_for_commit(prev_trans);
+                       wait_for_commit(prev_trans, want_state);
+
                        ret = READ_ONCE(prev_trans->aborted);
 
                        btrfs_put_transaction(prev_trans);
@@ -2150,7 +2168,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 
        extwriter_counter_dec(cur_trans, trans->type);
 
-       ret = btrfs_start_delalloc_flush(trans);
+       ret = btrfs_start_delalloc_flush(fs_info);
        if (ret)
                goto cleanup_transaction;
 
@@ -2166,7 +2184,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        if (ret)
                goto cleanup_transaction;
 
-       btrfs_wait_delalloc_flush(trans);
+       btrfs_wait_delalloc_flush(fs_info);
 
        /*
        * Wait for all ordered extents started by a fast fsync that joined this
@@ -2265,14 +2283,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
                btrfs_free_log_root_tree(trans, fs_info);
 
        /*
-        * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
-        * new delayed refs. Must handle them or qgroup can be wrong.
-        */
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               goto unlock_tree_log;
-
-       /*
        * Since fs roots are all committed, we can get a quite accurate
        * new_roots. So let's do quota accounting.
        */
@@ -2293,8 +2303,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
                goto unlock_tree_log;
        }
 
-       btrfs_prepare_extent_commit(fs_info);
-
        cur_trans = fs_info->running_transaction;
 
        btrfs_set_root_node(&fs_info->tree_root->root_item,
@@ -2345,6 +2353,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
                goto scrub_continue;
        }
 
+       /*
+        * At this point, we should have written all the tree blocks allocated
+        * in this transaction. So it's now safe to free the redirtied extent
+        * buffers.
+        */
+       btrfs_free_redirty_list(cur_trans);
+
        ret = write_all_supers(fs_info, 0);
        /*
        * the super is written, we can safely allow the tree-loggers
@@ -2354,6 +2369,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        if (ret)
                goto scrub_continue;
 
+       /*
+        * We needn't acquire the lock here because there is no other task
+        * which can change it.
+        */
+       cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
+       wake_up(&cur_trans->commit_wait);
+
        btrfs_finish_extent_commit(trans);
 
        if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
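TRANS_STATE_SUPER_COMMITTED is published immediately after write_all_supers() returns, so fsync waiters that passed this state to wait_for_commit() in the earlier hunks can return as soon as the superblock is durable, without waiting for the remaining commit cleanup. Continuing the pthread sketch from above (same hypothetical types, repeated here so the block stands alone), the publishing side is the mirror image of the waiter:

#include <pthread.h>

enum trans_state {
        TRANS_STATE_RUNNING,
        TRANS_STATE_COMMIT_START,
        TRANS_STATE_SUPER_COMMITTED,
        TRANS_STATE_COMPLETED,
};

struct transaction {
        enum trans_state state;
        pthread_mutex_t lock;
        pthread_cond_t commit_wait;
};

/* Publisher side: advance the commit state and wake every waiter. */
static void publish_state(struct transaction *t, enum trans_state state)
{
        pthread_mutex_lock(&t->lock);
        t->state = state;               /* monotonic: only moves forward */
        pthread_cond_broadcast(&t->commit_wait);
        pthread_mutex_unlock(&t->lock);
}

In the kernel the store itself needs no lock, as the comment in the hunk says, because only the committing task ever advances the state this far; the wake_up() then handles the ordering against sleeping waiters.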
@@ -2435,10 +2457,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
        btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
 
        btrfs_kill_all_delayed_nodes(root);
-       if (root->ino_cache_inode) {
-               iput(root->ino_cache_inode);
-               root->ino_cache_inode = NULL;
-       }
 
        if (btrfs_header_backref_rev(root->node) <
                        BTRFS_MIXED_BACKREF_REV)
@@ -2459,16 +2477,6 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
        if (!prev)
                return;
 
-       bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
-       if (prev & bit)
-               btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
-       prev &= ~bit;
-
-       bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
-       if (prev & bit)
-               btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
-       prev &= ~bit;
-
        bit = 1 << BTRFS_PENDING_COMMIT;
        if (prev & bit)
                btrfs_debug(fs_info, "pending commit done");