Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--	fs/btrfs/disk-io.c	390
1 file changed, 295 insertions(+), 95 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 514ead6e93b6..87a5addbedf6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -665,9 +665,6 @@ static int validate_subpage_buffer(struct page *page, u64 start, u64 end,
 	if (ret < 0)
 		goto err;
 
-	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
-		btree_readahead_hook(eb, ret);
-
 	set_extent_buffer_uptodate(eb);
 
 	free_extent_buffer(eb);
@@ -715,10 +712,6 @@ int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio,
 	}
 	ret = validate_extent_buffer(eb);
 err:
-	if (reads_done &&
-	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
-		btree_readahead_hook(eb, ret);
-
 	if (ret) {
 		/*
 		 * our io error hook is going to dec the io pages
@@ -1140,11 +1133,16 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 			 u64 objectid)
 {
 	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+
+	memset(&root->root_key, 0, sizeof(root->root_key));
+	memset(&root->root_item, 0, sizeof(root->root_item));
+	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
 	root->fs_info = fs_info;
+	root->root_key.objectid = objectid;
 	root->node = NULL;
 	root->commit_root = NULL;
 	root->state = 0;
-	root->orphan_cleanup_state = 0;
+	RB_CLEAR_NODE(&root->rb_node);
 
 	root->last_trans = 0;
 	root->free_objectid = 0;
@@ -1152,7 +1150,8 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	root->nr_ordered_extents = 0;
 	root->inode_tree = RB_ROOT;
 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
-	root->block_rsv = NULL;
+
+	btrfs_init_root_block_rsv(root);
 
 	INIT_LIST_HEAD(&root->dirty_list);
 	INIT_LIST_HEAD(&root->root_list);
@@ -1190,6 +1189,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	root->log_transid = 0;
 	root->log_transid_committed = -1;
 	root->last_log_commit = 0;
+	root->anon_dev = 0;
 	if (!dummy) {
 		extent_io_tree_init(fs_info, &root->dirty_log_pages,
 				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
@@ -1197,12 +1197,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 				    IO_TREE_LOG_CSUM_RANGE, NULL);
 	}
 
-	memset(&root->root_key, 0, sizeof(root->root_key));
-	memset(&root->root_item, 0, sizeof(root->root_item));
-	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
-	root->root_key.objectid = objectid;
-	root->anon_dev = 0;
-
 	spin_lock_init(&root->root_item_lock);
 	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
 #ifdef CONFIG_BTRFS_DEBUG
@@ -1242,6 +1236,81 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
 }
 #endif
 
+static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
+{
+	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
+	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);
+
+	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
+}
+
+static int global_root_key_cmp(const void *k, const struct rb_node *node)
+{
+	const struct btrfs_key *key = k;
+	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
+
+	return btrfs_comp_cpu_keys(key, &root->root_key);
+}
+
+int btrfs_global_root_insert(struct btrfs_root *root)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct rb_node *tmp;
+
+	write_lock(&fs_info->global_root_lock);
+	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
+	write_unlock(&fs_info->global_root_lock);
+	ASSERT(!tmp);
+
+	return tmp ? -EEXIST : 0;
+}
+
+void btrfs_global_root_delete(struct btrfs_root *root)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	write_lock(&fs_info->global_root_lock);
+	rb_erase(&root->rb_node, &fs_info->global_root_tree);
+	write_unlock(&fs_info->global_root_lock);
+}
+
+struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
+				     struct btrfs_key *key)
+{
+	struct rb_node *node;
+	struct btrfs_root *root = NULL;
+
+	read_lock(&fs_info->global_root_lock);
+	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
+	if (node)
+		root = container_of(node, struct btrfs_root, rb_node);
+	read_unlock(&fs_info->global_root_lock);
+
+	return root;
+}
+
+struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+	struct btrfs_key key = {
+		.objectid = BTRFS_CSUM_TREE_OBJECTID,
+		.type = BTRFS_ROOT_ITEM_KEY,
+		.offset = 0,
+	};
+
+	return btrfs_global_root(fs_info, &key);
+}
+
+struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+	struct btrfs_key key = {
+		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
+		.type = BTRFS_ROOT_ITEM_KEY,
+		.offset = 0,
+	};
+
+	return btrfs_global_root(fs_info, &key);
+}
+
 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 				     u64 objectid)
 {
@@ -1554,25 +1623,33 @@ static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
 static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
 						u64 objectid)
 {
+	struct btrfs_key key = {
+		.objectid = objectid,
+		.type = BTRFS_ROOT_ITEM_KEY,
+		.offset = 0,
+	};
+
 	if (objectid == BTRFS_ROOT_TREE_OBJECTID)
 		return btrfs_grab_root(fs_info->tree_root);
 	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
-		return btrfs_grab_root(fs_info->extent_root);
+		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
 	if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
 		return btrfs_grab_root(fs_info->chunk_root);
 	if (objectid == BTRFS_DEV_TREE_OBJECTID)
 		return btrfs_grab_root(fs_info->dev_root);
 	if (objectid == BTRFS_CSUM_TREE_OBJECTID)
-		return btrfs_grab_root(fs_info->csum_root);
+		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
 	if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
 		return btrfs_grab_root(fs_info->quota_root) ?
 			fs_info->quota_root : ERR_PTR(-ENOENT);
 	if (objectid == BTRFS_UUID_TREE_OBJECTID)
 		return btrfs_grab_root(fs_info->uuid_root) ?
 			fs_info->uuid_root : ERR_PTR(-ENOENT);
-	if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
-		return btrfs_grab_root(fs_info->free_space_root) ?
-			fs_info->free_space_root : ERR_PTR(-ENOENT);
+	if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) {
+		struct btrfs_root *root = btrfs_global_root(fs_info, &key);
+
+		return btrfs_grab_root(root) ? root : ERR_PTR(-ENOENT);
+	}
 	return NULL;
 }
 
@@ -1619,6 +1696,18 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
 #endif
 }
 
+static void free_global_roots(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *root;
+	struct rb_node *node;
+
+	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
+		root = rb_entry(node, struct btrfs_root, rb_node);
+		rb_erase(&root->rb_node, &fs_info->global_root_tree);
+		btrfs_put_root(root);
+	}
+}
+
 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
 {
 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
@@ -1630,14 +1719,12 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
 	btrfs_free_ref_cache(fs_info);
 	kfree(fs_info->balance_ctl);
 	kfree(fs_info->delayed_root);
-	btrfs_put_root(fs_info->extent_root);
+	free_global_roots(fs_info);
 	btrfs_put_root(fs_info->tree_root);
 	btrfs_put_root(fs_info->chunk_root);
 	btrfs_put_root(fs_info->dev_root);
-	btrfs_put_root(fs_info->csum_root);
 	btrfs_put_root(fs_info->quota_root);
 	btrfs_put_root(fs_info->uuid_root);
-	btrfs_put_root(fs_info->free_space_root);
 	btrfs_put_root(fs_info->fs_root);
 	btrfs_put_root(fs_info->data_reloc_root);
 	btrfs_check_leaked_roots(fs_info);
@@ -1732,6 +1819,14 @@ again:
 	}
 	return root;
 fail:
+	/*
+	 * If our caller provided us an anonymous device, then it's his
+	 * responsability to free it in case we fail. So we have to set our
+	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+	 * and once again by our caller.
+	 */
+	if (anon_dev)
+		root->anon_dev = 0;
 	btrfs_put_root(root);
 	return ERR_PTR(ret);
 }
@@ -1927,7 +2022,8 @@ static int transaction_kthread(void *arg)
 		}
 
 		delta = ktime_get_seconds() - cur->start_time;
-		if (cur->state < TRANS_STATE_COMMIT_START &&
+		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
+		    cur->state < TRANS_STATE_COMMIT_START &&
 		    delta < fs_info->commit_interval) {
 			spin_unlock(&fs_info->trans_lock);
 			delay -= msecs_to_jiffies((delta - 1) * 1000);
@@ -1999,6 +2095,8 @@ static void backup_super_roots(struct btrfs_fs_info *info)
 {
 	const int next_backup = info->backup_root_index;
 	struct btrfs_root_backup *root_backup;
+	struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
+	struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
 
 	root_backup = info->super_for_commit->super_roots + next_backup;
 
@@ -2023,11 +2121,11 @@ static void backup_super_roots(struct btrfs_fs_info *info)
 	btrfs_set_backup_chunk_root_level(root_backup,
 			       btrfs_header_level(info->chunk_root->node));
 
-	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
+	btrfs_set_backup_extent_root(root_backup, extent_root->node->start);
 	btrfs_set_backup_extent_root_gen(root_backup,
-			       btrfs_header_generation(info->extent_root->node));
+			       btrfs_header_generation(extent_root->node));
 	btrfs_set_backup_extent_root_level(root_backup,
-			       btrfs_header_level(info->extent_root->node));
+			       btrfs_header_level(extent_root->node));
 
 	/*
 	 * we might commit during log recovery, which happens before we set
@@ -2048,11 +2146,11 @@ static void backup_super_roots(struct btrfs_fs_info *info)
 	btrfs_set_backup_dev_root_level(root_backup,
 				       btrfs_header_level(info->dev_root->node));
 
-	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
+	btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
 	btrfs_set_backup_csum_root_gen(root_backup,
-			       btrfs_header_generation(info->csum_root->node));
+				       btrfs_header_generation(csum_root->node));
 	btrfs_set_backup_csum_root_level(root_backup,
-			       btrfs_header_level(info->csum_root->node));
+					 btrfs_header_level(csum_root->node));
 
 	btrfs_set_backup_total_bytes(root_backup,
 			     btrfs_super_total_bytes(info->super_copy));
@@ -2127,7 +2225,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
 	btrfs_destroy_workqueue(fs_info->delayed_workers);
 	btrfs_destroy_workqueue(fs_info->caching_workers);
-	btrfs_destroy_workqueue(fs_info->readahead_workers);
 	btrfs_destroy_workqueue(fs_info->flush_workers);
 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
 	if (fs_info->discard_ctl.discard_workers)
@@ -2151,21 +2248,29 @@ static void free_root_extent_buffers(struct btrfs_root *root)
 	}
 }
 
+static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *root, *tmp;
+
+	rbtree_postorder_for_each_entry_safe(root, tmp,
+					     &fs_info->global_root_tree,
+					     rb_node)
+		free_root_extent_buffers(root);
+}
+
 /* helper to cleanup tree roots */
 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
 {
 	free_root_extent_buffers(info->tree_root);
+	free_global_root_pointers(info);
 	free_root_extent_buffers(info->dev_root);
-	free_root_extent_buffers(info->extent_root);
-	free_root_extent_buffers(info->csum_root);
 	free_root_extent_buffers(info->quota_root);
 	free_root_extent_buffers(info->uuid_root);
 	free_root_extent_buffers(info->fs_root);
 	free_root_extent_buffers(info->data_reloc_root);
 	if (free_chunk_root)
 		free_root_extent_buffers(info->chunk_root);
-	free_root_extent_buffers(info->free_space_root);
 }
 
 void btrfs_put_root(struct btrfs_root *root)
@@ -2283,8 +2388,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
 	mutex_init(&fs_info->qgroup_rescan_lock);
 }
 
-static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
-		struct btrfs_fs_devices *fs_devices)
+static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
 {
 	u32 max_active = fs_info->thread_pool_size;
 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
@@ -2333,9 +2437,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
 	fs_info->delayed_workers =
 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
 				      max_active, 0);
-	fs_info->readahead_workers =
-		btrfs_alloc_workqueue(fs_info, "readahead", flags,
-				      max_active, 2);
 	fs_info->qgroup_rescan_workers =
 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
 	fs_info->discard_ctl.discard_workers =
@@ -2347,9 +2448,8 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
 	      fs_info->endio_meta_write_workers &&
 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
-	      fs_info->caching_workers && fs_info->readahead_workers &&
-	      fs_info->fixup_workers && fs_info->delayed_workers &&
-	      fs_info->qgroup_rescan_workers &&
+	      fs_info->caching_workers && fs_info->fixup_workers &&
+	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
 	      fs_info->discard_ctl.discard_workers)) {
 		return -ENOMEM;
 	}
@@ -2427,6 +2527,104 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
+static int load_global_roots_objectid(struct btrfs_root *tree_root,
+				      struct btrfs_path *path, u64 objectid,
+				      const char *name)
+{
+	struct btrfs_fs_info *fs_info = tree_root->fs_info;
+	struct btrfs_root *root;
+	int ret;
+	struct btrfs_key key = {
+		.objectid = objectid,
+		.type = BTRFS_ROOT_ITEM_KEY,
+		.offset = 0,
+	};
+	bool found = false;
+
+	/* If we have IGNOREDATACSUMS skip loading these roots. */
+	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
+	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
+		set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
+		return 0;
+	}
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+		if (ret < 0)
+			break;
+
+		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+			ret = btrfs_next_leaf(tree_root, path);
+			if (ret) {
+				if (ret > 0)
+					ret = 0;
+				break;
+			}
+		}
+		ret = 0;
+
+		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+		if (key.objectid != objectid)
+			break;
+		btrfs_release_path(path);
+
+		found = true;
+		root = read_tree_root_path(tree_root, path, &key);
+		if (IS_ERR(root)) {
+			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
+				ret = PTR_ERR(root);
+			break;
+		}
+		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+		ret = btrfs_global_root_insert(root);
+		if (ret) {
+			btrfs_put_root(root);
+			break;
+		}
+		key.offset++;
+	}
+	btrfs_release_path(path);
+
+	if (!found || ret) {
+		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
+			set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
+
+		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
+			ret = ret ? ret : -ENOENT;
+		else
+			ret = 0;
+		btrfs_err(fs_info, "failed to load root %s", name);
+	}
+	return ret;
+}
+
+static int load_global_roots(struct btrfs_root *tree_root)
+{
+	struct btrfs_path *path;
+	int ret = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = load_global_roots_objectid(tree_root, path,
+					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
+	if (ret)
+		goto out;
+	ret = load_global_roots_objectid(tree_root, path,
+					 BTRFS_CSUM_TREE_OBJECTID, "csum");
+	if (ret)
+		goto out;
+	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
+		goto out;
+	ret = load_global_roots_objectid(tree_root, path,
					 BTRFS_FREE_SPACE_TREE_OBJECTID,
+					 "free space");
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *tree_root = fs_info->tree_root;
@@ -2436,7 +2634,11 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 
 	BUG_ON(!fs_info->tree_root);
 
-	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+	ret = load_global_roots(tree_root);
+	if (ret)
+		return ret;
+
+	location.objectid = BTRFS_DEV_TREE_OBJECTID;
 	location.type = BTRFS_ROOT_ITEM_KEY;
 	location.offset = 0;
 
@@ -2448,38 +2650,11 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 		}
 	} else {
 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
-		fs_info->extent_root = root;
-	}
-
-	location.objectid = BTRFS_DEV_TREE_OBJECTID;
-	root = btrfs_read_tree_root(tree_root, &location);
-	if (IS_ERR(root)) {
-		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
-			ret = PTR_ERR(root);
-			goto out;
-		}
-	} else {
-		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
 		fs_info->dev_root = root;
 	}
 	/* Initialize fs_info for all devices in any case */
 	btrfs_init_devices_late(fs_info);
 
-	/* If IGNOREDATACSUMS is set don't bother reading the csum root. */
-	if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
-		location.objectid = BTRFS_CSUM_TREE_OBJECTID;
-		root = btrfs_read_tree_root(tree_root, &location);
-		if (IS_ERR(root)) {
-			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
-				ret = PTR_ERR(root);
-				goto out;
-			}
-		} else {
-			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
-			fs_info->csum_root = root;
-		}
-	}
-
 	/*
 	 * This tree can share blocks with some other fs tree during relocation
 	 * and we need a proper setup by btrfs_get_fs_root
@@ -2517,20 +2692,6 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 		fs_info->uuid_root = root;
 	}
 
-	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
-		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
-		root = btrfs_read_tree_root(tree_root, &location);
-		if (IS_ERR(root)) {
-			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
-				ret = PTR_ERR(root);
-				goto out;
-			}
-		} else {
-			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
-			fs_info->free_space_root = root;
-		}
-	}
-
 	return 0;
 out:
 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
@@ -2850,6 +3011,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
 		/* All successful */
 		fs_info->generation = generation;
 		fs_info->last_trans_committed = generation;
+		fs_info->last_reloc_trans = 0;
 
 		/* Always begin writing backup roots after the one being used */
 		if (backup_index < 0) {
@@ -2885,6 +3047,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	spin_lock_init(&fs_info->zone_active_bgs_lock);
 	spin_lock_init(&fs_info->relocation_bg_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
+	rwlock_init(&fs_info->global_root_lock);
 	mutex_init(&fs_info->unused_bg_unpin_mutex);
 	mutex_init(&fs_info->reclaim_bgs_lock);
 	mutex_init(&fs_info->reloc_mutex);
@@ -2916,9 +3079,9 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 
 	atomic_set(&fs_info->async_delalloc_pages, 0);
 	atomic_set(&fs_info->defrag_running, 0);
-	atomic_set(&fs_info->reada_works_cnt, 0);
 	atomic_set(&fs_info->nr_delayed_iputs, 0);
 	atomic64_set(&fs_info->tree_mod_seq, 0);
+	fs_info->global_root_tree = RB_ROOT;
 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
 	fs_info->metadata_ratio = 0;
 	fs_info->defrag_inodes = RB_ROOT;
@@ -2926,9 +3089,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	fs_info->tree_mod_log = RB_ROOT;
 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
-	/* readahead state */
-	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
-	spin_lock_init(&fs_info->reada_lock);
 	btrfs_init_ref_verify(fs_info);
 
 	fs_info->thread_pool_size = min_t(unsigned long,
@@ -2950,7 +3110,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 
 	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
 			    IO_TREE_FS_EXCLUDED_EXTENTS, NULL);
-	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
 
 	mutex_init(&fs_info->ordered_operations_mutex);
 	mutex_init(&fs_info->tree_log_mutex);
@@ -2985,9 +3144,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	spin_lock_init(&fs_info->swapfile_pins_lock);
 	fs_info->swapfile_pins = RB_ROOT;
 
-	spin_lock_init(&fs_info->send_reloc_lock);
-	fs_info->send_in_progress = 0;
-
 	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
 	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
 }
@@ -3415,7 +3571,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 		fs_info->subpage_info = subpage_info;
 	}
-	ret = btrfs_init_workqueues(fs_info, fs_devices);
+	ret = btrfs_init_workqueues(fs_info);
 	if (ret) {
 		err = ret;
 		goto fail_sb_buffer;
 	}
@@ -3563,6 +3719,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 		goto fail_sysfs;
 	}
 
+	btrfs_free_zone_cache(fs_info);
+
 	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
 	    !btrfs_check_rw_degradable(fs_info, NULL)) {
 		btrfs_warn(fs_info,
@@ -4325,6 +4483,48 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
 	return btrfs_commit_transaction(trans);
 }
 
+static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_transaction *trans;
+	struct btrfs_transaction *tmp;
+	bool found = false;
+
+	if (list_empty(&fs_info->trans_list))
+		return;
+
+	/*
+	 * This function is only called at the very end of close_ctree(),
+	 * thus no other running transaction, no need to take trans_lock.
+	 */
+	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
+	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
+		struct extent_state *cached = NULL;
+		u64 dirty_bytes = 0;
+		u64 cur = 0;
+		u64 found_start;
+		u64 found_end;
+
+		found = true;
+		while (!find_first_extent_bit(&trans->dirty_pages, cur,
+			&found_start, &found_end, EXTENT_DIRTY, &cached)) {
+			dirty_bytes += found_end + 1 - found_start;
+			cur = found_end + 1;
+		}
+		btrfs_warn(fs_info,
+	"transaction %llu (with %llu dirty metadata bytes) is not committed",
+			   trans->transid, dirty_bytes);
+		btrfs_cleanup_one_transaction(trans, fs_info);
+
+		if (trans == fs_info->running_transaction)
+			fs_info->running_transaction = NULL;
+		list_del_init(&trans->list);
+
+		btrfs_put_transaction(trans);
+		trace_btrfs_transaction_commit(fs_info);
+	}
+	ASSERT(!found);
+}
+
 void __cold close_ctree(struct btrfs_fs_info *fs_info)
 {
 	int ret;
@@ -4433,7 +4633,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 	btrfs_stop_all_workers(fs_info);
 
 	/* We shouldn't have any transaction open at this point */
-	ASSERT(list_empty(&fs_info->trans_list));
+	warn_about_uncommitted_trans(fs_info);
 
 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
 	free_root_pointers(fs_info, true);
@@ -4981,7 +5181,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
 		spin_unlock(&fs_info->trans_lock);
 
 		btrfs_put_transaction(t);
-		trace_btrfs_transaction_commit(fs_info->tree_root);
+		trace_btrfs_transaction_commit(fs_info);
 		spin_lock(&fs_info->trans_lock);
 	}
 	spin_unlock(&fs_info->trans_lock);