diff options
Diffstat (limited to 'fs/btrfs/inode.c')
| -rw-r--r-- | fs/btrfs/inode.c | 32 | 
1 file changed, 23 insertions, 9 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 81737eff92f3..d50448bf8eed 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3195,6 +3195,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)  						ordered_extent->file_offset,  						ordered_extent->file_offset +  						logical_len); +		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, +						  ordered_extent->disk_num_bytes);  	} else {  		BUG_ON(root == fs_info->tree_root);  		ret = insert_ordered_extent_file_extent(trans, ordered_extent); @@ -3576,7 +3578,6 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)  	u64 last_objectid = 0;  	int ret = 0, nr_unlink = 0; -	/* Bail out if the cleanup is already running. */  	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))  		return 0; @@ -3659,17 +3660,17 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)  			 *  			 * btrfs_find_orphan_roots() ran before us, which has  			 * found all deleted roots and loaded them into -			 * fs_info->fs_roots. So here we can find if an +			 * fs_info->fs_roots_radix. So here we can find if an  			 * orphan item corresponds to a deleted root by looking -			 * up the root from that xarray. +			 * up the root from that radix tree.  			 */ -			spin_lock(&fs_info->fs_roots_lock); -			dead_root = xa_load(&fs_info->fs_roots, -					    (unsigned long)found_key.objectid); +			spin_lock(&fs_info->fs_roots_radix_lock); +			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, +							 (unsigned long)found_key.objectid);  			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)  				is_dead_root = 1; -			spin_unlock(&fs_info->fs_roots_lock); +			spin_unlock(&fs_info->fs_roots_radix_lock);  			if (is_dead_root) {  				/* prevent this orphan from being found again */ @@ -3909,7 +3910,7 @@ cache_index:  	 * cache.  	 *  	 * This is required for both inode re-read from disk and delayed inode -	 * in the delayed_nodes xarray. +	 * in delayed_nodes_tree.  	 
*/  	if (BTRFS_I(inode)->last_trans == fs_info->generation)  		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, @@ -7679,7 +7680,19 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,  	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||  	    em->block_start == EXTENT_MAP_INLINE) {  		free_extent_map(em); -		ret = -ENOTBLK; +		/* +		 * If we are in a NOWAIT context, return -EAGAIN in order to +		 * fallback to buffered IO. This is not only because we can +		 * block with buffered IO (no support for NOWAIT semantics at +		 * the moment) but also to avoid returning short reads to user +		 * space - this happens if we were able to read some data from +		 * previous non-compressed extents and then when we fallback to +		 * buffered IO, at btrfs_file_read_iter() by calling +		 * filemap_read(), we fail to fault in pages for the read buffer, +		 * in which case filemap_read() returns a short read (the number +		 * of bytes previously read is > 0, so it does not return -EFAULT). +		 */ +		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;  		goto unlock_err;  	} @@ -9897,6 +9910,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(  	extent_info.file_offset = file_offset;  	extent_info.extent_buf = (char *)&stack_fi;  	extent_info.is_new_extent = true; +	extent_info.update_times = true;  	extent_info.qgroup_reserved = qgroup_released;  	extent_info.insertions = 0;  |