Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--	fs/btrfs/file.c	130
1 file changed, 70 insertions, 60 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f80254d82f40..eb1bac7c8553 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -477,6 +477,47 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 	}
 }
 
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+					 const u64 start,
+					 const u64 len,
+					 struct extent_state **cached_state)
+{
+	u64 search_start = start;
+	const u64 end = start + len - 1;
+
+	while (search_start < end) {
+		const u64 search_len = end - search_start + 1;
+		struct extent_map *em;
+		u64 em_len;
+		int ret = 0;
+
+		em = btrfs_get_extent(inode, NULL, 0, search_start,
+				      search_len, 0);
+		if (IS_ERR(em))
+			return PTR_ERR(em);
+
+		if (em->block_start != EXTENT_MAP_HOLE)
+			goto next;
+
+		em_len = em->len;
+		if (em->start < search_start)
+			em_len -= search_start - em->start;
+		if (em_len > search_len)
+			em_len = search_len;
+
+		ret = set_extent_bit(&inode->io_tree, search_start,
+				     search_start + em_len - 1,
+				     EXTENT_DELALLOC_NEW,
+				     NULL, cached_state, GFP_NOFS);
+next:
+		search_start = extent_map_end(em);
+		free_extent_map(em);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
 /*
  * after copy_from_user, pages need to be dirtied and we need to make
  * sure holes are created between the current EOF and the start of
@@ -497,14 +538,34 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
 	u64 end_of_last_block;
 	u64 end_pos = pos + write_bytes;
 	loff_t isize = i_size_read(inode);
+	unsigned int extra_bits = 0;
 
 	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
 	num_bytes = round_up(write_bytes + pos - start_pos,
 			     fs_info->sectorsize);
 
 	end_of_last_block = start_pos + num_bytes - 1;
+
+	if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+		if (start_pos >= isize &&
+		    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+			/*
+			 * There can't be any extents following eof in this case
+			 * so just set the delalloc new bit for the range
+			 * directly.
+			 */
+			extra_bits |= EXTENT_DELALLOC_NEW;
+		} else {
+			err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
+							    start_pos,
+							    num_bytes, cached);
+			if (err)
+				return err;
+		}
+	}
+
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-					cached, 0);
+					extra_bits, cached, 0);
 	if (err)
 		return err;
 
@@ -1404,47 +1465,6 @@ fail:
 
 }
 
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
-					 const u64 start,
-					 const u64 len,
-					 struct extent_state **cached_state)
-{
-	u64 search_start = start;
-	const u64 end = start + len - 1;
-
-	while (search_start < end) {
-		const u64 search_len = end - search_start + 1;
-		struct extent_map *em;
-		u64 em_len;
-		int ret = 0;
-
-		em = btrfs_get_extent(inode, NULL, 0, search_start,
-				      search_len, 0);
-		if (IS_ERR(em))
-			return PTR_ERR(em);
-
-		if (em->block_start != EXTENT_MAP_HOLE)
-			goto next;
-
-		em_len = em->len;
-		if (em->start < search_start)
-			em_len -= search_start - em->start;
-		if (em_len > search_len)
-			em_len = search_len;
-
-		ret = set_extent_bit(&inode->io_tree, search_start,
-				     search_start + em_len - 1,
-				     EXTENT_DELALLOC_NEW,
-				     NULL, cached_state, GFP_NOFS);
-next:
-		search_start = extent_map_end(em);
-		free_extent_map(em);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
 /*
  * This function locks the extent and properly waits for data=ordered extents
  * to finish before allowing the pages to be modified if need.
@@ -1473,10 +1493,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 		+ round_up(pos + write_bytes - start_pos,
 			   fs_info->sectorsize) - 1;
 
-	if (start_pos < inode->vfs_inode.i_size ||
-	    (inode->flags & BTRFS_INODE_PREALLOC)) {
+	if (start_pos < inode->vfs_inode.i_size) {
 		struct btrfs_ordered_extent *ordered;
-		unsigned int clear_bits;
 
 		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
 				cached_state);
@@ -1498,19 +1516,10 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 		}
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
-		ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
-						    last_pos - start_pos + 1,
-						    cached_state);
-		clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
-			EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
-		if (ret)
-			clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
-		clear_extent_bit(&inode->io_tree, start_pos,
-				 last_pos, clear_bits,
-				 (clear_bits & EXTENT_LOCKED) ? 1 : 0,
-				 0, cached_state, GFP_NOFS);
-		if (ret)
-			return ret;
+		clear_extent_bit(&inode->io_tree, start_pos, last_pos,
+				 EXTENT_DIRTY | EXTENT_DELALLOC |
+				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+				 0, 0, cached_state, GFP_NOFS);
 		*lockstart = start_pos;
 		*lockend = last_pos;
 		ret = 1;
@@ -2048,6 +2057,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	len = (u64)end - (u64)start + 1;
 	trace_btrfs_sync_file(file, datasync);
 
+	btrfs_init_log_ctx(&ctx, inode);
+
 	/*
 	 * We write the dirty pages in the range and wait until they complete
 	 * out of the ->i_mutex. If so, we can flush the dirty pages by
@@ -2194,8 +2205,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	}
 	trans->sync = true;
 
-	btrfs_init_log_ctx(&ctx, inode);
-
 	ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
 	if (ret < 0) {
 		/* Fallthrough and commit/free transaction. */
@@ -2253,6 +2262,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		ret = btrfs_end_transaction(trans);
 	}
 out:
+	ASSERT(list_empty(&ctx.list));
 	err = file_check_and_advance_wb_err(file);
 	if (!ret)
 		ret = err;