Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--	fs/btrfs/file.c	81
1 file changed, 45 insertions, 36 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ea9f10bb089c..2234e88cf674 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		reserve_bytes = round_up(write_bytes + sector_offset,
 				root->sectorsize);
 
-		if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
-					      BTRFS_INODE_PREALLOC)) &&
-		    check_can_nocow(inode, pos, &write_bytes) > 0) {
-			/*
-			 * For nodata cow case, no need to reserve
-			 * data space.
-			 */
-			only_release_metadata = true;
-			/*
-			 * our prealloc extent may be smaller than
-			 * write_bytes, so scale down.
-			 */
-			num_pages = DIV_ROUND_UP(write_bytes + offset,
-						 PAGE_SIZE);
-			reserve_bytes = round_up(write_bytes + sector_offset,
-					root->sectorsize);
-			goto reserve_metadata;
-		}
-
 		ret = btrfs_check_data_free_space(inode, pos, write_bytes);
-		if (ret < 0)
-			break;
+		if (ret < 0) {
+			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+						      BTRFS_INODE_PREALLOC)) &&
+			    check_can_nocow(inode, pos, &write_bytes) > 0) {
+				/*
+				 * For nodata cow case, no need to reserve
+				 * data space.
+				 */
+				only_release_metadata = true;
+				/*
+				 * our prealloc extent may be smaller than
+				 * write_bytes, so scale down.
+				 */
+				num_pages = DIV_ROUND_UP(write_bytes + offset,
+							 PAGE_SIZE);
+				reserve_bytes = round_up(write_bytes +
+							 sector_offset,
+							 root->sectorsize);
+			} else {
+				break;
+			}
+		}
 
-reserve_metadata:
 		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
 		if (ret) {
 			if (!only_release_metadata)
@@ -1596,6 +1596,13 @@ again:
 
 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
 
+		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						reserve_bytes);
+		dirty_sectors = round_up(copied + sector_offset,
+					root->sectorsize);
+		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						dirty_sectors);
+
 		/*
 		 * if we have trouble faulting in the pages, fall
 		 * back to one page at a time
@@ -1605,6 +1612,7 @@ again:
 
 		if (copied == 0) {
 			force_page_uptodate = true;
+			dirty_sectors = 0;
 			dirty_pages = 0;
 		} else {
 			force_page_uptodate = false;
@@ -1615,20 +1623,19 @@ again:
 		/*
 		 * If we had a short copy we need to release the excess delaloc
 		 * bytes we reserved.  We need to increment outstanding_extents
-		 * because btrfs_delalloc_release_space will decrement it, but
+		 * because btrfs_delalloc_release_space and
+		 * btrfs_delalloc_release_metadata will decrement it, but
 		 * we still have an outstanding extent for the chunk we actually
 		 * managed to copy.
 		 */
-		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-						reserve_bytes);
-		dirty_sectors = round_up(copied + sector_offset,
-					root->sectorsize);
-		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-						dirty_sectors);
-
 		if (num_sectors > dirty_sectors) {
-			release_bytes = (write_bytes - copied)
-				& ~((u64)root->sectorsize - 1);
+			/*
+			 * we round down because we don't want to count
+			 * any partial blocks actually sent through the
+			 * IO machines
+			 */
+			release_bytes = round_down(release_bytes - copied,
+				      root->sectorsize);
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;
@@ -1696,7 +1703,9 @@ again:
 			btrfs_end_write_no_snapshoting(root);
 			btrfs_delalloc_release_metadata(inode, release_bytes);
 		} else {
-			btrfs_delalloc_release_space(inode, pos, release_bytes);
+			btrfs_delalloc_release_space(inode,
+						round_down(pos, root->sectorsize),
+						release_bytes);
 		}
 	}
 
@@ -2020,7 +2029,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	     BTRFS_I(inode)->last_trans
 	     <= root->fs_info->last_trans_committed)) {
 		/*
-		 * We'v had everything committed since the last time we were
+		 * We've had everything committed since the last time we were
 		 * modified so clear this flag in case it was set for whatever
 		 * reason, it's no longer relevant.
 		 */
@@ -2368,7 +2377,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 	/* Check the aligned pages after the first unaligned page,
 	 * if offset != orig_start, which means the first unaligned page
-	 * including serveral following pages are already in holes,
+	 * including several following pages are already in holes,
 	 * the extra check can be skipped */
 	if (offset == orig_start) {
 		/* after truncate page, check hole again */
@@ -2952,7 +2961,7 @@ const struct file_operations btrfs_file_operations = {
 	.fallocate	= btrfs_fallocate,
 	.unlocked_ioctl	= btrfs_ioctl,
 #ifdef CONFIG_COMPAT
-	.compat_ioctl	= btrfs_ioctl,
+	.compat_ioctl	= btrfs_compat_ioctl,
 #endif
 	.copy_file_range = btrfs_copy_file_range,
 	.clone_file_range = btrfs_clone_file_range,
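The core of the change in __btrfs_buffered_write() is a reordering: instead of running the NOCOW/PREALLOC check on every write, the data-space reservation is attempted first and the nocow path is only consulted when that reservation fails. Below is a minimal stand-alone C sketch of that control flow plus the short-copy rounding, assuming a 4096-byte sectorsize; space_ctx, reserve_data_space() and write_begin() are hypothetical stand-ins for illustration, not the kernel APIs.

/*
 * Stand-alone sketch of the two accounting changes above. The types and
 * helpers here are hypothetical stand-ins, not real btrfs interfaces.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE 4096ULL	/* assumed stand-in for root->sectorsize */

struct space_ctx {
	bool nocow_capable;	/* inode has NODATACOW or PREALLOC set */
	uint64_t free_data;	/* free data space, in bytes */
};

/* Models a data-space reservation that fails when space is short. */
static int reserve_data_space(struct space_ctx *ctx, uint64_t bytes)
{
	if (ctx->free_data < bytes)
		return -1;	/* -ENOSPC in the kernel */
	ctx->free_data -= bytes;
	return 0;
}

/*
 * New control flow: try the data reservation unconditionally, and only
 * fall back to the more expensive nocow check when it fails. The old
 * code ran the nocow check up front on every buffered write.
 */
static int write_begin(struct space_ctx *ctx, uint64_t bytes, bool *meta_only)
{
	*meta_only = false;
	if (reserve_data_space(ctx, bytes) < 0) {
		if (!ctx->nocow_capable)
			return -1;	/* no fallback: fail the write */
		*meta_only = true;	/* nocow: reserve metadata only */
	}
	return 0;	/* metadata reservation would follow here */
}

static uint64_t round_down_bytes(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);
}

int main(void)
{
	struct space_ctx ctx = { .nocow_capable = true, .free_data = 0 };
	bool meta_only;

	if (write_begin(&ctx, 4 * SECTORSIZE, &meta_only) == 0)
		printf("write proceeds, metadata-only=%d\n", meta_only);

	/*
	 * Short-copy release accounting: 4 sectors reserved, but the
	 * copy faulted after 5000 bytes. Rounding down releases only
	 * the two fully untouched sectors; the partially dirtied one
	 * stays reserved because it still goes through the IO path.
	 */
	uint64_t reserved = 4 * SECTORSIZE, copied = 5000;
	printf("release %llu of %llu reserved bytes\n",
	       (unsigned long long)round_down_bytes(reserved - copied,
						    SECTORSIZE),
	       (unsigned long long)reserved);
	return 0;
}

The same sector-granularity concern shows up in the final release path of the diff, where pos is rounded down to a sector boundary before btrfs_delalloc_release_space() is called.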