Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--  fs/btrfs/file.c  160
1 file changed, 83 insertions, 77 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 15a09cb156ce..9404121fd5f7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -132,7 +132,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
static inline int __need_auto_defrag(struct btrfs_root *root)
{
- if (!btrfs_test_opt(root, AUTO_DEFRAG))
+ if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
return 0;
if (btrfs_fs_closing(root->fs_info))
@@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
size_t copied = 0;
size_t total_copied = 0;
int pg = 0;
- int offset = pos & (PAGE_CACHE_SIZE - 1);
+ int offset = pos & (PAGE_SIZE - 1);
while (write_bytes > 0) {
size_t count = min_t(size_t,
- PAGE_CACHE_SIZE - offset, write_bytes);
+ PAGE_SIZE - offset, write_bytes);
struct page *page = prepared_pages[pg];
/*
* Copy data from userspace to the current page
@@ -448,7 +448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
if (unlikely(copied == 0))
break;
- if (copied < PAGE_CACHE_SIZE - offset) {
+ if (copied < PAGE_SIZE - offset) {
offset += copied;
} else {
pg++;
@@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
*/
ClearPageChecked(pages[i]);
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
}
@@ -950,7 +950,7 @@ delete_extent_item:
ret = btrfs_del_items(trans, root, path, del_slot,
del_nr);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
@@ -974,7 +974,7 @@ delete_extent_item:
path->slots[0] = del_slot;
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
leaf = path->nodes[0];
@@ -1190,7 +1190,7 @@ again:
goto again;
}
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1278,7 +1278,7 @@ again:
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode,
{
int ret = 0;
- if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+ if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
if (ret)
@@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
size_t write_bytes, bool force_uptodate)
{
int i;
- unsigned long index = pos >> PAGE_CACHE_SHIFT;
+ unsigned long index = pos >> PAGE_SHIFT;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
int err = 0;
int faili;
@@ -1345,7 +1345,7 @@ again:
err = prepare_uptodate_page(inode, pages[i],
pos + write_bytes, false);
if (err) {
- page_cache_release(pages[i]);
+ put_page(pages[i]);
if (err == -EAGAIN) {
err = 0;
goto again;
@@ -1360,7 +1360,7 @@ again:
fail:
while (faili >= 0) {
unlock_page(pages[faili]);
- page_cache_release(pages[faili]);
+ put_page(pages[faili]);
faili--;
}
return err;
@@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
cached_state, GFP_NOFS);
for (i = 0; i < num_pages; i++) {
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
@@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
bool force_page_uptodate = false;
bool need_unlock;
- nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
- PAGE_CACHE_SIZE / (sizeof(struct page *)));
+ nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
+ PAGE_SIZE / (sizeof(struct page *)));
nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
nrptrs = max(nrptrs, 8);
pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
@@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
return -ENOMEM;
while (iov_iter_count(i) > 0) {
- size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+ size_t offset = pos & (PAGE_SIZE - 1);
size_t sector_offset;
size_t write_bytes = min(iov_iter_count(i),
- nrptrs * (size_t)PAGE_CACHE_SIZE -
+ nrptrs * (size_t)PAGE_SIZE -
offset);
size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
size_t reserve_bytes;
size_t dirty_pages;
size_t copied;
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
reserve_bytes = round_up(write_bytes + sector_offset,
root->sectorsize);
- if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) &&
- check_can_nocow(inode, pos, &write_bytes) > 0) {
- /*
- * For nodata cow case, no need to reserve
- * data space.
- */
- only_release_metadata = true;
- /*
- * our prealloc extent may be smaller than
- * write_bytes, so scale down.
- */
- num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_CACHE_SIZE);
- reserve_bytes = round_up(write_bytes + sector_offset,
- root->sectorsize);
- goto reserve_metadata;
- }
-
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
- if (ret < 0)
- break;
+ if (ret < 0) {
+ if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_PREALLOC)) &&
+ check_can_nocow(inode, pos, &write_bytes) > 0) {
+ /*
+ * For nodata cow case, no need to reserve
+ * data space.
+ */
+ only_release_metadata = true;
+ /*
+ * our prealloc extent may be smaller than
+ * write_bytes, so scale down.
+ */
+ num_pages = DIV_ROUND_UP(write_bytes + offset,
+ PAGE_SIZE);
+ reserve_bytes = round_up(write_bytes +
+ sector_offset,
+ root->sectorsize);
+ } else {
+ break;
+ }
+ }
-reserve_metadata:
ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
if (ret) {
if (!only_release_metadata)
@@ -1596,6 +1596,13 @@ again:
copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
+ num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+ reserve_bytes);
+ dirty_sectors = round_up(copied + sector_offset,
+ root->sectorsize);
+ dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+ dirty_sectors);
+
/*
* if we have trouble faulting in the pages, fall
* back to one page at a time
@@ -1605,30 +1612,28 @@ again:
if (copied == 0) {
force_page_uptodate = true;
+ dirty_sectors = 0;
dirty_pages = 0;
} else {
force_page_uptodate = false;
dirty_pages = DIV_ROUND_UP(copied + offset,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
}
/*
* If we had a short copy we need to release the excess delaloc
* bytes we reserved. We need to increment outstanding_extents
- * because btrfs_delalloc_release_space will decrement it, but
+ * because btrfs_delalloc_release_space and
+ * btrfs_delalloc_release_metadata will decrement it, but
* we still have an outstanding extent for the chunk we actually
* managed to copy.
*/
- num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- reserve_bytes);
- dirty_sectors = round_up(copied + sector_offset,
- root->sectorsize);
- dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- dirty_sectors);
-
if (num_sectors > dirty_sectors) {
- release_bytes = (write_bytes - copied)
- & ~((u64)root->sectorsize - 1);
+
+ /* release everything except the sectors we dirtied */
+ release_bytes -= dirty_sectors <<
+ root->fs_info->sb->s_blocksize_bits;
+
if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
@@ -1641,7 +1646,7 @@ again:
u64 __pos;
__pos = round_down(pos, root->sectorsize) +
- (dirty_pages << PAGE_CACHE_SHIFT);
+ (dirty_pages << PAGE_SHIFT);
btrfs_delalloc_release_space(inode, __pos,
release_bytes);
}
@@ -1682,7 +1687,7 @@ again:
cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping);
- if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
+ if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
btrfs_btree_balance_dirty(root);
pos += copied;
@@ -1696,25 +1701,26 @@ again:
btrfs_end_write_no_snapshoting(root);
btrfs_delalloc_release_metadata(inode, release_bytes);
} else {
- btrfs_delalloc_release_space(inode, pos, release_bytes);
+ btrfs_delalloc_release_space(inode,
+ round_down(pos, root->sectorsize),
+ release_bytes);
}
}
return num_written ? num_written : ret;
}
-static ssize_t __btrfs_direct_write(struct kiocb *iocb,
- struct iov_iter *from,
- loff_t pos)
+static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ loff_t pos = iocb->ki_pos;
ssize_t written;
ssize_t written_buffered;
loff_t endbyte;
int err;
- written = generic_file_direct_write(iocb, from, pos);
+ written = generic_file_direct_write(iocb, from);
if (written < 0 || !iov_iter_count(from))
return written;
@@ -1738,8 +1744,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
goto out;
written += written_buffered;
iocb->ki_pos = pos + written_buffered;
- invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
+ invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
out:
return written ? written : err;
}
@@ -1832,7 +1838,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
atomic_inc(&BTRFS_I(inode)->sync_writers);
if (iocb->ki_flags & IOCB_DIRECT) {
- num_written = __btrfs_direct_write(iocb, from, pos);
+ num_written = __btrfs_direct_write(iocb, from);
} else {
num_written = __btrfs_buffered_write(file, from, pos);
if (num_written > 0)
@@ -1852,11 +1858,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->last_sub_trans = root->log_transid;
spin_unlock(&BTRFS_I(inode)->lock);
- if (num_written > 0) {
- err = generic_write_sync(file, pos, num_written);
- if (err < 0)
- num_written = err;
- }
+ if (num_written > 0)
+ num_written = generic_write_sync(iocb, num_written);
if (sync)
atomic_dec(&BTRFS_I(inode)->sync_writers);
@@ -1905,7 +1908,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
@@ -2024,7 +2027,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
BTRFS_I(inode)->last_trans
<= root->fs_info->last_trans_committed)) {
/*
- * We'v had everything committed since the last time we were
+ * We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
* reason, it's no longer relevant.
*/
@@ -2372,7 +2375,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
/* Check the aligned pages after the first unaligned page,
* if offset != orig_start, which means the first unaligned page
- * including serveral following pages are already in holes,
+ * including several following pages are already in holes,
* the extra check can be skipped */
if (offset == orig_start) {
/* after truncate page, check hole again */
@@ -2474,7 +2477,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
- min_size);
+ min_size, 0);
BUG_ON(ret);
trans->block_rsv = rsv;
@@ -2517,7 +2520,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
- rsv, min_size);
+ rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
@@ -2682,9 +2685,12 @@ static long btrfs_fallocate(struct file *file, int mode,
return ret;
inode_lock(inode);
- ret = inode_newsize_ok(inode, alloc_end);
- if (ret)
- goto out;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
+ ret = inode_newsize_ok(inode, offset + len);
+ if (ret)
+ goto out;
+ }
/*
* TODO: Move these two operations after we have checked
@@ -2953,7 +2959,7 @@ const struct file_operations btrfs_file_operations = {
.fallocate = btrfs_fallocate,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = btrfs_ioctl,
+ .compat_ioctl = btrfs_compat_ioctl,
#endif
.copy_file_range = btrfs_copy_file_range,
.clone_file_range = btrfs_clone_file_range,
@@ -2969,7 +2975,7 @@ int btrfs_auto_defrag_init(void)
{
btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
sizeof(struct inode_defrag), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_inode_defrag_cachep)
return -ENOMEM;