Diffstat (limited to 'fs')
43 files changed, 332 insertions, 246 deletions
diff --git a/fs/attr.c b/fs/attr.c
index 473d21b3a86d..66899b6e9bd8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -35,7 +35,7 @@ static bool chown_ok(struct user_namespace *mnt_userns,
 		     kuid_t uid)
 {
 	kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);
-	if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, kuid))
+	if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, inode->i_uid))
 		return true;
 	if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
 		return true;
@@ -62,7 +62,7 @@ static bool chgrp_ok(struct user_namespace *mnt_userns,
 {
 	kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
 	if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) &&
-	    (in_group_p(gid) || gid_eq(gid, kgid)))
+	    (in_group_p(gid) || gid_eq(gid, inode->i_gid)))
 		return true;
 	if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
 		return true;
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 309516e6a968..43c89952b7d2 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
 					  ordered_list);
 		if (!test_bit(WORK_DONE_BIT, &work->flags))
 			break;
+		/*
+		 * Orders all subsequent loads after reading WORK_DONE_BIT.
+		 * Paired with the smp_mb__before_atomic in btrfs_work_helper,
+		 * this guarantees that the ordered function will see all
+		 * updates from the ordinary work function.
+		 */
+		smp_rmb();
 
 		/*
 		 * we are going to call the ordered done function, but
@@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work)
 	thresh_exec_hook(wq);
 	work->func(work);
 	if (need_order) {
+		/*
+		 * Ensures all memory accesses done in the work function are
+		 * ordered before setting the WORK_DONE_BIT, ensuring the
+		 * thread which is going to execute the ordered work sees them.
+		 * Pairs with the smp_rmb in run_ordered_work.
+		 */
+		smp_mb__before_atomic();
 		set_bit(WORK_DONE_BIT, &work->flags);
 		run_ordered_work(wq, work);
 	} else {
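The async-thread.c change above pairs smp_mb__before_atomic() on the publishing side with smp_rmb() on the consuming side, so every store the ordinary work function makes becomes visible before WORK_DONE_BIT can be observed set. A rough userspace C11 analogue of this pattern, offered as a sketch only (the fence-based mapping and all names here are illustrative, not the kernel implementation):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static int payload;          /* plain data written by the "work function" */
    static atomic_int done_bit;  /* stands in for WORK_DONE_BIT */

    static void *worker(void *arg)
    {
        (void)arg;
        payload = 42;                              /* ordinary work */
        atomic_thread_fence(memory_order_release); /* ~ smp_mb__before_atomic() */
        atomic_store_explicit(&done_bit, 1, memory_order_relaxed);
        return NULL;
    }

    static void *orderer(void *arg)
    {
        (void)arg;
        while (!atomic_load_explicit(&done_bit, memory_order_relaxed))
            ;                                      /* ~ test_bit(WORK_DONE_BIT) */
        atomic_thread_fence(memory_order_acquire); /* ~ smp_rmb() */
        printf("%d\n", payload);                   /* guaranteed to print 42 */
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, orderer, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }

Without the paired fences, the consumer could observe the flag set while still reading a stale payload.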
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 59c3be8c1f4c..514ead6e93b6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3978,11 +3978,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  */
 static void write_dev_flush(struct btrfs_device *device)
 {
-	struct request_queue *q = bdev_get_queue(device->bdev);
 	struct bio *bio = device->flush_bio;
 
+#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+	/*
+	 * When a disk has write caching disabled, we skip submission of a bio
+	 * with flush and sync requests before writing the superblock, since
+	 * it's not needed. However when the integrity checker is enabled, this
+	 * results in reports that there are metadata blocks referred to by a
+	 * superblock that were not properly flushed. So don't skip the bio
+	 * submission only when the integrity checker is enabled for the sake
+	 * of simplicity, since this is a debug tool and not meant for use in
+	 * non-debug builds.
+	 */
+	struct request_queue *q = bdev_get_queue(device->bdev);
+
 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		return;
+#endif
 
 	bio_reset(bio);
 	bio->bi_end_io = btrfs_end_empty_barrier;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fb8cc9642ac4..92138ac2a4e2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3985,6 +3985,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 	bool need_unlock; /* for mut. excl. ops lock */
 	int ret;
 
+	if (!arg)
+		btrfs_warn(fs_info,
+	"IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18");
+
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 65cb0766e62d..0fb90cbe7669 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf)
 static int copy_compressed_data_to_page(char *compressed_data,
 					size_t compressed_size,
 					struct page **out_pages,
+					unsigned long max_nr_page,
 					u32 *cur_out,
 					const u32 sectorsize)
 {
@@ -133,6 +134,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
 	struct page *cur_page;
 	char *kaddr;
 
+	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+		return -E2BIG;
+
 	/*
 	 * We never allow a segment header crossing sector boundary, previous
 	 * run should ensure we have enough space left inside the sector.
@@ -161,6 +165,10 @@ static int copy_compressed_data_to_page(char *compressed_data,
 			       orig_out + compressed_size - *cur_out);
 
 		kunmap(cur_page);
+
+		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+			return -E2BIG;
+
 		cur_page = out_pages[*cur_out / PAGE_SIZE];
 		/* Allocate a new page */
 		if (!cur_page) {
@@ -203,6 +211,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
 	struct page *page_in = NULL;
 	char *sizes_ptr;
+	const unsigned long max_nr_page = *out_pages;
 	int ret = 0;
 	/* Points to the file offset of input data */
 	u64 cur_in = start;
@@ -210,6 +219,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	u32 cur_out = 0;
 	u32 len = *total_out;
 
+	ASSERT(max_nr_page > 0);
 	*out_pages = 0;
 	*total_out = 0;
 	*total_in = 0;
@@ -248,7 +258,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 		}
 
 		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
-						   pages, &cur_out, sectorsize);
+						   pages, max_nr_page,
+						   &cur_out, sectorsize);
 		if (ret < 0)
 			goto out;
 
@@ -279,6 +290,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	*total_out = cur_out;
 	*total_in = cur_in - start;
 out:
+	if (page_in)
+		put_page(page_in);
 	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
 	return ret;
 }
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index cf82ea6f54fb..8f6ceea33969 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -73,8 +73,8 @@ struct scrub_page {
 	u64			physical_for_dev_replace;
 	atomic_t		refs;
 	u8			mirror_num;
-	int			have_csum:1;
-	int			io_error:1;
+	unsigned int		have_csum:1;
+	unsigned int		io_error:1;
 	u8			csum[BTRFS_CSUM_SIZE];
 
 	struct scrub_recover	*recover;
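The scrub.c hunk above is a signedness fix: with compilers where a plain int bitfield is signed (gcc and clang treat it that way), a 1-bit field can only hold 0 and -1, so storing 1 yields -1 and comparisons against 1 quietly fail. A minimal standalone illustration of the pitfall:

    #include <stdio.h>

    struct flags {
        int          s:1;  /* signed 1-bit field: representable values are 0 and -1 */
        unsigned int u:1;  /* unsigned 1-bit field: representable values are 0 and 1 */
    };

    int main(void)
    {
        struct flags f = { .s = 1, .u = 1 };

        printf("f.s = %d, f.u = %u\n", f.s, f.u);          /* f.s = -1, f.u = 1 */
        printf("f.s == 1? %s\n", f.s == 1 ? "yes" : "no"); /* "no" */
        return 0;
    }

The exact behavior of assigning 1 to a signed 1-bit field is implementation-defined, which is precisely why unsigned bitfields are the safer choice for flags.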
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 61ac57bcbf1a..0997e3cd74e9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7559,6 +7559,19 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 	fs_info->fs_devices->total_rw_bytes = 0;
 
 	/*
+	 * Lockdep complains about possible circular locking dependency between
+	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
+	 * used for freeze protection of a fs (struct super_block.s_writers),
+	 * which we take when starting a transaction, and extent buffers of the
+	 * chunk tree if we call read_one_dev() while holding a lock on an
+	 * extent buffer of the chunk tree. Since we are mounting the filesystem
+	 * and at this point there can't be any concurrent task modifying the
+	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
+	 */
+	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
+	path->skip_locking = 1;
+
+	/*
 	 * Read all device items, and then all the chunk items. All
 	 * device items are found before any chunk item (their object id
 	 * is smaller than the lowest possible object id for a chunk
@@ -7583,10 +7596,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 				goto error;
 			break;
 		}
-		/*
-		 * The nodes on level 1 are not locked but we don't need to do
-		 * that during mount time as nothing else can access the tree
-		 */
 		node = path->nodes[1];
 		if (node) {
 			if (last_ra_node != node->start) {
@@ -7614,7 +7623,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 			 * requirement for chunk allocation, see the comment on
 			 * top of btrfs_chunk_alloc() for details.
 			 */
-			ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
 			ret = read_one_chunk(&found_key, leaf, chunk);
 			if (ret)
diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
index 12bde7bfda86..23a1ed2fb769 100644
--- a/fs/cifs/cifs_swn.c
+++ b/fs/cifs/cifs_swn.c
@@ -393,26 +393,14 @@ static void cifs_put_swn_reg(struct cifs_swn_reg *swnreg)
 
 static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const char *name, int state)
 {
-	int i;
-
 	switch (state) {
 	case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
 		cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
-		for (i = 0; i < swnreg->tcon->ses->chan_count; i++) {
-			spin_lock(&GlobalMid_Lock);
-			if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-				swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-			spin_unlock(&GlobalMid_Lock);
-		}
+		cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
 		break;
 	case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
 		cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
-		for (i = 0; i < swnreg->tcon->ses->chan_count; i++) {
-			spin_lock(&GlobalMid_Lock);
-			if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-				swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-			spin_unlock(&GlobalMid_Lock);
-		}
+		cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
 		break;
 	case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
 		cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index b50da1901ebd..9e5d9e192ef0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.33"
+#define CIFS_VERSION   "2.34"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f3073a62ce57..4f5a3e857df4 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -599,6 +599,7 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
 bool is_server_using_iface(struct TCP_Server_Info *server,
 			   struct cifs_server_iface *iface);
 bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
+void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
 
 void extract_unc_hostname(const char *unc, const char **h, size_t *len);
 int copy_path_name(char *dst, const char *src);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 82577a7a5bb1..6b705026da1a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1271,10 +1271,8 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
 {
 	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
 
-	if (ctx->nosharesock) {
-		server->nosharesock = true;
+	if (ctx->nosharesock)
 		return 0;
-	}
 
 	/* this server does not share socket */
 	if (server->nosharesock)
@@ -1438,6 +1436,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
 		goto out_err;
 	}
 
+	if (ctx->nosharesock)
+		tcp_ses->nosharesock = true;
+
 	tcp_ses->ops = ctx->ops;
 	tcp_ses->vals = ctx->vals;
 	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
@@ -1452,8 +1453,10 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
 	tcp_ses->max_in_flight = 0;
 	tcp_ses->credits = 1;
 	if (primary_server) {
+		spin_lock(&cifs_tcp_ses_lock);
 		++primary_server->srv_count;
 		tcp_ses->primary_server = primary_server;
+		spin_unlock(&cifs_tcp_ses_lock);
 	}
 	init_waitqueue_head(&tcp_ses->response_q);
 	init_waitqueue_head(&tcp_ses->request_q);
@@ -4111,18 +4114,6 @@ cifs_prune_tlinks(struct work_struct *work)
 }
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
-static void mark_tcon_tcp_ses_for_reconnect(struct cifs_tcon *tcon)
-{
-	int i;
-
-	for (i = 0; i < tcon->ses->chan_count; i++) {
-		spin_lock(&GlobalMid_Lock);
-		if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-			tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-		spin_unlock(&GlobalMid_Lock);
-	}
-}
-
 /* Update dfs referral path of superblock */
 static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
 				  const char *target)
@@ -4299,7 +4290,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
 	 */
 	if (rc && server->current_fullpath != server->origin_fullpath) {
 		server->current_fullpath = server->origin_fullpath;
-		mark_tcon_tcp_ses_for_reconnect(tcon);
+		cifs_ses_mark_for_reconnect(tcon->ses);
 	}
 
 	dfs_cache_free_tgts(tl);
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 5c1259d2eeac..e9b0fa2a9614 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1355,12 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
 	}
 
 	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-	for (i = 0; i < tcon->ses->chan_count; i++) {
-		spin_lock(&GlobalMid_Lock);
-		if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-			tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-		spin_unlock(&GlobalMid_Lock);
-	}
+	cifs_ses_mark_for_reconnect(tcon->ses);
 }
 
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 2c10b186ed6e..af63548eaf26 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -95,9 +95,9 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
 	}
 
 	if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
-		cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
 		ses->chan_max = 1;
 		spin_unlock(&ses->chan_lock);
+		cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
 		return 0;
 	}
 	spin_unlock(&ses->chan_lock);
@@ -222,6 +222,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
 	/* Auth */
 	ctx.domainauto = ses->domainAuto;
 	ctx.domainname = ses->domainName;
+	ctx.server_hostname = ses->server->hostname;
 	ctx.username = ses->user_name;
 	ctx.password = ses->password;
 	ctx.sectype = ses->sectype;
@@ -318,6 +319,19 @@ out:
 	return rc;
 }
 
+/* Mark all session channels for reconnect */
+void cifs_ses_mark_for_reconnect(struct cifs_ses *ses)
+{
+	int i;
+
+	for (i = 0; i < ses->chan_count; i++) {
+		spin_lock(&GlobalMid_Lock);
+		if (ses->chans[i].server->tcpStatus != CifsExiting)
+			ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+		spin_unlock(&GlobalMid_Lock);
+	}
+}
+
 static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
 {
 	__u32 capabilities = 0;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2f5f2c4c6183..8b3670388cda 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -142,7 +142,7 @@ static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
 	       struct TCP_Server_Info *server)
 {
-	int rc;
+	int rc = 0;
 	struct nls_table *nls_codepage;
 	struct cifs_ses *ses;
 	int retries;
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 84da2c280012..ec9a1d780dc1 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -150,7 +150,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	 * however in order to avoid some race conditions, add a
 	 * DBG_BUGON to observe this in advance.
 	 */
-	DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);
+	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
 
 	/* last refcount should be connected with its managed pslot.  */
 	erofs_workgroup_unfreeze(grp, 0);
@@ -165,15 +165,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 	unsigned int freed = 0;
 	unsigned long index;
 
+	xa_lock(&sbi->managed_pslots);
 	xa_for_each(&sbi->managed_pslots, index, grp) {
 		/* try to shrink each valid workgroup */
 		if (!erofs_try_to_release_workgroup(sbi, grp))
 			continue;
+		xa_unlock(&sbi->managed_pslots);
 
 		++freed;
 		if (!--nr_shrink)
-			break;
+			return freed;
+		xa_lock(&sbi->managed_pslots);
 	}
+	xa_unlock(&sbi->managed_pslots);
 	return freed;
 }
diff --git a/fs/file.c b/fs/file.c
index 8627dacfc424..ad4a8bf3cf10 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -858,6 +858,10 @@ loop:
 			file = NULL;
 		else if (!get_file_rcu_many(file, refs))
 			goto loop;
+		else if (files_lookup_fd_raw(files, fd) != file) {
+			fput_many(file, refs);
+			goto loop;
+		}
 	}
 	rcu_read_unlock();
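The fs/file.c hunk closes a lookup/ref race: between finding the file in the fd table and taking a reference, the descriptor slot can be recycled, so the fix re-reads the slot after the refcount bump and retries on mismatch. A hedged C11 sketch of the same lookup, acquire, re-validate shape; it deliberately omits the RCU grace period that makes the initial dereference safe in the kernel, and all names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct file { atomic_int refs; };

    /* table slot published by other threads; NULL or a live struct file * */
    static _Atomic(struct file *) slot;

    static bool try_get(struct file *f)
    {
        int r = atomic_load(&f->refs);
        while (r > 0)                       /* never resurrect a dying object */
            if (atomic_compare_exchange_weak(&f->refs, &r, r + 1))
                return true;
        return false;
    }

    static struct file *lookup_get(void)
    {
        struct file *f;

        for (;;) {
            f = atomic_load(&slot);          /* 1. lookup */
            if (!f)
                return NULL;
            if (!try_get(f))                 /* 2. acquire a reference */
                continue;                    /* object was going away: retry */
            if (atomic_load(&slot) == f)     /* 3. re-validate the slot */
                return f;                    /* still current: success */
            atomic_fetch_sub(&f->refs, 1);   /* slot was reused: undo, retry
                                                (a real impl frees on zero) */
        }
    }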
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 79f7eda49e06..cd54a529460d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -847,17 +847,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 
 	replace_page_cache_page(oldpage, newpage);
 
+	get_page(newpage);
+
+	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+		lru_cache_add(newpage);
+
 	/*
 	 * Release while we have extra ref on stolen page.  Otherwise
 	 * anon_pipe_buf_release() might think the page can be reused.
 	 */
 	pipe_buf_release(cs->pipe, buf);
 
-	get_page(newpage);
-
-	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-		lru_cache_add(newpage);
-
 	err = 0;
 	spin_lock(&cs->req->waitq.lock);
 	if (test_bit(FR_ABORTED, &cs->req->flags))
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7235d539e969..d67108489148 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -940,7 +940,7 @@ do_alloc:
 		else if (height == ip->i_height)
 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
 		else
-			iomap->length = size - pos;
+			iomap->length = size - iomap->offset;
 	} else if (flags & IOMAP_WRITE) {
 		u64 alloc_size;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index adafaaf7d24d..3e718cfc19a7 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -773,8 +773,8 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
 					  size_t *prev_count,
 					  size_t *window_size)
 {
-	char __user *p = i->iov[0].iov_base + i->iov_offset;
 	size_t count = iov_iter_count(i);
+	char __user *p;
 	int pages = 1;
 
 	if (likely(!count))
@@ -787,14 +787,14 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
 	if (*prev_count != count || !*window_size) {
 		int pages, nr_dirtied;
 
-		pages = min_t(int, BIO_MAX_VECS,
-			      DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE));
+		pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
 		nr_dirtied = max(current->nr_dirtied_pause -
 				 current->nr_dirtied, 1);
 		pages = min(pages, nr_dirtied);
 	}
 
 	*prev_count = count;
+	p = i->iov[0].iov_base + i->iov_offset;
 	*window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
 	return true;
 }
@@ -1013,6 +1013,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_holder *statfs_gh = NULL;
 	size_t prev_count = 0, window_size = 0;
+	size_t orig_count = iov_iter_count(from);
 	size_t read = 0;
 	ssize_t ret;
 
@@ -1057,6 +1058,7 @@ retry_under_glock:
 	if (inode == sdp->sd_rindex)
 		gfs2_glock_dq_uninit(statfs_gh);
 
+	from->count = orig_count - read;
 	if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
 		size_t leftover;
 
@@ -1064,6 +1066,7 @@ retry_under_glock:
 		leftover = fault_in_iov_iter_readable(from, window_size);
 		gfs2_holder_disallow_demote(gh);
 		if (leftover != window_size) {
+			from->count = min(from->count, window_size - leftover);
 			if (!gfs2_holder_queued(gh)) {
 				if (read)
 					goto out_uninit;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 19f38aee1b61..44a7a4288956 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -411,14 +411,14 @@ static void do_error(struct gfs2_glock *gl, const int ret)
 static void demote_incompat_holders(struct gfs2_glock *gl,
 				    struct gfs2_holder *new_gh)
 {
-	struct gfs2_holder *gh;
+	struct gfs2_holder *gh, *tmp;
 
 	/*
 	 * Demote incompatible holders before we make ourselves eligible.
 	 * (This holder may or may not allow auto-demoting, but we don't want
 	 * to demote the new holder before it's even granted.)
 	 */
-	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 		/*
 		 * Since holders are at the front of the list, we stop when we
 		 * find the first non-holder.
@@ -496,7 +496,7 @@ again:
 	 * Since we unlock the lockref lock, we set a flag to indicate
 	 * instantiate is in progress.
	 */
-	if (test_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
+	if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
 		wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
 			    TASK_UNINTERRUPTIBLE);
 		/*
@@ -509,14 +509,10 @@ again:
 		goto again;
 	}
 
-	set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
-
 	ret = glops->go_instantiate(gh);
 	if (!ret)
 		clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
-	clear_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
-	smp_mb__after_atomic();
-	wake_up_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG);
+	clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
 	return ret;
 }
 
@@ -1861,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 
 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 {
-	struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
 	unsigned long delay = 0;
 	unsigned long holdtime;
 	unsigned long now = jiffies;
@@ -1894,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 	 * keep the glock until the last strong holder is done with it.
 	 */
 	if (!find_first_strong_holder(gl)) {
-		if (state == LM_ST_UNLOCKED)
-			mock_gh.gh_state = LM_ST_EXCLUSIVE;
+		struct gfs2_holder mock_gh = {
+			.gh_gl = gl,
+			.gh_state = (state == LM_ST_UNLOCKED) ?
+				    LM_ST_EXCLUSIVE : state,
+			.gh_iflags = BIT(HIF_HOLDER)
+		};
+
 		demote_incompat_holders(gl, &mock_gh);
 	}
 	handle_callback(gl, state, delay, true);
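The gfs2_instantiate() part of the glock.c diff replaces a separate test_bit()/set_bit() pair with a single test_and_set_bit(), and folds the clear/barrier/wake sequence into clear_and_wake_up_bit(): with the test and the set split, two racing threads can both observe the bit clear and both proceed to instantiate. A small C11 sketch of why the combined read-modify-write matters (illustrative names, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool in_progress;  /* stands in for GLF_INSTANTIATE_IN_PROG */

    /* Racy: two threads can both see 'false' here and both enter. */
    static bool enter_racy(void)
    {
        if (atomic_load(&in_progress))
            return false;
        atomic_store(&in_progress, true);  /* too late: check and set are
                                              two separate steps */
        return true;
    }

    /* Atomic: exchange returns the old value, so exactly one caller wins,
     * mirroring test_and_set_bit() in the patched gfs2_instantiate(). */
    static bool enter_atomic(void)
    {
        return !atomic_exchange(&in_progress, true);
    }

    static void leave(void)
    {
        /* release pairs with the winner's writes; waiters would be woken
         * here, which clear_and_wake_up_bit() does in one call */
        atomic_store_explicit(&in_progress, false, memory_order_release);
    }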
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6424b903e885..89905f4f29bb 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops;
 static const struct inode_operations gfs2_dir_iops;
 static const struct inode_operations gfs2_symlink_iops;
 
-static int iget_test(struct inode *inode, void *opaque)
-{
-	u64 no_addr = *(u64 *)opaque;
-
-	return GFS2_I(inode)->i_no_addr == no_addr;
-}
-
-static int iget_set(struct inode *inode, void *opaque)
-{
-	u64 no_addr = *(u64 *)opaque;
-
-	GFS2_I(inode)->i_no_addr = no_addr;
-	inode->i_ino = no_addr;
-	return 0;
-}
-
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
-{
-	struct inode *inode;
-
-repeat:
-	inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
-	if (!inode)
-		return inode;
-	if (is_bad_inode(inode)) {
-		iput(inode);
-		goto repeat;
-	}
-	return inode;
-}
-
 /**
  * gfs2_set_iop - Sets inode operations
  * @inode: The inode with correct i_mode filled in
@@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode)
 	}
 }
 
+static int iget_test(struct inode *inode, void *opaque)
+{
+	u64 no_addr = *(u64 *)opaque;
+
+	return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+	u64 no_addr = *(u64 *)opaque;
+
+	GFS2_I(inode)->i_no_addr = no_addr;
+	inode->i_ino = no_addr;
+	return 0;
+}
+
 /**
  * gfs2_inode_lookup - Lookup an inode
  * @sb: The super block
@@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 {
 	struct inode *inode;
 	struct gfs2_inode *ip;
-	struct gfs2_glock *io_gl = NULL;
 	struct gfs2_holder i_gh;
 	int error;
 
 	gfs2_holder_mark_uninitialized(&i_gh);
-	inode = gfs2_iget(sb, no_addr);
+	inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
 
@@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
 	if (inode->i_state & I_NEW) {
 		struct gfs2_sbd *sdp = GFS2_SB(inode);
+		struct gfs2_glock *io_gl;
 
 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
 		if (unlikely(error))
 			goto fail;
-		flush_delayed_work(&ip->i_gl->gl_work);
-
-		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-		if (unlikely(error))
-			goto fail;
-		if (blktype != GFS2_BLKST_UNLINKED)
-			gfs2_cancel_delete_work(io_gl);
 
 		if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
 			/*
 			 * The GL_SKIP flag indicates to skip reading the inode
-			 * block.  We read the inode with gfs2_inode_refresh
+			 * block.  We read the inode when instantiating it
 			 * after possibly checking the block type.
 			 */
 			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
@@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 			}
 		}
 
-		glock_set_object(ip->i_gl, ip);
 		set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
-		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+
+		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
 		if (unlikely(error))
 			goto fail;
-		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+		if (blktype != GFS2_BLKST_UNLINKED)
+			gfs2_cancel_delete_work(io_gl);
+		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		gfs2_glock_put(io_gl);
-		io_gl = NULL;
+		if (unlikely(error))
+			goto fail;
 
 		/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
 		inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
 		inode->i_atime.tv_nsec = 0;
 
+		glock_set_object(ip->i_gl, ip);
+
 		if (type == DT_UNKNOWN) {
 			/* Inode glock must be locked already */
 			error = gfs2_instantiate(&i_gh);
-			if (error)
+			if (error) {
+				glock_clear_object(ip->i_gl, ip);
 				goto fail;
+			}
 		} else {
 			ip->i_no_formal_ino = no_formal_ino;
 			inode->i_mode = DT2IF(type);
@@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		if (gfs2_holder_initialized(&i_gh))
 			gfs2_glock_dq_uninit(&i_gh);
+		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 
 		gfs2_set_iop(inode);
+		unlock_new_inode(inode);
 	}
 
 	if (no_formal_ino && ip->i_no_formal_ino &&
 	    no_formal_ino != ip->i_no_formal_ino) {
-		error = -ESTALE;
-		if (inode->i_state & I_NEW)
-			goto fail;
 		iput(inode);
-		return ERR_PTR(error);
+		return ERR_PTR(-ESTALE);
 	}
 
-	if (inode->i_state & I_NEW)
-		unlock_new_inode(inode);
-
 	return inode;
 
 fail:
-	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+	if (gfs2_holder_initialized(&ip->i_iopen_gh))
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-	}
-	if (io_gl)
-		gfs2_glock_put(io_gl);
 	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
 	iget_failed(inode);
@@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
 	if (error)
 		goto fail_free_inode;
-	flush_delayed_work(&ip->i_gl->gl_work);
 
 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
 	if (error)
 		goto fail_free_inode;
 	gfs2_cancel_delete_work(io_gl);
 
+	error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+	BUG_ON(error);
+
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
 	if (error)
 		goto fail_gunlock2;
 
-	glock_set_object(ip->i_gl, ip);
 	error = gfs2_trans_begin(sdp, blocks, 0);
 	if (error)
 		goto fail_gunlock2;
@@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
 
+	glock_set_object(ip->i_gl, ip);
 	glock_set_object(io_gl, ip);
 	gfs2_set_iop(inode);
-	insert_inode_hash(inode);
 
 	free_vfs_inode = 0; /* After this point, the inode is no longer
 			       considered free.
			       Any failures need to undo
@@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	gfs2_glock_dq_uninit(ghs + 1);
 	gfs2_glock_put(io_gl);
 	gfs2_qa_put(dip);
+	unlock_new_inode(inode);
 	return error;
 
 fail_gunlock3:
+	glock_clear_object(ip->i_gl, ip);
 	glock_clear_object(io_gl, ip);
 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
-	glock_clear_object(io_gl, ip);
 	gfs2_glock_put(io_gl);
 fail_free_inode:
 	if (ip->i_gl) {
-		glock_clear_object(ip->i_gl, ip);
 		if (free_vfs_inode) /* else evict will do the put for us */
 			gfs2_glock_put(ip->i_gl);
 	}
@@ -829,7 +807,10 @@ fail_gunlock:
 		mark_inode_dirty(inode);
 		set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
 			&GFS2_I(inode)->i_flags);
-		iput(inode);
+		if (inode->i_state & I_NEW)
+			iget_failed(inode);
+		else
+			iput(inode);
 	}
 	if (gfs2_holder_initialized(ghs + 1))
 		gfs2_glock_dq_uninit(ghs + 1);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 5b121371508a..0f93e8beca4d 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1402,13 +1402,6 @@ out:
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
-	if (ip->i_gl) {
-		glock_clear_object(ip->i_gl, ip);
-		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
-		gfs2_glock_add_to_lru(ip->i_gl);
-		gfs2_glock_put_eventually(ip->i_gl);
-		ip->i_gl = NULL;
-	}
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
@@ -1421,6 +1414,13 @@ out:
 		gfs2_holder_uninit(&ip->i_iopen_gh);
 		gfs2_glock_put_eventually(gl);
 	}
+	if (ip->i_gl) {
+		glock_clear_object(ip->i_gl, ip);
+		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+		gfs2_glock_add_to_lru(ip->i_gl);
+		gfs2_glock_put_eventually(ip->i_gl);
+		ip->i_gl = NULL;
+	}
 }
 
 static struct inode *gfs2_alloc_inode(struct super_block *sb)
diff --git a/fs/inode.c b/fs/inode.c
index 3eba0940ffcf..6b80a51129d5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -180,8 +180,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	mapping->a_ops = &empty_aops;
 	mapping->host = inode;
 	mapping->flags = 0;
-	if (sb->s_type->fs_flags & FS_THP_SUPPORT)
-		__set_bit(AS_THP_SUPPORT, &mapping->flags);
 	mapping->wb_err = 0;
 	atomic_set(&mapping->i_mmap_writable, 0);
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b07196b4511c..c4f217613f56 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 			  bool cancel_all)
+	__must_hold(&req->ctx->timeout_lock)
 {
 	struct io_kiocb *req;
 
@@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 	return false;
 }
 
+static bool io_match_linked(struct io_kiocb *head)
+{
+	struct io_kiocb *req;
+
+	io_for_each_link(req, head) {
+		if (req->flags & REQ_F_INFLIGHT)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+			       bool cancel_all)
+{
+	bool matched;
+
+	if (task && head->task != task)
+		return false;
+	if (cancel_all)
+		return true;
+
+	if (head->flags & REQ_F_LINK_TIMEOUT) {
+		struct io_ring_ctx *ctx = head->ctx;
+
+		/* protect against races with linked timeouts */
+		spin_lock_irq(&ctx->timeout_lock);
+		matched = io_match_linked(head);
+		spin_unlock_irq(&ctx->timeout_lock);
+	} else {
+		matched = io_match_linked(head);
+	}
+	return matched;
+}
+
 static inline bool req_has_async_data(struct io_kiocb *req)
 {
 	return req->flags & REQ_F_ASYNC_DATA;
@@ -1502,10 +1541,10 @@ static void io_prep_async_link(struct io_kiocb *req)
 	if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;
 
-		spin_lock(&ctx->completion_lock);
+		spin_lock_irq(&ctx->timeout_lock);
 		io_for_each_link(cur, req)
 			io_prep_async_work(cur);
-		spin_unlock(&ctx->completion_lock);
+		spin_unlock_irq(&ctx->timeout_lock);
 	} else {
 		io_for_each_link(cur, req)
 			io_prep_async_work(cur);
@@ -4327,6 +4366,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
 			kfree(nxt);
 			if (++i == nbufs)
 				return i;
+			cond_resched();
 		}
 		i++;
 		kfree(buf);
@@ -5704,7 +5744,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 
 		list = &ctx->cancel_hash[i];
 		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-			if (io_match_task(req, tsk, cancel_all))
+			if (io_match_task_safe(req, tsk, cancel_all))
 				posted += io_poll_remove_one(req);
 		}
 	}
@@ -6156,6 +6196,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
 		return -EFAULT;
 
+	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+		return -EINVAL;
+
 	data->mode = io_translate_timeout_mode(flags);
 	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
 
@@ -6880,10 +6923,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
 	struct io_kiocb *prev = req->timeout.prev;
-	int ret;
+	int ret = -ENOENT;
 
 	if (prev) {
-		ret = io_try_cancel_userdata(req, prev->user_data);
+		if (!(req->task->flags & PF_EXITING))
+			ret = io_try_cancel_userdata(req, prev->user_data);
 		io_req_complete_post(req, ret ?: -ETIME, 0);
 		io_put_req(prev);
 	} else {
@@ -9255,10 +9299,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct io_buffer *buf;
 	unsigned long index;
 
-	xa_for_each(&ctx->io_buffers, index, buf) {
+	xa_for_each(&ctx->io_buffers, index, buf)
 		__io_remove_buffers(ctx, buf, index, -1U);
-		cond_resched();
-	}
 }
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
@@ -9562,19 +9604,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_task_cancel *cancel = data;
-	bool ret;
 
-	if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
-		struct io_ring_ctx *ctx = req->ctx;
-
-		/* protect against races with linked timeouts */
-		spin_lock(&ctx->completion_lock);
-		ret = io_match_task(req, cancel->task, cancel->all);
-		spin_unlock(&ctx->completion_lock);
-	} else {
-		ret = io_match_task(req, cancel->task, cancel->all);
-	}
-	return ret;
+	return io_match_task_safe(req, cancel->task, cancel->all);
 }
 
 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
@@ -9586,7 +9617,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 
 	spin_lock(&ctx->completion_lock);
 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-		if (io_match_task(de->req, task, cancel_all)) {
+		if (io_match_task_safe(de->req, task, cancel_all)) {
 			list_cut_position(&list, &ctx->defer_list, &de->list);
 			break;
 		}
@@ -9764,7 +9795,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
 	}
 	if (wq) {
 		/*
-		 * Must be after io_uring_del_task_file() (removes nodes under
+		 * Must be after io_uring_del_tctx_node() (removes nodes under
 		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
 		 */
 		io_wq_put_and_exit(wq);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 1753c26c8e76..71a36ae120ee 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -205,7 +205,16 @@ struct iomap_readpage_ctx {
 	struct readahead_control *rac;
 };
 
-static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @page: page to copy to
+ *
+ * Copy the inline data in @iter into @page and zero out the rest of the page.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
 		struct page *page)
 {
 	const struct iomap *iomap = iomap_iter_srcmap(iter);
@@ -214,7 +223,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
 	void *addr;
 
 	if (PageUptodate(page))
-		return PAGE_SIZE - poff;
+		return 0;
 
 	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
 		return -EIO;
@@ -231,7 +240,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
 	memset(addr + size, 0, PAGE_SIZE - poff - size);
 	kunmap_local(addr);
 	iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
-	return PAGE_SIZE - poff;
+	return 0;
 }
 
 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
@@ -257,7 +266,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
 	sector_t sector;
 
 	if (iomap->type == IOMAP_INLINE)
-		return min(iomap_read_inline_data(iter, page), length);
+		return iomap_read_inline_data(iter, page);
 
 	/* zero post-eof blocks as the page may be mapped */
 	iop = iomap_page_create(iter->inode, page);
@@ -370,6 +379,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
 			ctx->cur_page_in_bio = false;
 		}
 		ret = iomap_readpage_iter(iter, ctx, done);
+		if (ret <= 0)
+			return ret;
 	}
 
 	return done;
@@ -580,15 +591,10 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
 		struct page *page)
 {
-	int ret;
-
 	/* needs more work for the tailpacking case; disable for now */
 	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
 		return -EIO;
-	ret = iomap_read_inline_data(iter, page);
-	if (ret < 0)
-		return ret;
-	return 0;
+	return iomap_read_inline_data(iter, page);
 }
 
 static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 121f8e8c70ac..49c9da37315c 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -1697,8 +1697,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
 	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
 	negblob_len = le16_to_cpu(req->SecurityBufferLength);
 	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
-	    negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
-		return -EINVAL;
+	    negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+		rc = -EINVAL;
+		goto out_err;
+	}
 
 	negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
 			negblob_off);
@@ -4457,6 +4459,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
 			  &stat);
 	file_info = (struct smb2_file_stream_info *)rsp->Buffer;
 
+	buf_free_len =
+		smb2_calc_max_out_buf_len(work, 8,
+					  le32_to_cpu(req->OutputBufferLength));
+	if (buf_free_len < 0)
+		goto out;
+
 	xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
 	if (xattr_list_len < 0) {
 		goto out;
@@ -4465,12 +4473,6 @@ static void get_file_stream_info(struct ksmbd_work *work,
 		goto out;
 	}
 
-	buf_free_len =
-		smb2_calc_max_out_buf_len(work, 8,
-					  le32_to_cpu(req->OutputBufferLength));
-	if (buf_free_len < 0)
-		goto out;
-
 	while (idx < xattr_list_len) {
 		stream_name = xattr_list + idx;
 		streamlen = strlen(stream_name);
@@ -4496,8 +4498,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
 			 ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
 
 		next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
-		if (next > buf_free_len)
+		if (next > buf_free_len) {
+			kfree(stream_buf);
 			break;
+		}
 
 		file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
 		streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
@@ -4514,6 +4518,7 @@ static void get_file_stream_info(struct ksmbd_work *work,
 		file_info->NextEntryOffset = cpu_to_le32(next);
 	}
 
+out:
 	if (!S_ISDIR(stat.mode) &&
 	    buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
 		file_info = (struct smb2_file_stream_info *)
@@ -4522,14 +4527,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
 					      "::$DATA", 7, conn->local_nls, 0);
 		streamlen *= 2;
 		file_info->StreamNameLength = cpu_to_le32(streamlen);
-		file_info->StreamSize = 0;
-		file_info->StreamAllocationSize = 0;
+		file_info->StreamSize = cpu_to_le64(stat.size);
+		file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
 		nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
 	}
 
 	/* last entry offset should be 0 */
 	file_info->NextEntryOffset = 0;
-out:
 	kvfree(xattr_list);
 
 	rsp->OutputBufferLength = cpu_to_le32(nbytes);
@@ -5068,7 +5072,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
 	if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
 			      PROTECTED_DACL_SECINFO |
 			      UNPROTECTED_DACL_SECINFO)) {
-		pr_err("Unsupported addition info: 0x%x)\n",
+		ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n",
 		       addition_info);
 
 		pntsd->revision = cpu_to_le16(1);
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 9320a42dfaf9..7046f9bdd8dc 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -1008,8 +1008,8 @@ out:
 }
 EXPORT_SYMBOL(netfs_readpage);
 
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
+/*
+ * Prepare a folio for writing without reading first
  * @folio: The folio being prepared
  * @pos: starting position for the write
  * @len: length of write
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index dd53704c3f40..fda530d5e764 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -219,6 +219,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
 				NFS_INO_DATA_INVAL_DEFER);
 	else if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
 		nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+	trace_nfs_set_cache_invalid(inode, 0);
 }
 EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 08355b66e7cb..8b21ff1be717 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -289,7 +289,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
 	loff_t newsize = pos + len;
 	loff_t end = newsize - 1;
 
-	truncate_pagecache_range(inode, pos, end);
+	WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
+				pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
+
 	spin_lock(&inode->i_lock);
 	if (newsize > i_size_read(inode))
 		i_size_write(inode, newsize);
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index c8bad735e4c1..271e5f92ed01 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
 	status = decode_clone(xdr);
 	if (status)
 		goto out;
-	status = decode_getfattr(xdr, res->dst_fattr, res->server);
-
+	decode_getfattr(xdr, res->dst_fattr, res->server);
 out:
 	res->rpc_status = status;
 	return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ecc4594299d6..f63dfa01001c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1998,6 +1998,10 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
 		dprintk("%s: exit with error %d for server %s\n",
 				__func__, -EPROTONOSUPPORT, clp->cl_hostname);
 		return -EPROTONOSUPPORT;
+	case -ENOSPC:
+		if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+			nfs_mark_client_ready(clp, -EIO);
+		return -EIO;
 	case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
 				 * in nfs4_exchange_id */
 	default:
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 21dac847f1e4..b3aee261801e 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -162,6 +162,7 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
 DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
 DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
 DEFINE_NFS_INODE_EVENT(nfs_access_enter);
+DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid);
 
 TRACE_EVENT(nfs_access_exit,
 		TP_PROTO(
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b2a1d969a172..5a93a5db4fb0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -288,11 +288,8 @@ nfsd4_decode_bitmap4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen)
 	p = xdr_inline_decode(argp->xdr, count << 2);
 	if (!p)
 		return nfserr_bad_xdr;
-	i = 0;
-	while (i < count)
-		bmval[i++] = be32_to_cpup(p++);
-	while (i < bmlen)
-		bmval[i++] = 0;
+	for (i = 0; i < bmlen; i++)
+		bmval[i] = (i < count) ? be32_to_cpup(p++) : 0;
 
 	return nfs_ok;
 }
diff --git a/fs/ntfs/Kconfig b/fs/ntfs/Kconfig
index 1667a7e590d8..f93e69a61283 100644
--- a/fs/ntfs/Kconfig
+++ b/fs/ntfs/Kconfig
@@ -52,6 +52,7 @@ config NTFS_DEBUG
 config NTFS_RW
 	bool "NTFS write support"
 	depends on NTFS_FS
+	depends on PAGE_SIZE_LESS_THAN_64KB
 	help
 	  This enables the partial, but safe, write support in the NTFS driver.
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 30a3b66f475a..509f85148fee 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -154,9 +154,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
 			nr_bytes = count;
 
 		/* If pfn is not ram, return zeros for sparse dump files */
-		if (!pfn_is_ram(pfn))
-			memset(buf, 0, nr_bytes);
-		else {
+		if (!pfn_is_ram(pfn)) {
+			tmp = 0;
+			if (!userbuf)
+				memset(buf, 0, nr_bytes);
+			else if (clear_user(buf, nr_bytes))
+				tmp = -EFAULT;
+		} else {
 			if (encrypted)
 				tmp = copy_oldmem_page_encrypted(pfn, buf,
 								 nr_bytes,
@@ -165,12 +169,12 @@ ssize_t read_from_oldmem(char *buf, size_t count,
 			else
 				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 						       offset, userbuf);
-
-			if (tmp < 0) {
-				up_read(&vmcore_cb_rwsem);
-				return tmp;
-			}
 		}
+		if (tmp < 0) {
+			up_read(&vmcore_cb_rwsem);
+			return tmp;
+		}
+
 		*ppos += nr_bytes;
 		count -= nr_bytes;
 		buf += nr_bytes;
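The vmcore change distinguishes kernel and user destination buffers: read_from_oldmem() previously called memset() even when buf pointed into userspace, where direct stores can fault, so the fix uses clear_user() and propagates -EFAULT. A shape-only sketch of that dispatch, with a hypothetical clear_user_stub() standing in for the kernel helper (the stub and its contract are assumptions for illustration):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    /*
     * In the kernel, a __user destination must be written via
     * clear_user()/copy_to_user(), which handle page faults; a plain
     * memset() on a user pointer can oops.
     */
    extern unsigned long clear_user_stub(void *uptr, size_t n); /* returns bytes NOT cleared */

    static int zero_dest(void *buf, size_t n, int userbuf)
    {
        if (!userbuf) {
            memset(buf, 0, n);       /* kernel buffer: direct stores are safe */
            return 0;
        }
        if (clear_user_stub(buf, n)) /* user buffer: may fault partway through */
            return -EFAULT;
        return 0;
    }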
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 328da35da390..8adabde685f1 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -173,7 +173,6 @@ config PSTORE_BLK
 	tristate "Log panic/oops to a block device"
 	depends on PSTORE
 	depends on BLOCK
-	depends on BROKEN
 	select PSTORE_ZONE
 	default n
 	help
diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c
index 5d1fbaffd66a..4ae0cfcd15f2 100644
--- a/fs/pstore/blk.c
+++ b/fs/pstore/blk.c
@@ -309,7 +309,7 @@ static int __init __best_effort_init(void)
 	if (ret)
 		kfree(best_effort_dev);
 	else
-		pr_info("attached %s (%zu) (no dedicated panic_write!)\n",
+		pr_info("attached %s (%lu) (no dedicated panic_write!)\n",
 			blkdev, best_effort_dev->zone.total_size);
 
 	return ret;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 70abdfad2df1..42e3e551fa4c 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -31,6 +31,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/bio.h>
+#include <linux/iversion.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -43,7 +44,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 	struct fileIdentDesc *fi = NULL;
 	struct fileIdentDesc cfi;
 	udf_pblk_t block, iblock;
-	loff_t nf_pos;
+	loff_t nf_pos, emit_pos = 0;
 	int flen;
 	unsigned char *fname = NULL, *copy_name = NULL;
 	unsigned char *nameptr;
@@ -57,6 +58,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 	int i, num, ret = 0;
 	struct extent_position epos = { NULL, 0, {0, 0} };
 	struct super_block *sb = dir->i_sb;
+	bool pos_valid = false;
 
 	if (ctx->pos == 0) {
 		if (!dir_emit_dot(file, ctx))
@@ -67,6 +69,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 	if (nf_pos >= size)
 		goto out;
 
+	/*
+	 * Something changed since last readdir (either lseek was called or dir
+	 * changed)? We need to verify the position correctly points at the
+	 * beginning of some dir entry so that the directory parsing code does
+	 * not get confused. Since UDF does not have any reliable way of
+	 * identifying beginning of dir entry (names are under user control),
+	 * we need to scan the directory from the beginning.
+	 */
+	if (!inode_eq_iversion(dir, file->f_version)) {
+		emit_pos = nf_pos;
+		nf_pos = 0;
+	} else {
+		pos_valid = true;
+	}
+
 	fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
 	if (!fname) {
 		ret = -ENOMEM;
@@ -122,13 +139,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 
 	while (nf_pos < size) {
 		struct kernel_lb_addr tloc;
+		loff_t cur_pos = nf_pos;
 
-		ctx->pos = (nf_pos >> 2) + 1;
+		/* Update file position only if we got past the current one */
+		if (nf_pos >= emit_pos) {
+			ctx->pos = (nf_pos >> 2) + 1;
+			pos_valid = true;
+		}
 
 		fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
 					&elen, &offset);
 		if (!fi)
 			goto out;
 
+		/* Still not at offset where user asked us to read from? */
+		if (cur_pos < emit_pos)
+			continue;
+
 		liu = le16_to_cpu(cfi.lengthOfImpUse);
 		lfi = cfi.lengthFileIdent;
@@ -186,8 +211,11 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 	} /* end while */
 
 	ctx->pos = (nf_pos >> 2) + 1;
+	pos_valid = true;
 
 out:
+	if (pos_valid)
+		file->f_version = inode_query_iversion(dir);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index caeef08efed2..0ed4861b038f 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -30,6 +30,7 @@
 #include <linux/sched.h>
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
+#include <linux/iversion.h>
 
 static inline int udf_match(int len1, const unsigned char *name1, int len2,
 			    const unsigned char *name2)
@@ -134,6 +135,8 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
 			mark_buffer_dirty_inode(fibh->ebh, inode);
 		mark_buffer_dirty_inode(fibh->sbh, inode);
 	}
+	inode_inc_iversion(inode);
+
 	return 0;
 }
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 34247fba6df9..f26b5e0b84b6 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -57,6 +57,7 @@
 #include <linux/crc-itu-t.h>
 #include <linux/log2.h>
 #include <asm/byteorder.h>
+#include <linux/iversion.h>
 
 #include "udf_sb.h"
 #include "udf_i.h"
@@ -149,6 +150,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
 	init_rwsem(&ei->i_data_sem);
 	ei->cached_extent.lstart = -1;
 	spin_lock_init(&ei->i_extent_cache_lock);
+	inode_set_iversion(&ei->vfs_inode, 1);
 
 	return &ei->vfs_inode;
 }
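The three udf hunks wire the directory's change counter (i_version) through readdir: udf_write_fi() bumps the version on every directory update, and udf_readdir() rescans from the start whenever the version cached in file->f_version no longer matches, since UDF cannot otherwise re-validate a mid-directory offset. A minimal C11 analogue of that version-stamp pattern (types and names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    /*
     * Every directory update bumps a counter; a reader that caches a parse
     * position also caches the counter. A later mismatch means the cached
     * position may point into the middle of an entry and must be recomputed.
     */
    struct dir_state {
        atomic_ulong version;        /* ~ inode i_version */
    };

    struct reader_pos {
        unsigned long seen_version;  /* ~ file->f_version */
        long offset;                 /* cached parse position */
    };

    static void dir_modified(struct dir_state *d)
    {
        atomic_fetch_add(&d->version, 1);  /* ~ inode_inc_iversion() */
    }

    static bool pos_still_valid(struct dir_state *d, struct reader_pos *p)
    {
        /* ~ inode_eq_iversion(); if false, rescan from offset 0 */
        return atomic_load(&d->version) == p->seen_version;
    }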
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fbc9d816882c..23523b802539 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -1077,21 +1077,18 @@ xfs_attr_node_hasname(
 
 	state = xfs_da_state_alloc(args);
 	if (statep != NULL)
-		*statep = NULL;
+		*statep = state;
 
 	/*
 	 * Search to see if name exists, and get back a pointer to it.
 	 */
 	error = xfs_da3_node_lookup_int(state, &retval);
-	if (error) {
-		xfs_da_state_free(state);
-		return error;
-	}
+	if (error)
+		retval = error;
 
-	if (statep != NULL)
-		*statep = state;
-	else
+	if (!statep)
 		xfs_da_state_free(state);
+
 	return retval;
 }
 
@@ -1112,7 +1109,7 @@ xfs_attr_node_addname_find_attr(
 	 */
 	retval = xfs_attr_node_hasname(args, &dac->da_state);
 	if (retval != -ENOATTR && retval != -EEXIST)
-		return retval;
+		goto error;
 
 	if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
 		goto error;
@@ -1337,7 +1334,7 @@ int xfs_attr_node_removename_setup(
 
 	error = xfs_attr_node_hasname(args, state);
 	if (error != -EEXIST)
-		return error;
+		goto out;
 	error = 0;
 
 	ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index e1472004170e..da4af2142a2b 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -289,22 +289,6 @@ xfs_perag_clear_inode_tag(
 	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
 }
 
-static inline void
-xfs_inew_wait(
-	struct xfs_inode	*ip)
-{
-	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
-	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
-
-	do {
-		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
-		if (!xfs_iflags_test(ip, XFS_INEW))
-			break;
-		schedule();
-	} while (true);
-	finish_wait(wq, &wait.wq_entry);
-}
-
 /*
  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
  * part of the structure. This is made more complex by the fact we store
@@ -368,18 +352,13 @@ xfs_iget_recycle(
 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 	error = xfs_reinit_inode(mp, inode);
 	if (error) {
-		bool	wake;
-
 		/*
 		 * Re-initializing the inode failed, and we are in deep
 		 * trouble.  Try to re-add it to the reclaim list.
 		 */
 		rcu_read_lock();
 		spin_lock(&ip->i_flags_lock);
-		wake = !!__xfs_iflags_test(ip, XFS_INEW);
 		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
-		if (wake)
-			wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
 		spin_unlock(&ip->i_flags_lock);
 		rcu_read_unlock();
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index e635a3d64cba..c447bf04205a 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -231,8 +231,7 @@ static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip)
 #define XFS_IRECLAIM		(1 << 0) /* started reclaiming this inode */
 #define XFS_ISTALE		(1 << 1) /* inode has been staled */
 #define XFS_IRECLAIMABLE	(1 << 2) /* inode can be reclaimed */
-#define __XFS_INEW_BIT		3	 /* inode has just been allocated */
-#define XFS_INEW		(1 << __XFS_INEW_BIT)
+#define XFS_INEW		(1 << 3) /* inode has just been allocated */
 #define XFS_IPRESERVE_DM_FIELDS	(1 << 4) /* has legacy DMAPI fields set */
 #define XFS_ITRUNCATED		(1 << 5) /* truncated down so flush-on-close */
 #define XFS_IDIRTY_RELEASE	(1 << 6) /* dirty release already seen */
@@ -492,7 +491,6 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
 	xfs_iflags_clear(ip, XFS_INEW);
 	barrier();
 	unlock_new_inode(VFS_I(ip));
-	wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 }
 
 static inline void xfs_setup_existing_inode(struct xfs_inode *ip)