Diffstat (limited to 'fs/smb')
-rw-r--r--  fs/smb/client/cifs_debug.c         |   2
-rw-r--r--  fs/smb/client/cifsencrypt.c        | 144
-rw-r--r--  fs/smb/client/cifsfs.c             |  27
-rw-r--r--  fs/smb/client/cifsglob.h           |  22
-rw-r--r--  fs/smb/client/cifssmb.c            |  54
-rw-r--r--  fs/smb/client/connect.c            |  17
-rw-r--r--  fs/smb/client/file.c               |  99
-rw-r--r--  fs/smb/client/inode.c              |   2
-rw-r--r--  fs/smb/client/ioctl.c              |   2
-rw-r--r--  fs/smb/client/link.c               |   1
-rw-r--r--  fs/smb/client/misc.c               |  11
-rw-r--r--  fs/smb/client/reparse.c            |  11
-rw-r--r--  fs/smb/client/smb2inode.c          |   3
-rw-r--r--  fs/smb/client/smb2ops.c            | 267
-rw-r--r--  fs/smb/client/smb2pdu.c            |  97
-rw-r--r--  fs/smb/client/smbdirect.c          |  90
-rw-r--r--  fs/smb/client/trace.h              |   1
-rw-r--r--  fs/smb/client/transport.c          |   2
-rw-r--r--  fs/smb/common/smb2pdu.h            |   2
-rw-r--r--  fs/smb/server/connection.c         |  34
-rw-r--r--  fs/smb/server/connection.h         |   3
-rw-r--r--  fs/smb/server/mgmt/share_config.c  |  15
-rw-r--r--  fs/smb/server/mgmt/share_config.h  |   4
-rw-r--r--  fs/smb/server/mgmt/tree_connect.c  |   9
-rw-r--r--  fs/smb/server/mgmt/tree_connect.h  |   4
-rw-r--r--  fs/smb/server/mgmt/user_session.c  |   9
-rw-r--r--  fs/smb/server/oplock.c             |   2
-rw-r--r--  fs/smb/server/smb2pdu.c            |  44
-rw-r--r--  fs/smb/server/smb_common.c         |   9
-rw-r--r--  fs/smb/server/smb_common.h         |   6
-rw-r--r--  fs/smb/server/transport_tcp.c      |   4
-rw-r--r--  fs/smb/server/xattr.h              |   2
32 files changed, 553 insertions(+), 446 deletions(-)
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index c71ae5c04306..4a20e92474b2 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -1072,7 +1072,7 @@ static int cifs_security_flags_proc_open(struct inode *inode, struct file *file)
static void
cifs_security_flags_handle_must_flags(unsigned int *flags)
{
- unsigned int signflags = *flags & CIFSSEC_MUST_SIGN;
+ unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL);
if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
*flags = CIFSSEC_MUST_KRB5;
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 6322f0f68a17..7481b21a0489 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -21,127 +21,21 @@
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/fips.h>
+#include <linux/iov_iter.h>
#include "../common/arc4.h"
#include <crypto/aead.h>
-/*
- * Hash data from a BVEC-type iterator.
- */
-static int cifs_shash_bvec(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
+static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- const struct bio_vec *bv = iter->bvec;
- unsigned long start = iter->iov_offset;
- unsigned int i;
- void *p;
- int ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- size_t off, len;
-
- len = bv[i].bv_len;
- if (start >= len) {
- start -= len;
- continue;
- }
-
- len = min_t(size_t, maxsize, len - start);
- off = bv[i].bv_offset + start;
+ struct shash_desc *shash = priv;
+ int ret, *pret = priv2;
- p = kmap_local_page(bv[i].bv_page);
- ret = crypto_shash_update(shash, p + off, len);
- kunmap_local(p);
- if (ret < 0)
- return ret;
-
- maxsize -= len;
- if (maxsize <= 0)
- break;
- start = 0;
+ ret = crypto_shash_update(shash, iter_base, len);
+ if (ret < 0) {
+ *pret = ret;
+ return len;
}
-
- return 0;
-}
-
-/*
- * Hash data from a KVEC-type iterator.
- */
-static int cifs_shash_kvec(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
-{
- const struct kvec *kv = iter->kvec;
- unsigned long start = iter->iov_offset;
- unsigned int i;
- int ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- size_t len;
-
- len = kv[i].iov_len;
- if (start >= len) {
- start -= len;
- continue;
- }
-
- len = min_t(size_t, maxsize, len - start);
- ret = crypto_shash_update(shash, kv[i].iov_base + start, len);
- if (ret < 0)
- return ret;
- maxsize -= len;
-
- if (maxsize <= 0)
- break;
- start = 0;
- }
-
- return 0;
-}
-
-/*
- * Hash data from an XARRAY-type iterator.
- */
-static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
-{
- struct folio *folios[16], *folio;
- unsigned int nr, i, j, npages;
- loff_t start = iter->xarray_start + iter->iov_offset;
- pgoff_t last, index = start / PAGE_SIZE;
- ssize_t ret = 0;
- size_t len, offset, foffset;
- void *p;
-
- if (maxsize == 0)
- return 0;
-
- last = (start + maxsize - 1) / PAGE_SIZE;
- do {
- nr = xa_extract(iter->xarray, (void **)folios, index, last,
- ARRAY_SIZE(folios), XA_PRESENT);
- if (nr == 0)
- return -EIO;
-
- for (i = 0; i < nr; i++) {
- folio = folios[i];
- npages = folio_nr_pages(folio);
- foffset = start - folio_pos(folio);
- offset = foffset % PAGE_SIZE;
- for (j = foffset / PAGE_SIZE; j < npages; j++) {
- len = min_t(size_t, maxsize, PAGE_SIZE - offset);
- p = kmap_local_page(folio_page(folio, j));
- ret = crypto_shash_update(shash, p, len);
- kunmap_local(p);
- if (ret < 0)
- return ret;
- maxsize -= len;
- if (maxsize <= 0)
- return 0;
- start += len;
- offset = 0;
- index++;
- }
- }
- } while (nr == ARRAY_SIZE(folios));
return 0;
}
@@ -151,21 +45,13 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
static int cifs_shash_iter(const struct iov_iter *iter, size_t maxsize,
struct shash_desc *shash)
{
- if (maxsize == 0)
- return 0;
+ struct iov_iter tmp_iter = *iter;
+ int err = -EIO;
- switch (iov_iter_type(iter)) {
- case ITER_BVEC:
- return cifs_shash_bvec(iter, maxsize, shash);
- case ITER_KVEC:
- return cifs_shash_kvec(iter, maxsize, shash);
- case ITER_XARRAY:
- return cifs_shash_xarray(iter, maxsize, shash);
- default:
- pr_err("cifs_shash_iter(%u) unsupported\n", iov_iter_type(iter));
- WARN_ON_ONCE(1);
- return -EIO;
- }
+ if (iterate_and_advance_kernel(&tmp_iter, maxsize, shash, &err,
+ cifs_shash_step) != maxsize)
+ return err;
+ return 0;
}
int __cifs_calc_signature(struct smb_rqst *rqst,
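The rewrite above collapses three per-iterator-type hashing loops into one step callback. Note the convention the code relies on: iov_iter step callbacks return the number of bytes they did *not* process, so cifs_shash_step() returns len (everything left over) to abort the walk on a hash error and 0 when the segment was fully consumed, and cifs_shash_iter() detects the failure by comparing the walk total against maxsize. A minimal sketch of the same pattern (hash_step/hash_iter are illustrative names, not part of the patch):

	#include <linux/iov_iter.h>
	#include <crypto/hash.h>

	static size_t hash_step(void *base, size_t progress, size_t len,
				void *priv, void *priv2)
	{
		struct shash_desc *desc = priv;
		int *perr = priv2;

		*perr = crypto_shash_update(desc, base, len);
		if (*perr < 0)
			return len;	/* leftover bytes: stops the walk early */
		return 0;		/* segment fully consumed */
	}

	static int hash_iter(const struct iov_iter *iter, size_t size,
			     struct shash_desc *desc)
	{
		struct iov_iter tmp = *iter;	/* keep the caller's iterator intact */
		int err = -EIO;			/* default if the walk comes up short */

		if (iterate_and_advance_kernel(&tmp, size, desc, &err, hash_step) != size)
			return err;
		return 0;
	}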
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 2c4b357d85e2..2a2523c93944 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -75,9 +75,9 @@ unsigned int sign_CIFS_PDUs = 1;
/*
* Global transaction id (XID) information
*/
-unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
-unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
-unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
+unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */
+unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
+unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
/*
@@ -1341,7 +1341,6 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
struct cifsFileInfo *smb_file_target;
struct cifs_tcon *src_tcon;
struct cifs_tcon *target_tcon;
- unsigned long long destend, fstart, fend;
ssize_t rc;
cifs_dbg(FYI, "copychunk range\n");
@@ -1391,25 +1390,13 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
goto unlock;
}
- destend = destoff + len - 1;
-
- /* Flush the folios at either end of the destination range to prevent
- * accidental loss of dirty data outside of the range.
+ /* Flush and invalidate all the folios in the destination region. If
+ * the copy was successful, then some of the flush is extra overhead,
+ * but we need to allow for the copy failing in some way (e.g. ENOSPC).
*/
- fstart = destoff;
- fend = destend;
-
- rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+ rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
if (rc)
goto unlock;
- rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
- if (rc)
- goto unlock;
- if (fend > target_cifsi->netfs.zero_point)
- target_cifsi->netfs.zero_point = fend + 1;
-
- /* Discard all the folios that overlap the destination region. */
- truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
i_size_read(target_inode), 0);
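Where the old code flushed only the folios at either edge and truncated the middle, the new code writes back and drops the entire destination range in one call, so a copy that fails partway can never leave stale pagecache behind. A sketch of the pattern, assuming the filemap_invalidate_inode() signature as used above (the second argument requests writeback before invalidation):

	#include <linux/pagemap.h>

	/* Write back and discard pagecache over [destoff, destoff + len - 1]
	 * before asking the server to copy into that range.
	 */
	static int flush_dest_range(struct inode *inode, loff_t destoff, loff_t len)
	{
		return filemap_invalidate_inode(inode, true, destoff,
						destoff + len - 1);
	}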
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index f6d1f075987f..2709c7b0be85 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -254,9 +254,8 @@ struct cifs_open_info_data {
struct smb_rqst {
struct kvec *rq_iov; /* array of kvecs */
unsigned int rq_nvec; /* number of kvecs in array */
- size_t rq_iter_size; /* Amount of data in ->rq_iter */
struct iov_iter rq_iter; /* Data iterator */
- struct xarray rq_buffer; /* Page buffer for encryption */
+ struct folio_queue *rq_buffer; /* Buffer for encryption */
};
struct mid_q_entry;
@@ -345,7 +344,7 @@ struct smb_version_operations {
/* connect to a server share */
int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *,
struct cifs_tcon *, const struct nls_table *);
- /* close tree connecion */
+ /* close tree connection */
int (*tree_disconnect)(const unsigned int, struct cifs_tcon *);
/* get DFS referrals */
int (*get_dfs_refer)(const unsigned int, struct cifs_ses *,
@@ -816,7 +815,7 @@ struct TCP_Server_Info {
* Protected by @refpath_lock and @srv_lock. The @refpath_lock is
* mostly used for not requiring a copy of @leaf_fullpath when getting
* cached or new DFS referrals (which might also sleep during I/O).
- * While @srv_lock is held for making string and NULL comparions against
+ * While @srv_lock is held for making string and NULL comparisons against
* both fields as in mount(2) and cache refresh.
*
* format: \\HOST\SHARE[\OPTIONAL PATH]
@@ -1550,7 +1549,6 @@ struct cifsInodeInfo {
#define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
-#define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
unsigned long flags;
spinlock_t writers_lock;
@@ -1881,7 +1879,7 @@ static inline bool is_replayable_error(int error)
#define CIFSSEC_MAY_SIGN 0x00001
#define CIFSSEC_MAY_NTLMV2 0x00004
#define CIFSSEC_MAY_KRB5 0x00008
-#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */
+#define CIFSSEC_MAY_SEAL 0x00040
#define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_MUST_SIGN 0x01001
@@ -1891,11 +1889,11 @@ require use of the stronger protocol */
#define CIFSSEC_MUST_NTLMV2 0x04004
#define CIFSSEC_MUST_KRB5 0x08008
#ifdef CONFIG_CIFS_UPCALL
-#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */
+#define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */
#else
-#define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */
+#define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */
#endif /* UPCALL */
-#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
+#define CIFSSEC_MUST_SEAL 0x40040
#define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
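The new MASK values follow directly from the old ones: each MUST_* constant carries its MAY_* bit in the low half (CIFSSEC_MUST_SEAL 0x40040 = MUST bit 0x40000 | MAY bit 0x00040), so ORing MUST_SEAL into the old masks yields the new ones. A quick check (not from the patch):

	/* With UPCALL:    0x8F08F | 0x40040 == 0xCF0CF */
	/* Without UPCALL: 0x87087 | 0x40040 == 0xC70C7 */
	static_assert((0x8F08F | 0x40040) == 0xCF0CF);
	static_assert((0x87087 | 0x40040) == 0xC70C7);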
@@ -2017,9 +2015,9 @@ extern spinlock_t cifs_tcp_ses_lock;
/*
* Global transaction id (XID) information
*/
-extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
-extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
-extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
+extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */
+extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
+extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */
extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
/*
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 595c4b673707..7f3b37120a21 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1261,16 +1261,30 @@ openRetry:
return rc;
}
+static void cifs_readv_worker(struct work_struct *work)
+{
+ struct cifs_io_subrequest *rdata =
+ container_of(work, struct cifs_io_subrequest, subreq.work);
+
+ netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
+}
+
static void
cifs_readv_callback(struct mid_q_entry *mid)
{
struct cifs_io_subrequest *rdata = mid->callback_data;
+ struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_iter = rdata->subreq.io_iter };
- struct cifs_credits credits = { .value = 1, .instance = 0 };
+ struct cifs_credits credits = {
+ .value = 1,
+ .instance = 0,
+ .rreq_debug_id = rdata->rreq->debug_id,
+ .rreq_debug_index = rdata->subreq.debug_index,
+ };
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
@@ -1282,6 +1296,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
if (server->sign) {
int rc = 0;
+ iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
rc = cifs_verify_signature(&rqst, server,
mid->sequence_number);
if (rc)
@@ -1306,13 +1321,22 @@ cifs_readv_callback(struct mid_q_entry *mid)
rdata->result = -EIO;
}
- if (rdata->result == 0 || rdata->result == -EAGAIN)
- iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes);
+ if (rdata->result == -ENODATA) {
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ rdata->result = 0;
+ } else {
+ size_t trans = rdata->subreq.transferred + rdata->got_bytes;
+ if (trans < rdata->subreq.len &&
+ rdata->subreq.start + trans == ictx->remote_i_size) {
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ rdata->result = 0;
+ }
+ }
+
rdata->credits.value = 0;
- netfs_subreq_terminated(&rdata->subreq,
- (rdata->result == 0 || rdata->result == -EAGAIN) ?
- rdata->got_bytes : rdata->result,
- false);
+ rdata->subreq.transferred += rdata->got_bytes;
+ INIT_WORK(&rdata->subreq.work, cifs_readv_worker);
+ queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
add_credits(server, &credits, 0);
}
@@ -1619,9 +1643,15 @@ static void
cifs_writev_callback(struct mid_q_entry *mid)
{
struct cifs_io_subrequest *wdata = mid->callback_data;
+ struct TCP_Server_Info *server = wdata->server;
struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
- struct cifs_credits credits = { .value = 1, .instance = 0 };
+ struct cifs_credits credits = {
+ .value = 1,
+ .instance = 0,
+ .rreq_debug_id = wdata->rreq->debug_id,
+ .rreq_debug_index = wdata->subreq.debug_index,
+ };
ssize_t result;
size_t written;
@@ -1657,9 +1687,16 @@ cifs_writev_callback(struct mid_q_entry *mid)
break;
}
+ trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index,
+ wdata->credits.value,
+ server->credits, server->in_flight,
+ 0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0;
cifs_write_subrequest_terminated(wdata, result, true);
release_mid(mid);
+ trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, 0,
+ server->credits, server->in_flight,
+ credits.value, cifs_trace_rw_credits_write_response_add);
add_credits(tcon->ses->server, &credits, 0);
}
@@ -1713,7 +1750,6 @@ cifs_async_writev(struct cifs_io_subrequest *wdata)
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
rqst.rq_iter = wdata->subreq.io_iter;
- rqst.rq_iter_size = iov_iter_count(&wdata->subreq.io_iter);
cifs_dbg(FYI, "async write at %llu %zu bytes\n",
wdata->subreq.start, wdata->subreq.len);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index d2307162a2de..5375b0c1dfb9 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -657,6 +657,19 @@ static bool
server_unresponsive(struct TCP_Server_Info *server)
{
/*
+ * If we're in the process of mounting a share or reconnecting a session
+ * and the server abruptly shut down (e.g. socket wasn't closed, packet
+ * had been ACK'ed but no SMB response), don't wait longer than 20s to
+ * negotiate protocol.
+ */
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus == CifsInNegotiate &&
+ time_after(jiffies, server->lstrp + 20 * HZ)) {
+ spin_unlock(&server->srv_lock);
+ cifs_reconnect(server, false);
+ return true;
+ }
+ /*
* We need to wait 3 echo intervals to make sure we handle such
* situations right:
* 1s client sends a normal SMB request
@@ -667,7 +680,6 @@ server_unresponsive(struct TCP_Server_Info *server)
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
- spin_lock(&server->srv_lock);
if ((server->tcpStatus == CifsGood ||
server->tcpStatus == CifsNeedNegotiate) &&
(!server->ops->can_echo || server->ops->can_echo(server)) &&
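server->lstrp is the time of the last response from the server, so the added check fires a reconnect when a mount or session reconnect has sat in CifsInNegotiate for more than 20 seconds with nothing heard back; the status test below never covered that state (it only looks at CifsGood/CifsNeedNegotiate). Condensed sketch (negotiate_timed_out() is a hypothetical helper; caller holds srv_lock):

	#include <linux/jiffies.h>

	static bool negotiate_timed_out(const struct TCP_Server_Info *server)
	{
		return server->tcpStatus == CifsInNegotiate &&
		       time_after(jiffies, server->lstrp + 20 * HZ);
	}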
@@ -4194,6 +4206,9 @@ tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
*
* If one doesn't exist then insert a new tcon_link struct into the tree and
* try to construct a new one.
+ *
+ * REMEMBER to call cifs_put_tlink() after successful calls to cifs_sb_tlink,
+ * to avoid refcount issues
*/
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
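The added reminder is the theme of two hunks later in this patch (the ioctl.c shutdown paths below gain the missing puts): cifs_sb_tlink() returns a counted reference, and only an IS_ERR() return may skip the put. Minimal usage sketch (example_op is an illustrative name):

	static int example_op(struct cifs_sb_info *cifs_sb)
	{
		struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
		struct cifs_tcon *tcon;

		if (IS_ERR(tlink))
			return PTR_ERR(tlink);	/* no reference taken on error */
		tcon = tlink_tcon(tlink);
		/* ... use tcon ... */
		cifs_put_tlink(tlink);		/* drop the ref from cifs_sb_tlink() */
		return 0;
	}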
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index b2405dd4d4d4..bcde3f9c89e0 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -49,6 +49,7 @@ static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
struct cifs_io_subrequest *wdata =
container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = wdata->req;
+ struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
struct TCP_Server_Info *server;
struct cifsFileInfo *open_file = req->cfile;
size_t wsize = req->rreq.wsize;
@@ -73,7 +74,7 @@ retry:
}
}
- rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
+ rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
&wdata->credits);
if (rc < 0) {
subreq->error = rc;
@@ -92,7 +93,7 @@ retry:
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->smbd_conn)
- subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+ stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}
@@ -139,25 +140,22 @@ static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
}
/*
- * Split the read up according to how many credits we can get for each piece.
- * It's okay to sleep here if we need to wait for more credit to become
- * available.
- *
- * We also choose the server and allocate an operation ID to be cleaned up
- * later.
+ * Negotiate the size of a read operation on behalf of the netfs library.
*/
-static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
struct TCP_Server_Info *server = req->server;
struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
- size_t rsize = 0;
- int rc;
+ size_t size;
+ int rc = 0;
- rdata->xid = get_xid();
- rdata->have_xid = true;
+ if (!rdata->have_xid) {
+ rdata->xid = get_xid();
+ rdata->have_xid = true;
+ }
rdata->server = server;
if (cifs_sb->ctx->rsize == 0)
@@ -165,13 +163,12 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
cifs_sb->ctx);
+ rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
+ &size, &rdata->credits);
+ if (rc)
+ return rc;
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
- &rdata->credits);
- if (rc) {
- subreq->error = rc;
- return false;
- }
+ rreq->io_streams[0].sreq_max_len = size;
rdata->credits.in_flight_check = 1;
rdata->credits.rreq_debug_id = rreq->debug_id;
@@ -183,13 +180,11 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
server->credits, server->in_flight, 0,
cifs_trace_rw_credits_read_submit);
- subreq->len = min_t(size_t, subreq->len, rsize);
-
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->smbd_conn)
- subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+ rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
- return true;
+ return 0;
}
/*
@@ -198,31 +193,41 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
* to only read a portion of that, but as long as we read something, the netfs
* helper will call us again so that we can issue another read.
*/
-static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
+static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+ struct TCP_Server_Info *server = req->server;
int rc = 0;
cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
__func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
subreq->transferred, subreq->len);
+ rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
+ if (rc)
+ goto failed;
+
if (req->cfile->invalidHandle) {
do {
rc = cifs_reopen_file(req->cfile, true);
} while (rc == -EAGAIN);
if (rc)
- goto out;
+ goto failed;
}
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (subreq->rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
rc = rdata->server->ops->async_readv(rdata);
-out:
if (rc)
- netfs_subreq_terminated(subreq, rc, false);
+ goto failed;
+ return;
+
+failed:
+ netfs_read_subreq_terminated(subreq, rc, false);
}
/*
@@ -286,12 +291,6 @@ static void cifs_rreq_done(struct netfs_io_request *rreq)
inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}
-static void cifs_post_modify(struct inode *inode)
-{
- /* Indication to update ctime and mtime as close is deferred */
- set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
-}
-
static void cifs_free_request(struct netfs_io_request *rreq)
{
struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
@@ -315,7 +314,7 @@ static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
#endif
}
- if (rdata->credits.value != 0)
+ if (rdata->credits.value != 0) {
trace_smb3_rw_credits(rdata->rreq->debug_id,
rdata->subreq.debug_index,
rdata->credits.value,
@@ -323,8 +322,12 @@ static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
rdata->server ? rdata->server->in_flight : 0,
-rdata->credits.value,
cifs_trace_rw_credits_free_subreq);
+ if (rdata->server)
+ add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
+ else
+ rdata->credits.value = 0;
+ }
- add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
if (rdata->have_xid)
free_xid(rdata->xid);
}
@@ -335,10 +338,9 @@ const struct netfs_request_ops cifs_req_ops = {
.init_request = cifs_init_request,
.free_request = cifs_free_request,
.free_subrequest = cifs_free_subrequest,
- .clamp_length = cifs_clamp_length,
- .issue_read = cifs_req_issue_read,
+ .prepare_read = cifs_prepare_read,
+ .issue_read = cifs_issue_read,
.done = cifs_rreq_done,
- .post_modify = cifs_post_modify,
.begin_writeback = cifs_begin_writeback,
.prepare_write = cifs_prepare_write,
.issue_write = cifs_issue_write,
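Taken together, the file.c hunks swap the old clamp_length contract (return a bool, stash any error in subreq->error, clamp subreq->len in place) for the netfs prepare_read one: return 0 or a negative error, and publish the negotiated limits through the request's io_stream so the netfs core sizes subrequests itself. Schematic sketch (negotiate_rsize_and_credits() is a hypothetical stand-in for the wait_mtu_credits logic above):

	static int example_prepare_read(struct netfs_io_subrequest *subreq)
	{
		struct netfs_io_request *rreq = subreq->rreq;
		size_t max_len;
		int rc;

		rc = negotiate_rsize_and_credits(subreq, &max_len);	/* hypothetical */
		if (rc)
			return rc;	/* the netfs core handles the failure */

		rreq->io_streams[0].sreq_max_len = max_len;	/* cap this stream's subreqs */
		return 0;
	}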
@@ -1362,7 +1364,7 @@ int cifs_close(struct inode *inode, struct file *file)
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cfile->status_file_deleted == false) &&
(smb2_can_defer_close(inode, dclose))) {
- if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+ if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
inode_set_mtime_to_ts(inode,
inode_set_ctime_current(inode));
}
@@ -2749,6 +2751,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file->f_mapping->host;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
ssize_t rc;
rc = netfs_start_io_write(inode);
@@ -2765,12 +2768,16 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
if (rc <= 0)
goto out;
- if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
+ (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
server->vals->exclusive_lock_type, 0,
- NULL, CIFS_WRITE_OP))
- rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
- else
+ NULL, CIFS_WRITE_OP))) {
rc = -EACCES;
+ goto out;
+ }
+
+ rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
+
out:
up_read(&cinode->lock_sem);
netfs_end_io_write(inode);
@@ -2902,9 +2909,7 @@ cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
if (!CIFS_CACHE_READ(cinode))
return netfs_unbuffered_read_iter(iocb, to);
- if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
if (iocb->ki_flags & IOCB_DIRECT)
return netfs_unbuffered_read_iter(iocb, to);
return netfs_buffered_read_iter(iocb, to);
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index dd0afa23734c..73e2e6c230b7 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -172,6 +172,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
CIFS_I(inode)->time = 0; /* force reval */
return -ESTALE;
}
+ if (inode->i_state & I_NEW)
+ CIFS_I(inode)->netfs.zero_point = fattr->cf_eof;
cifs_revalidate_cache(inode, fattr);
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index 44dbaf9929a4..9bb5c869f4db 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -229,9 +229,11 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
shutdown_good:
trace_smb3_shutdown_done(flags, tcon->tid);
+ cifs_put_tlink(tlink);
return 0;
shutdown_out_err:
trace_smb3_shutdown_err(rc, flags, tcon->tid);
+ cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index d86da949a919..80099bbb333b 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -588,6 +588,7 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
rc = PTR_ERR(tlink);
+ /* BB could be clearer if we skipped the put_tlink on this error path, but it's harmless */
goto symlink_exit;
}
pTcon = tlink_tcon(tlink);
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index b28ff62f1f15..c6f11e6f9eb9 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -352,7 +352,7 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
* on simple responses (wct, bcc both zero)
* in particular have seen this on
* ulogoffX and FindClose. This leaves
- * one byte of bcc potentially unitialized
+ * one byte of bcc potentially uninitialized
*/
/* zero rest of bcc */
tmp[sizeof(struct smb_hdr)+1] = 0;
@@ -1234,6 +1234,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
const char *full_path,
bool *islink)
{
+ struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_ses *ses = tcon->ses;
size_t len;
char *path;
@@ -1250,12 +1251,12 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
!is_tcon_dfs(tcon))
return 0;
- spin_lock(&tcon->tc_lock);
- if (!tcon->origin_fullpath) {
- spin_unlock(&tcon->tc_lock);
+ spin_lock(&server->srv_lock);
+ if (!server->leaf_fullpath) {
+ spin_unlock(&server->srv_lock);
return 0;
}
- spin_unlock(&tcon->tc_lock);
+ spin_unlock(&server->srv_lock);
/*
* Slow path - tcon is DFS and @full_path has prefix path, so attempt
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 689d8a506d45..48c27581ec51 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -378,6 +378,8 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
u32 plen, struct cifs_sb_info *cifs_sb,
bool unicode, struct cifs_open_info_data *data)
{
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+
data->reparse.buf = buf;
/* See MS-FSCC 2.1.2 */
@@ -394,12 +396,13 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
case IO_REPARSE_TAG_LX_FIFO:
case IO_REPARSE_TAG_LX_CHR:
case IO_REPARSE_TAG_LX_BLK:
- return 0;
+ break;
default:
- cifs_dbg(VFS, "%s: unhandled reparse tag: 0x%08x\n",
- __func__, le32_to_cpu(buf->ReparseTag));
- return -EOPNOTSUPP;
+ cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
+ le32_to_cpu(buf->ReparseTag));
+ break;
}
+ return 0;
}
int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 9f5bc41433c1..11a1c53c64e0 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -1106,6 +1106,8 @@ int smb2_rename_path(const unsigned int xid,
co, DELETE, SMB2_OP_RENAME, cfile, source_dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
+ cifs_get_writable_path(tcon, from_name,
+ FIND_WR_WITH_DELETE, &cfile);
rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
co, DELETE, SMB2_OP_RENAME, cfile, NULL);
}
@@ -1149,6 +1151,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
cfile, NULL, NULL, dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
&(int){SMB2_OP_SET_EOF}, 1,
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 322cabc69c6f..159a063de6dd 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -13,6 +13,7 @@
#include <linux/sort.h>
#include <crypto/aead.h>
#include <linux/fiemap.h>
+#include <linux/folio_queue.h>
#include <uapi/linux/magic.h>
#include "cifsfs.h"
#include "cifsglob.h"
@@ -301,7 +302,8 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
unsigned int /*enum smb3_rw_credits_trace*/ trace)
{
struct cifs_credits *credits = &subreq->credits;
- int new_val = DIV_ROUND_UP(subreq->subreq.len, SMB2_MAX_BUFFER_SIZE);
+ int new_val = DIV_ROUND_UP(subreq->subreq.len - subreq->subreq.transferred,
+ SMB2_MAX_BUFFER_SIZE);
int scredits, in_flight;
if (!credits->value || credits->value == new_val)
@@ -316,7 +318,8 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
cifs_trace_rw_credits_no_adjust_up);
trace_smb3_too_many_credits(server->CurrentMid,
server->conn_id, server->hostname, 0, credits->value - new_val, 0);
- cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
+ cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)",
+ subreq->rreq->debug_id, subreq->subreq.debug_index,
credits->value, new_val);
return -EOPNOTSUPP;
@@ -338,8 +341,9 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
trace_smb3_reconnect_detected(server->CurrentMid,
server->conn_id, server->hostname, scredits,
credits->value - new_val, in_flight);
- cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
- credits->value - new_val);
+ cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n",
+ subreq->rreq->debug_id, subreq->subreq.debug_index,
+ credits->value - new_val);
return -EAGAIN;
}
@@ -3237,13 +3241,15 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
}
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
- loff_t offset, loff_t len, bool keep_size)
+ unsigned long long offset, unsigned long long len,
+ bool keep_size)
{
struct cifs_ses *ses = tcon->ses;
struct inode *inode = file_inode(file);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsFileInfo *cfile = file->private_data;
- unsigned long long new_size;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ unsigned long long i_size, new_size, remote_size;
long rc;
unsigned int xid;
@@ -3255,6 +3261,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
+ i_size = i_size_read(inode);
+ remote_size = ictx->remote_i_size;
+ if (offset + len >= remote_size && offset < i_size) {
+ unsigned long long top = umin(offset + len, i_size);
+
+ rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
+ if (rc < 0)
+ goto zero_range_exit;
+ }
+
/*
* We zero the range through ioctl, so we need remove the page caches
* first, otherwise the data may be inconsistent with the server.
@@ -3305,6 +3321,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
struct inode *inode = file_inode(file);
struct cifsFileInfo *cfile = file->private_data;
struct file_zero_data_information fsctl_buf;
+ unsigned long long end = offset + len, i_size, remote_i_size;
long rc;
unsigned int xid;
__u8 set_sparse = 1;
@@ -3336,6 +3353,27 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
(char *)&fsctl_buf,
sizeof(struct file_zero_data_information),
CIFSMaxBufSize, NULL, NULL);
+
+ if (rc)
+ goto unlock;
+
+ /* If there's dirty data in the buffer that would extend the EOF if it
+ * were written, then we need to move the EOF marker over to the lower
+ * of the high end of the hole and the proposed EOF. The problem is
+ * that if we locally hole-punch the tail of the dirty data, the proposed
+ * EOF update will end up in the wrong place.
+ */
+ i_size = i_size_read(inode);
+ remote_i_size = netfs_inode(inode)->remote_i_size;
+ if (end > remote_i_size && i_size > remote_i_size) {
+ unsigned long long extend_to = umin(end, i_size);
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid, extend_to);
+ if (rc >= 0)
+ netfs_inode(inode)->remote_i_size = extend_to;
+ }
+
+unlock:
filemap_invalidate_unlock(inode->i_mapping);
out:
inode_unlock(inode);
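Worked example for the EOF fix above: the server's idea of the file (remote_i_size) is 100KiB, local dirty pagecache has extended i_size to 150KiB, and the hole is punched over 120..200KiB. The punch discards the dirty folios above 120KiB, so writeback alone would leave the server EOF at 120KiB even though the file should be 150KiB with a hole in its tail. Since end (200KiB) and i_size (150KiB) both exceed remote_i_size, the code extends the server EOF to umin(end, i_size) = 150KiB.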
@@ -4356,30 +4394,86 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
}
/*
- * Clear a read buffer, discarding the folios which have XA_MARK_0 set.
+ * Clear a read buffer, discarding the folios which have the 1st mark set.
*/
-static void cifs_clear_xarray_buffer(struct xarray *buffer)
+static void cifs_clear_folioq_buffer(struct folio_queue *buffer)
{
+ struct folio_queue *folioq;
+
+ while ((folioq = buffer)) {
+ for (int s = 0; s < folioq_count(folioq); s++)
+ if (folioq_is_marked(folioq, s))
+ folio_put(folioq_folio(folioq, s));
+ buffer = folioq->next;
+ kfree(folioq);
+ }
+}
+
+/*
+ * Allocate buffer space into a folio queue.
+ */
+static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size)
+{
+ struct folio_queue *buffer = NULL, *tail = NULL, *p;
struct folio *folio;
+ unsigned int slot;
+
+ do {
+ if (!tail || folioq_full(tail)) {
+ p = kmalloc(sizeof(*p), GFP_NOFS);
+ if (!p)
+ goto nomem;
+ folioq_init(p);
+ if (tail) {
+ tail->next = p;
+ p->prev = tail;
+ } else {
+ buffer = p;
+ }
+ tail = p;
+ }
+
+ folio = folio_alloc(GFP_KERNEL|__GFP_HIGHMEM, 0);
+ if (!folio)
+ goto nomem;
+
+ slot = folioq_append_mark(tail, folio);
+ size -= folioq_folio_size(tail, slot);
+ } while (size > 0);
- XA_STATE(xas, buffer, 0);
+ return buffer;
- rcu_read_lock();
- xas_for_each_marked(&xas, folio, ULONG_MAX, XA_MARK_0) {
- folio_put(folio);
+nomem:
+ cifs_clear_folioq_buffer(buffer);
+ return NULL;
+}
+
+/*
+ * Copy data from an iterator to the folios in a folio queue buffer.
+ */
+static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
+ struct folio_queue *buffer)
+{
+ for (; buffer; buffer = buffer->next) {
+ for (int s = 0; s < folioq_count(buffer); s++) {
+ struct folio *folio = folioq_folio(buffer, s);
+ size_t part = folioq_folio_size(buffer, s);
+
+ part = umin(part, size);
+
+ if (copy_folio_from_iter(folio, 0, part, iter) != part)
+ return false;
+ size -= part;
+ }
}
- rcu_read_unlock();
- xa_destroy(buffer);
+ return true;
}
void
smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
{
- int i;
-
- for (i = 0; i < num_rqst; i++)
- if (!xa_empty(&rqst[i].rq_buffer))
- cifs_clear_xarray_buffer(&rqst[i].rq_buffer);
+ for (int i = 0; i < num_rqst; i++)
+ cifs_clear_folioq_buffer(rqst[i].rq_buffer);
}
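The three helpers above define the whole encryption-buffer lifecycle: allocate a folio queue big enough for the payload (each folio marked so teardown knows to put it), copy the plaintext in, then expose it as an ITER_SOURCE iterator for the transform. Sketch of the sequence as smb3_init_transform_rq() below wires it up (error handling abbreviated):

	struct folio_queue *buffer;
	size_t size = iov_iter_count(&old->rq_iter);

	buffer = cifs_alloc_folioq_buffer(size);
	if (!buffer)
		return -ENOMEM;
	if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
		cifs_clear_folioq_buffer(buffer);	/* puts the marked folios */
		return -EIO;
	}
	new->rq_buffer = buffer;
	iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE, buffer, 0, 0, size);
	/* ... encrypt; smb3_free_compound_rqst() clears the buffer later ... */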
/*
@@ -4400,53 +4494,32 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
- struct page *page;
unsigned int orig_len = 0;
- int i, j;
int rc = -ENOMEM;
- for (i = 1; i < num_rqst; i++) {
+ for (int i = 1; i < num_rqst; i++) {
struct smb_rqst *old = &old_rq[i - 1];
struct smb_rqst *new = &new_rq[i];
- struct xarray *buffer = &new->rq_buffer;
- size_t size = iov_iter_count(&old->rq_iter), seg, copied = 0;
+ struct folio_queue *buffer;
+ size_t size = iov_iter_count(&old->rq_iter);
orig_len += smb_rqst_len(server, old);
new->rq_iov = old->rq_iov;
new->rq_nvec = old->rq_nvec;
- xa_init(buffer);
-
if (size > 0) {
- unsigned int npages = DIV_ROUND_UP(size, PAGE_SIZE);
-
- for (j = 0; j < npages; j++) {
- void *o;
-
- rc = -ENOMEM;
- page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!page)
- goto err_free;
- page->index = j;
- o = xa_store(buffer, j, page, GFP_KERNEL);
- if (xa_is_err(o)) {
- rc = xa_err(o);
- put_page(page);
- goto err_free;
- }
+ buffer = cifs_alloc_folioq_buffer(size);
+ if (!buffer)
+ goto err_free;
- xa_set_mark(buffer, j, XA_MARK_0);
+ new->rq_buffer = buffer;
+ iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE,
+ buffer, 0, 0, size);
- seg = min_t(size_t, size - copied, PAGE_SIZE);
- if (copy_page_from_iter(page, 0, seg, &old->rq_iter) != seg) {
- rc = -EFAULT;
- goto err_free;
- }
- copied += seg;
+ if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
+ rc = -EIO;
+ goto err_free;
}
- iov_iter_xarray(&new->rq_iter, ITER_SOURCE,
- buffer, 0, size);
- new->rq_iter_size = size;
}
}
@@ -4492,7 +4565,6 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
rqst.rq_nvec = 2;
if (iter) {
rqst.rq_iter = *iter;
- rqst.rq_iter_size = iov_iter_count(iter);
iter_size = iov_iter_count(iter);
}
@@ -4511,22 +4583,23 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
}
static int
-cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
- unsigned int skip, struct iov_iter *iter)
+cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
+ size_t skip, struct iov_iter *iter)
{
- struct page *page;
- unsigned long index;
-
- xa_for_each(pages, index, page) {
- size_t n, len = min_t(unsigned int, PAGE_SIZE - skip, data_size);
-
- n = copy_page_to_iter(page, skip, len, iter);
- if (n != len) {
- cifs_dbg(VFS, "%s: something went wrong\n", __func__);
- return -EIO;
+ for (; folioq; folioq = folioq->next) {
+ for (int s = 0; s < folioq_count(folioq); s++) {
+ struct folio *folio = folioq_folio(folioq, s);
+ size_t fsize = folio_size(folio);
+ size_t n, len = umin(fsize - skip, data_size);
+
+ n = copy_folio_to_iter(folio, skip, len, iter);
+ if (n != len) {
+ cifs_dbg(VFS, "%s: something went wrong\n", __func__);
+ return -EIO;
+ }
+ data_size -= n;
+ skip = 0;
}
- data_size -= n;
- skip = 0;
}
return 0;
@@ -4534,8 +4607,8 @@ cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
- char *buf, unsigned int buf_len, struct xarray *pages,
- unsigned int pages_len, bool is_offloaded)
+ char *buf, unsigned int buf_len, struct folio_queue *buffer,
+ unsigned int buffer_len, bool is_offloaded)
{
unsigned int data_offset;
unsigned int data_len;
@@ -4632,7 +4705,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
return 0;
}
- if (data_len > pages_len - pad_len) {
+ if (data_len > buffer_len - pad_len) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
if (is_offloaded)
@@ -4643,8 +4716,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
}
/* Copy the data to the output I/O iterator. */
- rdata->result = cifs_copy_pages_to_iter(pages, pages_len,
- cur_off, &rdata->subreq.io_iter);
+ rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len,
+ cur_off, &rdata->subreq.io_iter);
if (rdata->result != 0) {
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
@@ -4652,12 +4725,11 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
dequeue_mid(mid, rdata->result);
return 0;
}
- rdata->got_bytes = pages_len;
+ rdata->got_bytes = buffer_len;
} else if (buf_len >= data_offset + data_len) {
/* read response payload is in buf */
- WARN_ONCE(pages && !xa_empty(pages),
- "read data can be either in buf or in pages");
+ WARN_ONCE(buffer, "read data can be either in buf or in buffer");
length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
if (length < 0)
return length;
@@ -4683,7 +4755,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
struct smb2_decrypt_work {
struct work_struct decrypt;
struct TCP_Server_Info *server;
- struct xarray buffer;
+ struct folio_queue *buffer;
char *buf;
unsigned int len;
};
@@ -4697,7 +4769,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
struct mid_q_entry *mid;
struct iov_iter iter;
- iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, dw->len);
+ iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
&iter, true);
if (rc) {
@@ -4713,7 +4785,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->decrypted = true;
rc = handle_read_data(dw->server, mid, dw->buf,
dw->server->vals->read_rsp_size,
- &dw->buffer, dw->len,
+ dw->buffer, dw->len,
true);
if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
@@ -4746,7 +4818,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
}
free_pages:
- cifs_clear_xarray_buffer(&dw->buffer);
+ cifs_clear_folioq_buffer(dw->buffer);
cifs_small_buf_release(dw->buf);
kfree(dw);
}
@@ -4756,20 +4828,17 @@ static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
int *num_mids)
{
- struct page *page;
char *buf = server->smallbuf;
struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
struct iov_iter iter;
- unsigned int len, npages;
+ unsigned int len;
unsigned int buflen = server->pdu_size;
int rc;
- int i = 0;
struct smb2_decrypt_work *dw;
dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
if (!dw)
return -ENOMEM;
- xa_init(&dw->buffer);
INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
dw->server = server;
@@ -4785,26 +4854,14 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
server->vals->read_rsp_size;
dw->len = len;
- npages = DIV_ROUND_UP(len, PAGE_SIZE);
+ len = round_up(dw->len, PAGE_SIZE);
rc = -ENOMEM;
- for (; i < npages; i++) {
- void *old;
-
- page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!page)
- goto discard_data;
- page->index = i;
- old = xa_store(&dw->buffer, i, page, GFP_KERNEL);
- if (xa_is_err(old)) {
- rc = xa_err(old);
- put_page(page);
- goto discard_data;
- }
- xa_set_mark(&dw->buffer, i, XA_MARK_0);
- }
+ dw->buffer = cifs_alloc_folioq_buffer(len);
+ if (!dw->buffer)
+ goto discard_data;
- iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, npages * PAGE_SIZE);
+ iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
/* Read the data into the buffer and clear excess bufferage. */
rc = cifs_read_iter_from_socket(server, &iter, dw->len);
@@ -4812,9 +4869,9 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
goto discard_data;
server->total_read += rc;
- if (rc < npages * PAGE_SIZE)
- iov_iter_zero(npages * PAGE_SIZE - rc, &iter);
- iov_iter_revert(&iter, npages * PAGE_SIZE);
+ if (rc < len)
+ iov_iter_zero(len - rc, &iter);
+ iov_iter_revert(&iter, len);
iov_iter_truncate(&iter, dw->len);
rc = cifs_discard_remaining_data(server);
@@ -4849,7 +4906,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
(*mid)->decrypted = true;
rc = handle_read_data(server, *mid, buf,
server->vals->read_rsp_size,
- &dw->buffer, dw->len, false);
+ dw->buffer, dw->len, false);
if (rc >= 0) {
if (server->ops->is_network_name_deleted) {
server->ops->is_network_name_deleted(buf,
@@ -4859,7 +4916,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
}
free_pages:
- cifs_clear_xarray_buffer(&dw->buffer);
+ cifs_clear_folioq_buffer(dw->buffer);
free_dw:
kfree(dw);
return rc;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 9a06b5594669..bb8ecbbe78af 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -82,6 +82,9 @@ int smb3_encryption_required(const struct cifs_tcon *tcon)
if (tcon->seal &&
(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
return 1;
+ if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
+ (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ return 1;
return 0;
}
@@ -4438,7 +4441,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
* If we want to do a RDMA write, fill in and append
* smbd_buffer_descriptor_v1 to the end of read request
*/
- if (smb3_use_rdma_offload(io_parms)) {
+ if (rdata && smb3_use_rdma_offload(io_parms)) {
struct smbd_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
@@ -4495,15 +4498,14 @@ static void smb2_readv_worker(struct work_struct *work)
struct cifs_io_subrequest *rdata =
container_of(work, struct cifs_io_subrequest, subreq.work);
- netfs_subreq_terminated(&rdata->subreq,
- (rdata->result == 0 || rdata->result == -EAGAIN) ?
- rdata->got_bytes : rdata->result, true);
+ netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
}
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
struct cifs_io_subrequest *rdata = mid->callback_data;
+ struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
struct TCP_Server_Info *server = rdata->server;
struct smb2_hdr *shdr =
@@ -4520,16 +4522,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
if (rdata->got_bytes) {
rqst.rq_iter = rdata->subreq.io_iter;
- rqst.rq_iter_size = iov_iter_count(&rdata->subreq.io_iter);
}
WARN_ONCE(rdata->server != mid->server,
"rdata server %p != mid server %p",
rdata->server, mid->server);
- cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
+ cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
- rdata->subreq.len);
+ rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
@@ -4551,6 +4552,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
+ __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
rdata->result = -EAGAIN;
if (server->sign && rdata->got_bytes)
/* reset bytes number since we can not check a sign */
@@ -4583,27 +4585,36 @@ smb2_readv_callback(struct mid_q_entry *mid)
rdata->subreq.debug_index,
rdata->xid,
rdata->req->cfile->fid.persistent_fid,
- tcon->tid, tcon->ses->Suid, rdata->subreq.start,
- rdata->subreq.len, rdata->result);
+ tcon->tid, tcon->ses->Suid,
+ rdata->subreq.start + rdata->subreq.transferred,
+ rdata->subreq.len - rdata->subreq.transferred,
+ rdata->result);
} else
trace_smb3_read_done(rdata->rreq->debug_id,
rdata->subreq.debug_index,
rdata->xid,
rdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
- rdata->subreq.start, rdata->got_bytes);
+ rdata->subreq.start + rdata->subreq.transferred,
+ rdata->got_bytes);
if (rdata->result == -ENODATA) {
- /* We may have got an EOF error because fallocate
- * failed to enlarge the file.
- */
- if (rdata->subreq.start < rdata->subreq.rreq->i_size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ rdata->result = 0;
+ } else {
+ size_t trans = rdata->subreq.transferred + rdata->got_bytes;
+ if (trans < rdata->subreq.len &&
+ rdata->subreq.start + trans == ictx->remote_i_size) {
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
+ }
}
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
server->credits, server->in_flight,
0, cifs_trace_rw_credits_read_response_clear);
rdata->credits.value = 0;
+ rdata->subreq.transferred += rdata->got_bytes;
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
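In the rewritten completion above, a short read is no longer just a terminal byte count: if the response stops exactly at the server's file size, the subrequest is marked NETFS_SREQ_HIT_EOF and completes cleanly. For example, with subreq.start = 0, subreq.len = 64KiB, transferred = 0 and remote_i_size = 16KiB, a 16KiB reply gives trans = 16KiB < 64KiB and start + trans == remote_i_size, so HIT_EOF is set and result becomes 0 rather than the request being resubmitted for the missing 48KiB.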
@@ -4619,6 +4630,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
{
int rc, flags = 0;
char *buf;
+ struct netfs_io_subrequest *subreq = &rdata->subreq;
struct smb2_hdr *shdr;
struct cifs_io_parms io_parms;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
@@ -4629,15 +4641,15 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
int credit_request;
cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
- __func__, rdata->subreq.start, rdata->subreq.len);
+ __func__, subreq->start, subreq->len);
if (!rdata->server)
rdata->server = cifs_pick_channel(tcon->ses);
io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
io_parms.server = server = rdata->server;
- io_parms.offset = rdata->subreq.start;
- io_parms.length = rdata->subreq.len;
+ io_parms.offset = subreq->start + subreq->transferred;
+ io_parms.length = subreq->len - subreq->transferred;
io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
io_parms.pid = rdata->req->pid;
@@ -4652,11 +4664,13 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
rdata->iov[0].iov_base = buf;
rdata->iov[0].iov_len = total_len;
+ rdata->got_bytes = 0;
+ rdata->result = 0;
shdr = (struct smb2_hdr *)buf;
if (rdata->credits.value > 0) {
- shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->subreq.len,
+ shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
SMB2_MAX_BUFFER_SIZE));
credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
if (server->credits >= server->max_credits)
@@ -4680,11 +4694,12 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
if (rc) {
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
trace_smb3_read_err(rdata->rreq->debug_id,
- rdata->subreq.debug_index,
+ subreq->debug_index,
rdata->xid, io_parms.persistent_fid,
io_parms.tcon->tid,
io_parms.tcon->ses->Suid,
- io_parms.offset, io_parms.length, rc);
+ io_parms.offset,
+ subreq->len - subreq->transferred, rc);
}
async_readv_out:
@@ -4867,6 +4882,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
server->credits, server->in_flight,
0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0;
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
cifs_write_subrequest_terminated(wdata, result ?: written, true);
release_mid(mid);
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
@@ -4911,6 +4927,13 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
if (rc)
goto out;
+ rqst.rq_iov = iov;
+ rqst.rq_iter = wdata->subreq.io_iter;
+
+ rqst.rq_iov[0].iov_len = total_len - 1;
+ rqst.rq_iov[0].iov_base = (char *)req;
+ rqst.rq_nvec += 1;
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -4922,6 +4945,7 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = SMB2_CHANNEL_NONE;
+ req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
req->DataOffset = cpu_to_le16(
offsetof(struct smb2_write_req, Buffer));
@@ -4941,7 +4965,6 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
*/
if (smb3_use_rdma_offload(io_parms)) {
struct smbd_buffer_descriptor_v1 *v1;
- size_t data_size = iov_iter_count(&wdata->subreq.io_iter);
bool need_invalidate = server->dialect == SMB30_PROT_ID;
wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
@@ -4950,9 +4973,10 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
rc = -EAGAIN;
goto async_writev_out;
}
+ /* For RDMA read, I/O size is in RemainingBytes not in Length */
+ req->RemainingBytes = req->Length;
req->Length = 0;
req->DataOffset = 0;
- req->RemainingBytes = cpu_to_le32(data_size);
req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
if (need_invalidate)
req->Channel = SMB2_CHANNEL_RDMA_V1;
@@ -4964,31 +4988,22 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
v1->offset = cpu_to_le64(wdata->mr->mr->iova);
v1->token = cpu_to_le32(wdata->mr->mr->rkey);
v1->length = cpu_to_le32(wdata->mr->mr->length);
+
+ rqst.rq_iov[0].iov_len += sizeof(*v1);
+
+ /*
+ * We keep wdata->subreq.io_iter,
+ * but we have to truncate rqst.rq_iter
+ */
+ iov_iter_truncate(&rqst.rq_iter, 0);
}
#endif
- iov[0].iov_len = total_len - 1;
- iov[0].iov_base = (char *)req;
- rqst.rq_iov = iov;
- rqst.rq_nvec = 1;
- rqst.rq_iter = wdata->subreq.io_iter;
- rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
smb2_set_replay(server, &rqst);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (wdata->mr)
- iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
-#endif
- cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
- io_parms->offset, io_parms->length, iov_iter_count(&rqst.rq_iter));
-#ifdef CONFIG_CIFS_SMB_DIRECT
- /* For RDMA read, I/O size is in RemainingBytes not in Length */
- if (!wdata->mr)
- req->Length = cpu_to_le32(io_parms->length);
-#else
- req->Length = cpu_to_le32(io_parms->length);
-#endif
+ cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
+ io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter));
if (wdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index d74e829de51c..80262a36030f 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/folio_queue.h>
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
@@ -406,7 +407,7 @@ static void smbd_post_send_credits(struct work_struct *work)
else
response = get_empty_queue_buffer(info);
if (!response) {
- /* now switch to emtpy packet queue */
+ /* now switch to empty packet queue */
if (use_receive_queue) {
use_receive_queue = 0;
continue;
@@ -618,7 +619,7 @@ out:
/*
* Test if FRWR (Fast Registration Work Requests) is supported on the device
- * This implementation requries FRWR on RDMA read/write
+ * This implementation requires FRWR on RDMA read/write
* return value: true if it is supported
*/
static bool frwr_is_supported(struct ib_device_attr *attrs)
@@ -2177,7 +2178,7 @@ cleanup_entries:
* MR available in the list. It may access the list while the
* smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
* as they never modify the same places. However, there may be several CPUs
- * issueing I/O trying to get MR at the same time, mr_list_lock is used to
+ * issuing I/O trying to get MR at the same time, mr_list_lock is used to
* protect this situation.
*/
static struct smbd_mr *get_mr(struct smbd_connection *info)
@@ -2311,7 +2312,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
/*
* There is no need for waiting for complemtion on ib_post_send
* on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
- * on the next ib_post_send when we actaully send I/O to remote peer
+ * on the next ib_post_send when we actually send I/O to remote peer
*/
rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
if (!rc)
@@ -2463,6 +2464,8 @@ static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
start = 0;
}
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
return ret;
}
@@ -2519,50 +2522,65 @@ static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
start = 0;
}
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
return ret;
}
/*
- * Extract folio fragments from an XARRAY-class iterator and add them to an
- * RDMA list. The folios are not pinned.
+ * Extract folio fragments from a FOLIOQ-class iterator and add them to an RDMA
+ * list. The folios are not pinned.
*/
-static ssize_t smb_extract_xarray_to_rdma(struct iov_iter *iter,
+static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
struct smb_extract_to_rdma *rdma,
ssize_t maxsize)
{
- struct xarray *xa = iter->xarray;
- struct folio *folio;
- loff_t start = iter->xarray_start + iter->iov_offset;
- pgoff_t index = start / PAGE_SIZE;
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int slot = iter->folioq_slot;
ssize_t ret = 0;
- size_t off, len;
- XA_STATE(xas, xa, index);
+ size_t offset = iter->iov_offset;
- rcu_read_lock();
+ BUG_ON(!folioq);
- xas_for_each(&xas, folio, ULONG_MAX) {
- if (xas_retry(&xas, folio))
- continue;
- if (WARN_ON(xa_is_value(folio)))
- break;
- if (WARN_ON(folio_test_hugetlb(folio)))
- break;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ if (WARN_ON_ONCE(!folioq))
+ return -EIO;
+ slot = 0;
+ }
- off = offset_in_folio(folio, start);
- len = min_t(size_t, maxsize, folio_size(folio) - off);
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t fsize = folioq_folio_size(folioq, slot);
- if (!smb_set_sge(rdma, folio_page(folio, 0), off, len)) {
- rcu_read_unlock();
- return -EIO;
+ if (offset < fsize) {
+ size_t part = umin(maxsize - ret, fsize - offset);
+
+ if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
+ return -EIO;
+
+ offset += part;
+ ret += part;
}
- maxsize -= len;
- ret += len;
- if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0)
- break;
- }
+ if (offset >= fsize) {
+ offset = 0;
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ if (!folioq->next) {
+ WARN_ON_ONCE(ret < iter->count);
+ break;
+ }
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+	} while (rdma->nr_sge < rdma->max_sge && ret < maxsize);
- rcu_read_unlock();
+ iter->folioq = folioq;
+ iter->folioq_slot = slot;
+ iter->iov_offset = offset;
+ iter->count -= ret;
return ret;
}
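
For context, the new FOLIOQ case above follows the standard folio_queue traversal shape: walk the segments via ->next, and the slots within each segment via the folioq_* accessors. A minimal sketch of that shape, using only helpers from <linux/folio_queue.h>; consume_fragment() is a hypothetical consumer, not a kernel API:

#include <linux/folio_queue.h>

static void consume_fragment(struct folio *folio, size_t len); /* hypothetical */

static void walk_folioq(const struct folio_queue *folioq)
{
	for (; folioq; folioq = folioq->next) {
		unsigned int slot;

		for (slot = 0; slot < folioq_nr_slots(folioq); slot++) {
			struct folio *folio = folioq_folio(folioq, slot);

			if (!folio)	/* slots may be sparsely filled */
				continue;
			consume_fragment(folio,
					 folioq_folio_size(folioq, slot));
		}
	}
}
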
@@ -2590,17 +2608,15 @@ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
case ITER_KVEC:
ret = smb_extract_kvec_to_rdma(iter, rdma, len);
break;
- case ITER_XARRAY:
- ret = smb_extract_xarray_to_rdma(iter, rdma, len);
+ case ITER_FOLIOQ:
+ ret = smb_extract_folioq_to_rdma(iter, rdma, len);
break;
default:
WARN_ON_ONCE(1);
return -EIO;
}
- if (ret > 0) {
- iov_iter_advance(iter, ret);
- } else if (ret < 0) {
+ if (ret < 0) {
while (rdma->nr_sge > before) {
struct ib_sge *sge = &rdma->sge[rdma->nr_sge--];
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 0f0c10c7ada7..8e9964001e2a 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -30,6 +30,7 @@
EM(cifs_trace_rw_credits_old_session, "old-session") \
EM(cifs_trace_rw_credits_read_response_add, "rd-resp-add") \
EM(cifs_trace_rw_credits_read_response_clear, "rd-resp-clr") \
+ EM(cifs_trace_rw_credits_read_resubmit, "rd-resubmit") \
EM(cifs_trace_rw_credits_read_submit, "rd-submit ") \
EM(cifs_trace_rw_credits_write_prepare, "wr-prepare ") \
EM(cifs_trace_rw_credits_write_response_add, "wr-resp-add") \
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index adfe0d058701..6e68aaf5bd20 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -1289,7 +1289,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
out:
/*
* This will dequeue all mids. After this it is important that the
- * demultiplex_thread will not process any of these mids any futher.
+ * demultiplex_thread will not process any of these mids any further.
* This is prevented above by using a noop callback that will not
* wake this thread except for the very last PDU.
*/
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index c3ee42188d25..c769f9dbc0b4 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -1216,6 +1216,8 @@ struct create_context {
);
__u8 Buffer[];
} __packed;
+static_assert(offsetof(struct create_context, Buffer) == sizeof(struct create_context_hdr),
+ "struct member likely outside of __struct_group()");
struct smb2_create_req {
struct smb2_hdr hdr;
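
The static_assert above guards the __struct_group() pattern: if a member is later added between the end of the group and the flexible array, the header size and the Buffer offset diverge and the build fails instead of silently corrupting the wire format. A self-contained sketch of the same guard, with illustrative names:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct pkt {
	/* New members must be added inside the struct_group(). */
	struct_group(hdr,
		__le32 next;
		__le16 name_offset;
		__le16 name_length;
	);
	__u8 buffer[];
} __packed;

static_assert(offsetof(struct pkt, buffer) == sizeof_field(struct pkt, hdr),
	      "struct member likely outside of struct_group()");
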
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index 09e1e7771592..7889df8112b4 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -165,11 +165,43 @@ void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
up_read(&conn_list_lock);
}
-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
+void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}
+int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
+{
+ struct ksmbd_conn *conn;
+ int rc, retry_count = 0, max_timeout = 120;
+ int rcount = 1;
+
+retry_idle:
+ if (retry_count >= max_timeout)
+ return -EIO;
+
+ down_read(&conn_list_lock);
+ list_for_each_entry(conn, &conn_list, conns_list) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id)) {
+ if (conn == curr_conn)
+ rcount = 2;
+ if (atomic_read(&conn->req_running) >= rcount) {
+ rc = wait_event_timeout(conn->req_running_q,
+ atomic_read(&conn->req_running) < rcount,
+ HZ);
+ if (!rc) {
+ up_read(&conn_list_lock);
+ retry_count++;
+ goto retry_idle;
+ }
+ }
+ }
+ }
+ up_read(&conn_list_lock);
+
+ return 0;
+}
+
int ksmbd_conn_write(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
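
The new helper above is an instance of a bounded-wait pattern: wait_event_timeout() with a one-second timeout, retried up to ~120 times, and the list lock is dropped only when a timeout forces another pass. A stripped-down sketch of the pattern; my_lock, my_cnt and my_wq are illustrative, not ksmbd symbols:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

static DECLARE_RWSEM(my_lock);
static atomic_t my_cnt = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static int wait_for_drain(void)
{
	int tries = 0;

retry:
	if (tries >= 120)
		return -EIO;	/* give up after roughly 120s */
	down_read(&my_lock);
	if (atomic_read(&my_cnt) >= 1 &&
	    !wait_event_timeout(my_wq, atomic_read(&my_cnt) < 1, HZ)) {
		up_read(&my_lock);	/* drop the lock before retrying */
		tries++;
		goto retry;
	}
	up_read(&my_lock);
	return 0;
}
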
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 5c2845e47cf2..5b947175c048 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -145,7 +145,8 @@ extern struct list_head conn_list;
extern struct rw_semaphore conn_list_lock;
bool ksmbd_conn_alive(struct ksmbd_conn *conn);
-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
+void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
+int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id);
struct ksmbd_conn *ksmbd_conn_alloc(void);
void ksmbd_conn_free(struct ksmbd_conn *conn);
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
index e0a6b758094f..d8d03070ae44 100644
--- a/fs/smb/server/mgmt/share_config.c
+++ b/fs/smb/server/mgmt/share_config.c
@@ -15,6 +15,7 @@
#include "share_config.h"
#include "user_config.h"
#include "user_session.h"
+#include "../connection.h"
#include "../transport_ipc.h"
#include "../misc.h"
@@ -120,12 +121,13 @@ static int parse_veto_list(struct ksmbd_share_config *share,
return 0;
}
-static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
const char *name)
{
struct ksmbd_share_config_response *resp;
struct ksmbd_share_config *share = NULL;
struct ksmbd_share_config *lookup;
+ struct unicode_map *um = work->conn->um;
int ret;
resp = ksmbd_ipc_share_config_request(name);
@@ -181,7 +183,14 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
KSMBD_SHARE_CONFIG_VETO_LIST(resp),
resp->veto_list_sz);
if (!ret && share->path) {
+ if (__ksmbd_override_fsids(work, share)) {
+ kill_share(share);
+ share = NULL;
+ goto out;
+ }
+
ret = kern_path(share->path, 0, &share->vfs_path);
+ ksmbd_revert_fsids(work);
if (ret) {
ksmbd_debug(SMB, "failed to access '%s'\n",
share->path);
@@ -214,7 +223,7 @@ out:
return share;
}
-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
const char *name)
{
struct ksmbd_share_config *share;
@@ -227,7 +236,7 @@ struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
if (share)
return share;
- return share_config_request(um, name);
+ return share_config_request(work, name);
}
bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
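
The override/revert bracket added around kern_path() makes the path lookup run with the share's configured fsuid/fsgid rather than the worker thread's credentials. Underneath, this is the standard kernel credential-override pattern; a minimal sketch, with illustrative parameters:

#include <linux/cred.h>
#include <linux/errno.h>

static int do_vfs_work_as(kuid_t fsuid, kgid_t fsgid)
{
	const struct cred *old;
	struct cred *cred;

	cred = prepare_creds();		/* copy of current creds */
	if (!cred)
		return -ENOMEM;
	cred->fsuid = fsuid;
	cred->fsgid = fsgid;
	old = override_creds(cred);	/* install for this task */

	/* ... kern_path() and friends now run as fsuid/fsgid ... */

	revert_creds(old);		/* restore previous creds */
	put_cred(cred);			/* drop the prepare_creds() ref */
	return 0;
}
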
diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h
index 5f591751b923..d4ac2dd4de20 100644
--- a/fs/smb/server/mgmt/share_config.h
+++ b/fs/smb/server/mgmt/share_config.h
@@ -11,6 +11,8 @@
#include <linux/path.h>
#include <linux/unicode.h>
+struct ksmbd_work;
+
struct ksmbd_share_config {
char *name;
char *path;
@@ -68,7 +70,7 @@ static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
__ksmbd_share_config_put(share);
}
-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
const char *name);
bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
const char *filename);
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
index d2c81a8a11dd..94a52a75014a 100644
--- a/fs/smb/server/mgmt/tree_connect.c
+++ b/fs/smb/server/mgmt/tree_connect.c
@@ -16,17 +16,18 @@
#include "user_session.h"
struct ksmbd_tree_conn_status
-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
- const char *share_name)
+ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
{
struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
struct ksmbd_tree_connect_response *resp = NULL;
struct ksmbd_share_config *sc;
struct ksmbd_tree_connect *tree_conn = NULL;
struct sockaddr *peer_addr;
+ struct ksmbd_conn *conn = work->conn;
+ struct ksmbd_session *sess = work->sess;
int ret;
- sc = ksmbd_share_config_get(conn->um, share_name);
+ sc = ksmbd_share_config_get(work, share_name);
if (!sc)
return status;
@@ -61,7 +62,7 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
struct ksmbd_share_config *new_sc;
ksmbd_share_config_del(sc);
- new_sc = ksmbd_share_config_get(conn->um, share_name);
+ new_sc = ksmbd_share_config_get(work, share_name);
if (!new_sc) {
pr_err("Failed to update stale share config\n");
status.ret = -ESTALE;
diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
index 6377a70b811c..a42cdd051041 100644
--- a/fs/smb/server/mgmt/tree_connect.h
+++ b/fs/smb/server/mgmt/tree_connect.h
@@ -13,6 +13,7 @@
struct ksmbd_share_config;
struct ksmbd_user;
struct ksmbd_conn;
+struct ksmbd_work;
enum {
TREE_NEW = 0,
@@ -50,8 +51,7 @@ static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
struct ksmbd_session;
struct ksmbd_tree_conn_status
-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
- const char *share_name);
+ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name);
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 162a12685d2c..99416ce9f501 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -311,6 +311,7 @@ void destroy_previous_session(struct ksmbd_conn *conn,
{
struct ksmbd_session *prev_sess;
struct ksmbd_user *prev_user;
+ int err;
down_write(&sessions_table_lock);
down_write(&conn->session_lock);
@@ -325,8 +326,16 @@ void destroy_previous_session(struct ksmbd_conn *conn,
memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
goto out;
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
+ err = ksmbd_conn_wait_idle_sess_id(conn, id);
+ if (err) {
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ goto out;
+ }
+
ksmbd_destroy_file_table(&prev_sess->file_table);
prev_sess->state = SMB2_SESSION_EXPIRED;
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
ksmbd_launch_ksmbd_durable_scavenger();
out:
up_write(&conn->session_lock);
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index a8f52c4ebbda..e546ffa57b55 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -1510,7 +1510,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
 * parse_lease_state() - parse lease context contained in file open request
* @open_req: buffer containing smb2 file open(create) request
*
- * Return: oplock state, -ENOENT if create lease context not found
+ * Return: allocated lease context object on success, otherwise NULL
*/
struct lease_ctx_info *parse_lease_state(void *open_req)
{
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 37a39ab4ee65..8bdc59251418 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -519,7 +519,7 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
* smb2_allocate_rsp_buf() - allocate smb2 response buffer
* @work: smb work containing smb request buffer
*
- * Return: 0 on success, otherwise -ENOMEM
+ * Return: 0 on success, otherwise error
*/
int smb2_allocate_rsp_buf(struct ksmbd_work *work)
{
@@ -1370,7 +1370,8 @@ static int ntlm_negotiate(struct ksmbd_work *work,
}
sz = le16_to_cpu(rsp->SecurityBufferOffset);
- memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
+ unsafe_memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len,
+ /* alloc is larger than blob, see smb2_allocate_rsp_buf() */);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
out:
@@ -1453,7 +1454,9 @@ static int ntlm_authenticate(struct ksmbd_work *work,
return -ENOMEM;
sz = le16_to_cpu(rsp->SecurityBufferOffset);
- memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
+ unsafe_memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob,
+ spnego_blob_len,
+ /* alloc is larger than blob, see smb2_allocate_rsp_buf() */);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
kfree(spnego_blob);
}
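
unsafe_memcpy() is the FORTIFY_SOURCE escape hatch for a copy that deliberately writes past the compiler-visible size of the destination field; the mandatory trailing comment records why the copy is actually safe. A sketch with illustrative types, assuming the allocation backing the struct is known to be large enough:

#include <linux/fortify-string.h>
#include <linux/types.h>

struct rsp {
	__le16 sec_buf_offset;
	u8 data[1];		/* FORTIFY sees a 1-byte field... */
} __packed;

static void copy_blob(struct rsp *r, const void *blob, size_t blob_len)
{
	/* ...but the buffer behind *r was allocated large enough. */
	unsafe_memcpy(r->data, blob, blob_len,
		      /* alloc is larger than blob, sized at allocation */);
}
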
@@ -1687,6 +1690,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
rc = ksmbd_session_register(conn, sess);
if (rc)
goto out_err;
+
+ conn->binding = false;
} else if (conn->dialect >= SMB30_PROT_ID &&
(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
@@ -1765,6 +1770,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
sess = NULL;
goto out_err;
}
+
+ conn->binding = false;
}
work->sess = sess;
@@ -1955,7 +1962,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
name, treename);
- status = ksmbd_tree_conn_connect(conn, sess, name);
+ status = ksmbd_tree_conn_connect(work, name);
if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
else
@@ -2210,7 +2217,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
ksmbd_conn_unlock(conn);
ksmbd_close_session_fds(work);
- ksmbd_conn_wait_idle(conn, sess_id);
+ ksmbd_conn_wait_idle(conn);
/*
* Re-lookup session to validate if session is deleted
@@ -2767,8 +2774,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
}
}
- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) {
+ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
+ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
dh_info->CreateGuid =
durable_v2_blob->CreateGuid;
dh_info->persistent =
@@ -2788,8 +2795,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
goto out;
}
- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) {
+ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
+ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
ksmbd_debug(SMB, "Request for durable open\n");
dh_info->type = dh_idx;
}
@@ -3093,7 +3100,6 @@ int smb2_open(struct ksmbd_work *work)
goto err_out;
}
- file_present = true;
idmap = mnt_idmap(path.mnt);
} else {
if (rc != -ENOENT)
@@ -3411,7 +3417,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
} else {
- if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
+ if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) {
if (S_ISDIR(file_inode(filp)->i_mode)) {
lc->req_state &= ~SMB2_LEASE_WRITE_CACHING_LE;
lc->is_dir = true;
@@ -3710,7 +3716,7 @@ err_out2:
kfree(name);
kfree(lc);
- return 0;
+ return rc;
}
static int readdir_info_level_struct_sz(int info_level)
@@ -4406,7 +4412,8 @@ int smb2_query_dir(struct ksmbd_work *work)
rsp->OutputBufferLength = cpu_to_le32(0);
rsp->Buffer[0] = 0;
rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
- sizeof(struct smb2_query_directory_rsp));
+ offsetof(struct smb2_query_directory_rsp, Buffer)
+ + 1);
if (rc)
goto err_out;
} else {
@@ -5357,7 +5364,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
"NTFS", PATH_MAX, conn->local_nls, 0);
len = len * 2;
info->FileSystemNameLen = cpu_to_le32(len);
- sz = sizeof(struct filesystem_attribute_info) - 2 + len;
+ sz = sizeof(struct filesystem_attribute_info) + len;
rsp->OutputBufferLength = cpu_to_le32(sz);
break;
}
@@ -5383,7 +5390,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
len = len * 2;
info->VolumeLabelSize = cpu_to_le32(len);
info->Reserved = 0;
- sz = sizeof(struct filesystem_vol_info) - 2 + len;
+ sz = sizeof(struct filesystem_vol_info) + len;
rsp->OutputBufferLength = cpu_to_le32(sz);
break;
}
@@ -5596,6 +5603,11 @@ int smb2_query_info(struct ksmbd_work *work)
ksmbd_debug(SMB, "GOT query info request\n");
+ if (ksmbd_override_fsids(work)) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
switch (req->InfoType) {
case SMB2_O_INFO_FILE:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
@@ -5614,6 +5626,7 @@ int smb2_query_info(struct ksmbd_work *work)
req->InfoType);
rc = -EOPNOTSUPP;
}
+ ksmbd_revert_fsids(work);
if (!rc) {
rsp->StructureSize = cpu_to_le16(9);
@@ -5623,6 +5636,7 @@ int smb2_query_info(struct ksmbd_work *work)
le32_to_cpu(rsp->OutputBufferLength));
}
+err_out:
if (rc < 0) {
if (rc == -EACCES)
rsp->hdr.Status = STATUS_ACCESS_DENIED;
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index 474dadf6b7b8..13818ecb6e1b 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -732,10 +732,10 @@ bool is_asterisk(char *p)
return p && p[0] == '*';
}
-int ksmbd_override_fsids(struct ksmbd_work *work)
+int __ksmbd_override_fsids(struct ksmbd_work *work,
+ struct ksmbd_share_config *share)
{
struct ksmbd_session *sess = work->sess;
- struct ksmbd_share_config *share = work->tcon->share_conf;
struct cred *cred;
struct group_info *gi;
unsigned int uid;
@@ -775,6 +775,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work)
return 0;
}
+int ksmbd_override_fsids(struct ksmbd_work *work)
+{
+ return __ksmbd_override_fsids(work, work->tcon->share_conf);
+}
+
void ksmbd_revert_fsids(struct ksmbd_work *work)
{
const struct cred *cred;
diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
index f1092519c0c2..cc1d6dfe29d5 100644
--- a/fs/smb/server/smb_common.h
+++ b/fs/smb/server/smb_common.h
@@ -213,7 +213,7 @@ struct filesystem_attribute_info {
__le32 Attributes;
__le32 MaxPathNameComponentLength;
__le32 FileSystemNameLen;
- __le16 FileSystemName[1]; /* do not have to save this - get subset? */
+ __le16 FileSystemName[]; /* do not have to save this - get subset? */
} __packed;
struct filesystem_device_info {
@@ -226,7 +226,7 @@ struct filesystem_vol_info {
__le32 SerialNumber;
__le32 VolumeLabelSize;
__le16 Reserved;
- __le16 VolumeLabel[1];
+ __le16 VolumeLabel[];
} __packed;
struct filesystem_info {
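
These one-element placeholder arrays are what forced the "- 2" corrections in smb2_get_info_filesystem() above; with true flexible arrays, sizeof() no longer counts a phantom element. An equivalent, overflow-checked way to express the same size, assuming len already holds the UTF-16 name's byte length, would be struct_size() from <linux/overflow.h> (a sketch, not what the patch uses):

	/* sizeof(*info) + len, with the addition overflow-checked;
	 * len / 2 is the number of __le16 elements in FileSystemName. */
	sz = struct_size(info, FileSystemName, len / 2);
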
@@ -447,6 +447,8 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn,
int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
+int __ksmbd_override_fsids(struct ksmbd_work *work,
+ struct ksmbd_share_config *share);
int ksmbd_override_fsids(struct ksmbd_work *work);
void ksmbd_revert_fsids(struct ksmbd_work *work);
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index a84788396daa..aaed9e293b2e 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -624,8 +624,10 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
for_each_netdev(&init_net, netdev) {
if (netif_is_bridge_port(netdev))
continue;
- if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
+ if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
+ rtnl_unlock();
return -ENOMEM;
+ }
}
rtnl_unlock();
bind_additional_ifaces = 1;
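
The hunk above plugs an rtnl_lock() leak on the -ENOMEM path. A sketch of the same loop in the kernel's usual single-unlock shape, which keeps any future error path from reintroducing the leak (fragment only; netdev and alloc_iface() come from the surrounding function):

	int ret = 0;

	rtnl_lock();
	for_each_netdev(&init_net, netdev) {
		if (netif_is_bridge_port(netdev))
			continue;
		if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
			ret = -ENOMEM;
			break;		/* single unlock point below */
		}
	}
	rtnl_unlock();
	if (ret)
		return ret;
	bind_additional_ifaces = 1;
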
diff --git a/fs/smb/server/xattr.h b/fs/smb/server/xattr.h
index 16499ca5c82d..fa3e27d6971b 100644
--- a/fs/smb/server/xattr.h
+++ b/fs/smb/server/xattr.h
@@ -76,7 +76,7 @@ struct xattr_acl_entry {
struct xattr_smb_acl {
int count;
int next;
- struct xattr_acl_entry entries[];
+ struct xattr_acl_entry entries[] __counted_by(count);
};
/* 64bytes hash in xattr_ntacl is computed with sha256 */
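
__counted_by(count) ties the flexible array's runtime bound to the count member, so FORTIFY_SOURCE and UBSAN array-bounds checks can police entries[] accesses. The one ordering rule is that the counter must be assigned before the array is touched; a short sketch using the struct above (n is an illustrative element count):

	struct xattr_smb_acl *acl;

	acl = kzalloc(struct_size(acl, entries, n), GFP_KERNEL);
	if (!acl)
		return NULL;
	acl->count = n;		/* set the bound first... */
	acl->next = 0;
	/* ...then entries[0..n-1] accesses are bounds-checked. */
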