Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--  io_uring/uring_cmd.c  |  29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 019d6f49ff20..ce7726a04883 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -56,7 +56,7 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 }

 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-				   struct task_struct *task, bool cancel_all)
+				   struct io_uring_task *tctx, bool cancel_all)
 {
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
@@ -70,7 +70,7 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 				struct io_uring_cmd);
 		struct file *file = req->file;

-		if (!cancel_all && req->task != task)
+		if (!cancel_all && req->tctx != tctx)
 			continue;

 		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
@@ -160,7 +160,7 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
  */
-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
+void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
 		       unsigned issue_flags)
 {
 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
@@ -222,14 +222,18 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
 		struct io_ring_ctx *ctx = req->ctx;
-		u16 index;
+		struct io_rsrc_node *node;
+		u16 index = READ_ONCE(sqe->buf_index);

-		req->buf_index = READ_ONCE(sqe->buf_index);
-		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+		node = io_rsrc_node_lookup(&ctx->buf_table, index);
+		if (unlikely(!node))
 			return -EFAULT;
-		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-		req->imu = ctx->user_bufs[index];
-		io_req_set_rsrc_node(req, ctx, 0);
+		/*
+		 * Pin the node upfront, prior to io_uring_cmd_import_fixed()
+		 * being called. This prevents destruction of the mapped buffer
+		 * we'll need at actual import time.
+		 */
+		io_req_assign_buf_node(req, node);
 	}

 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
@@ -285,8 +289,13 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
 			      struct iov_iter *iter, void *ioucmd)
 {
 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+	struct io_rsrc_node *node = req->buf_node;

-	return io_import_fixed(rw, iter, req->imu, ubuf, len);
+	/* Must have had rsrc_node assigned at prep time */
+	if (node)
+		return io_import_fixed(rw, iter, node->buf, ubuf, len);
+
+	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
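
As an aside, below is a minimal hypothetical sketch (not part of this commit) of how a driver's ->uring_cmd() handler might use io_uring_cmd_import_fixed() and io_uring_cmd_done() with the signatures shown above. The mydrv_* names, the use of sqe->addr/sqe->len, and the completion path are illustrative assumptions only, not taken from this patch.

#include <linux/io_uring/cmd.h>
#include <linux/uio.h>

/* Hypothetical consumer sketch; mydrv_* names are made up for illustration */
static int mydrv_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct iov_iter iter;
	int ret;

	/*
	 * Import the fixed (registered) buffer. After this change, the rsrc
	 * node must have been assigned at prep time, or the helper fails
	 * with -EFAULT.
	 */
	ret = io_uring_cmd_import_fixed(READ_ONCE(ioucmd->sqe->addr),
					READ_ONCE(ioucmd->sqe->len),
					WRITE, &iter, ioucmd);
	if (ret)
		return ret;

	/* ... map the iter and queue the command to hardware (omitted) ... */

	/* Completion happens later from the driver's completion context */
	return -EIOCBQUEUED;
}

/* Driver completion path: note res2 is now u64 rather than ssize_t */
static void mydrv_complete(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
			   unsigned int issue_flags)
{
	io_uring_cmd_done(ioucmd, ret, res2, issue_flags);
}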