From fc68fcda049108478ee4704d8a3ad3e05cc72fd0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 11 Sep 2023 13:35:42 -0600 Subject: io_uring/rw: add support for IORING_OP_READ_MULTISHOT This behaves like IORING_OP_READ, except: 1) It only supports pollable files (e.g. pipes, sockets, etc). Note that for sockets, you probably want to use recv/recvmsg with multishot instead. 2) It supports multishot mode, meaning it will repeatedly trigger a read and fill a buffer when data is available. This allows similar use to recv/recvmsg but on non-sockets, where a single request will repeatedly post a CQE whenever data is read from it. 3) Because of #2, it must be used with provided buffers. This is uniformly true across any request type that supports multishot and transfers data, with the reason being that it's obviously not possible to pass in a single buffer for the data, as multiple reads may very well trigger before an application has a chance to process previous CQEs and the data passed from them. Reviewed-by: Gabriel Krisman Bertazi Signed-off-by: Jens Axboe --- include/uapi/linux/io_uring.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2ce..d127948b0d8a 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -240,6 +240,7 @@ enum io_uring_op { IORING_OP_URING_CMD, IORING_OP_SEND_ZC, IORING_OP_SENDMSG_ZC, + IORING_OP_READ_MULTISHOT, /* this goes last, obviously */ IORING_OP_LAST, -- cgit From f31ecf671ddc498f20219453395794ff2383e06b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 10 Jul 2023 16:14:37 -0600 Subject: io_uring: add IORING_OP_WAITID support This adds support for a fully async version of waitid(2). If an event isn't immediately available, wait for a callback to trigger a retry. The format of the sqe is as follows: sqe->len The 'which', the idtype being queried/waited for. sqe->fd The 'pid' (or id) being waited for. sqe->file_index The 'options' being set. sqe->addr2 A pointer to siginfo_t, if any, being filled in. buf_index, addr3, and waitid_flags are reserved/unused for now. waitid_flags will be used for options for this request type. One interesting use case may be to add multi-shot support, so that the request stays armed and posts a notification every time a monitored process state change occurs. Note that this does not support rusage, on Arnd's recommendation. See the waitid(2) man page for details on the arguments. 
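As a rough sketch of how the sqe fields above map to a real submission, the following minimal program waits for a forked child with IORING_OP_WAITID. It assumes liburing for the ring setup and a uapi header new enough to define the opcode; the waitid-specific fields are filled in by hand, and error handling is omitted:

#define _GNU_SOURCE
#include <liburing.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	siginfo_t si;
	pid_t pid;

	pid = fork();
	if (pid == 0)
		_exit(42);		/* child: exit with a recognizable status */

	io_uring_queue_init(8, &ring, 0);

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));	/* addr, buf_index, addr3, waitid_flags must be 0 */
	sqe->opcode = IORING_OP_WAITID;
	sqe->fd = pid;			/* the 'pid' (or id) being waited for */
	sqe->len = P_PID;		/* the 'which' / idtype */
	sqe->file_index = WEXITED;	/* the waitid options */
	sqe->addr2 = (unsigned long) &si;	/* siginfo_t to fill in, may be left 0 */
	sqe->user_data = 1;

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);

	if (cqe->res < 0)
		fprintf(stderr, "waitid failed: %d\n", cqe->res);
	else
		printf("child %d exited with status %d\n", si.si_pid, si.si_status);

	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

On completion, cqe->res is 0 on success (a negative errno on failure) and the siginfo_t passed through addr2 carries the pid/status details, mirroring waitid(2).
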
Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 2 + include/uapi/linux/io_uring.h | 2 + io_uring/Makefile | 3 +- io_uring/cancel.c | 5 + io_uring/io_uring.c | 3 + io_uring/opdef.c | 9 + io_uring/waitid.c | 372 +++++++++++++++++++++++++++++++++++++++++ io_uring/waitid.h | 15 ++ 8 files changed, 410 insertions(+), 1 deletion(-) create mode 100644 io_uring/waitid.c create mode 100644 io_uring/waitid.h (limited to 'include/uapi/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 13d19b9be9f4..fe1c5d4ec56c 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -313,6 +313,8 @@ struct io_ring_ctx { struct list_head cq_overflow_list; struct io_hash_table cancel_table; + struct hlist_head waitid_list; + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ struct io_sq_data *sq_data; /* if using sq thread polling */ diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index d127948b0d8a..683ac2b74721 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -65,6 +65,7 @@ struct io_uring_sqe { __u32 xattr_flags; __u32 msg_ring_flags; __u32 uring_cmd_flags; + __u32 waitid_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -241,6 +242,7 @@ enum io_uring_op { IORING_OP_SEND_ZC, IORING_OP_SENDMSG_ZC, IORING_OP_READ_MULTISHOT, + IORING_OP_WAITID, /* this goes last, obviously */ IORING_OP_LAST, diff --git a/io_uring/Makefile b/io_uring/Makefile index 8cc8e5387a75..7bd64e442567 100644 --- a/io_uring/Makefile +++ b/io_uring/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_IO_URING) += io_uring.o xattr.o nop.o fs.o splice.o \ openclose.o uring_cmd.o epoll.o \ statx.o net.o msg_ring.o timeout.o \ sqpoll.o fdinfo.o tctx.o poll.o \ - cancel.o kbuf.o rsrc.o rw.o opdef.o notif.o + cancel.o kbuf.o rsrc.o rw.o opdef.o \ + notif.o waitid.o obj-$(CONFIG_IO_WQ) += io-wq.o diff --git a/io_uring/cancel.c b/io_uring/cancel.c index 7b23607cf4af..eb77a51c5a79 100644 --- a/io_uring/cancel.c +++ b/io_uring/cancel.c @@ -15,6 +15,7 @@ #include "tctx.h" #include "poll.h" #include "timeout.h" +#include "waitid.h" #include "cancel.h" struct io_cancel { @@ -119,6 +120,10 @@ int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd, if (ret != -ENOENT) return ret; + ret = io_waitid_cancel(ctx, cd, issue_flags); + if (ret != -ENOENT) + return ret; + spin_lock(&ctx->completion_lock); if (!(cd->flags & IORING_ASYNC_CANCEL_FD)) ret = io_timeout_cancel(ctx, cd); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 783ed0fff71b..2dff4772bf14 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -92,6 +92,7 @@ #include "cancel.h" #include "net.h" #include "notif.h" +#include "waitid.h" #include "timeout.h" #include "poll.h" @@ -348,6 +349,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->tctx_list); ctx->submit_state.free_list.next = NULL; INIT_WQ_LIST(&ctx->locked_free_list); + INIT_HLIST_HEAD(&ctx->waitid_list); INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); INIT_WQ_LIST(&ctx->submit_state.compl_reqs); return ctx; @@ -3303,6 +3305,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, ret |= io_cancel_defer_files(ctx, task, cancel_all); mutex_lock(&ctx->uring_lock); ret |= io_poll_remove_all(ctx, task, cancel_all); + ret |= io_waitid_remove_all(ctx, task, cancel_all); mutex_unlock(&ctx->uring_lock); ret |= 
io_kill_timeouts(ctx, task, cancel_all); if (task) diff --git a/io_uring/opdef.c b/io_uring/opdef.c index a3fb1f9b3998..aadcbf7136b0 100644 --- a/io_uring/opdef.c +++ b/io_uring/opdef.c @@ -33,6 +33,7 @@ #include "poll.h" #include "cancel.h" #include "rw.h" +#include "waitid.h" static int io_no_issue(struct io_kiocb *req, unsigned int issue_flags) { @@ -439,6 +440,10 @@ const struct io_issue_def io_issue_defs[] = { .prep = io_read_mshot_prep, .issue = io_read_mshot, }, + [IORING_OP_WAITID] = { + .prep = io_waitid_prep, + .issue = io_waitid, + }, }; const struct io_cold_def io_cold_defs[] = { @@ -661,6 +666,10 @@ const struct io_cold_def io_cold_defs[] = { [IORING_OP_READ_MULTISHOT] = { .name = "READ_MULTISHOT", }, + [IORING_OP_WAITID] = { + .name = "WAITID", + .async_size = sizeof(struct io_waitid_async), + }, }; const char *io_uring_get_opcode(u8 opcode) diff --git a/io_uring/waitid.c b/io_uring/waitid.c new file mode 100644 index 000000000000..6f851978606d --- /dev/null +++ b/io_uring/waitid.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for async notification of waitid + */ +#include +#include +#include +#include +#include +#include + +#include + +#include "io_uring.h" +#include "cancel.h" +#include "waitid.h" +#include "../kernel/exit.h" + +static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts); + +#define IO_WAITID_CANCEL_FLAG BIT(31) +#define IO_WAITID_REF_MASK GENMASK(30, 0) + +struct io_waitid { + struct file *file; + int which; + pid_t upid; + int options; + atomic_t refs; + struct wait_queue_head *head; + struct siginfo __user *infop; + struct waitid_info info; +}; + +static void io_waitid_free(struct io_kiocb *req) +{ + struct io_waitid_async *iwa = req->async_data; + + put_pid(iwa->wo.wo_pid); + kfree(req->async_data); + req->async_data = NULL; + req->flags &= ~REQ_F_ASYNC_DATA; +} + +#ifdef CONFIG_COMPAT +static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo) +{ + struct compat_siginfo __user *infop; + bool ret; + + infop = (struct compat_siginfo __user *) iw->infop; + + if (!user_write_access_begin(infop, sizeof(*infop))) + return false; + + unsafe_put_user(signo, &infop->si_signo, Efault); + unsafe_put_user(0, &infop->si_errno, Efault); + unsafe_put_user(iw->info.cause, &infop->si_code, Efault); + unsafe_put_user(iw->info.pid, &infop->si_pid, Efault); + unsafe_put_user(iw->info.uid, &infop->si_uid, Efault); + unsafe_put_user(iw->info.status, &infop->si_status, Efault); + ret = true; +done: + user_write_access_end(); + return ret; +Efault: + ret = false; + goto done; +} +#endif + +static bool io_waitid_copy_si(struct io_kiocb *req, int signo) +{ + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + bool ret; + + if (!iw->infop) + return true; + +#ifdef CONFIG_COMPAT + if (req->ctx->compat) + return io_waitid_compat_copy_si(iw, signo); +#endif + + if (!user_write_access_begin(iw->infop, sizeof(*iw->infop))) + return false; + + unsafe_put_user(signo, &iw->infop->si_signo, Efault); + unsafe_put_user(0, &iw->infop->si_errno, Efault); + unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault); + unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault); + unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault); + unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault); + ret = true; +done: + user_write_access_end(); + return ret; +Efault: + ret = false; + goto done; +} + +static int io_waitid_finish(struct io_kiocb *req, int ret) +{ + int signo = 0; + + if (ret > 0) { + signo = SIGCHLD; + ret = 0; 
+ } + + if (!io_waitid_copy_si(req, signo)) + ret = -EFAULT; + io_waitid_free(req); + return ret; +} + +static void io_waitid_complete(struct io_kiocb *req, int ret) +{ + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + struct io_tw_state ts = { .locked = true }; + + /* anyone completing better be holding a reference */ + WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK)); + + lockdep_assert_held(&req->ctx->uring_lock); + + /* + * Did cancel find it meanwhile? + */ + if (hlist_unhashed(&req->hash_node)) + return; + + hlist_del_init(&req->hash_node); + + ret = io_waitid_finish(req, ret); + if (ret < 0) + req_set_fail(req); + io_req_set_res(req, ret, 0); + io_req_task_complete(req, &ts); +} + +static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) +{ + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + struct io_waitid_async *iwa = req->async_data; + + /* + * Mark us canceled regardless of ownership. This will prevent a + * potential retry from a spurious wakeup. + */ + atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs); + + /* claim ownership */ + if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK) + return false; + + spin_lock_irq(&iw->head->lock); + list_del_init(&iwa->wo.child_wait.entry); + spin_unlock_irq(&iw->head->lock); + io_waitid_complete(req, -ECANCELED); + return true; +} + +int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, + unsigned int issue_flags) +{ + struct hlist_node *tmp; + struct io_kiocb *req; + int nr = 0; + + if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_FD_FIXED)) + return -ENOENT; + + io_ring_submit_lock(ctx, issue_flags); + hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) { + if (req->cqe.user_data != cd->data && + !(cd->flags & IORING_ASYNC_CANCEL_ANY)) + continue; + if (__io_waitid_cancel(ctx, req)) + nr++; + if (!(cd->flags & IORING_ASYNC_CANCEL_ALL)) + break; + } + io_ring_submit_unlock(ctx, issue_flags); + + if (nr) + return nr; + + return -ENOENT; +} + +bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task, + bool cancel_all) +{ + struct hlist_node *tmp; + struct io_kiocb *req; + bool found = false; + + lockdep_assert_held(&ctx->uring_lock); + + hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) { + if (!io_match_task_safe(req, task, cancel_all)) + continue; + __io_waitid_cancel(ctx, req); + found = true; + } + + return found; +} + +static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req) +{ + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + struct io_waitid_async *iwa = req->async_data; + + if (!atomic_sub_return(1, &iw->refs)) + return false; + + /* + * Wakeup triggered, racing with us. It was prevented from + * completing because of that, queue up the tw to do that. + */ + req->io_task_work.func = io_waitid_cb; + io_req_task_work_add(req); + remove_wait_queue(iw->head, &iwa->wo.child_wait); + return true; +} + +static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts) +{ + struct io_waitid_async *iwa = req->async_data; + struct io_ring_ctx *ctx = req->ctx; + int ret; + + io_tw_lock(ctx, ts); + + ret = __do_wait(&iwa->wo); + + /* + * If we get -ERESTARTSYS here, we need to re-arm and check again + * to ensure we get another callback. If the retry works, then we can + * just remove ourselves from the waitqueue again and finish the + * request. 
+ */ + if (unlikely(ret == -ERESTARTSYS)) { + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + + /* Don't retry if cancel found it meanwhile */ + ret = -ECANCELED; + if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) { + iw->head = &current->signal->wait_chldexit; + add_wait_queue(iw->head, &iwa->wo.child_wait); + ret = __do_wait(&iwa->wo); + if (ret == -ERESTARTSYS) { + /* retry armed, drop our ref */ + io_waitid_drop_issue_ref(req); + return; + } + + remove_wait_queue(iw->head, &iwa->wo.child_wait); + } + } + + io_waitid_complete(req, ret); +} + +static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode, + int sync, void *key) +{ + struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait); + struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo); + struct io_kiocb *req = iwa->req; + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + struct task_struct *p = key; + + if (!pid_child_should_wake(wo, p)) + return 0; + + /* cancel is in progress */ + if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK) + return 1; + + req->io_task_work.func = io_waitid_cb; + io_req_task_work_add(req); + list_del_init(&wait->entry); + return 1; +} + +int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + + if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags) + return -EINVAL; + + iw->which = READ_ONCE(sqe->len); + iw->upid = READ_ONCE(sqe->fd); + iw->options = READ_ONCE(sqe->file_index); + iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2)); + return 0; +} + +int io_waitid(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); + struct io_ring_ctx *ctx = req->ctx; + struct io_waitid_async *iwa; + int ret; + + if (io_alloc_async_data(req)) + return -ENOMEM; + + iwa = req->async_data; + iwa->req = req; + + ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info, + iw->options, NULL); + if (ret) + goto done; + + /* + * Mark the request as busy upfront, in case we're racing with the + * wakeup. If we are, then we'll notice when we drop this initial + * reference again after arming. + */ + atomic_set(&iw->refs, 1); + + /* + * Cancel must hold the ctx lock, so there's no risk of cancelation + * finding us until a) we remain on the list, and b) the lock is + * dropped. We only need to worry about racing with the wakeup + * callback. + */ + io_ring_submit_lock(ctx, issue_flags); + hlist_add_head(&req->hash_node, &ctx->waitid_list); + + init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait); + iwa->wo.child_wait.private = req->task; + iw->head = &current->signal->wait_chldexit; + add_wait_queue(iw->head, &iwa->wo.child_wait); + + ret = __do_wait(&iwa->wo); + if (ret == -ERESTARTSYS) { + /* + * Nobody else grabbed a reference, it'll complete when we get + * a waitqueue callback, or if someone cancels it. + */ + if (!io_waitid_drop_issue_ref(req)) { + io_ring_submit_unlock(ctx, issue_flags); + return IOU_ISSUE_SKIP_COMPLETE; + } + + /* + * Wakeup triggered, racing with us. It was prevented from + * completing because of that, queue up the tw to do that. 
+ */ + io_ring_submit_unlock(ctx, issue_flags); + return IOU_ISSUE_SKIP_COMPLETE; + } + + hlist_del_init(&req->hash_node); + remove_wait_queue(iw->head, &iwa->wo.child_wait); + ret = io_waitid_finish(req, ret); + + io_ring_submit_unlock(ctx, issue_flags); +done: + if (ret < 0) + req_set_fail(req); + io_req_set_res(req, ret, 0); + return IOU_OK; +} diff --git a/io_uring/waitid.h b/io_uring/waitid.h new file mode 100644 index 000000000000..956a8adafe8c --- /dev/null +++ b/io_uring/waitid.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "../kernel/exit.h" + +struct io_waitid_async { + struct io_kiocb *req; + struct wait_opts wo; +}; + +int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); +int io_waitid(struct io_kiocb *req, unsigned int issue_flags); +int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, + unsigned int issue_flags); +bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task, + bool cancel_all); -- cgit From 528ce6781726e022bc5dc84034360e6e8f1b89bd Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 28 Sep 2023 20:43:24 +0800 Subject: io_uring: retain top 8bits of uring_cmd flags for kernel internal use Retain top 8bits of uring_cmd flags for kernel internal use, so that we can move IORING_URING_CMD_POLLED out of uapi header. Reviewed-by: Gabriel Krisman Bertazi Reviewed-by: Anuj Gupta Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- include/linux/io_uring.h | 3 +++ include/uapi/linux/io_uring.h | 5 ++--- io_uring/io_uring.c | 3 +++ io_uring/uring_cmd.c | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 106cdc55ff3b..ae08d6f66e62 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -22,6 +22,9 @@ enum io_uring_cmd_flags { IO_URING_F_IOPOLL = (1 << 10), }; +/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */ +#define IORING_URING_CMD_POLLED (1U << 31) + struct io_uring_cmd { struct file *file; const struct io_uring_sqe *sqe; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 683ac2b74721..425f64eee44e 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -249,13 +249,12 @@ enum io_uring_op { }; /* - * sqe->uring_cmd_flags + * sqe->uring_cmd_flags top 8bits aren't available for userspace * IORING_URING_CMD_FIXED use registered buffer; pass this flag * along with setting sqe->buf_index. 
- * IORING_URING_CMD_POLLED driver use only */ #define IORING_URING_CMD_FIXED (1U << 0) -#define IORING_URING_CMD_POLLED (1U << 31) +#define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED /* diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 2dff4772bf14..cb6bd9907045 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -4669,6 +4669,9 @@ static int __init io_uring_init(void) BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32)); + /* top 8bits are for internal use */ + BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0); + io_uring_optable_init(); /* diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 537795fddc87..a0b0ec5473bf 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -91,7 +91,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EINVAL; ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags); - if (ioucmd->flags & ~IORING_URING_CMD_FIXED) + if (ioucmd->flags & ~IORING_URING_CMD_MASK) return -EINVAL; if (ioucmd->flags & IORING_URING_CMD_FIXED) { -- cgit
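To round out the series, here is a hedged userspace sketch for the IORING_OP_READ_MULTISHOT opcode added in the first patch above: one request armed on a pipe, fed from a small provided-buffer ring, posting one CQE per chunk of data. It assumes a liburing recent enough to provide io_uring_setup_buf_ring() and a uapi header that defines the opcode; the SQE is again filled in by hand and most error handling is omitted:

#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BGID	0
#define NR_BUFS	4
#define BUF_SZ	256

int main(void)
{
	static char bufs[NR_BUFS][BUF_SZ];
	const char *msgs[] = { "hello", "world" };
	struct io_uring_buf_ring *br;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct io_uring ring;
	int fds[2], ret, i;

	if (pipe(fds) < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* hand a small group of provided buffers to the kernel */
	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &ret);
	if (!br)
		return 1;
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, bufs[i], BUF_SZ, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* one multishot read on the pipe, armed once */
	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_MULTISHOT;
	sqe->fd = fds[0];
	sqe->flags = IOSQE_BUFFER_SELECT;	/* provided buffers are mandatory */
	sqe->buf_group = BGID;
	/* addr and len stay 0: the kernel picks a buffer and its size */
	sqe->user_data = 1;
	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		write(fds[1], msgs[i], strlen(msgs[i]));
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
			int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

			printf("read %d bytes into buffer %d, more=%d\n",
			       cqe->res, bid, !!(cqe->flags & IORING_CQE_F_MORE));
			/* recycle the buffer so the request can keep firing */
			io_uring_buf_ring_add(br, bufs[bid], BUF_SZ, bid,
					      io_uring_buf_ring_mask(NR_BUFS), 0);
			io_uring_buf_ring_advance(br, 1);
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

Each completion carries the chosen buffer id in cqe->flags (IORING_CQE_F_BUFFER plus the id shifted by IORING_CQE_BUFFER_SHIFT), and IORING_CQE_F_MORE stays set while the request remains armed; once it is cleared (for example after the buffer group runs dry), the request has terminated and would need to be resubmitted.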
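The cancel.c hunk in the waitid patch also makes a pending IORING_OP_WAITID reachable through the regular IORING_OP_ASYNC_CANCEL path. A small fragment, building on the waitid sketch shown after the second patch's description (where the request was submitted with user_data == 1), might look like the following; io_uring_prep_cancel64() and io_uring_sqe_set_data64() are assumed to be available in the installed liburing:

#include <liburing.h>

/* cancel a still-pending waitid request, matching it by user_data */
void cancel_pending_waitid(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_cancel64(sqe, 1, 0);	/* 1 == user_data of the waitid SQE */
	io_uring_sqe_set_data64(sqe, 2);	/* tag the cancel request itself */
	io_uring_submit(ring);

	/*
	 * Two CQEs follow: the waitid request completes with -ECANCELED
	 * (user_data 1), and the cancel request reports 0 on a successful
	 * match or -ENOENT if nothing was found (user_data 2).
	 */
}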