Diffstat (limited to 'io_uring/net.c')
-rw-r--r--  io_uring/net.c  110
1 file changed, 69 insertions, 41 deletions
diff --git a/io_uring/net.c b/io_uring/net.c
index 15dea91625e2..5229976cb582 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -67,7 +67,18 @@ struct io_sr_msg {
struct io_kiocb *notif;
};
-#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
+static inline bool io_check_multishot(struct io_kiocb *req,
+ unsigned int issue_flags)
+{
+ /*
+ * When ->locked_cq is set we only allow to post CQEs from the original
+ * task context. Usual request completions will be handled in other
+ * generic paths but multipoll may decide to post extra cqes.
+ */
+ return !(issue_flags & IO_URING_F_IOWQ) ||
+ !(issue_flags & IO_URING_F_MULTISHOT) ||
+ !req->ctx->task_complete;
+}
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
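For context on what the new io_check_multishot() helper is guarding: ctx->task_complete is set for rings where completions may only be posted by the submitting task (the IORING_SETUP_DEFER_TASKRUN setup, which requires IORING_SETUP_SINGLE_ISSUER). On such a ring, a multishot request that ends up executing in io-wq must not post extra CQEs from the worker, so the hunks below punt it back via io_setup_async_msg() or -EAGAIN instead. The following is a minimal, untested liburing sketch of that configuration with a multishot recv; the buffer-group id, counts, and sizes are arbitrary assumptions, and error handling is elided.

#include <liburing.h>
#include <stdlib.h>

#define BGID     7        /* provided-buffer group id (arbitrary) */
#define NR_BUFS  8
#define BUF_LEN  4096

static void recv_multishot_demo(int sockfd)
{
	struct io_uring_params p = {
		.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN,
	};
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *bufs = malloc((size_t)NR_BUFS * BUF_LEN);
	unsigned more;

	io_uring_queue_init_params(64, &ring, &p);

	/* multishot recv picks from provided buffers, so register some first */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUF_LEN, NR_BUFS, BGID, 0);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);

	/* one SQE, many CQEs: each completion carries IORING_CQE_F_MORE
	 * until the kernel stops the multishot */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(&ring);

	do {
		io_uring_wait_cqe(&ring, &cqe);
		/* cqe->res: bytes received; buffer id is encoded in cqe->flags */
		more = cqe->flags & IORING_CQE_F_MORE;
		io_uring_cqe_seen(&ring, cqe);
	} while (more);

	io_uring_queue_exit(&ring);
	free(bufs);
}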
@@ -127,13 +138,15 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
struct io_cache_entry *entry;
struct io_async_msghdr *hdr;
- if (!(issue_flags & IO_URING_F_UNLOCKED) &&
- (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
- hdr = container_of(entry, struct io_async_msghdr, cache);
- hdr->free_iov = NULL;
- req->flags |= REQ_F_ASYNC_DATA;
- req->async_data = hdr;
- return hdr;
+ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ entry = io_alloc_cache_get(&ctx->netmsg_cache);
+ if (entry) {
+ hdr = container_of(entry, struct io_async_msghdr, cache);
+ hdr->free_iov = NULL;
+ req->flags |= REQ_F_ASYNC_DATA;
+ req->async_data = hdr;
+ return hdr;
+ }
}
if (!io_alloc_async_data(req)) {
@@ -365,7 +378,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock))
return -ENOTSOCK;
- ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
+ ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
if (unlikely(ret))
return ret;
@@ -451,7 +464,7 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
}
} else {
iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+ ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter,
false);
if (ret > 0)
@@ -503,7 +516,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
}
} else {
iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
+ ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
UIO_FASTIOV, &iomsg->free_iov,
&iomsg->msg.msg_iter, true);
if (ret < 0)
@@ -591,7 +604,8 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
* again (for multishot).
*/
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
- unsigned int cflags, bool mshot_finished)
+ unsigned int cflags, bool mshot_finished,
+ unsigned issue_flags)
{
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
io_req_set_res(req, *ret, cflags);
@@ -600,21 +614,17 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
}
if (!mshot_finished) {
- if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
- cflags | IORING_CQE_F_MORE, false)) {
+ if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
io_recv_prep_retry(req);
return false;
}
- /*
- * Otherwise stop multishot but use the current result.
- * Probably will end up going into overflow, but this means
- * we cannot trust the ordering anymore
- */
+ /* Otherwise stop multishot but use the current result. */
}
io_req_set_res(req, *ret, cflags);
- if (req->flags & REQ_F_POLLED)
+ if (issue_flags & IO_URING_F_MULTISHOT)
*ret = IOU_STOP_MULTISHOT;
else
*ret = IOU_OK;
@@ -733,6 +743,9 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
+ if (!io_check_multishot(req, issue_flags))
+ return io_setup_async_msg(req, kmsg, issue_flags);
+
retry_multishot:
if (io_do_buffer_select(req)) {
void __user *buf;
@@ -752,7 +765,7 @@ retry_multishot:
kmsg->fast_iov[0].iov_base = buf;
kmsg->fast_iov[0].iov_len = len;
- iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
len);
}
@@ -773,8 +786,7 @@ retry_multishot:
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
ret = io_setup_async_msg(req, kmsg, issue_flags);
- if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
- IO_APOLL_MULTI_POLLED) {
+ if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
}
@@ -803,7 +815,7 @@ retry_multishot:
if (kmsg->msg.msg_inq)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- if (!io_recv_finish(req, &ret, cflags, mshot_finished))
+ if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
goto retry_multishot;
if (mshot_finished) {
@@ -833,6 +845,9 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
+ if (!io_check_multishot(req, issue_flags))
+ return -EAGAIN;
+
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
@@ -847,7 +862,7 @@ retry_multishot:
sr->buf = buf;
}
- ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
+ ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
if (unlikely(ret))
goto out_free;
@@ -869,7 +884,7 @@ retry_multishot:
ret = sock_recvmsg(sock, &msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
- if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
+ if (issue_flags & IO_URING_F_MULTISHOT) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
}
@@ -902,7 +917,7 @@ out_free:
if (msg.msg_inq)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- if (!io_recv_finish(req, &ret, cflags, ret <= 0))
+ if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
goto retry_multishot;
return ret;
@@ -925,6 +940,9 @@ void io_send_zc_cleanup(struct io_kiocb *req)
}
}
+#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
+#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -937,10 +955,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (req->flags & REQ_F_CQE_SKIP)
return -EINVAL;
- zc->flags = READ_ONCE(sqe->ioprio);
- if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
- IORING_RECVSEND_FIXED_BUF))
- return -EINVAL;
notif = zc->notif = io_alloc_notif(ctx);
if (!notif)
return -ENOMEM;
@@ -948,6 +962,17 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
notif->cqe.res = 0;
notif->cqe.flags = IORING_CQE_F_NOTIF;
req->flags |= REQ_F_NEED_CLEANUP;
+
+ zc->flags = READ_ONCE(sqe->ioprio);
+ if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
+ if (zc->flags & ~IO_ZC_FLAGS_VALID)
+ return -EINVAL;
+ if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
+ io_notif_set_extended(notif);
+ io_notif_to_data(notif)->zc_report = true;
+ }
+ }
+
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
unsigned idx = READ_ONCE(sqe->buf_index);
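The new IORING_SEND_ZC_REPORT_USAGE flag, validated above via IO_ZC_FLAGS_VALID, lets the submitter learn whether a zero-copy send actually avoided copying: the notification CQE's res carries IORING_NOTIF_USAGE_ZC_COPIED if the kernel fell back to a copy. A rough, untested liburing sketch of how userspace might use it (assumes a liburing with io_uring_prep_send_zc() and uapi headers defining the new flag):

#include <liburing.h>
#include <stdbool.h>
#include <stdio.h>

static void send_zc_report_demo(struct io_uring *ring, int sockfd,
				const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	/* request usage reporting on the zero-copy send */
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0,
			      IORING_SEND_ZC_REPORT_USAGE);
	io_uring_submit(ring);

	/* expect two CQEs: the send result (IORING_CQE_F_MORE set), then
	 * the notification (IORING_CQE_F_NOTIF set) once the buffer may
	 * be reused */
	for (int i = 0; i < 2; i++) {
		io_uring_wait_cqe(ring, &cqe);
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			bool copied = cqe->res & IORING_NOTIF_USAGE_ZC_COPIED;
			printf("zc send was %s\n",
			       copied ? "copied" : "true zero-copy");
		}
		io_uring_cqe_seen(ring, cqe);
	}
}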
@@ -1083,13 +1108,14 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
return io_setup_async_addr(req, &__address, issue_flags);
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
- ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
+ ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
(u64)(uintptr_t)zc->buf, zc->len);
if (unlikely(ret))
return ret;
msg.sg_from_iter = io_sg_from_iter;
} else {
- ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
+ io_notif_set_extended(zc->notif);
+ ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
&msg.msg_iter);
if (unlikely(ret))
return ret;
@@ -1150,6 +1176,8 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
unsigned flags;
int ret, min_ret = 0;
+ io_notif_set_extended(sr->notif);
+
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
@@ -1271,6 +1299,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
struct file *file;
int ret, fd;
+ if (!io_check_multishot(req, issue_flags))
+ return -EAGAIN;
retry:
if (!fixed) {
fd = __get_unused_fd_flags(accept->flags, accept->nofile);
@@ -1289,8 +1319,7 @@ retry:
* return EAGAIN to arm the poll infra since it
* has already been done
*/
- if ((req->flags & IO_APOLL_MULTI_POLLED) ==
- IO_APOLL_MULTI_POLLED)
+ if (issue_flags & IO_URING_F_MULTISHOT)
ret = IOU_ISSUE_SKIP_COMPLETE;
return ret;
}
@@ -1310,14 +1339,13 @@ retry:
return IOU_OK;
}
- if (ret >= 0 &&
- io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
+ if (ret < 0)
+ return ret;
+ if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
goto retry;
- io_req_set_res(req, ret, 0);
- if (req->flags & REQ_F_POLLED)
- return IOU_STOP_MULTISHOT;
- return IOU_OK;
+ return -ECANCELED;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
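The io_accept() hunk above reshapes the multishot completion path: each accepted connection is posted as an extra CQE with IORING_CQE_F_MORE via io_aux_cqe(), and if that post fails the request now terminates with -ECANCELED rather than folding the last fd into a final CQE. The userspace contract is unchanged: consume CQEs until one arrives without IORING_CQE_F_MORE, then re-arm. A brief, untested liburing sketch under that assumption (listener setup and error handling elided):

#include <liburing.h>

/* arm one multishot accept and drain connections until the kernel
 * ends the multishot (last CQE lacks IORING_CQE_F_MORE) */
static void accept_multishot_demo(struct io_uring *ring, int listen_fd,
				  void (*on_conn)(int fd))
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	unsigned more;

	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	do {
		io_uring_wait_cqe(ring, &cqe);
		if (cqe->res >= 0)
			on_conn(cqe->res);	/* new connection fd */
		more = cqe->flags & IORING_CQE_F_MORE;
		io_uring_cqe_seen(ring, cqe);
	} while (more);
	/* to keep accepting, submit another multishot accept SQE here */
}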