Diffstat (limited to 'io_uring/net.c')
-rw-r--r--	io_uring/net.c	142
1 file changed, 112 insertions(+), 30 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 070dea9a4eda..594490a1389b 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -51,6 +51,16 @@ struct io_connect {
 	bool				seen_econnaborted;
 };
 
+struct io_bind {
+	struct file			*file;
+	int				addr_len;
+};
+
+struct io_listen {
+	struct file			*file;
+	int				backlog;
+};
+
 struct io_sr_msg {
 	struct file			*file;
 	union {
@@ -817,20 +827,20 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 				  bool mshot_finished, unsigned issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	unsigned int cflags;
-
-	if (sr->flags & IORING_RECVSEND_BUNDLE)
-		cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
-				      issue_flags);
-	else
-		cflags = io_put_kbuf(req, issue_flags);
+	unsigned int cflags = 0;
 
 	if (kmsg->msg.msg_inq > 0)
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
-	/* bundle with no more immediate buffers, we're done */
-	if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY)
-		goto finish;
+	if (sr->flags & IORING_RECVSEND_BUNDLE) {
+		cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+				      issue_flags);
+		/* bundle with no more immediate buffers, we're done */
+		if (req->flags & REQ_F_BL_EMPTY)
+			goto finish;
+	} else {
+		cflags |= io_put_kbuf(req, issue_flags);
+	}
 
 	/*
 	 * Fill CQE for this receive and see if we should keep trying to
@@ -1129,13 +1139,15 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 retry_multishot:
 	if (io_do_buffer_select(req)) {
 		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			kmsg->msg.msg_inq = -1;
 			goto out_free;
+		}
 		sr->buf = NULL;
 	}
 
-	kmsg->msg.msg_inq = -1;
 	kmsg->msg.msg_flags = 0;
+	kmsg->msg.msg_inq = -1;
 
 	if (flags & MSG_WAITALL)
 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
@@ -1265,14 +1277,14 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
 }
 
-static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
 				 struct iov_iter *from, size_t length)
 {
 	skb_zcopy_downgrade_managed(skb);
-	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+	return zerocopy_fill_skb_from_iter(skb, from, length);
 }
 
-static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+static int io_sg_from_iter(struct sk_buff *skb,
 			   struct iov_iter *from, size_t length)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -1285,7 +1297,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 	if (!frag)
 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
 	else if (unlikely(!skb_zcopy_managed(skb)))
-		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+		return zerocopy_fill_skb_from_iter(skb, from, length);
 
 	bi.bi_size = min(from->count, length);
 	bi.bi_bvec_done = from->iov_offset;
@@ -1312,14 +1324,6 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 	skb->data_len += copied;
 	skb->len += copied;
 	skb->truesize += truesize;
-
-	if (sk && sk->sk_type == SOCK_STREAM) {
-		sk_wmem_queued_add(sk, truesize);
-		if (!skb_zcopy_pure(skb))
-			sk_mem_charge(sk, truesize);
-	} else {
-		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
-	}
 	return ret;
 }
 
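With io_recv_finish() reordered as above, the buffer-consumption flags from io_put_kbufs() and IORING_CQE_F_SOCK_NONEMPTY are now OR'd into a single cflags word, and io_recv() marks msg_inq as unknown (-1) before taking the buffer-select error path. A minimal consumer-side sketch of how those CQE flags surface through liburing; drain_recv_cqes() is a hypothetical helper, and it assumes a multishot or bundle recv has already been armed on the ring:

#include <liburing.h>
#include <stdbool.h>

/* Hypothetical helper: drain pending recv CQEs and report whether the
 * socket still had queued data when the last CQE was posted. */
static bool drain_recv_cqes(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned head, seen = 0;
	bool sock_nonempty = false;

	io_uring_for_each_cqe(ring, head, cqe) {
		/* With the reordered io_recv_finish(), a bundle CQE carries
		 * a valid buffer ID even when REQ_F_BL_EMPTY ended the
		 * bundle early. */
		if (cqe->flags & IORING_CQE_F_BUFFER) {
			unsigned buf_id = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
			(void)buf_id;	/* recycle buffer(s) to the ring here */
		}
		/* Reflects msg_inq; the io_recv() hunk above initializes it
		 * to -1 (unknown) before the error path, so this bit is never
		 * computed from stale data. */
		sock_nonempty = cqe->flags & IORING_CQE_F_SOCK_NONEMPTY;
		/* If IORING_CQE_F_MORE is clear, the multishot recv has
		 * terminated and must be re-armed. */
		seen++;
	}
	io_uring_cq_advance(ring, seen);
	return sock_nonempty;
}
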
@@ -1528,9 +1532,12 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
-	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
 	bool fixed = !!accept->file_slot;
+	struct proto_accept_arg arg = {
+		.flags = force_nonblock ? O_NONBLOCK : 0,
+	};
 	struct file *file;
+	unsigned cflags;
 	int ret, fd;
 
 	if (!(req->flags & REQ_F_POLLED) &&
@@ -1543,7 +1550,9 @@ retry:
 		if (unlikely(fd < 0))
 			return fd;
 	}
-	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
+	arg.err = 0;
+	arg.is_empty = -1;
+	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
 			 accept->flags);
 	if (IS_ERR(file)) {
 		if (!fixed)
@@ -1571,17 +1580,26 @@ retry:
 						accept->file_slot);
 	}
 
+	cflags = 0;
+	if (!arg.is_empty)
+		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+
 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
-		io_req_set_res(req, ret, 0);
+		io_req_set_res(req, ret, cflags);
 		return IOU_OK;
 	}
 
 	if (ret < 0)
 		return ret;
-	if (io_req_post_cqe(req, ret, IORING_CQE_F_MORE))
-		goto retry;
+	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
+		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
+			goto retry;
+		if (issue_flags & IO_URING_F_MULTISHOT)
+			return IOU_ISSUE_SKIP_COMPLETE;
+		return -EAGAIN;
+	}
 
-	io_req_set_res(req, ret, 0);
+	io_req_set_res(req, ret, cflags);
 	return IOU_STOP_MULTISHOT;
 }
 
@@ -1701,6 +1719,70 @@ out:
 	return IOU_OK;
 }
 
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+	struct sockaddr __user *uaddr;
+	struct io_async_msghdr *io;
+
+	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
+		return -EINVAL;
+
+	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	bind->addr_len = READ_ONCE(sqe->addr2);
+
+	io = io_msg_alloc_async(req);
+	if (unlikely(!io))
+		return -ENOMEM;
+	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
+}
+
+int io_bind(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+	struct io_async_msghdr *io = req->async_data;
+	struct socket *sock;
+	int ret;
+
+	sock = sock_from_file(req->file);
+	if (unlikely(!sock))
+		return -ENOTSOCK;
+
+	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_set_res(req, ret, 0);
+	return 0;
+}
+
+int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+
+	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
+		return -EINVAL;
+
+	listen->backlog = READ_ONCE(sqe->len);
+	return 0;
+}
+
+int io_listen(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+	struct socket *sock;
+	int ret;
+
+	sock = sock_from_file(req->file);
+	if (unlikely(!sock))
+		return -ENOTSOCK;
+
+	ret = __sys_listen_socket(sock, listen->backlog);
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_set_res(req, ret, 0);
+	return 0;
+}
+
 void io_netmsg_cache_free(const void *entry)
 {
 	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
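The prep helpers at the end of the diff define the userspace ABI for the two new opcodes: io_bind_prep() reads the sockaddr pointer from sqe->addr and its length from sqe->addr2, io_listen_prep() reads the backlog from sqe->len, and both reject any other non-zero field with -EINVAL. Below is a hypothetical sketch that fills raw SQEs accordingly via liburing; queue_bind_listen() is an invented helper name, and it assumes headers new enough to define IORING_OP_BIND and IORING_OP_LISTEN (recent liburing releases also ship io_uring_prep_bind()/io_uring_prep_listen() wrappers):

#include <errno.h>
#include <liburing.h>
#include <netinet/in.h>
#include <string.h>

/* Hypothetical helper: queue bind + listen on an already-created socket.
 * SQE field usage mirrors io_bind_prep()/io_listen_prep() above. */
static int queue_bind_listen(struct io_uring *ring, int sockfd,
			     struct sockaddr_in *addr, int backlog)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_BIND;
	sqe->fd = sockfd;
	sqe->addr = (unsigned long) addr;	/* sockaddr pointer */
	sqe->addr2 = sizeof(*addr);		/* addr_len */
	sqe->flags = IOSQE_IO_LINK;		/* run listen after bind */

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_LISTEN;
	sqe->fd = sockfd;
	sqe->len = backlog;			/* io_listen_prep() reads sqe->len */

	return io_uring_submit(ring);
}

Linking the two SQEs with IOSQE_IO_LINK makes the listen run only after the bind completes, mirroring the usual bind()-then-listen() ordering.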