diff options
Diffstat (limited to 'io_uring/net.c')
| -rw-r--r-- | io_uring/net.c | 104 | 
1 file changed, 90 insertions, 14 deletions
diff --git a/io_uring/net.c b/io_uring/net.c index 7c98c4d50946..7b75da2e7826 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -51,6 +51,16 @@ struct io_connect {  	bool				seen_econnaborted;  }; +struct io_bind { +	struct file			*file; +	int				addr_len; +}; + +struct io_listen { +	struct file			*file; +	int				backlog; +}; +  struct io_sr_msg {  	struct file			*file;  	union { @@ -817,20 +827,20 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,  				  bool mshot_finished, unsigned issue_flags)  {  	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); -	unsigned int cflags; - -	if (sr->flags & IORING_RECVSEND_BUNDLE) -		cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret), -				      issue_flags); -	else -		cflags = io_put_kbuf(req, issue_flags); +	unsigned int cflags = 0;  	if (kmsg->msg.msg_inq > 0)  		cflags |= IORING_CQE_F_SOCK_NONEMPTY; -	/* bundle with no more immediate buffers, we're done */ -	if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY) -		goto finish; +	if (sr->flags & IORING_RECVSEND_BUNDLE) { +		cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret), +				      issue_flags); +		/* bundle with no more immediate buffers, we're done */ +		if (req->flags & REQ_F_BL_EMPTY) +			goto finish; +	} else { +		cflags |= io_put_kbuf(req, issue_flags); +	}  	/*  	 * Fill CQE for this receive and see if we should keep trying to @@ -1127,16 +1137,18 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)  		flags |= MSG_DONTWAIT;  retry_multishot: -	kmsg->msg.msg_inq = -1; -	kmsg->msg.msg_flags = 0; -  	if (io_do_buffer_select(req)) {  		ret = io_recv_buf_select(req, kmsg, &len, issue_flags); -		if (unlikely(ret)) +		if (unlikely(ret)) { +			kmsg->msg.msg_inq = -1;  			goto out_free; +		}  		sr->buf = NULL;  	} +	kmsg->msg.msg_flags = 0; +	kmsg->msg.msg_inq = -1; +  	if (flags & MSG_WAITALL)  		min_ret = iov_iter_count(&kmsg->msg.msg_iter); @@ -1715,6 +1727,70 @@ out:  	return IOU_OK;  } +int 
io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ +	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind); +	struct sockaddr __user *uaddr; +	struct io_async_msghdr *io; + +	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in) +		return -EINVAL; + +	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr)); +	bind->addr_len =  READ_ONCE(sqe->addr2); + +	io = io_msg_alloc_async(req); +	if (unlikely(!io)) +		return -ENOMEM; +	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr); +} + +int io_bind(struct io_kiocb *req, unsigned int issue_flags) +{ +	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind); +	struct io_async_msghdr *io = req->async_data; +	struct socket *sock; +	int ret; + +	sock = sock_from_file(req->file); +	if (unlikely(!sock)) +		return -ENOTSOCK; + +	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len); +	if (ret < 0) +		req_set_fail(req); +	io_req_set_res(req, ret, 0); +	return 0; +} + +int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ +	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen); + +	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2) +		return -EINVAL; + +	listen->backlog = READ_ONCE(sqe->len); +	return 0; +} + +int io_listen(struct io_kiocb *req, unsigned int issue_flags) +{ +	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen); +	struct socket *sock; +	int ret; + +	sock = sock_from_file(req->file); +	if (unlikely(!sock)) +		return -ENOTSOCK; + +	ret = __sys_listen_socket(sock, listen->backlog); +	if (ret < 0) +		req_set_fail(req); +	io_req_set_res(req, ret, 0); +	return 0; +} +  void io_netmsg_cache_free(const void *entry)  {  	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;  |