Diffstat (limited to 'io_uring/msg_ring.c')
-rw-r--r--	io_uring/msg_ring.c	122
1 file changed, 78 insertions(+), 44 deletions(-)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 81c4a9d43729..29fa9285a33d 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -11,9 +11,9 @@
 #include "io_uring.h"
 #include "rsrc.h"
 #include "filetable.h"
+#include "alloc_cache.h"
 #include "msg_ring.h"
 
-
 /* All valid masks for MSG_RING */
 #define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
 					IORING_MSG_RING_FLAGS_PASS)
@@ -68,59 +68,70 @@ void io_msg_ring_cleanup(struct io_kiocb *req)
 
 static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
 {
-	if (!target_ctx->task_complete)
-		return false;
-	return current != target_ctx->submitter_task;
+	return target_ctx->task_complete;
 }
 
-static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 {
-	struct io_ring_ctx *ctx = req->file->private_data;
-	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
-	struct task_struct *task = READ_ONCE(ctx->submitter_task);
+	struct io_ring_ctx *ctx = req->ctx;
 
-	if (unlikely(!task))
-		return -EOWNERDEAD;
+	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
+	if (spin_trylock(&ctx->msg_lock)) {
+		if (io_alloc_cache_put(&ctx->msg_cache, req))
+			req = NULL;
+		spin_unlock(&ctx->msg_lock);
+	}
+	if (req)
+		kmem_cache_free(req_cachep, req);
+	percpu_ref_put(&ctx->refs);
+}
 
-	init_task_work(&msg->tw, func);
-	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
+static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			      int res, u32 cflags, u64 user_data)
+{
+	req->task = READ_ONCE(ctx->submitter_task);
+	if (!req->task) {
+		kmem_cache_free(req_cachep, req);
 		return -EOWNERDEAD;
+	}
+	req->cqe.user_data = user_data;
+	io_req_set_res(req, res, cflags);
+	percpu_ref_get(&ctx->refs);
+	req->ctx = ctx;
+	req->io_task_work.func = io_msg_tw_complete;
+	io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
+	return 0;
+}
 
-	return IOU_ISSUE_SKIP_COMPLETE;
+static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
+{
+	struct io_kiocb *req = NULL;
+
+	if (spin_trylock(&ctx->msg_lock)) {
+		req = io_alloc_cache_get(&ctx->msg_cache);
+		spin_unlock(&ctx->msg_lock);
+	}
+	if (req)
+		return req;
+	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN);
 }
 
-static void io_msg_tw_complete(struct callback_head *head)
+static int io_msg_data_remote(struct io_kiocb *req)
 {
-	struct io_msg *msg = container_of(head, struct io_msg, tw);
-	struct io_kiocb *req = cmd_to_io_kiocb(msg);
 	struct io_ring_ctx *target_ctx = req->file->private_data;
-	int ret = 0;
-
-	if (current->flags & PF_EXITING) {
-		ret = -EOWNERDEAD;
-	} else {
-		u32 flags = 0;
-
-		if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
-			flags = msg->cqe_flags;
-
-		/*
-		 * If the target ring is using IOPOLL mode, then we need to be
-		 * holding the uring_lock for posting completions. Other ring
-		 * types rely on the regular completion locking, which is
-		 * handled while posting.
-		 */
-		if (target_ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_lock(&target_ctx->uring_lock);
-		if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
-			ret = -EOVERFLOW;
-		if (target_ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_unlock(&target_ctx->uring_lock);
-	}
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	struct io_kiocb *target;
+	u32 flags = 0;
 
-	if (ret < 0)
-		req_set_fail(req);
-	io_req_queue_tw_complete(req, ret);
+	target = io_msg_get_kiocb(req->ctx);
+	if (unlikely(!target))
+		return -ENOMEM;
+
+	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
+		flags = msg->cqe_flags;
+
+	return io_msg_remote_post(target_ctx, target, msg->len, flags,
+					msg->user_data);
 }
 
 static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
@@ -138,7 +149,7 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 		return -EBADFD;
 
 	if (io_msg_need_remote(target_ctx))
-		return io_msg_exec_remote(req, io_msg_tw_complete);
+		return io_msg_data_remote(req);
 
 	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
 		flags = msg->cqe_flags;
@@ -218,6 +229,22 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
 	io_req_queue_tw_complete(req, ret);
 }
 
+static int io_msg_fd_remote(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->file->private_data;
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+	if (unlikely(!task))
+		return -EOWNERDEAD;
+
+	init_task_work(&msg->tw, io_msg_tw_fd_complete);
+	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
+		return -EOWNERDEAD;
+
+	return IOU_ISSUE_SKIP_COMPLETE;
+}
+
 static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
@@ -240,7 +267,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	if (io_msg_need_remote(target_ctx))
-		return io_msg_exec_remote(req, io_msg_tw_fd_complete);
+		return io_msg_fd_remote(req);
 
 	return io_msg_install_complete(req, issue_flags);
 }
@@ -294,3 +321,10 @@ done:
 	io_req_set_res(req, ret, 0);
 	return IOU_OK;
 }
+
+void io_msg_cache_free(const void *entry)
+{
+	struct io_kiocb *req = (struct io_kiocb *) entry;
+
+	kmem_cache_free(req_cachep, req);
+}
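Note that this diffstat is limited to io_uring/msg_ring.c, so the companion changes to the ring context are not shown. For the ctx->msg_lock and ctx->msg_cache fields used above to work, the context setup and teardown paths in io_uring.c would have to initialize the lock and the cache, and drain cached entries through the io_msg_cache_free() hook this patch adds. A minimal sketch of that wiring, assuming the io_alloc_cache_init()/io_alloc_cache_free() helpers from alloc_cache.h (not part of this diff):

	/* in io_ring_ctx_alloc(): set up the lock and the io_kiocb cache */
	spin_lock_init(&ctx->msg_lock);
	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
				   sizeof(struct io_kiocb));

	/* in io_ring_ctx_free(): return any cached io_kiocbs to the slab */
	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);

The trylock-only access pattern in io_msg_get_kiocb() and io_msg_tw_complete() makes the cache purely an optimization: if msg_lock is contended, allocation simply falls back to kmem_cache_alloc() and freeing goes straight back to the slab.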
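For context, the path being reworked here is driven from userspace via IORING_OP_MSG_RING, which lets one ring post a CQE into another ring's completion queue. With liburing this looks roughly like the fragment below (illustrative only; src_ring and target_ring_fd are assumed to be set up elsewhere):

	/* post a CQE with res=42 and user_data=0xcafe to the target ring */
	struct io_uring_sqe *sqe = io_uring_get_sqe(&src_ring);

	io_uring_prep_msg_ring(sqe, target_ring_fd, 42, 0xcafe, 0);
	io_uring_submit(&src_ring);

When the target ring was created with IORING_SETUP_DEFER_TASKRUN (which sets target_ctx->task_complete), the patched io_msg_need_remote() now always routes this through the new io_msg_data_remote() path instead of comparing against current.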