-rw-r--r--   include/linux/io_uring_types.h    4
-rw-r--r--   io_uring/io_uring.c              22
2 files changed, 0 insertions, 26 deletions
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index d34c8433caf9..c47f412cf18e 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -346,10 +346,6 @@ struct io_ring_ctx {
 
 	spinlock_t		completion_lock;
 
-	/* IRQ completion list, under ->completion_lock */
-	unsigned int		locked_free_nr;
-	struct io_wq_work_list	locked_free_list;
-
 	struct list_head	io_buffers_comp;
 	struct list_head	cq_overflow_list;
 	struct io_hash_table	cancel_table;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8078d6944411..e291f227a1a0 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -334,7 +334,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	init_llist_head(&ctx->work_llist);
 	INIT_LIST_HEAD(&ctx->tctx_list);
 	ctx->submit_state.free_list.next = NULL;
-	INIT_WQ_LIST(&ctx->locked_free_list);
 	INIT_HLIST_HEAD(&ctx->waitid_list);
 #ifdef CONFIG_FUTEX
 	INIT_HLIST_HEAD(&ctx->futex_list);
@@ -988,15 +987,6 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 }
 
-static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
-					struct io_submit_state *state)
-{
-	spin_lock(&ctx->completion_lock);
-	wq_list_splice(&ctx->locked_free_list, &state->free_list);
-	ctx->locked_free_nr = 0;
-	spin_unlock(&ctx->completion_lock);
-}
-
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
@@ -1010,17 +1000,6 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	void *reqs[IO_REQ_ALLOC_BATCH];
 	int ret;
 
-	/*
-	 * If we have more than a batch's worth of requests in our IRQ side
-	 * locked cache, grab the lock and move them over to our submission
-	 * side cache.
-	 */
-	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
-		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
-		if (!io_req_cache_empty(ctx))
-			return true;
-	}
-
 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
 
 	/*
@@ -2654,7 +2633,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	int nr = 0;
 
 	mutex_lock(&ctx->uring_lock);
-	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
 
 	while (!io_req_cache_empty(ctx)) {
 		req = io_extract_req(ctx);
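
For reference, the pattern being deleted is a classic two-level free cache: completions running under ->completion_lock parked retired requests on locked_free_list, and the submission path spliced the whole list into its private, lock-free free_list a batch at a time. Below is a minimal userspace C sketch of that scheme, assuming simplified stand-ins: a pthread spinlock for ->completion_lock, a bare singly linked list for io_wq_work_list, and malloc() for the slab. All names here (ctx_cache, cache_flush_locked, ...) are hypothetical illustrations, not io_uring API.

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdlib.h>

#define COMPL_BATCH 32			/* stand-in for IO_COMPL_BATCH */

struct req_node {
	struct req_node *next;
};

struct ctx_cache {
	/* submission-side cache: single consumer, no locking needed */
	struct req_node *free_list;

	/* completion-side cache, analogue of the removed fields */
	pthread_spinlock_t completion_lock;
	struct req_node *locked_free_list;
	unsigned int locked_free_nr;
};

/* completion path: park a retired request on the locked list */
static void cache_free_locked(struct ctx_cache *c, struct req_node *req)
{
	pthread_spin_lock(&c->completion_lock);
	req->next = c->locked_free_list;
	c->locked_free_list = req;
	c->locked_free_nr++;
	pthread_spin_unlock(&c->completion_lock);
}

/* submission path: steal the whole locked list in one critical section,
 * mirroring the removed io_flush_cached_locked_reqs() */
static void cache_flush_locked(struct ctx_cache *c)
{
	struct req_node *list, *tail;

	pthread_spin_lock(&c->completion_lock);
	list = c->locked_free_list;
	c->locked_free_list = NULL;
	c->locked_free_nr = 0;
	pthread_spin_unlock(&c->completion_lock);

	if (!list)
		return;
	/* append the stolen chain to the private free list */
	for (tail = list; tail->next; tail = tail->next)
		;
	tail->next = c->free_list;
	c->free_list = list;
}

/* allocation path, shaped like the removed __io_alloc_req_refill() hunk:
 * locked_free_nr is read without the lock (data_race() in the kernel),
 * so the spinlock is only taken once a batch has accumulated */
static struct req_node *cache_alloc(struct ctx_cache *c)
{
	if (c->locked_free_nr > COMPL_BATCH)
		cache_flush_locked(c);

	if (c->free_list) {
		struct req_node *req = c->free_list;

		c->free_list = req->next;
		return req;
	}
	return malloc(sizeof(struct req_node));	/* kmem_cache stand-in */
}

int main(void)
{
	struct ctx_cache c = { 0 };
	struct req_node *req;

	pthread_spin_init(&c.completion_lock, PTHREAD_PROCESS_PRIVATE);

	req = cache_alloc(&c);		/* fresh allocation */
	cache_free_locked(&c, req);	/* a "completion" retires it */
	cache_flush_locked(&c);		/* normally deferred until a batch */
	req = cache_alloc(&c);		/* now served from the cache */
	free(req);
	pthread_spin_destroy(&c.completion_lock);
	return 0;
}

Splicing the whole list in one critical section amortizes the spinlock cost over a batch of requests while keeping the common allocation path lock-free. Notably, the patch removes only the consumer side and the fields themselves; no code that adds to locked_free_list appears in the diff, which suggests the list had already lost its last producer.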