author    Jens Axboe <[email protected]>    2024-09-08 16:34:55 -0600
committer Jens Axboe <[email protected]>    2024-09-08 16:43:57 -0600
commit    0e0bcf07ec5b305ce7612385b06580dcbe5bc6a5
tree      3538f3afae2e5887eb2c3383d8f958f613e48e7b
parent    c9f9ce65c2436879779d39c6e65b95c74a206e49
io_uring/eventfd: move refs to refcount_t
An atomic_t is currently used for the struct io_ev_fd references, and there are no issues with it. While the ref getting and putting for the eventfd code is somewhat performance critical for cases where eventfd signaling is used (news flash, you should not...), it probably doesn't warrant using an atomic_t for this. Let's just move it to refcount_t to get the added protection against over/underflows.

Link: https://lore.kernel.org/lkml/[email protected]/
Reported-by: kernel test robot <[email protected]>
Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
Signed-off-by: Jens Axboe <[email protected]>
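For context, refcount_t is a drop-in replacement here; every atomic_t call in the patch has a direct refcount_t counterpart. Below is a minimal sketch of the same get/put-with-RCU pattern outside of io_uring (struct ev_obj and the ev_obj_* helpers are illustrative names, not part of this patch):

#include <linux/refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative object with the same lifetime rules as io_ev_fd. */
struct ev_obj {
	refcount_t refs;
	struct rcu_head rcu;
};

static void ev_obj_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct ev_obj, rcu));
}

static struct ev_obj *ev_obj_alloc(void)
{
	struct ev_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refs, 1);	/* initial reference */
	return obj;
}

/* Take a reference only if the object is still live (refs > 0). */
static bool ev_obj_get(struct ev_obj *obj)
{
	return refcount_inc_not_zero(&obj->refs);
}

/* Drop a reference; free after an RCU grace period once it hits zero. */
static void ev_obj_put(struct ev_obj *obj)
{
	if (refcount_dec_and_test(&obj->refs))
		call_rcu(&obj->rcu, ev_obj_free);
}

Unlike a raw atomic_t, refcount_t saturates instead of wrapping on overflow and warns on underflow, so a refcounting bug degrades into a leak plus a warning rather than a potential use-after-free.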
 io_uring/eventfd.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
index d9836d43725f..e37fddd5d9ce 100644
--- a/io_uring/eventfd.c
+++ b/io_uring/eventfd.c
@@ -15,7 +15,7 @@ struct io_ev_fd {
struct eventfd_ctx *cq_ev_fd;
unsigned int eventfd_async: 1;
struct rcu_head rcu;
- atomic_t refs;
+ refcount_t refs;
atomic_t ops;
};
@@ -37,7 +37,7 @@ static void io_eventfd_do_signal(struct rcu_head *rcu)
eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
- if (atomic_dec_and_test(&ev_fd->refs))
+ if (refcount_dec_and_test(&ev_fd->refs))
io_eventfd_free(rcu);
}
@@ -63,7 +63,7 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
*/
if (unlikely(!ev_fd))
return;
- if (!atomic_inc_not_zero(&ev_fd->refs))
+ if (!refcount_inc_not_zero(&ev_fd->refs))
return;
if (ev_fd->eventfd_async && !io_wq_current_is_worker())
goto out;
@@ -77,7 +77,7 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
}
}
out:
- if (atomic_dec_and_test(&ev_fd->refs))
+ if (refcount_dec_and_test(&ev_fd->refs))
call_rcu(&ev_fd->rcu, io_eventfd_free);
}
@@ -137,7 +137,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
ev_fd->eventfd_async = eventfd_async;
ctx->has_evfd = true;
- atomic_set(&ev_fd->refs, 1);
+ refcount_set(&ev_fd->refs, 1);
atomic_set(&ev_fd->ops, 0);
rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
return 0;
@@ -152,7 +152,7 @@ int io_eventfd_unregister(struct io_ring_ctx *ctx)
if (ev_fd) {
ctx->has_evfd = false;
rcu_assign_pointer(ctx->io_ev_fd, NULL);
- if (atomic_dec_and_test(&ev_fd->refs))
+ if (refcount_dec_and_test(&ev_fd->refs))
call_rcu(&ev_fd->rcu, io_eventfd_free);
return 0;
}