author    Jens Axboe <axboe@kernel.dk>  2020-01-08 11:04:00 -0700
committer Jens Axboe <axboe@kernel.dk>  2020-01-20 17:04:04 -0700
commit    f2842ab5b72d7ee5f7f8385c2d4f32c133f5837b (patch)
tree      02731be82d19be0a9793ab1cb2ccd957c6f021b8 /fs/io_uring.c
parent    io_uring: change io_ring_ctx bool fields into bit fields (diff)
io_uring: enable option to only trigger eventfd for async completions
If an application is using eventfd notifications with poll to know when new SQEs can be issued, it expects the subsequent reads/writes to complete inline. With that, it knows events are available and doesn't want spurious wakeups on the eventfd for those requests.

This adds IORING_REGISTER_EVENTFD_ASYNC, which works just like IORING_REGISTER_EVENTFD, except it only triggers notifications for events that happen from async completions (IRQ, or io-wq worker completions). Any completion inline from the submission itself will not trigger a notification.

Suggested-by: Mark Papadakis <markuspapadakis@icloud.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
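For context, a minimal userspace sketch of the intended usage (not part of this commit). It assumes a liburing version that wraps the new opcode as io_uring_register_eventfd_async(); the raw equivalent would be io_uring_register(ring_fd, IORING_REGISTER_EVENTFD_ASYNC, &efd, 1):

#include <sys/eventfd.h>
#include <poll.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        int efd, ret;

        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0)
                return 1;

        efd = eventfd(0, EFD_CLOEXEC);
        if (efd < 0)
                return 1;

        /* Signal the eventfd only for IRQ/io-wq (async) completions */
        ret = io_uring_register_eventfd_async(&ring, efd);
        if (ret < 0)
                return 1;

        /*
         * Inline completions from io_uring_submit() never signal efd;
         * the app reaps those CQEs directly after submitting. poll()
         * here only wakes up for completions that went async.
         */
        struct pollfd pfd = { .fd = efd, .events = POLLIN };
        poll(&pfd, 1, -1);

        io_uring_queue_exit(&ring);
        return 0;
}

With the plain IORING_REGISTER_EVENTFD opcode, the same efd would also fire for completions the application already reaped inline, which is exactly the spurious wakeup the commit message describes.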
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 42bf83b3fbd5..70656762244f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -206,6 +206,7 @@ struct io_ring_ctx {
         int account_mem: 1;
         int cq_overflow_flushed: 1;
         int drain_next: 1;
+        int eventfd_async: 1;
 
         /*
          * Ring buffer of indices into array of io_uring_sqe, which is
@@ -963,13 +964,20 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
         return &rings->cqes[tail & ctx->cq_mask];
 }
 
+static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+{
+        if (!ctx->eventfd_async)
+                return true;
+        return io_wq_current_is_worker() || in_interrupt();
+}
+
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
         if (waitqueue_active(&ctx->wait))
                 wake_up(&ctx->wait);
         if (waitqueue_active(&ctx->sqo_wait))
                 wake_up(&ctx->sqo_wait);
-        if (ctx->cq_ev_fd)
+        if (ctx->cq_ev_fd && io_should_trigger_evfd(ctx))
                 eventfd_signal(ctx->cq_ev_fd, 1);
 }
 
@@ -6556,10 +6564,17 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                 ret = io_sqe_files_update(ctx, arg, nr_args);
                 break;
         case IORING_REGISTER_EVENTFD:
+        case IORING_REGISTER_EVENTFD_ASYNC:
                 ret = -EINVAL;
                 if (nr_args != 1)
                         break;
                 ret = io_eventfd_register(ctx, arg);
+                if (ret)
+                        break;
+                if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
+                        ctx->eventfd_async = 1;
+                else
+                        ctx->eventfd_async = 0;
                 break;
         case IORING_UNREGISTER_EVENTFD:
                 ret = -EINVAL;
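The register path accepts either opcode through the same io_eventfd_register() call and then sets or clears ctx->eventfd_async, so a ring can be switched between modes by unregistering and re-registering (io_eventfd_register() returns -EBUSY while an eventfd is still registered). A hedged raw-syscall sketch of that switch; uring_register() and switch_to_async_only() are illustrative helpers, and it is assumed that <linux/io_uring.h> carries the post-patch IORING_* opcodes and that the libc headers expose __NR_io_uring_register:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int uring_register(int ring_fd, unsigned opcode, void *arg, unsigned nr)
{
        return (int) syscall(__NR_io_uring_register, ring_fd, opcode, arg, nr);
}

/* Switch an already-registered eventfd to async-only notifications */
static int switch_to_async_only(int ring_fd, int efd)
{
        /* Only one eventfd per ring: drop the current registration first */
        if (uring_register(ring_fd, IORING_UNREGISTER_EVENTFD, NULL, 0) < 0)
                return -1;
        /* nr_args must be 1, matching the check in __io_uring_register() */
        return uring_register(ring_fd, IORING_REGISTER_EVENTFD_ASYNC, &efd, 1);
}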