author:    Pavel Begunkov <asml.silence@gmail.com>  2019-12-28 14:13:03 +0300
committer: Jens Axboe <axboe@kernel.dk>             2020-01-20 17:04:02 -0700
commit:    2b85edfc0c90efc68dea3d665bb4111bf0694e05 (patch)
tree:      9e5cb950fa5d4fbafd3981e569ef3d9c856cf127 /fs/io_uring.c
parent:    pcpu_ref: add percpu_ref_tryget_many() (diff)
io_uring: batch getting pcpu references
percpu_ref_tryget() has its own overhead. Instead of getting a reference for
each request, grab a bunch once per io_submit_sqes().

~5% throughput boost for a "submit and wait 128 nops" benchmark.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>

__io_req_free_empty() -> __io_req_do_free()

Signed-off-by: Jens Axboe <axboe@kernel.dk>
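The benchmark itself is not part of the patch; one plausible userspace
approximation of "submit and wait 128 nops", using liburing (my sketch, not
the author's actual harness; error handling and timing elided):

/*
 * Queue 128 IORING_OP_NOP requests, submit them with a single syscall,
 * and wait for all completions -- repeatedly, so the io_submit_sqes()
 * fast path dominates. The iteration count is arbitrary.
 */
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	int i, iter;

	if (io_uring_queue_init(128, &ring, 0) < 0)
		return 1;

	for (iter = 0; iter < 1000; iter++) {
		/* queue 128 no-op requests */
		for (i = 0; i < 128; i++)
			io_uring_prep_nop(io_uring_get_sqe(&ring));
		/* one syscall: submit the batch and wait for it to finish */
		io_uring_submit_and_wait(&ring, 128);
		/* retire completions so the CQ ring has room next round */
		for (i = 0; i < 128; i++) {
			if (io_uring_wait_cqe(&ring, &cqe) == 0)
				io_uring_cqe_seen(&ring, cqe);
		}
	}
	io_uring_queue_exit(&ring);
	return 0;
}

Before this patch, each of the 128 requests paid one percpu_ref_tryget();
after it, the whole io_uring_submit_and_wait() batch pays one
percpu_ref_tryget_many().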
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 17a199c99c7c..9b869fb6c635 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1083,9 +1083,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
-	if (!percpu_ref_tryget(&ctx->refs))
-		return NULL;
-
 	if (!state) {
 		req = kmem_cache_alloc(req_cachep, gfp);
 		if (unlikely(!req))
@@ -1145,6 +1142,14 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 	}
 }
 
+static void __io_req_do_free(struct io_kiocb *req)
+{
+	if (likely(!io_is_fallback_req(req)))
+		kmem_cache_free(req_cachep, req);
+	else
+		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+}
+
 static void __io_free_req(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1166,11 +1171,9 @@ static void __io_free_req(struct io_kiocb *req)
 			wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
-	percpu_ref_put(&ctx->refs);
-	if (likely(!io_is_fallback_req(req)))
-		kmem_cache_free(req_cachep, req);
-	else
-		clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+
+	percpu_ref_put(&req->ctx->refs);
+	__io_req_do_free(req);
 }
 
 static bool io_link_cancel_timeout(struct io_kiocb *req)
@@ -4539,6 +4542,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			return -EBUSY;
 	}
 
+	if (!percpu_ref_tryget_many(&ctx->refs, nr))
+		return -EAGAIN;
+
 	if (nr > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, nr);
 		statep = &state;
@@ -4555,7 +4561,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 		if (!io_get_sqring(ctx, req, &sqe)) {
-			__io_free_req(req);
+			__io_req_do_free(req);
 			break;
 		}
 
@@ -4586,6 +4592,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 	}
 
+	if (submitted != nr)
+		percpu_ref_put_many(&ctx->refs, nr - submitted);
 	if (link)
 		io_queue_link_head(link);
 	if (statep)
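Taken together, the accounting after this patch is: one
percpu_ref_tryget_many(&ctx->refs, nr) on entry, one percpu_ref_put() per
request that actually completes, and one percpu_ref_put_many() for the
remainder. A toy userspace model of that balance (my illustration, not
kernel code) also shows why __io_req_do_free() must free the request
*without* dropping a reference: a request that fails in io_get_sqring()
has its reference returned through the bulk put instead.

/* Toy model: refs stands in for ctx->refs; build with a C11 compiler. */
#include <assert.h>
#include <stdatomic.h>

static atomic_long refs;

static void get_many(long nr) { atomic_fetch_add(&refs, nr); }
static void put_many(long nr) { atomic_fetch_sub(&refs, nr); }

static void req_do_free(void) { /* frees request memory only, no put */ }
static void free_req(void)    { put_many(1); req_do_free(); }

int main(void)
{
	long nr = 128, submitted = 100;

	get_many(nr);			/* io_submit_sqes() entry */
	for (long i = 0; i < submitted; i++)
		free_req();		/* each submitted req drops one ref */
	req_do_free();			/* failed req: memory only, no put */
	put_many(nr - submitted);	/* unused refs, incl. the failed req's */
	assert(atomic_load(&refs) == 0);/* balanced: nothing leaked */
	return 0;
}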