author    Pavel Begunkov <asml.silence@gmail.com>  2021-06-26 21:40:49 +0100
committer Jens Axboe <axboe@kernel.dk>  2021-06-30 14:15:40 -0600
commit    99ebe4efbd3882422db1fd6a1b477291ea8bdab7 (patch)
tree      aadeb6e8c2b16ed616bc351e24b1a28040221640 /fs/io_uring.c
parent    io_uring: refactor io_submit_flush_completions (diff)
io_uring: pre-initialise some of req fields
Most requests are allocated from an internal cache, so it's a waste of time to fully initialise them on every allocation. Instead, pre-init some of the fields we can during the initial allocation (e.g. kmalloc(), see io_alloc_req()) and keep them valid when a request is recycled. Four fields are handled in this patch:

->ctx always stays the same
->link is NULL on free, it's an invariant
->result doesn't even need to be initialised, it's just a precaution
->async_data is now cleared in io_dismantle_req(), as it's likely to never be allocated

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/892ba0e71309bba9fe9e0142472330bbf9d8f05d.1624739600.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
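For context, a minimal userspace sketch of the same pattern follows: bulk-allocate objects into a small cache, set the invariant fields once when the cache is refilled, and restore those invariants when an object is recycled, so nothing needs re-initialising on the per-request hot path. The names (struct my_req, struct my_cache, cache_alloc_req(), cache_recycle_req()) are illustrative stand-ins, not io_uring code.

/* Illustrative only: names below are stand-ins, not io_uring internals. */
#include <stdlib.h>

struct my_ctx;				/* stand-in for io_ring_ctx */

#define CACHE_BATCH 8			/* stand-in for IO_REQ_ALLOC_BATCH */

struct my_req {
	struct my_ctx *ctx;		/* always the same context: set once */
	struct my_req *link;		/* invariant: NULL while the req is free */
	void *async_data;		/* cleared on teardown, so free reqs hold NULL */
	int result;
};

struct my_cache {
	struct my_req *reqs[CACHE_BATCH];
	int nr_free;
};

static struct my_req *cache_alloc_req(struct my_cache *c, struct my_ctx *ctx)
{
	if (!c->nr_free) {
		/* refill in bulk and pre-init the fields kept valid on free */
		for (int i = 0; i < CACHE_BATCH; i++) {
			struct my_req *req = malloc(sizeof(*req));

			if (!req)
				break;
			req->ctx = ctx;
			req->link = NULL;
			req->async_data = NULL;
			req->result = 0;	/* not required, just safer */
			c->reqs[c->nr_free++] = req;
		}
		if (!c->nr_free)
			return NULL;
	}
	/* hot path: no per-field initialisation needed here */
	return c->reqs[--c->nr_free];
}

static void cache_recycle_req(struct my_cache *c, struct my_req *req)
{
	/* restore the invariants before recycling, mirroring io_dismantle_req() */
	if (req->async_data) {
		free(req->async_data);
		req->async_data = NULL;
	}
	req->link = NULL;
	if (c->nr_free < CACHE_BATCH)
		c->reqs[c->nr_free++] = req;
	else
		free(req);
}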
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b1620fbd69eb..b14de92832e1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1711,7 +1711,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 
 	if (!state->free_reqs) {
 		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
-		int ret;
+		int ret, i;
 
 		if (io_flush_cached_reqs(ctx))
 			goto got_req;
@@ -1729,6 +1729,20 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 				return NULL;
 			ret = 1;
 		}
+
+		/*
+		 * Don't initialise the fields below on every allocation, but
+		 * do that in advance and keep valid on free.
+		 */
+		for (i = 0; i < ret; i++) {
+			struct io_kiocb *req = state->reqs[i];
+
+			req->ctx = ctx;
+			req->link = NULL;
+			req->async_data = NULL;
+			/* not necessary, but safer to zero */
+			req->result = 0;
+		}
 		state->free_reqs = ret;
 	}
 got_req:
@@ -1752,8 +1766,10 @@ static void io_dismantle_req(struct io_kiocb *req)
 		io_put_file(req->file);
 	if (req->fixed_rsrc_refs)
 		percpu_ref_put(req->fixed_rsrc_refs);
-	if (req->async_data)
+	if (req->async_data) {
 		kfree(req->async_data);
+		req->async_data = NULL;
+	}
 }
 
 /* must to be called somewhat shortly after putting a request */
@@ -6534,15 +6550,11 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	/* same numerical values with corresponding REQ_F_*, safe to copy */
 	req->flags = sqe_flags = READ_ONCE(sqe->flags);
 	req->user_data = READ_ONCE(sqe->user_data);
-	req->async_data = NULL;
 	req->file = NULL;
-	req->ctx = ctx;
-	req->link = NULL;
 	req->fixed_rsrc_refs = NULL;
 	/* one is dropped after submission, the other at completion */
 	atomic_set(&req->refs, 2);
 	req->task = current;
-	req->result = 0;
 
 	/* enforce forwards compatibility on users */
 	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))