author		Pavel Begunkov <asml.silence@gmail.com>	2021-02-12 03:23:51 +0000
committer	Jens Axboe <axboe@kernel.dk>	2021-02-12 05:30:25 -0700
commit		d3d7298d05cb026305b0f5033acc9c9c4f281e14 (patch)
tree		109334fb85d7cadb8addd9ecfac0ef499f7ea9d1 /fs
parent		io_uring: take compl state from submit state (diff)
io_uring: optimise out unlikely link queue
__io_queue_sqe() tries to issue as many requests of a link as it can, using io_put_req_find_next() to extract the next one, targeting inline-completed requests. Now that __io_queue_sqe() is always used together with struct io_comp_state, next-propagation has only a small window left, and only for async reqs, which doesn't justify its existence. Remove it and make __io_queue_sqe() issue only the head request; this simplifies the code and will allow further optimisations.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
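For orientation, the patched __io_queue_sqe() comes out roughly as below. This is a condensed sketch stitched together from the hunks that follow, not a verbatim copy: the -EAGAIN punt branch (the ret == -EAGAIN / REQ_F_NOWAIT test and the io_arm_poll_handler() call) sits in context the hunks don't show and is filled in here on that assumption.

static void __io_queue_sqe(struct io_kiocb *req)
{
	/* prepared once up front; the old "again:" reissue loop is gone */
	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
	const struct cred *old_creds = NULL;
	int ret;

	/* creds are overridden for the single issue attempt only... */
	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
	    (req->work.flags & IO_WQ_WORK_CREDS) &&
	    req->work.identity->creds != current_cred())
		old_creds = override_creds(req->work.identity->creds);

	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);

	/* ...and reverted immediately, with no loop-carried creds state */
	if (old_creds)
		revert_creds(old_creds);

	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
		/* assumed from context between the hunks: arm poll, else punt */
		if (!io_arm_poll_handler(req))
			io_queue_async_work(req);
	} else if (likely(!ret)) {
		if (req->flags & REQ_F_COMPLETE_INLINE) {
			/* batch the inline completion in io_comp_state */
			struct io_comp_state *cs = &req->ctx->submit_state.comp;

			cs->reqs[cs->nr++] = req;
			if (cs->nr == ARRAY_SIZE(cs->reqs))
				io_submit_flush_completions(cs, req->ctx);
		} else {
			io_put_req(req);	/* drop submission reference */
		}
	} else {
		req_set_fail_links(req);
		io_put_req(req);
		io_req_complete(req, ret);
	}

	/* single exit point: the linked timeout is queued exactly once */
	if (linked_timeout)
		io_queue_linked_timeout(linked_timeout);
}

With the reissue loop gone, the creds revert and the linked-timeout queueing each happen at exactly one fixed point instead of being replicated on every exit path of the loop, which is where most of the 32 deleted lines go.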
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	42
1 file changed, 10 insertions(+), 32 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8c2613bf54d3..26d1080217e5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6563,26 +6563,20 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 }
 
 static void __io_queue_sqe(struct io_kiocb *req)
 {
-	struct io_kiocb *linked_timeout;
+	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
 	const struct cred *old_creds = NULL;
 	int ret;
 
-again:
-	linked_timeout = io_prep_linked_timeout(req);
-
 	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
 	    (req->work.flags & IO_WQ_WORK_CREDS) &&
-	    req->work.identity->creds != current_cred()) {
-		if (old_creds)
-			revert_creds(old_creds);
-		if (old_creds == req->work.identity->creds)
-			old_creds = NULL; /* restored original creds */
-		else
-			old_creds = override_creds(req->work.identity->creds);
-	}
+	    req->work.identity->creds != current_cred())
+		old_creds = override_creds(req->work.identity->creds);
 
 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
+	if (old_creds)
+		revert_creds(old_creds);
+
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
@@ -6595,9 +6589,6 @@ again:
 			 */
 			io_queue_async_work(req);
 		}
-
-		if (linked_timeout)
-			io_queue_linked_timeout(linked_timeout);
 	} else if (likely(!ret)) {
 		/* drop submission reference */
 		if (req->flags & REQ_F_COMPLETE_INLINE) {
@@ -6605,31 +6596,18 @@ again:
 			struct io_comp_state *cs = &ctx->submit_state.comp;
 
 			cs->reqs[cs->nr++] = req;
-			if (cs->nr == IO_COMPL_BATCH)
+			if (cs->nr == ARRAY_SIZE(cs->reqs))
 				io_submit_flush_completions(cs, ctx);
-			req = NULL;
 		} else {
-			req = io_put_req_find_next(req);
-		}
-
-		if (linked_timeout)
-			io_queue_linked_timeout(linked_timeout);
-
-		if (req) {
-			if (!(req->flags & REQ_F_FORCE_ASYNC))
-				goto again;
-			io_queue_async_work(req);
+			io_put_req(req);
 		}
 	} else {
-		/* un-prep timeout, so it'll be killed as any other linked */
-		req->flags &= ~REQ_F_LINK_TIMEOUT;
 		req_set_fail_links(req);
 		io_put_req(req);
 		io_req_complete(req, ret);
 	}
-
-	if (old_creds)
-		revert_creds(old_creds);
+	if (linked_timeout)
+		io_queue_linked_timeout(linked_timeout);
 }
 
 static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)