author     Pavel Begunkov <asml.silence@gmail.com>  2020-10-27 23:25:35 +0000
committer  Jens Axboe <axboe@kernel.dk>             2020-12-09 12:03:59 -0700
commit     863e05604a6fb45f0f56b3e9eca5cd533001253b
tree       90d7dfe47fddafa08e63ec7a49ca0034e9a0fb39 /fs/io_uring.c
parent     io_uring: split poll and poll_remove structs

io_uring: track link's head and tail during submit

Explicitly save not only a link's head in io_submit_sqe[s]() but the tail
as well. That's in preparation for keeping linked requests in a singly
linked list.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
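To make the motivation concrete, below is a minimal userspace sketch of the
pattern this patch prepares for. It is illustrative only: the struct and
field names echo io_submit_link, but the singly linked layout (link_next) is
an assumption about the planned follow-up conversion, not this patch's code,
which still chains requests through the doubly linked link_list. Caching the
tail is what keeps appending to a singly linked chain O(1); without it,
every append would have to walk the chain from head.

#include <assert.h>
#include <stddef.h>

/* Stand-ins for io_kiocb/io_submit_link; names and layout here are
 * illustrative assumptions, not the kernel's definitions. */
struct req {
	struct req *link_next;		/* next request in the chain */
};

struct submit_link {
	struct req *head;		/* first request of the current chain */
	struct req *last;		/* most recently appended request */
};

/* O(1) append: the cached 'last' avoids walking from 'head'. */
static void link_append(struct submit_link *link, struct req *req)
{
	req->link_next = NULL;
	if (!link->head)
		link->head = req;	/* first request starts a new chain */
	else
		link->last->link_next = req;
	link->last = req;
}

int main(void)
{
	struct submit_link link = { NULL, NULL };
	struct req a = { NULL }, b = { NULL }, c = { NULL };

	link_append(&link, &a);
	link_append(&link, &b);
	link_append(&link, &c);

	assert(link.head == &a && link.last == &c);
	assert(a.link_next == &b && b.link_next == &c);
	return 0;
}

Note that in this patch itself link->last is only recorded (list_add_tail()
still operates on head->link_list); it becomes load-bearing once a later
patch switches the chain to a singly linked list.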
Diffstat (limited to 'fs/io_uring.c')

 fs/io_uring.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 10cfb6d17994..0441185ac510 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6536,8 +6536,13 @@ static inline void io_queue_link_head(struct io_kiocb *req,
 		io_queue_sqe(req, NULL, cs);
 }
 
+struct io_submit_link {
+	struct io_kiocb *head;
+	struct io_kiocb *last;
+};
+
 static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			 struct io_kiocb **link, struct io_comp_state *cs)
+			 struct io_submit_link *link, struct io_comp_state *cs)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
@@ -6549,8 +6554,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	 * submitted sync once the chain is complete. If none of those
 	 * conditions are true (normal request), then just queue it.
 	 */
-	if (*link) {
-		struct io_kiocb *head = *link;
+	if (link->head) {
+		struct io_kiocb *head = link->head;
 
 		/*
 		 * Taking sequential execution of a link, draining both sides
@@ -6571,11 +6576,12 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		}
 		trace_io_uring_link(ctx, req, head);
 		list_add_tail(&req->link_list, &head->link_list);
+		link->last = req;
 
 		/* last request of a link, enqueue the link */
 		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
 			io_queue_link_head(head, cs);
-			*link = NULL;
+			link->head = NULL;
 		}
 	} else {
 		if (unlikely(ctx->drain_next)) {
@@ -6589,7 +6595,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			ret = io_req_defer_prep(req, sqe);
 			if (unlikely(ret))
 				req->flags |= REQ_F_FAIL_LINK;
-			*link = req;
+			link->head = req;
+			link->last = req;
 		} else {
 			io_queue_sqe(req, sqe, cs);
 		}
@@ -6769,7 +6776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 {
 	struct io_submit_state state;
-	struct io_kiocb *link = NULL;
+	struct io_submit_link link;
 	int i, submitted = 0;
 
 	/* if we have a backlog and couldn't flush it all, return BUSY */
@@ -6789,6 +6796,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	refcount_add(nr, &current->usage);
 
 	io_submit_state_start(&state, ctx, nr);
+	link.head = NULL;
 	for (i = 0; i < nr; i++) {
 		const struct io_uring_sqe *sqe;
 
@@ -6834,8 +6842,8 @@ fail_req:
 		percpu_counter_sub(&tctx->inflight, unused);
 		put_task_struct_many(current, unused);
 	}
-	if (link)
-		io_queue_link_head(link, &state.comp);
+	if (link.head)
+		io_queue_link_head(link.head, &state.comp);
 	io_submit_state_end(&state);
 
 	/* Commit SQ ring head once we've consumed and submitted all SQEs */