author		Pavel Begunkov <asml.silence@gmail.com>	2020-04-15 00:39:50 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-04-14 19:16:59 -0600
commit		31af27c7cc9f675d93a135dca99e6413f9096f1d (patch)
tree		fb117c1957897b8eff97aec74593fb86c810e69f /fs
parent		io_uring: kill already cached timeout.seq_offset (diff)
io_uring: don't count rqs failed after current one
When checking for draining with __req_need_defer(), it tries to match how many requests were sent before the current one against the number already completed. Dropped SQEs are included in req->sequence, but they will never appear in the CQ. To compensate for that, __req_need_defer() subtracts ctx->cached_sq_dropped. However, what it should really use is the number of SQEs dropped __before__ the current one. In other words, a request submitted afterwards shouldn't affect dequeueing from the drain queue of previously submitted ones.

Instead of saving the proper ctx->cached_sq_dropped in each request, subtract it from req->sequence at initialisation, so the sequence only counts properly submitted requests.

note: this also changes the behaviour of timeouts, but
1. it already diverges from the description because of using the SQ
2. the description is ambiguous regarding dropped SQEs

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
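As a rough illustration of the accounting change (not the kernel code itself), the sketch below models the drain check with simplified stand-in types. The field names mirror those used in io_uring.c, but the struct layout, helper names, test harness, and the scenario numbers are hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	/* Minimal stand-ins for the io_uring bookkeeping fields involved here. */
	struct ctx {
		unsigned cached_sq_head;	/* SQEs consumed from the SQ ring       */
		unsigned cached_sq_dropped;	/* SQEs dropped (failed at submit) so far */
		unsigned cached_cq_tail;	/* CQEs posted so far                   */
		unsigned cached_cq_overflow;	/* CQEs lost to CQ overflow             */
	};

	struct req {
		unsigned sequence;
	};

	/* Old scheme: sequence counts every consumed SQE, and the defer check
	 * compensates with the *current* dropped count, i.e. including SQEs
	 * dropped after this request was submitted. */
	static unsigned old_init_sequence(const struct ctx *c)
	{
		return c->cached_sq_head;
	}

	static bool old_need_defer(const struct ctx *c, const struct req *r)
	{
		return r->sequence != c->cached_cq_tail + c->cached_sq_dropped
					+ c->cached_cq_overflow;
	}

	/* New scheme: subtract the drops known at init time, so the sequence
	 * counts only properly submitted requests and later drops can't
	 * release the drained request early. */
	static unsigned new_init_sequence(const struct ctx *c)
	{
		return c->cached_sq_head - c->cached_sq_dropped;
	}

	static bool new_need_defer(const struct ctx *c, const struct req *r)
	{
		return r->sequence != c->cached_cq_tail + c->cached_cq_overflow;
	}

	int main(void)
	{
		/* Hypothetical scenario: 3 SQEs submitted cleanly, then the
		 * drained request is initialised... */
		struct ctx c = { .cached_sq_head = 3 };
		struct req old_r = { .sequence = old_init_sequence(&c) };
		struct req new_r = { .sequence = new_init_sequence(&c) };

		/* ...then a *later* SQE is dropped, and only 2 of the first
		 * 3 requests have completed so far. */
		c.cached_sq_head = 4;
		c.cached_sq_dropped = 1;
		c.cached_cq_tail = 2;

		printf("old defer: %d, new defer: %d\n",
		       old_need_defer(&c, &old_r), new_need_defer(&c, &new_r));
		/* old defer: 0 (released one completion too early),
		 * new defer: 1 (still waits for the third completion) */
		return 0;
	}

In this toy run the old check treats the later drop as if it happened before the drained request, letting it run after only two of its three predecessors complete; the new check keeps it deferred until all three are done.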
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3fc33ba4855d..381d50becd04 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -957,8 +957,8 @@ static inline bool __req_need_defer(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
-			+ atomic_read(&ctx->cached_cq_overflow);
+	return req->sequence != ctx->cached_cq_tail
+				+ atomic_read(&ctx->cached_cq_overflow);
 }
 
 static inline bool req_need_defer(struct io_kiocb *req)
@@ -5801,7 +5801,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * it can be used to mark the position of the first IO in the
 	 * link list.
 	 */
-	req->sequence = ctx->cached_sq_head;
+	req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
 	req->opcode = READ_ONCE(sqe->opcode);
 	req->user_data = READ_ONCE(sqe->user_data);
 	req->io = NULL;