author     Olivier Langlois <olivier@trillion01.com>   2021-06-22 05:17:39 -0700
committer  Jens Axboe <axboe@kernel.dk>                2021-06-30 14:15:39 -0600
commit     59b735aeeb0f23a760bc21f1c5a1ab6c79e9fe0e
tree       9faf30ef1178458bbfd79719eb2617b7accea9d2 /fs/io_uring.c
parent     io_uring: add IOPOLL and reserved field checks to IORING_OP_UNLINKAT
io_uring: reduce latency by reissuing the operation
It is quite frequent that when an operation fails and returns EAGAIN,
the data becomes available between that failure and the call to
vfs_poll() done by io_arm_poll_handler(). Detecting this situation and
reissuing the operation is much faster than pushing the operation to
the io-wq.

Performance improvement testing has been performed with:
Single thread, 1 TCP connection receiving a 5 Mbps stream, no sqpoll.

4 measurements have been taken:

1. The time it takes to process a read request when data is already
   available
2. The time it takes to process by calling io_issue_sqe() twice after
   vfs_poll() indicated that data was available
3. The time it takes to execute io_queue_async_work()
4. The time it takes to complete a read request asynchronously

2.25% of all the read operations used the new path.

ready data (baseline)
  average  3657.94182918628
  min      580
  max      20098
  stddev   1213.15975908162

reissue completion
  average  7882.67567567568
  min      2316
  max      28811
  stddev   1982.79172973284

insert io-wq time
  average  8983.82276995305
  min      3324
  max      87816
  stddev   2551.60056552038

async time completion
  average  24670.4758861127
  min      10758
  max      102612
  stddev   3483.92416873804

Conclusion:
On average, reissuing the sqe with the patched code is 1.1 usec faster,
and in the worst-case scenario 59 usec faster, than placing the request
on the io-wq (the raw timings above appear to be in nanoseconds:
8983.82 - 7882.68 ~= 1101 ns ~= 1.1 usec).

On average, completion by reissuing the sqe with the patched code is
16.79 usec faster, and in the worst-case scenario 73.8 usec faster,
than async completion.

Signed-off-by: Olivier Langlois <olivier@trillion01.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9e8441419bb1b8f3c3fcc607b2713efecdef2136.1624364038.git.olivier@trillion01.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
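The scenario the patch targets is easy to reproduce from userspace: a
recv submitted on a TCP socket whose receive buffer happens to be empty
at submission time. Below is a minimal sketch of the benchmark workload
described above (one thread, one TCP connection, no sqpoll), assuming
liburing is available; the port, queue depth, and buffer size are
illustrative values, not anything taken from the patch:

/* Minimal sketch of the benchmark workload: one thread, one TCP
 * connection, no sqpoll.  Assumes liburing is installed; link with
 * -luring.  Error handling is trimmed for brevity.
 */
#include <liburing.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define QD 8 /* illustrative queue depth */

int main(void)
{
	struct io_uring ring;
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(5000) }; /* illustrative port */
	char buf[4096];
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	if (io_uring_queue_init(QD, &ring, 0) < 0)
		return 1;

	for (;;) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
		struct io_uring_cqe *cqe;

		/* A 5 Mbps stream leaves the socket empty most of the
		 * time, so the kernel's nonblocking attempt sees -EAGAIN;
		 * this patch rechecks readiness at that point instead of
		 * always punting the request to io-wq. */
		io_uring_prep_recv(sqe, fd, buf, sizeof(buf), 0);
		io_uring_submit(&ring);

		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		if (cqe->res <= 0) {	/* error or EOF */
			io_uring_cqe_seen(&ring, cqe);
			break;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}

Each recv that arrives at an empty socket is exactly the case where the
2.25% fast-path figure above comes from: data that shows up between the
failed nonblocking attempt and the vfs_poll() check.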
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a0bb3cc7d3bb..ab30c0a0c09b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5160,7 +5160,13 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
 	return mask;
 }
 
-static bool io_arm_poll_handler(struct io_kiocb *req)
+enum {
+	IO_APOLL_OK,
+	IO_APOLL_ABORTED,
+	IO_APOLL_READY
+};
+
+static int io_arm_poll_handler(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 	struct io_ring_ctx *ctx = req->ctx;
@@ -5170,22 +5176,22 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	int rw;
 
 	if (!req->file || !file_can_poll(req->file))
-		return false;
+		return IO_APOLL_ABORTED;
 	if (req->flags & REQ_F_POLLED)
-		return false;
+		return IO_APOLL_ABORTED;
 	if (def->pollin)
 		rw = READ;
 	else if (def->pollout)
 		rw = WRITE;
 	else
-		return false;
+		return IO_APOLL_ABORTED;
 	/* if we can't nonblock try, then no point in arming a poll handler */
 	if (!io_file_supports_async(req, rw))
-		return false;
+		return IO_APOLL_ABORTED;
 
 	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 	if (unlikely(!apoll))
-		return false;
+		return IO_APOLL_ABORTED;
 	apoll->double_poll = NULL;
 
 	req->flags |= REQ_F_POLLED;
@@ -5211,12 +5217,14 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	if (ret || ipt.error) {
 		io_poll_remove_double(req);
 		spin_unlock_irq(&ctx->completion_lock);
-		return false;
+		if (ret)
+			return IO_APOLL_READY;
+		return IO_APOLL_ABORTED;
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
 					mask, apoll->poll.events);
-	return true;
+	return IO_APOLL_OK;
 }
 
 static bool __io_poll_remove_one(struct io_kiocb *req,
@@ -6445,6 +6453,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
 	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
 	int ret;
 
+issue_sqe:
 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
 	/*
@@ -6464,12 +6473,16 @@ static void __io_queue_sqe(struct io_kiocb *req)
 			io_put_req(req);
 		}
 	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
-		if (!io_arm_poll_handler(req)) {
+		switch (io_arm_poll_handler(req)) {
+		case IO_APOLL_READY:
+			goto issue_sqe;
+		case IO_APOLL_ABORTED:
 			/*
 			 * Queued up for async execution, worker will release
 			 * submit reference when the iocb is actually submitted.
 			 */
 			io_queue_async_work(req);
+			break;
 		}
 	} else {
 		io_req_complete_failed(req, ret);
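Stripped of the kernel specifics, the change converts a boolean "could
we arm poll?" answer into a tri-state so the caller can distinguish
"retry inline" from "punt to a worker". Here is a self-contained sketch
of that pattern, with stubbed-out helpers standing in for
io_issue_sqe(), io_arm_poll_handler() and io_queue_async_work(); all
names below are illustrative, none are kernel APIs:

#include <errno.h>
#include <stdio.h>

/* Tri-state result mirroring the patch's IO_APOLL_* values. */
enum arm_result {
	ARM_OK,      /* poll handler armed; completion arrives later */
	ARM_ABORTED, /* could not arm; fall back to the worker queue */
	ARM_READY,   /* became ready while arming; retry inline      */
};

/* Stub: pretend the first attempt hits EAGAIN, the second succeeds. */
static int issue_nonblocking(void)
{
	static int attempts;
	return attempts++ == 0 ? -EAGAIN : 0;
}

/* Stub: report that readiness raced with the failed attempt. */
static enum arm_result arm_poll(void)
{
	return ARM_READY;
}

static void queue_async_work(void)
{
	puts("punted to worker");
}

static void submit(void)
{
	int ret;

issue:
	ret = issue_nonblocking();
	if (ret == -EAGAIN) {
		switch (arm_poll()) {
		case ARM_READY:
			/* Readiness raced with the failed attempt:
			 * retrying here skips the worker-queue round
			 * trip entirely. */
			goto issue;
		case ARM_ABORTED:
			queue_async_work();
			break;
		case ARM_OK:
			break; /* poll callback drives completion */
		}
	} else {
		printf("completed, ret=%d\n", ret);
	}
}

int main(void)
{
	submit();
	return 0;
}

Note the loop guard in the real patch: REQ_F_POLLED is set on the first
pass through io_arm_poll_handler(), and a request that already has it
set gets IO_APOLL_ABORTED, so the goto can fire at most once per
request.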