author    Jens Axboe <axboe@kernel.dk>	2019-12-17 08:04:44 -0700
committer Jens Axboe <axboe@kernel.dk>	2020-01-20 17:03:59 -0700
commit    ce35a47a3a0208a77b4d31b7f2e8ed57d624093d (patch)
tree      903370888177b20dcbeab7e48a0452f696b858c8 /fs/io_uring.c
parent    io-wq: support concurrent non-blocking work (diff)
io_uring: add IOSQE_ASYNC
io_uring defaults to always doing inline submissions, if at all possible. But for larger copies, even if the data is fully cached, that can take a long time. Add an IOSQE_ASYNC flag that the application can set on the SQE - if set, it'll ensure that we always go async for those kinds of requests. Use the io-wq IO_WQ_WORK_CONCURRENT flag to ensure we get the concurrency we desire for this case.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
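As a usage note (not part of the patch itself): with this flag in place, an application opts a request out of inline submission by setting IOSQE_ASYNC on the SQE. A minimal liburing sketch follows - the file name, queue depth, and 1 MiB buffer size are arbitrary illustration choices, and it assumes liburing plus a kernel that carries this patch (5.6+).

/* async_read.c - force a large readv through io-wq via IOSQE_ASYNC.
 * Build: gcc -o async_read async_read.c -luring
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>
#include <liburing.h>

int main(int argc, char **argv)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	iov.iov_len = 1024 * 1024;	/* a "larger copy" per the commit message */
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base)
		return 1;

	if (io_uring_queue_init(8, &ring, 0) < 0) {
		perror("io_uring_queue_init");
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	/* Skip the inline submission attempt; punt straight to io-wq. */
	io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	free(iov.iov_base);
	return 0;
}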
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 115c7d413372..9ffcfdc6382b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -483,6 +483,7 @@ struct io_kiocb {
#define REQ_F_INFLIGHT 16384 /* on inflight list */
#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
#define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */
+#define REQ_F_FORCE_ASYNC 131072 /* IOSQE_ASYNC */
u64 user_data;
u32 result;
u32 sequence;
@@ -4017,8 +4018,17 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req_set_fail_links(req);
io_double_put_req(req);
}
- } else
+ } else if ((req->flags & REQ_F_FORCE_ASYNC) &&
+ !io_wq_current_is_worker()) {
+ /*
+ * Never try inline submit if IOSQE_ASYNC is set, go straight
+ * to async execution.
+ */
+ req->work.flags |= IO_WQ_WORK_CONCURRENT;
+ io_queue_async_work(req);
+ } else {
__io_queue_sqe(req, sqe);
+ }
}
static inline void io_queue_link_head(struct io_kiocb *req)
@@ -4031,7 +4041,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
}
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
- IOSQE_IO_HARDLINK)
+ IOSQE_IO_HARDLINK | IOSQE_ASYNC)
static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_submit_state *state, struct io_kiocb **link)
@@ -4044,6 +4054,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ret = -EINVAL;
goto err_req;
}
+ if (sqe->flags & IOSQE_ASYNC)
+ req->flags |= REQ_F_FORCE_ASYNC;
ret = io_req_set_file(state, req, sqe);
if (unlikely(ret)) {
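Since SQE_VALID_FLAGS now gates the new bit, kernels without this patch fail an SQE carrying IOSQE_ASYNC with -EINVAL. Below is a hedged sketch of a runtime feature probe built on that: it submits a NOP with the flag set and inspects the completion. The assumption that the rejection surfaces as -EINVAL in the CQE res field (rather than as a submit-time error) should be verified against the target kernel.

/* Probe IOSQE_ASYNC support with a flagged NOP. */
#include <errno.h>
#include <stdbool.h>
#include <liburing.h>

static bool iosqe_async_supported(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	bool supported = false;

	if (io_uring_queue_init(2, &ring, 0) < 0)
		return false;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
	io_uring_submit(&ring);

	/* A NOP completes with res == 0; an unsupported flag is
	 * assumed to complete with res == -EINVAL instead. */
	if (!io_uring_wait_cqe(&ring, &cqe)) {
		supported = (cqe->res != -EINVAL);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return supported;
}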