author     Jens Axboe <axboe@kernel.dk>  2021-04-01 20:41:15 -0600
committer  Jens Axboe <axboe@kernel.dk>  2021-04-02 09:24:20 -0600
commit     230d50d448acb6639991440913299e50cacf1daf (patch)
tree       80c9957edc04cc1203dd3741c26193417dcd697e /fs/io_uring.c
parent     io_uring: fix EIOCBQUEUED iter revert (diff)
io_uring: move reissue into regular IO path
It's non-obvious how retry is done for block-backed files, when it happens off the kiocb done path. It also makes it tricky to deal with the iov_iter handling.

Just mark the req as needing a reissue, and handle it from the submission path instead. This makes it directly obvious that we're not re-importing the iovec from userspace past the submit point, and it means that we can just reuse our usual -EAGAIN retry path from the read/write handling.

At some point in the future, we'll gain the ability to always reliably return -EAGAIN through the stack. A previous attempt on the block side didn't pan out and got reverted, hence the need to check for this information out-of-band right now.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
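To make the shape of the change concrete, here is a minimal, self-contained sketch of the pattern the patch moves to. The struct and the helper names (do_io, on_complete, submit) are invented for the example and are not io_uring API: the completion side only records that a retry is needed via a flag, and the submission side is the single place that performs the resubmission.

    #include <errno.h>
    #include <stdio.h>

    #define REQ_F_REISSUE (1U << 0)

    struct request {
            unsigned int flags;
            int result;
            int attempts;   /* stand-in: fail with -EAGAIN this many times */
    };

    /* stand-in for the actual I/O submission */
    static int do_io(struct request *req)
    {
            return req->attempts-- > 0 ? -EAGAIN : 0;
    }

    /* completion side: record the need to retry, never retry from here */
    static void on_complete(struct request *req, int res)
    {
            if (res == -EAGAIN) {
                    req->flags |= REQ_F_REISSUE;
                    return;
            }
            req->result = res;
    }

    /* submission side: the one place that owns the retry loop */
    static void submit(struct request *req)
    {
            do {
                    req->flags &= ~REQ_F_REISSUE;
                    on_complete(req, do_io(req));
            } while (req->flags & REQ_F_REISSUE);
    }

    int main(void)
    {
            struct request req = { .attempts = 2 };

            submit(&req);
            printf("result=%d\n", req.result);
            return 0;
    }

Keeping the retry in one spot is what gives the property the commit message calls out: the iovec imported at submit time is reused as-is, never re-imported from userspace.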
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 717942474fa9..8be542050648 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -697,6 +697,7 @@ enum {
REQ_F_NO_FILE_TABLE_BIT,
REQ_F_LTIMEOUT_ACTIVE_BIT,
REQ_F_COMPLETE_INLINE_BIT,
+ REQ_F_REISSUE_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -740,6 +741,8 @@ enum {
REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
/* completion is deferred through io_comp_state */
REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
+ /* caller should reissue async */
+ REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
};
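For readers new to this two-enum idiom: the first enum assigns bit positions (the *_BIT names) and the second turns each position into a mask with BIT(). A tiny standalone illustration, with BIT() defined locally here the way the kernel effectively defines it:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* bit positions, in the style of the REQ_F_*_BIT enum */
    enum {
            REQ_F_COMPLETE_INLINE_BIT,
            REQ_F_REISSUE_BIT,
            __REQ_F_LAST_BIT,       /* not a real bit, marks the end */
    };

    /* masks derived from those positions, as the second enum does */
    enum {
            REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
            REQ_F_REISSUE         = BIT(REQ_F_REISSUE_BIT),
    };

    int main(void)
    {
            unsigned long flags = 0;

            flags |= REQ_F_REISSUE;                         /* set */
            printf("%d\n", !!(flags & REQ_F_REISSUE));      /* prints 1 */
            flags &= ~REQ_F_REISSUE;                        /* clear */
            printf("%d\n", !!(flags & REQ_F_REISSUE));      /* prints 0 */
            return 0;
    }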
struct async_poll {
@@ -2503,8 +2506,10 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
if (req->rw.kiocb.ki_flags & IOCB_WRITE)
kiocb_end_write(req);
- if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+ if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
+ req->flags |= REQ_F_REISSUE;
return;
+ }
if (res != req->result)
req_set_fail_links(req);
if (req->flags & REQ_F_BUFFER_SELECTED)
@@ -3283,9 +3288,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
ret = io_iter_do_read(req, iter);
- if (ret == -EIOCBQUEUED) {
- goto out_free;
- } else if (ret == -EAGAIN) {
+ if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
/* IOPOLL retry should happen for io-wq threads */
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
goto done;
@@ -3295,6 +3298,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
/* some cases will consume bytes even on error returns */
iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = 0;
+ } else if (ret == -EIOCBQUEUED) {
+ goto out_free;
} else if (ret <= 0 || ret == io_size || !force_nonblock ||
(req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
/* read all, failed, already did sync or don't want to retry */
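The reordering in the io_read() hunks above is the subtle part: a read can return -EIOCBQUEUED and still have been flagged REQ_F_REISSUE by the completion handler, so the reissue check now has to run before the queued check. A condensed, hypothetical rendering of that decision ladder (handle_read_result and the outcome enum are invented for the sketch; the EIOCBQUEUED value is the kernel-internal one, not part of userspace errno.h):

    #include <errno.h>

    #define EIOCBQUEUED 529         /* kernel-internal errno value */
    #define REQ_F_REISSUE (1U << 0)

    enum read_outcome { READ_RETRY, READ_QUEUED, READ_DONE };

    struct request {
            unsigned int flags;
    };

    static enum read_outcome handle_read_result(struct request *req, long ret)
    {
            /* reissue wins even over -EIOCBQUEUED: the completion handler
             * may have flagged an already-queued request for retry */
            if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE))
                    return READ_RETRY;
            if (ret == -EIOCBQUEUED)
                    return READ_QUEUED;
            return READ_DONE;       /* success, error, or partial-read handling */
    }

    int main(void)
    {
            struct request req = { .flags = REQ_F_REISSUE };

            /* queued but flagged for reissue: must retry, not treat as queued */
            return handle_read_result(&req, -EIOCBQUEUED) == READ_RETRY ? 0 : 1;
    }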
@@ -3407,6 +3412,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
else
ret2 = -EINVAL;
+ if (req->flags & REQ_F_REISSUE)
+ ret2 = -EAGAIN;
+
/*
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
* retry them without IOCB_NOWAIT.
@@ -6160,6 +6168,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
ret = -ECANCELED;
if (!ret) {
+ req->flags &= ~REQ_F_REISSUE;
do {
ret = io_issue_sqe(req, 0);
/*
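The final hunk clears REQ_F_REISSUE before the io-wq worker's issue loop, so a marker left over from the earlier inline submission attempt cannot be mistaken for a retry request on the worker's fresh attempt. A hypothetical shape of that loop (worker, issue, and the attempts field are invented for the sketch, standing in for io_wq_submit_work() and io_issue_sqe()):

    #include <errno.h>

    #define REQ_F_REISSUE (1U << 0)

    struct request {
            unsigned int flags;
            int attempts;   /* stand-in: fail with -EAGAIN this many times */
    };

    /* stand-in for io_issue_sqe(); may set REQ_F_REISSUE as a side effect */
    static int issue(struct request *req)
    {
            if (req->attempts-- > 0) {
                    req->flags |= REQ_F_REISSUE;
                    return -EAGAIN;
            }
            return 0;
    }

    static void worker(struct request *req)
    {
            int ret;

            /* drop any stale marker from the inline attempt first */
            req->flags &= ~REQ_F_REISSUE;
            do {
                    ret = issue(req);
            } while (ret == -EAGAIN);       /* worker may block; keep trying */
    }

    int main(void)
    {
            struct request req = { .flags = REQ_F_REISSUE, .attempts = 1 };

            worker(&req);
            return 0;
    }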