author		Jens Axboe <axboe@kernel.dk>	2020-01-31 17:16:48 -0700
committer	Jens Axboe <axboe@kernel.dk>	2020-02-03 17:27:47 -0700
commit		1a417f4e618e05fba29ba222f1e8555c302376ce (patch)
tree		24b6070b4400fe2f5bb3220e170cb28a860bf4c8 /fs/io_uring.c
parent		io_uring: remove extra ->file check (diff)
io_uring: fix sporadic double CQE entry for close
We punt close to async for the final fput(), but even in that case we log
the completion before the async work has actually run. We rely on the
request not having a files table assigned to detect what the final async
close should do. However, if we leave the async punt to __io_queue_sqe(),
we'll get ->files assigned, and that makes io_close_finish() think it
should both close the filp again (which does no harm) AND log a new CQE
event for this request. This causes duplicate CQEs.

Queue the request up for async manually so we don't grab files needlessly
and trigger this condition.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
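To make the mechanism concrete, here is a minimal sketch of io_close_finish()
as the hunks below imply it reads. This is a reconstruction for illustration,
not the kernel source: the container_of() setup and the exact placement of
the ->files test are assumed from the commit message and the hunk context.

static void io_close_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	/*
	 * ->files doubles as the "do the full close" flag. If the punt
	 * went through __io_queue_sqe(), ->files got assigned along the
	 * way, so this branch runs even though io_close() already logged
	 * the completion: filp_close() a second time (harmless) and a
	 * second io_cqring_add_event() -- the duplicate CQE.
	 */
	if (req->work.files) {
		int ret = filp_close(req->close.put_file, req->work.files);

		if (ret < 0)
			req_set_fail_links(req);
		io_cqring_add_event(req, ret);
	}

	/* the final fput(), the part we actually punted for */
	fput(req->close.put_file);

	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

With the fix, the fput()-only punt never has ->files set, so the branch
above is skipped and exactly one CQE is logged per close.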
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a3c75aff8d10..b9c9e04cc1cb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2843,16 +2843,13 @@ static void io_close_finish(struct io_wq_work **workptr)
 		int ret;
 
 		ret = filp_close(req->close.put_file, req->work.files);
-		if (ret < 0) {
+		if (ret < 0)
 			req_set_fail_links(req);
-		}
 		io_cqring_add_event(req, ret);
 	}
 
 	fput(req->close.put_file);
 
-	/* we bypassed the re-issue, drop the submission reference */
-	io_put_req(req);
 	io_put_req_find_next(req, &nxt);
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
@@ -2894,7 +2891,13 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
 eagain:
 	req->work.func = io_close_finish;
-	return -EAGAIN;
+	/*
+	 * Do manual async queue here to avoid grabbing files - we don't
+	 * need the files, and it'll cause io_close_finish() to close
+	 * the file again and cause a double CQE entry for this request
+	 */
+	io_queue_async_work(req);
+	return 0;
 }
 
 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
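For context on why io_close() now returns 0 instead of -EAGAIN: returning
-EAGAIN hands the punt back to __io_queue_sqe(), whose generic async path
grabs the files table before queueing the work. A rough sketch of that
caller-side path, assumed from the commit message rather than copied from
the source (io_issue_sqe() and io_grab_files() exist in this file, but the
flow here is simplified):

	/* inside __io_queue_sqe(), simplified and partly assumed */
	ret = io_issue_sqe(req, sqe, &nxt, true);
	if (ret == -EAGAIN) {
		/*
		 * The generic punt pins ->files for opcodes that may need
		 * them -- precisely the assignment that later makes
		 * io_close_finish() log a second CQE. io_close() queueing
		 * its own async work and returning 0 skips this path.
		 */
		io_grab_files(req);		/* error handling omitted */
		io_queue_async_work(req);
	}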