author		Jens Axboe <axboe@kernel.dk>	2019-08-19 12:15:59 -0600
committer	Jens Axboe <axboe@kernel.dk>	2019-08-20 11:01:58 -0600
commit		500f9fbadef86466a435726192f4ca4df7d94236 (patch)
tree		f73f93d0f7d5a76b4f71dfc70fa30008a93d204c /fs/io_uring.c
parent		Linux 5.3-rc5 (diff)
io_uring: fix potential hang with polled IO
If a request issue ends up being punted to async context to avoid
blocking, we can get into a situation where the original application
enters the poll loop for that very request before it has been issued.
This should not be an issue, except that the polling will hold the
io_uring uring_lock mutex for the duration of the poll. When the async
worker has actually issued the request, it needs to acquire this mutex
to add the request to the poll issued list. Since the application
polling is already holding this mutex, the workqueue sleeps on the
mutex forever, and the application thus never gets a chance to poll for
the very request it was interested in.

Fix this by ensuring that the polling drops the uring_lock occasionally
if it's not making any progress.

Reported-by: Jeffrey M. Birnbaum <jmbnyc@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
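The hang the message describes can be reproduced outside the kernel. The
following is a minimal userspace sketch, not kernel code: it assumes a
pthread stand-in where a "poller" busy-waits while holding a mutex (as
io_iopoll_check() held uring_lock) and a "worker" needs that same mutex to
publish a completion (as the punted issue needs it to add the request to the
poll list). The names demo_poller, demo_worker, lock and completed are
hypothetical and exist only to illustrate the periodic unlock/relock
breather the patch adds.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool completed;		/* stands in for the polled request */

/* Plays the role of the punted async issue: needs the lock to publish. */
static void *demo_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);	/* blocks while the poller spins */
	completed = true;		/* "add request to the poll list" */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Plays the role of the application polling for its own request. */
static void demo_poller(void)
{
	int iters = 0;

	pthread_mutex_lock(&lock);
	while (!completed) {
		/*
		 * Without this breather the loop spins forever: the worker
		 * can never take the lock to set "completed". Dropping and
		 * reacquiring every 8 iterations mirrors the
		 * "if (!(++iters & 7))" logic in the patch.
		 */
		if (!(++iters & 7)) {
			pthread_mutex_unlock(&lock);
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	printf("request completed after %d iterations\n", iters);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, demo_worker, NULL);
	demo_poller();
	pthread_join(&t, NULL);
	return 0;
}

Built with "cc -pthread", the program hangs if the breather block is removed
and exits promptly with it in place. POSIX does not guarantee the waiting
thread wins the lock on any particular unlock/relock cycle, but the repeated
window is enough in practice, which is the same property the kernel fix
relies on.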
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	36
1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 24bbe3cb7ad4..36f04d0b197b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -805,11 +805,34 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
 static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 			   long min)
 {
-	int ret = 0;
+	int iters, ret = 0;
+
+	/*
+	 * We disallow the app entering submit/complete with polling, but we
+	 * still need to lock the ring to prevent racing with polled issue
+	 * that got punted to a workqueue.
+	 */
+	mutex_lock(&ctx->uring_lock);
 
+	iters = 0;
 	do {
 		int tmin = 0;
 
+		/*
+		 * If a submit got punted to a workqueue, we can have the
+		 * application entering polling for a command before it gets
+		 * issued. That app will hold the uring_lock for the duration
+		 * of the poll right here, so we need to take a breather every
+		 * now and then to ensure that the issue has a chance to add
+		 * the poll to the issued list. Otherwise we can spin here
+		 * forever, while the workqueue is stuck trying to acquire the
+		 * very same mutex.
+		 */
+		if (!(++iters & 7)) {
+			mutex_unlock(&ctx->uring_lock);
+			mutex_lock(&ctx->uring_lock);
+		}
+
 		if (*nr_events < min)
 			tmin = min - *nr_events;
 
@@ -819,6 +842,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 		ret = 0;
 	} while (min && !*nr_events && !need_resched());
 
+	mutex_unlock(&ctx->uring_lock);
 	return ret;
 }
 
@@ -2280,15 +2304,7 @@ static int io_sq_thread(void *data)
 			unsigned nr_events = 0;
 
 			if (ctx->flags & IORING_SETUP_IOPOLL) {
-				/*
-				 * We disallow the app entering submit/complete
-				 * with polling, but we still need to lock the
-				 * ring to prevent racing with polled issue
-				 * that got punted to a workqueue.
-				 */
-				mutex_lock(&ctx->uring_lock);
 				io_iopoll_check(ctx, &nr_events, 0);
-				mutex_unlock(&ctx->uring_lock);
 			} else {
 				/*
 				 * Normal IO, just pretend everything completed.
@@ -3190,9 +3206,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		min_complete = min(min_complete, ctx->cq_entries);
 
 		if (ctx->flags & IORING_SETUP_IOPOLL) {
-			mutex_lock(&ctx->uring_lock);
 			ret = io_iopoll_check(ctx, &nr_events, min_complete);
-			mutex_unlock(&ctx->uring_lock);
 		} else {
 			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
 		}
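To make the end state easier to follow, here is a sketch of how
io_iopoll_check() reads with the hunks above applied. It is assembled from
the diff rather than copied from the tree: the event-reaping code that sits
between the two hunks is not part of this patch and is elided with a
comment, so this is a reading aid, not a verbatim excerpt of fs/io_uring.c.

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int iters, ret = 0;

	/* Lock the ring to avoid racing with punted, polled issue. */
	mutex_lock(&ctx->uring_lock);

	iters = 0;
	do {
		int tmin = 0;

		/* Breather: give a punted issue a chance at uring_lock. */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&ctx->uring_lock);
		}

		if (*nr_events < min)
			tmin = min - *nr_events;

		/* ... event reaping between the two hunks elided ... */
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}

With the locking moved inside this function, the io_sq_thread() and
io_uring_enter() callers shown above simply call io_iopoll_check() without
taking uring_lock themselves.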