author    Jens Axboe <axboe@kernel.dk>    2022-01-18 19:23:51 -0700
committer Jens Axboe <axboe@kernel.dk>    2022-01-18 19:28:12 -0700
commit    361aee450c6e36c8dbab712c94a8a7835bd92e25 (patch)
tree      ed619890204291ca1dab66fe74cade4b2e616e67 /fs/io-wq.c
parent    io-wq: perform both unstarted and started work cancelations in one go (diff)
io-wq: add intermediate work step between pending list and active work
We have a gap between when a worker removes an item from the work list and when that item becomes the worker's active work. In this state, the work item cannot be found by cancelations. It's a small window, but it does exist.

Add a temporary pointer to a work item that isn't on the pending work list anymore but also isn't yet the active work. This is needed because we must drop the wqe lock between grabbing the work item and marking it as active, to ensure that signal based cancelations are properly ordered.

Reported-by: Florian Fischer <florian.fl.fischer@fau.de>
Link: https://lore.kernel.org/io-uring/20220118151337.fac6cthvbnu7icoc@pasture/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
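In rough pseudocode, the ordering this patch establishes looks like the below (a condensed sketch of the diff that follows, not the literal kernel code):

	/* worker side: wqe->lock is held across the dequeue */
	work = io_get_next_work(acct, worker);	/* off the pending list */
	if (work) {
		raw_spin_lock(&worker->lock);
		worker->next_work = work;	/* now discoverable by cancel */
		raw_spin_unlock(&worker->lock);
	}
	raw_spin_unlock(&wqe->lock);		/* cancelation may run here */
	...
	io_assign_current_work(worker, work);	/* cur_work = work, next_work = NULL */

A canceler walking the workers takes worker->lock and checks both cur_work and next_work, so the item stays discoverable from the moment it leaves the pending list.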
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r--  fs/io-wq.c  |  34 +++++++++++++++++++++++++++++-----
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index db150186ce94..1efb134c98b7 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -48,6 +48,7 @@ struct io_worker {
 	struct io_wqe *wqe;
 
 	struct io_wq_work *cur_work;
+	struct io_wq_work *next_work;
 	raw_spinlock_t lock;
 
 	struct completion ref_done;
@@ -530,6 +531,7 @@ static void io_assign_current_work(struct io_worker *worker,
 
 	raw_spin_lock(&worker->lock);
 	worker->cur_work = work;
+	worker->next_work = NULL;
 	raw_spin_unlock(&worker->lock);
 }
@@ -554,9 +556,20 @@ get_next:
 		 * clear the stalled flag.
 		 */
 		work = io_get_next_work(acct, worker);
-		if (work)
+		if (work) {
 			__io_worker_busy(wqe, worker);
 
+			/*
+			 * Make sure cancelation can find this, even before
+			 * it becomes the active work. That avoids a window
+			 * where the work has been removed from our general
+			 * work list, but isn't yet discoverable as the
+			 * current work item for this worker.
+			 */
+			raw_spin_lock(&worker->lock);
+			worker->next_work = work;
+			raw_spin_unlock(&worker->lock);
+		}
 		raw_spin_unlock(&wqe->lock);
 		if (!work)
 			break;
@@ -972,6 +985,19 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
 
+static bool __io_wq_worker_cancel(struct io_worker *worker,
+				  struct io_cb_cancel_data *match,
+				  struct io_wq_work *work)
+{
+	if (work && match->fn(work, match->data)) {
+		work->flags |= IO_WQ_WORK_CANCEL;
+		set_notify_signal(worker->task);
+		return true;
+	}
+
+	return false;
+}
+
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_cb_cancel_data *match = data;
@@ -981,11 +1007,9 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 	 * may dereference the passed in work.
 	 */
 	raw_spin_lock(&worker->lock);
-	if (worker->cur_work &&
-	    match->fn(worker->cur_work, match->data)) {
-		set_notify_signal(worker->task);
+	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
+	    __io_wq_worker_cancel(worker, match, worker->next_work))
 		match->nr_running++;
-	}
 	raw_spin_unlock(&worker->lock);
 
 	return match->nr_running && !match->cancel_all;
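For reference, cancelations reach io_wq_worker_cancel() through io_wq_cancel_cb(), which takes a match callback plus a caller cookie and walks both pending and running work. A minimal sketch of such a callback (hypothetical example for illustration; the match functions actually passed in live on the io_uring side):

	/* hypothetical matcher: match every running/pending item */
	static bool match_all(struct io_wq_work *work, void *data)
	{
		return true;
	}

	io_wq_cancel_cb(wq, match_all, NULL, true);	/* cancel_all = true */

With next_work in place, a match in either slot flags the work with IO_WQ_WORK_CANCEL and signals the worker task, closing the window the commit message describes.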