author	Jens Axboe <axboe@kernel.dk>	2021-06-17 10:08:11 -0600
committer	Jens Axboe <axboe@kernel.dk>	2021-06-17 10:08:11 -0600
commit	0e03496d1967abf1ebb151a24318c07d07f41f7f (patch)
tree	386e2a8d430775ff013242fca4e90eeba6da7728 /fs/io-wq.c
parent	io-wq: remove header files not needed anymore (diff)
io-wq: use private CPU mask
In preparation for allowing user specific CPU masks for IO thread creation, switch to using a mask embedded in the per-node wqe structure.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
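In practical terms, each per-node wqe now owns a dynamically allocated cpumask: it is allocated alongside the wqe, initialised from cpumask_of_node(), passed to set_cpus_allowed_ptr() when worker tasks are created, and freed together with the wqe. A minimal out-of-tree sketch of that ownership pattern follows; the names struct my_wqe, my_wqe_alloc and my_wqe_free are hypothetical stand-ins, not the actual io-wq structures.

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Hypothetical per-node structure that owns a private CPU mask. */
struct my_wqe {
	int node;
	cpumask_var_t cpu_mask;	/* defaults to the node's CPU mask */
};

static struct my_wqe *my_wqe_alloc(int node)
{
	struct my_wqe *wqe = kzalloc_node(sizeof(*wqe), GFP_KERNEL, node);

	if (!wqe)
		return NULL;
	if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL)) {
		kfree(wqe);
		return NULL;
	}
	wqe->node = node;
	/* Start out with the node's CPUs; a private copy can later be changed. */
	cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
	return wqe;
}

static void my_wqe_free(struct my_wqe *wqe)
{
	free_cpumask_var(wqe->cpu_mask);
	kfree(wqe);
}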
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r--	fs/io-wq.c	50
1 file changed, 43 insertions, 7 deletions
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 897b94530b57..2af8e1df4646 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -94,6 +94,8 @@ struct io_wqe {
	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
+
+	cpumask_var_t cpu_mask;
};
/*
@@ -638,7 +640,7 @@ fail:
	tsk->pf_io_worker = worker;
	worker->task = tsk;
-	set_cpus_allowed_ptr(tsk, cpumask_of_node(wqe->node));
+	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;
	raw_spin_lock_irq(&wqe->lock);
@@ -922,6 +924,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
+		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
+			goto err;
+		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].index = IO_WQ_ACCT_BOUND;
@@ -947,8 +952,12 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
-	for_each_node(node)
+	for_each_node(node) {
+		if (!wq->wqes[node])
+			continue;
+		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
+	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
@@ -1018,6 +1027,7 @@ static void io_wq_destroy(struct io_wq *wq)
			.cancel_all = true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
+		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
@@ -1032,31 +1042,57 @@ void io_wq_put_and_exit(struct io_wq *wq)
	io_wq_destroy(wq);
}
+struct online_data {
+	unsigned int cpu;
+	bool online;
+};
+
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
-	set_cpus_allowed_ptr(worker->task, cpumask_of_node(worker->wqe->node));
+	struct online_data *od = data;
+	if (od->online)
+		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
+	else
+		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return false;
}
-static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
+static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
-	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
+	struct online_data od = {
+		.cpu = cpu,
+		.online = online
+	};
	int i;
	rcu_read_lock();
	for_each_node(i)
-		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
+		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}
+static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
+
+	return __io_wq_cpu_online(wq, cpu, true);
+}
+
+static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
+
+	return __io_wq_cpu_online(wq, cpu, false);
+}
+
static __init int io_wq_init(void)
{
	int ret;
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
-					io_wq_cpu_online, NULL);
+					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
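The hotplug half of the patch hangs both an online and an offline callback off the same CPU hotplug multi-instance state, so every registered io_wq has its per-node masks updated as CPUs come and go. For readers unfamiliar with that facility, here is a bare-bones sketch of the cpuhp multi-instance pattern; the my_* names are placeholders, not io-wq code.

#include <linux/cpuhotplug.h>
#include <linux/list.h>

/* Hypothetical object that wants per-CPU online/offline notifications. */
struct my_obj {
	struct hlist_node cpuhp_node;
};

static enum cpuhp_state my_online_state;

static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct my_obj *obj = hlist_entry_safe(node, struct my_obj, cpuhp_node);

	/* React to @cpu coming online for this instance, e.g. set it in a mask. */
	(void)obj;
	return 0;
}

static int my_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	/* React to @cpu going offline, e.g. clear it from a mask. */
	return 0;
}

static int __init my_init(void)
{
	int ret;

	/* One dynamic state; every added instance gets both callbacks. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example/online",
				      my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_online_state = ret;
	return 0;
}

Instances would then be attached with cpuhp_state_add_instance_nocalls(my_online_state, &obj->cpuhp_node) and detached with cpuhp_state_remove_instance_nocalls(), which mirrors the cpuhp_state_remove_instance_nocalls() call visible in the io_wq_create() error path above.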