author    Sagi Grimberg <sagi@grimberg.me>        2022-03-21 13:57:27 +0200
committer Christoph Hellwig <hch@lst.de>          2022-03-29 09:29:04 +0200
commit    8832cf922151e9dfa2821736beb0ae2dd3968b6e (patch)
tree      e73b437a77400681d0c2bb1e94a52f2ddfcdb34a /drivers/nvme/target/core.c
parent    nvme-pci: add quirks for Samsung X5 SSDs (diff)
download  linux-dev-8832cf922151e9dfa2821736beb0ae2dd3968b6e.tar.xz
          linux-dev-8832cf922151e9dfa2821736beb0ae2dd3968b6e.zip
nvmet: use a private workqueue instead of the system workqueue
Any attempt to flush the kernel-global workqueues can deadlock, so we
should simply stop using them. Instead, introduce nvmet_wq, a generic
nvmet workqueue for work elements that do not explicitly require a
dedicated workqueue (as evidenced by the mere fact that they were using
system_wq). The changes were made with the following replacements:

- s/schedule_work(/queue_work(nvmet_wq, /g
- s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
- s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g

Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
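For illustration, here is a minimal kernel-module sketch of the same
conversion pattern: a driver-private workqueue replaces system_wq so
that flushing it cannot deadlock against unrelated work items. All
names here (example_wq, example_work) are hypothetical, not part of
this patch:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}

static int __init example_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure, matching the flag nvmet_wq is allocated with below.
	 */
	example_wq = alloc_workqueue("example-wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);

	/* Before: schedule_work(&example_work);  -- ran on system_wq */
	queue_work(example_wq, &example_work);

	/* Before: flush_scheduled_work();  -- flushed the global wq */
	flush_workqueue(example_wq);

	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");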
Diffstat (limited to 'drivers/nvme/target/core.c')
-rw-r--r--  drivers/nvme/target/core.c | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 92af37d33ec3..cd833b1dd47c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
/*
* This read/write semaphore is used to synchronize access to configuration
* information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
- schedule_work(&ctrl->fatal_err_work);
+ queue_work(nvmet_wq, &ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
}
@@ -1620,9 +1623,15 @@ static int __init nvmet_init(void)
goto out_free_zbd_work_queue;
}
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ if (!nvmet_wq) {
+ error = -ENOMEM;
+ goto out_free_buffered_work_queue;
+ }
+
error = nvmet_init_discovery();
if (error)
- goto out_free_work_queue;
+ goto out_free_nvmet_work_queue;
error = nvmet_init_configfs();
if (error)
@@ -1631,7 +1640,9 @@ static int __init nvmet_init(void)
out_exit_discovery:
nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+ destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
@@ -1643,6 +1654,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
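The init and exit hunks above follow the kernel's usual goto-based
unwind: resources are acquired in order, a failure jumps to the label
that releases everything acquired so far, and teardown mirrors init in
reverse. A minimal sketch of that pattern, with hypothetical names
(wq_a, wq_b) rather than the nvmet ones:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq_a;
static struct workqueue_struct *wq_b;

static int __init sketch_init(void)
{
	int error;

	wq_a = alloc_workqueue("sketch-a", 0, 0);
	if (!wq_a)
		return -ENOMEM;

	wq_b = alloc_workqueue("sketch-b", WQ_MEM_RECLAIM, 0);
	if (!wq_b) {
		error = -ENOMEM;
		goto out_free_wq_a;	/* unwind only what succeeded */
	}

	return 0;

out_free_wq_a:
	destroy_workqueue(wq_a);
	return error;
}

static void __exit sketch_exit(void)
{
	/* teardown mirrors init, in reverse order */
	destroy_workqueue(wq_b);
	destroy_workqueue(wq_a);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");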