author	Daniel Wagner <dwagner@suse.de>	2022-08-29 11:28:41 +0200
committer	Christoph Hellwig <hch@lst.de>	2022-09-19 17:55:21 +0200
commit	1c467e259599864ec925d5b85066a0960320fb3c (patch)
tree	12d9440ae66662a6a82091cad0a528329fd744a0
parent	nvme-tcp: handle number of queue changes (diff)
nvme-rdma: handle number of queue changes
On reconnect, the number of queues might have changed.

In the case where we now have more queues available than before, we try to access queues which are not initialized yet. In the opposite case, where we have fewer queues than before, the connection attempt fails because the target doesn't support the old number of queues, and we end up in a reconnect loop.

Thus, only start the queues which are currently present in the tagset, limited by the number of available queues. Then update the tagset, and afterwards start any newly added queue.

Signed-off-by: Daniel Wagner <dwagner@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
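A minimal model of the two-phase start this patch introduces. This is a sketch, not the driver code: the struct fields and helpers below are simplified stand-ins, and only the range logic mirrors the patch.

/*
 * Minimal model of the two-phase queue start on reconnect.
 */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

struct ctrl {
	int queue_count;    /* queues negotiated on (re)connect, incl. admin */
	int nr_hw_queues;   /* I/O queues the current tagset was built for */
};

/* Stand-in for nvme_rdma_start_queue(): pretend queue 'idx' starts fine. */
static int start_queue(struct ctrl *c, int idx)
{
	printf("starting queue %d\n", idx);
	return 0;
}

static void stop_queue(struct ctrl *c, int idx)
{
	printf("stopping queue %d\n", idx);
}

/* Mirrors the patched nvme_rdma_start_io_queues(): starts [first, last). */
static int start_io_queues(struct ctrl *c, int first, int last)
{
	int i, ret = 0;

	for (i = first; i < last; i++) {
		ret = start_queue(c, i);
		if (ret)
			goto out_stop_queues;
	}
	return 0;

out_stop_queues:
	/* Roll back only the queues started by this call. */
	for (i--; i >= first; i--)
		stop_queue(c, i);
	return ret;
}

int main(void)
{
	/* Reconnect scenario: tagset built for 4 I/O queues, 8 now available. */
	struct ctrl c = { .queue_count = 9, .nr_hw_queues = 4 };

	/* Phase 1: only start queues the current tagset knows about. */
	int nr_queues = min(c.nr_hw_queues + 1, c.queue_count);

	if (start_io_queues(&c, 1, nr_queues))
		return 1;

	/* ... here the real driver resizes the tagset via
	 * blk_mq_update_nr_hw_queues(); we just model the result ... */
	c.nr_hw_queues = c.queue_count - 1;

	/* Phase 2: start the queues that became available on reconnect. */
	return start_io_queues(&c, nr_queues, c.nr_hw_queues + 1);
}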
-rw-r--r--	drivers/nvme/host/rdma.c	26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index ba08851e42c3..4c6df34f9d7a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -696,11 +696,12 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
return ret;
}
-static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
+static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
+ int first, int last)
{
int i, ret = 0;
- for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ for (i = first; i < last; i++) {
ret = nvme_rdma_start_queue(ctrl, i);
if (ret)
goto out_stop_queues;
@@ -709,7 +710,7 @@ static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
return 0;
out_stop_queues:
- for (i--; i >= 1; i--)
+ for (i--; i >= first; i--)
nvme_rdma_stop_queue(&ctrl->queues[i]);
return ret;
}
@@ -964,7 +965,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{
- int ret;
+ int ret, nr_queues;
ret = nvme_rdma_alloc_io_queues(ctrl);
if (ret)
@@ -980,7 +981,13 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_tag_set;
}
- ret = nvme_rdma_start_io_queues(ctrl);
+ /*
+ * Only start IO queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the
+ * queue number might have changed.
+ */
+ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
+ ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
if (ret)
goto out_cleanup_connect_q;
@@ -1000,6 +1007,15 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
nvme_unfreeze(&ctrl->ctrl);
}
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
+ ctrl->tag_set.nr_hw_queues + 1);
+ if (ret)
+ goto out_wait_freeze_timed_out;
+
return 0;
out_wait_freeze_timed_out:
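For intuition, a small worked example of the nr_queues arithmetic under assumed values (illustrative numbers, not taken from the commit). In both directions, phase one starts queues [1, nr_queues); only in the grow case is there work left for the second phase, after the tagset has been resized.

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Shrink: tagset built for 8 I/O queues, target now grants 4.
	 * queue_count = 5 (4 I/O + 1 admin), so phase 1 starts 1..4
	 * and phase 2 has nothing to do. */
	int nr_hw = 8, queue_count = 5;
	printf("shrink: nr_queues = %d\n", min(nr_hw + 1, queue_count)); /* 5 */

	/* Grow: tagset built for 4 I/O queues, target now grants 8.
	 * Phase 1 starts 1..4; phase 2 starts 5..8 once the tagset
	 * has been updated to the new queue count. */
	nr_hw = 4; queue_count = 9;
	printf("grow:   nr_queues = %d\n", min(nr_hw + 1, queue_count)); /* 5 */
	return 0;
}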