author    James Smart <jsmart2021@gmail.com>    2019-08-14 16:57:11 -0700
committer Martin K. Petersen <martin.petersen@oracle.com>    2019-08-19 22:41:12 -0400
commit    c00f62e6c5468ed0673c583f1ff284274e817410 (patch)
tree      cc3d9054a97252f7fe0358461acb76138c58fbd3 /drivers/scsi/lpfc/lpfc_debugfs.c
parent    scsi: lpfc: Add NVMe sequence level error recovery support (diff)
download  linux-dev-c00f62e6c5468ed0673c583f1ff284274e817410.tar.xz
          linux-dev-c00f62e6c5468ed0673c583f1ff284274e817410.zip
scsi: lpfc: Merge per-protocol WQ/CQ pairs into single per-cpu pair
Currently, each hardware queue, typically allocated per-cpu, consists of a WQ/CQ pair per protocol. This means that if both SCSI and NVMe are supported, two WQ/CQ pairs exist for each hardware queue.

The separate queues are unnecessary. The current implementation wastes the memory backing the second set of queues, and using double the SLI-4 WQ/CQ resources means fewer hardware queues can be supported, so there may not always be enough to give every cpu its own pair. With only one WQ/CQ pair needed per hardware queue, more cpus can get their own pair.

Rework the implementation so that a single WQ/CQ pair is shared by both protocols.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
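For orientation, the consolidation can be sketched at the struct level. This is an illustrative before/after only: the member names are taken from the identifiers visible in this diff, the struct is assumed to be the per-hardware-queue context (struct lpfc_sli4_hdw_queue, declared in lpfc_sli4.h), and all unrelated members are omitted.

    /* Before this patch: one WQ/CQ pair and one abort-list lock per protocol */
    struct lpfc_sli4_hdw_queue {
            struct lpfc_queue *fcp_cq;              /* SCSI fast-path completion queue */
            struct lpfc_queue *fcp_wq;              /* SCSI fast-path work queue */
            struct lpfc_queue *nvme_cq;             /* NVMe fast-path completion queue */
            struct lpfc_queue *nvme_wq;             /* NVMe fast-path work queue */
            spinlock_t abts_scsi_buf_list_lock;     /* guards aborted SCSI buffers */
            spinlock_t abts_nvme_buf_list_lock;     /* guards aborted NVMe buffers */
            /* other members omitted */
    };

    /* After this patch: a single pair and a single lock shared by both protocols */
    struct lpfc_sli4_hdw_queue {
            struct lpfc_queue *io_cq;               /* shared completion queue */
            struct lpfc_queue *io_wq;               /* shared work queue */
            spinlock_t abts_io_buf_list_lock;       /* guards all aborted IO buffers */
            /* other members omitted */
    };

The debugfs changes below follow mechanically from this rename: per-protocol queue lookups and the nested scsi/nvme lock pairs collapse onto the single io_* members, which is why the diff is almost entirely deletions.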
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_debugfs.c')
-rw-r--r--    drivers/scsi/lpfc/lpfc_debugfs.c | 92
1 file changed, 11 insertions(+), 81 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 75055ee59e91..45f431fbe0d2 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
- spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
- spin_lock(&qp->abts_nvme_buf_list_lock);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
spin_lock(&qp->io_buf_list_get_lock);
spin_lock(&qp->io_buf_list_put_lock);
out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp->abts_nvme_io_bufs, out);
spin_unlock(&qp->io_buf_list_put_lock);
spin_unlock(&qp->io_buf_list_get_lock);
- spin_unlock(&qp->abts_nvme_buf_list_lock);
- spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
lpfc_debugfs_last_xripool++;
if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
continue;
pbl_pool = &multixri_pool->pbl_pool;
pvt_pool = &multixri_pool->pvt_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
- if (qp->nvme_wq)
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
scnprintf(tmp, sizeof(tmp),
"%03d: %4d %4d %4d %4d | %10d %10d ",
@@ -3786,23 +3782,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
int qidx;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
- if (qp->assoc_qid != cq_id)
- continue;
- *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
- if (*len >= max_cnt)
- return 1;
- }
- }
return 0;
}
@@ -3868,9 +3854,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
struct lpfc_queue *qp;
int rc;
- qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[eqidx].io_cq;
- *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+ *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
@@ -3878,28 +3864,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
if (*len >= max_cnt)
return 1;
- rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+ rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
-
- *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
-
- /* Reset max counter */
- qp->CQ_max_cqe = 0;
-
- if (*len >= max_cnt)
- return 1;
-
- rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
- max_cnt, qp->queue_id);
- if (rc)
- return 1;
- }
-
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
@@ -4348,7 +4317,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[qidx].io_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -4360,22 +4329,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
- /* NVME complete queue */
- if (phba->sli4_hba.hdwq) {
- qidx = 0;
- do {
- qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- } while (++qidx < phba->cfg_hdw_queue);
- }
goto error_out;
break;
case LPFC_IDIAG_MQ:
@@ -4419,20 +4372,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
/* FCP/SCSI work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- }
- /* NVME work queue */
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -6442,12 +6382,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_wq(phba, DUMP_IO, idx);
lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba);
@@ -6459,12 +6394,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_cq(phba, DUMP_IO, idx);
/*
* Dump Event Queues (EQs)