author    Kashyap Desai <kashyap.desai@broadcom.com>    2021-05-20 20:55:38 +0530
committer Martin K. Petersen <martin.petersen@oracle.com>    2021-06-02 00:56:17 -0400
commit    463429f8dd5c94aae4b0948c56e67eb21cb6964e (patch)
tree      18eafe7005302ff720737e0e37a7fd1f1b542738 /drivers/scsi/mpi3mr/mpi3mr_fw.c
parent    scsi: mpi3mr: Hardware workaround for UNMAP commands to NVMe drives (diff)
scsi: mpi3mr: Add support for threaded ISR
Register the driver for threaded interrupts.

By default the driver attempts I/O completion from interrupt context (the primary handler). Since the driver tracks outstanding I/Os per reply queue, it schedules the threaded ISR if any outstanding I/Os are expected on that particular reply queue.

The threaded ISR (secondary handler) loops on I/O completions as long as there are outstanding I/Os (a speculative check against the same per-reply-queue outstanding counter) or until it has completed a set number of commands (effectively a budget).

Link: https://lore.kernel.org/r/20210520152545.2710479-18-kashyap.desai@broadcom.com
Cc: sathya.prakash@broadcom.com
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
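The request_threaded_irq() pairing that the message describes is not part of the hunks below, so the following is a minimal sketch of how the primary and secondary handlers fit together. It is an illustration under assumptions, not code from this patch: mpi3mr_isr, mpi3mr_isr_poll and struct mpi3mr_intr_info come from the diff, while the per-vector setup helper, MPI3MR_NAME_LENGTH, and the mrioc->intr_info/driver_name/id fields are assumed for completeness.

	/*
	 * Illustrative sketch only: pairs a hard-IRQ primary handler with a
	 * threaded (secondary) handler via request_threaded_irq(). Only the
	 * handler names and the intr_info cookie are taken from the patch;
	 * the surrounding setup is assumed.
	 */
	static int mpi3mr_setup_one_irq(struct mpi3mr_ioc *mrioc, u16 index)
	{
		struct pci_dev *pdev = mrioc->pdev;
		struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
		int retval;

		intr_info->mrioc = mrioc;
		intr_info->msix_index = index;
		/* op_reply_q is attached later, when the operational reply queue is created */
		intr_info->op_reply_q = NULL;

		snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
		    mrioc->driver_name, mrioc->id, index);

		/*
		 * mpi3mr_isr runs in hard-IRQ context and normally completes I/O
		 * inline; when it returns IRQ_WAKE_THREAD, the kernel wakes
		 * mpi3mr_isr_poll in a dedicated kernel thread for this vector.
		 */
		retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
		    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
		if (retval)
			dev_err(&pdev->dev, "%s: unable to allocate interrupt %d\n",
			    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

The division of labour then matches the diff: mpi3mr_isr() runs the primary handler inline and only returns IRQ_WAKE_THREAD, after disable_irq_nosync(), when enable_irq_poll is set and pend_ios is non-zero; mpi3mr_isr_poll() drains completions with short usleep_range() pauses and re-enables the vector with enable_irq() once pend_ios drops to zero or the max_host_ios budget is reached.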
Diffstat (limited to 'drivers/scsi/mpi3mr/mpi3mr_fw.c')
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c  |  79
1 file changed, 76 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0418cb221262..6a7891de3f01 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -345,12 +345,16 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
reply_qidx = op_reply_q->qid - 1;
+ if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
+ return 0;
+
exp_phase = op_reply_q->ephase;
reply_ci = op_reply_q->ci;
reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&op_reply_q->in_use);
return 0;
}
@@ -361,6 +365,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx);
+ atomic_dec(&op_reply_q->pend_ios);
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
@@ -375,6 +380,14 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+ /*
+ * Exit completion loop to avoid CPU lockup
+ * Ensure remaining completion happens from threaded ISR.
+ */
+ if (num_op_reply > mrioc->max_host_ios) {
+ intr_info->op_reply_q->enable_irq_poll = true;
+ break;
+ }
} while (1);
@@ -383,6 +396,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
op_reply_q->ci = reply_ci;
op_reply_q->ephase = exp_phase;
+ atomic_dec(&op_reply_q->in_use);
return num_op_reply;
}
@@ -391,7 +405,7 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
struct mpi3mr_intr_info *intr_info = privdata;
struct mpi3mr_ioc *mrioc;
u16 midx;
- u32 num_admin_replies = 0;
+ u32 num_admin_replies = 0, num_op_reply = 0;
if (!intr_info)
return IRQ_NONE;
@@ -405,8 +419,10 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
if (!midx)
num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
- if (num_admin_replies)
+ if (num_admin_replies || num_op_reply)
return IRQ_HANDLED;
else
return IRQ_NONE;
@@ -415,15 +431,32 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
int ret;
if (!intr_info)
return IRQ_NONE;
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
/* Call primary ISR routine */
ret = mpi3mr_isr_primary(irq, privdata);
- return ret;
+ /*
+ * If more IOs are expected, schedule IRQ polling thread.
+ * Otherwise exit from ISR.
+ */
+ if (!intr_info->op_reply_q)
+ return ret;
+
+ if (!intr_info->op_reply_q->enable_irq_poll ||
+ !atomic_read(&intr_info->op_reply_q->pend_ios))
+ return ret;
+
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
+
+ return IRQ_WAKE_THREAD;
}
/**
@@ -438,6 +471,36 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata)
*/
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_op_reply = 0;
+
+ if (!intr_info || !intr_info->op_reply_q)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+
+ /* Poll for pending IOs completions */
+ do {
+ if (!mrioc->intr_enabled)
+ break;
+
+ if (!midx)
+ mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply +=
+ mpi3mr_process_op_reply_q(mrioc, intr_info);
+
+ usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
+
+ } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
+ (num_op_reply < mrioc->max_host_ios));
+
+ intr_info->op_reply_q->enable_irq_poll = false;
+ enable_irq(pci_irq_vector(mrioc->pdev, midx));
+
return IRQ_HANDLED;
}
@@ -1147,6 +1210,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
+ atomic_set(&op_reply_q->pend_ios, 0);
+ atomic_set(&op_reply_q->in_use, 0);
+ op_reply_q->enable_irq_poll = false;
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -1465,6 +1531,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
pi = 0;
op_req_q->pi = pi;
+ if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
+ > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
+ mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+
writel(op_req_q->pi,
&mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
@@ -2795,6 +2865,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
u32 ioc_status, ioc_config, i;
struct mpi3_ioc_facts_data facts_data;
+ mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
mrioc->change_count = 0;
if (!re_init) {
mrioc->cpu_count = num_online_cpus();
@@ -3081,6 +3152,8 @@ static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
mrioc->op_reply_qinfo[i].ci = 0;
mrioc->op_reply_qinfo[i].num_replies = 0;
mrioc->op_reply_qinfo[i].ephase = 0;
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
mpi3mr_memset_op_reply_q_buffers(mrioc, i);
mrioc->req_qinfo[i].ci = 0;