 drivers/scsi/mpi3mr/mpi/mpi30_tool.h |   1
 drivers/scsi/mpi3mr/mpi3mr.h         |  31
 drivers/scsi/mpi3mr/mpi3mr_app.c     | 128
 drivers/scsi/mpi3mr/mpi3mr_fw.c      | 144
 4 files changed, 281 insertions(+), 23 deletions(-)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
index 3b960893870f..50a65b16a818 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
@@ -9,6 +9,7 @@
#define MPI3_DIAG_BUFFER_TYPE_FW (0x02)
#define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01)
+#define MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED (0x01)
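/*
 * Annotation: judging from how mpi3mr_issue_diag_buf_post() sets this flag
 * only when diag_buffer->is_segmented is true, it tells the firmware that
 * the posted address points to a list of per-segment DMA addresses rather
 * than to one contiguous buffer.
 */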
struct mpi3_diag_buffer_post_request {
__le16 host_tag;
u8 ioc_use_only02;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0d72b5f1b69d..ab36aa2dfdc4 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.12.0.3.50"
-#define MPI3MR_DRIVER_RELDATE "11-November-2024"
+#define MPI3MR_DRIVER_VERSION "8.12.1.0.50"
+#define MPI3MR_DRIVER_RELDATE "28-January-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -80,13 +80,14 @@ extern atomic64_t event_counter;
/* Admin queue management definitions */
#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
-#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
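/*
 * Sizing note: with 16-byte admin reply frames, growing the admin reply
 * queue from 4 to 8 pages of 4K doubles its depth from 1024 to 2048
 * descriptors (8 * 4096 / 16).
 */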
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD2K 2048
#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
@@ -328,6 +329,7 @@ enum mpi3mr_reset_reason {
#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
+
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
@@ -387,6 +389,7 @@ struct mpi3mr_ioc_facts {
u16 max_msix_vectors;
u8 personality;
u8 dma_mask;
+ bool max_req_limit;
u8 protocol_flags;
u8 sge_mod_mask;
u8 sge_mod_value;
@@ -456,6 +459,8 @@ struct op_req_qinfo {
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
* @qtype: Type of queue (types defined in enum queue_type)
+ * @qfull_watermark: Watermark at which the reply queue is treated as
+ *		     nearly full, to avoid a reply queue full condition
*/
struct op_reply_qinfo {
u16 ci;
@@ -471,6 +476,7 @@ struct op_reply_qinfo {
bool enable_irq_poll;
atomic_t in_use;
enum queue_type qtype;
+ u16 qfull_watermark;
};
/**
@@ -928,6 +934,8 @@ struct trigger_event_data {
* @size: Buffer size
* @addr: Virtual address
* @dma_addr: Buffer DMA address
+ * @is_segmented: Whether the buffer is segmented
+ * @disabled_after_reset: Whether the buffer is disabled after a reset
*/
struct diag_buffer_desc {
u8 type;
@@ -937,6 +945,8 @@ struct diag_buffer_desc {
u32 size;
void *addr;
dma_addr_t dma_addr;
+ bool is_segmented;
+ bool disabled_after_reset;
};
/**
@@ -1090,6 +1100,7 @@ struct scmd_priv {
* @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @io_admin_reset_sync: Flag to quiesce I/O and admin reply processing
+ *                       during a reset
* @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
@@ -1153,6 +1164,12 @@ struct scmd_priv {
* @snapdump_trigger_active: Snapdump trigger active flag
* @pci_err_recovery: PCI error recovery in progress
* @block_on_pci_err: Block IO during PCI error recovery
+ * @reply_qfull_count: Occurrences of the reply queue full avoidance kicking in
+ * @prevent_reply_qfull: Enable reply queue full prevention
+ * @seg_tb_support: Segmented trace buffer support
+ * @num_tb_segs: Number of segments in the trace buffer
+ * @trace_buf_pool: DMA pool for segmented trace buffer segments
+ * @trace_buf: Trace buffer segments memory descriptor
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1276,6 +1293,7 @@ struct mpi3mr_ioc {
u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
+ u8 io_admin_reset_sync;
int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
@@ -1351,6 +1369,13 @@ struct mpi3mr_ioc {
bool fw_release_trigger_active;
bool pci_err_recovery;
bool block_on_pci_err;
+ atomic_t reply_qfull_count;
+ bool prevent_reply_qfull;
+ bool seg_tb_support;
+ u32 num_tb_segs;
+ struct dma_pool *trace_buf_pool;
+ struct segments *trace_buf;
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 7589f48aebc8..26582776749f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -12,23 +12,98 @@
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
/**
- * mpi3mr_alloc_trace_buffer: Allocate trace buffer
+ * mpi3mr_alloc_trace_buffer: Allocate segmented trace buffer
* @mrioc: Adapter instance reference
* @trace_size: Trace buffer size
*
- * Allocate trace buffer
+ * Allocate either segmented memory pools or contiguous buffer
+ * based on the controller capability for the host trace
+ * buffer.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+ int i, sz;
+ u64 *diag_buffer_list = NULL;
+ dma_addr_t diag_buffer_list_dma;
+ u32 seg_count;
+
+ if (mrioc->seg_tb_support) {
+ seg_count = (trace_size) / MPI3MR_PAGE_SIZE_4K;
+ trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
+
+ diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * seg_count,
+ &diag_buffer_list_dma, GFP_KERNEL);
+ if (!diag_buffer_list)
+ return -1;
+
+ mrioc->num_tb_segs = seg_count;
+
+ sz = sizeof(struct segments) * seg_count;
+ mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->trace_buf)
+ goto trace_buf_failed;
+
+ mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
+ &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
+ 0);
+ if (!mrioc->trace_buf_pool) {
+ ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
+ goto trace_buf_pool_failed;
+ }
+
+ for (i = 0; i < seg_count; i++) {
+ mrioc->trace_buf[i].segment =
+ dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
+ &mrioc->trace_buf[i].segment_dma);
+ diag_buffer_list[i] =
+ (u64) mrioc->trace_buf[i].segment_dma;
+ if (!diag_buffer_list[i])
+ goto tb_seg_alloc_failed;
+ }
- diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
- trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
- if (diag_buffer->addr) {
- dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ diag_buffer->addr = diag_buffer_list;
+ diag_buffer->dma_addr = diag_buffer_list_dma;
+ diag_buffer->is_segmented = true;
+
+	dprint_init(mrioc,
+	    "segmented trace diag buffer is allocated successfully seg_count:%d\n",
+	    seg_count);
return 0;
+ } else {
+ diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+ if (diag_buffer->addr) {
+ dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ return 0;
+ }
+ return -1;
}
+
+tb_seg_alloc_failed:
+ if (mrioc->trace_buf_pool) {
+ for (i = 0; i < mrioc->num_tb_segs; i++) {
+ if (mrioc->trace_buf[i].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[i].segment,
+ mrioc->trace_buf[i].segment_dma);
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+trace_buf_pool_failed:
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+trace_buf_failed:
+ if (diag_buffer_list)
+ dma_free_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * mrioc->num_tb_segs,
+ diag_buffer_list, diag_buffer_list_dma);
return -1;
}
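/*
 * Annotation on the segmented path: diag_buffer->addr/dma_addr no longer
 * describe trace data; they hold a table of seg_count u64 DMA addresses,
 * one per 4K segment carved from trace_buf_pool. The per-segment virtual
 * and DMA addresses live in mrioc->trace_buf[] for later freeing. The NULL
 * check on diag_buffer_list[i] is safe because trace_buf is kzalloc'ed, so
 * a failed dma_pool_zalloc() leaves segment_dma zero.
 */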
@@ -100,8 +175,9 @@ retry_trace:
dprint_init(mrioc,
"trying to allocate trace diag buffer of size = %dKB\n",
trace_size / 1024);
- if (get_order(trace_size) > MAX_PAGE_ORDER ||
+ if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
retry = true;
trace_size -= trace_dec_size;
dprint_init(mrioc, "trace diag buffer allocation failed\n"
@@ -161,6 +237,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
u8 prev_status;
int retval = 0;
+ if (diag_buffer->disabled_after_reset) {
+		dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
+		    "as it is disabled after reset\n", __func__);
+ return -1;
+ }
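/*
 * Annotation: disabled_after_reset is set in mpi3mr_revalidate_factsdata()
 * (see the mpi3mr_fw.c hunk below) when post-reset firmware stops reporting
 * MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED, so a stale segmented
 * buffer is never re-posted to firmware that cannot parse it.
 */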
+
memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -177,8 +259,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
- dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
- diag_buffer->type);
+ if (diag_buffer->is_segmented)
+ diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
+
+ dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
+ diag_buffer->type, diag_buffer->is_segmented);
+
prev_status = diag_buffer->status;
diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
init_completion(&mrioc->init_cmds.done);
@@ -3061,6 +3147,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(reply_queue_count);
/**
+ * reply_qfull_count_show - Show reply qfull count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Retrieves the current value of the reply_qfull_count from the mrioc structure and
+ * formats it as a string for display.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
+}
+
+static DEVICE_ATTR_RO(reply_qfull_count);
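/*
 * Usage sketch, assuming the standard SCSI host sysfs layout:
 *
 *   cat /sys/class/scsi_host/host0/reply_qfull_count
 *
 * A steadily climbing value indicates the watermark push-back in
 * mpi3mr_op_request_post() is engaging often.
 */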
+
+/**
* logging_level_show - Show controller debug level
* @dev: class device
* @attr: Device attributes
@@ -3152,6 +3261,7 @@ static struct attribute *mpi3mr_host_attrs[] = {
&dev_attr_fw_queue_depth.attr,
&dev_attr_op_req_q_count.attr,
&dev_attr_reply_queue_count.attr,
+ &dev_attr_reply_qfull_count.attr,
&dev_attr_logging_level.attr,
&dev_attr_adp_state.attr,
NULL,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5ed31fe57474..7796fdce03c8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
-
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
@@ -459,7 +459,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
@@ -554,7 +554,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
@@ -1302,7 +1302,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
retval = 0;
- ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
return retval;
}
@@ -1355,6 +1355,19 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
"\tcontroller while sas transport support is enabled at the\n"
"\tdriver, please reboot the system or reload the driver\n");
+ if (mrioc->seg_tb_support) {
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
+ ioc_err(mrioc,
+ "critical error: previously enabled segmented trace\n"
+ " buffer capability is disabled after reset. Please\n"
+ " update the firmware or reboot the system or\n"
+ " reload the driver to enable trace diag buffer\n");
+ mrioc->diag_buffers[0].disabled_after_reset = true;
+ } else
+ mrioc->diag_buffers[0].disabled_after_reset = false;
+ }
+
if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
GFP_KERNEL);
@@ -1717,7 +1730,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
(!retval)?"successful":"failed", ioc_status,
ioc_config);
if (retval)
@@ -2104,15 +2117,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
}
reply_qid = qidx + 1;
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
- if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
- !mrioc->pdev->revision)
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+
+ if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
+ if (mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ } else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
+
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
atomic_set(&op_reply_q->in_use, 0);
op_reply_q->enable_irq_poll = false;
+ op_reply_q->qfull_watermark =
+ op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
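/*
 * Worked example, assuming MPI3MR_THRESHOLD_REPLY_COUNT is 100 as defined
 * elsewhere in this driver: a 2K-deep reply queue gets a watermark of
 * 2048 - 2 * 100 = 1848 pending I/Os, keeping two threshold batches of
 * headroom for replies still in flight.
 */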
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -2416,8 +2436,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
void *segment_base_addr;
u16 req_sz = mrioc->facts.op_req_sz;
struct segments *segments = op_req_q->q_segments;
+ struct op_reply_qinfo *op_reply_q = NULL;
reply_qidx = op_req_q->reply_qid - 1;
+ op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
if (mrioc->unrecoverable)
return -EFAULT;
@@ -2448,6 +2470,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
goto out;
}
+	/* Reply queue is nearing full; push back I/Os to the SCSI midlayer */
+	if (mrioc->prevent_reply_qfull &&
+	    (atomic_read(&op_reply_q->pend_ios) >
+	    op_reply_q->qfull_watermark)) {
+ atomic_inc(&mrioc->reply_qfull_count);
+ retval = -EAGAIN;
+ goto out;
+ }
+
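/*
 * Annotation: the -EAGAIN raised here reaches the callers of
 * mpi3mr_op_request_post(); in the SCSI submission path the driver is
 * expected to translate any such failure into SCSI_MLQUEUE_HOST_BUSY so
 * the midlayer requeues the command until pend_ios drains below the
 * watermark (caller behavior assumed, not shown in this diff).
 */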
segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
req_entry = (u8 *)segment_base_addr +
((pi % op_req_q->segment_qd) * req_sz);
@@ -3091,6 +3122,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
@@ -4214,6 +4248,13 @@ retry_init:
mrioc->shost->transportt = mpi3mr_transport_template;
}
+ if (mrioc->facts.max_req_limit)
+ mrioc->prevent_reply_qfull = true;
+
+ if (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
+ mrioc->seg_tb_support = true;
+
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -4370,6 +4411,7 @@ retry_init:
goto out_failed_noretry;
}
+	mrioc->io_admin_reset_sync = 0;	/* re-enable admin/op reply processing */
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
@@ -4671,7 +4713,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*/
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
- u16 i;
+ u16 i, j;
struct mpi3mr_intr_info *intr_info;
struct diag_buffer_desc *diag_buffer;
@@ -4806,6 +4848,26 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
diag_buffer = &mrioc->diag_buffers[i];
+ if ((i == 0) && mrioc->seg_tb_support) {
+ if (mrioc->trace_buf_pool) {
+ for (j = 0; j < mrioc->num_tb_segs; j++) {
+ if (mrioc->trace_buf[j].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[j].segment,
+ mrioc->trace_buf[j].segment_dma);
+ mrioc->trace_buf[j].segment = NULL;
+ }
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+ diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
+ }
if (diag_buffer->addr) {
dma_free_coherent(&mrioc->pdev->dev,
diag_buffer->size, diag_buffer->addr,
@@ -4883,7 +4945,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
}
ioc_info(mrioc,
- "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status,
ioc_config);
}
@@ -5229,6 +5291,55 @@ cleanup_drv_cmd:
}
/**
+ * mpi3mr_check_op_admin_proc - Check if reply queues are in use
+ * @mrioc: Adapter instance reference
+ *
+ * Check if any of the operational reply queues
+ * or the admin reply queue are currently in use.
+ * If any queue is in use, this function waits for
+ * a maximum of 10 seconds for them to become available.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
+{
+	u16 timeout = 10 * 10;	/* 10 seconds, polled in 100 ms steps */
+ u16 elapsed_time = 0;
+ bool op_admin_in_use = false;
+
+ do {
+ op_admin_in_use = false;
+
+ /* Check admin_reply queue first to exit early */
+ if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
+ op_admin_in_use = true;
+ else {
+ /* Check op_reply queues */
+ int i;
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
+ op_admin_in_use = true;
+ break;
+ }
+ }
+ }
+
+ if (!op_admin_in_use)
+ break;
+
+ msleep(100);
+
+ } while (++elapsed_time < timeout);
+
+ if (op_admin_in_use)
+ return 1;
+
+ return 0;
+}
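/*
 * Annotation: this runs from mpi3mr_soft_reset_handler() after
 * io_admin_reset_sync is set and interrupts are disabled, so it only has
 * to wait for pollers already inside mpi3mr_process_admin_reply_q() or
 * mpi3mr_process_op_reply_q() to notice the flag and drop their in_use
 * claim before queue memory is torn down and reinitialized.
 */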
+
+/**
* mpi3mr_soft_reset_handler - Reset the controller
* @mrioc: Adapter instance reference
* @reset_reason: Reset reason code
@@ -5308,6 +5419,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
mpi3mr_ioc_disable_intr(mrioc);
+ mrioc->io_admin_reset_sync = 1;
if (snapdump) {
mpi3mr_set_diagsave(mrioc);
@@ -5335,6 +5447,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+
+ retval = mpi3mr_check_op_admin_proc(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
+ "thread still processing replies even after a 10 second\n"
+ "timeout. Marking the controller as unrecoverable!\n");
+
+ goto out;
+ }
+
if (mrioc->num_io_throttle_group !=
mrioc->facts.max_io_throttle_group) {
ioc_err(mrioc,