From f64dad2628bdf62eac7ac145a6e31430376b65e4 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 14 Dec 2016 18:45:58 -0800 Subject: scsi: storvsc: Enable tracking of queue depth Enable tracking of queue depth. Signed-off-by: K. Y. Srinivasan Reviewed-by: Long Li Signed-off-by: Martin K. Petersen --- drivers/scsi/storvsc_drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 05526b71541b..ccb2101c6385 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1550,6 +1550,7 @@ static struct scsi_host_template scsi_driver = { /* Make sure we dont get a sg segment crosses a page boundary */ .dma_boundary = PAGE_SIZE-1, .no_write_same = 1, + .track_queue_depth = 1, }; enum { -- cgit v1.2.3-59-g8ed1b From 977965283526dd2e887331365da19b05c909a966 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 14 Dec 2016 18:45:59 -0800 Subject: scsi: storvsc: Remove the restriction on max segment size Remove the artificially imposed restriction on max segment size. Signed-off-by: K. Y. Srinivasan Reviewed-by: Long Li Signed-off-by: Martin K. Petersen --- drivers/scsi/storvsc_drv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ccb2101c6385..3b1c2f6528b4 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1267,8 +1267,6 @@ static int storvsc_do_io(struct hv_device *device, static int storvsc_device_configure(struct scsi_device *sdevice) { - blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE); - blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY); blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); -- cgit v1.2.3-59-g8ed1b From d86adf482b843b3a58a9ec3b7c1ccdbf7c705db1 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 14 Dec 2016 18:46:00 -0800 Subject: scsi: storvsc: Enable multi-queue support Enable multi-q support. We will allocate the outgoing channel using the following policy: 1. We will make every effort to pick a channel that is in the same NUMA node that is initiating the I/O 2. The mapping between the guest CPU and the outgoing channel is persistent. Signed-off-by: K. Y. Srinivasan Reviewed-by: Long Li Signed-off-by: Martin K. Petersen --- drivers/scsi/storvsc_drv.c | 113 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 110 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 3b1c2f6528b4..63f6b1ad57c5 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -458,6 +458,15 @@ struct storvsc_device { * Max I/O, the device can support. */ u32 max_transfer_bytes; + /* + * Number of sub-channels we will open. + */ + u16 num_sc; + struct vmbus_channel **stor_chns; + /* + * Mask of CPUs bound to subchannels. 
+ */ + struct cpumask alloced_cpus; /* Used for vsc/vsp channel reset process */ struct storvsc_cmd_request init_request; struct storvsc_cmd_request reset_request; @@ -635,6 +644,11 @@ static void handle_sc_creation(struct vmbus_channel *new_sc) (void *)&props, sizeof(struct vmstorage_channel_properties), storvsc_on_channel_callback, new_sc); + + if (new_sc->state == CHANNEL_OPENED_STATE) { + stor_device->stor_chns[new_sc->target_cpu] = new_sc; + cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus); + } } static void handle_multichannel_storage(struct hv_device *device, int max_chns) @@ -651,6 +665,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) if (!stor_device) return; + stor_device->num_sc = num_sc; request = &stor_device->init_request; vstor_packet = &request->vstor_packet; @@ -838,6 +853,25 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc) * support multi-channel. */ max_chns = vstor_packet->storage_channel_properties.max_channel_cnt; + + /* + * Allocate state to manage the sub-channels. + * We allocate an array based on the numbers of possible CPUs + * (Hyper-V does not support cpu online/offline). + * This Array will be sparseley populated with unique + * channels - primary + sub-channels. + * We will however populate all the slots to evenly distribute + * the load. + */ + stor_device->stor_chns = kzalloc(sizeof(void *) * num_possible_cpus(), + GFP_KERNEL); + if (stor_device->stor_chns == NULL) + return -ENOMEM; + + stor_device->stor_chns[device->channel->target_cpu] = device->channel; + cpumask_set_cpu(device->channel->target_cpu, + &stor_device->alloced_cpus); + if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) { if (vstor_packet->storage_channel_properties.flags & STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) @@ -1198,17 +1232,64 @@ static int storvsc_dev_remove(struct hv_device *device) /* Close the channel */ vmbus_close(device->channel); + kfree(stor_device->stor_chns); kfree(stor_device); return 0; } +static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device, + u16 q_num) +{ + u16 slot = 0; + u16 hash_qnum; + struct cpumask alloced_mask; + int num_channels, tgt_cpu; + + if (stor_device->num_sc == 0) + return stor_device->device->channel; + + /* + * Our channel array is sparsley populated and we + * initiated I/O on a processor/hw-q that does not + * currently have a designated channel. Fix this. + * The strategy is simple: + * I. Ensure NUMA locality + * II. Distribute evenly (best effort) + * III. Mapping is persistent. 
+ */ + + cpumask_and(&alloced_mask, &stor_device->alloced_cpus, + cpumask_of_node(cpu_to_node(q_num))); + + num_channels = cpumask_weight(&alloced_mask); + if (num_channels == 0) + return stor_device->device->channel; + + hash_qnum = q_num; + while (hash_qnum >= num_channels) + hash_qnum -= num_channels; + + for_each_cpu(tgt_cpu, &alloced_mask) { + if (slot == hash_qnum) + break; + slot++; + } + + stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu]; + + return stor_device->stor_chns[q_num]; +} + + static int storvsc_do_io(struct hv_device *device, - struct storvsc_cmd_request *request) + struct storvsc_cmd_request *request, u16 q_num) { struct storvsc_device *stor_device; struct vstor_packet *vstor_packet; struct vmbus_channel *outgoing_channel; int ret = 0; + struct cpumask alloced_mask; + int tgt_cpu; vstor_packet = &request->vstor_packet; stor_device = get_out_stor_device(device); @@ -1222,7 +1303,26 @@ static int storvsc_do_io(struct hv_device *device, * Select an an appropriate channel to send the request out. */ - outgoing_channel = vmbus_get_outgoing_channel(device->channel); + if (stor_device->stor_chns[q_num] != NULL) { + outgoing_channel = stor_device->stor_chns[q_num]; + if (outgoing_channel->target_cpu == smp_processor_id()) { + /* + * Ideally, we want to pick a different channel if + * available on the same NUMA node. + */ + cpumask_and(&alloced_mask, &stor_device->alloced_cpus, + cpumask_of_node(cpu_to_node(q_num))); + for_each_cpu(tgt_cpu, &alloced_mask) { + if (tgt_cpu != outgoing_channel->target_cpu) { + outgoing_channel = + stor_device->stor_chns[tgt_cpu]; + break; + } + } + } + } else { + outgoing_channel = get_og_chn(stor_device, q_num); + } vstor_packet->flags |= REQUEST_COMPLETION_FLAG; @@ -1522,7 +1622,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) cmd_request->payload_sz = payload_sz; /* Invokes the vsc to start an IO */ - ret = storvsc_do_io(dev, cmd_request); + ret = storvsc_do_io(dev, cmd_request, get_cpu()); + put_cpu(); if (ret == -EAGAIN) { /* no more space */ @@ -1679,6 +1780,11 @@ static int storvsc_probe(struct hv_device *device, * from the host. */ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); + /* + * Set the number of HW queues we are supporting. + */ + if (stor_device->num_sc != 0) + host->nr_hw_queues = stor_device->num_sc + 1; /* Register the HBA and start the scsi bus scan */ ret = scsi_add_host(host, &device->device); @@ -1715,6 +1821,7 @@ err_out2: goto err_out0; err_out1: + kfree(stor_device->stor_chns); kfree(stor_device); err_out0: -- cgit v1.2.3-59-g8ed1b From 3cd6d3d9b1abab8dcdf0800224ce26daac24eea2 Mon Sep 17 00:00:00 2001 From: Long Li Date: Wed, 14 Dec 2016 18:46:01 -0800 Subject: scsi: storvsc: use tagged SRB requests if supported by the device Properly set SRB flags when hosting device supports tagged queuing. This patch improves the performance on Fiber Channel disks. Signed-off-by: Long Li Reviewed-by: K. Y. Srinivasan Signed-off-by: K. Y. Srinivasan Cc: Signed-off-by: Martin K. 
Petersen --- drivers/scsi/storvsc_drv.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 63f6b1ad57c5..320938720544 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -136,6 +136,8 @@ struct hv_fc_wwn_packet { #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 +#define SP_UNTAGGED ((unsigned char) ~0) +#define SRB_SIMPLE_TAG_REQUEST 0x20 /* * Platform neutral description of a scsi request - @@ -1549,6 +1551,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DISABLE_SYNCH_TRANSFER; + if (scmnd->device->tagged_supported) { + vm_srb->win8_extension.srb_flags |= + (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE); + vm_srb->win8_extension.queue_tag = SP_UNTAGGED; + vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST; + } + /* Build the SRB */ switch (scmnd->sc_data_direction) { case DMA_TO_DEVICE: -- cgit v1.2.3-59-g8ed1b From bba5dc332ec2d3a685cb4dae668c793f6a3713a3 Mon Sep 17 00:00:00 2001 From: Long Li Date: Wed, 14 Dec 2016 18:46:02 -0800 Subject: scsi: storvsc: properly handle SRB_ERROR when sense message is present When sense message is present on error, we should pass along to the upper layer to decide how to deal with the error. This patch fixes connectivity issues with Fiber Channel devices. Signed-off-by: Long Li Reviewed-by: K. Y. Srinivasan Signed-off-by: K. Y. Srinivasan Cc: Signed-off-by: Martin K. Petersen --- drivers/scsi/storvsc_drv.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 320938720544..3c92dc2a25a0 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -924,6 +924,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, switch (SRB_STATUS(vm_srb->srb_status)) { case SRB_STATUS_ERROR: + /* + * Let upper layer deal with error when + * sense message is present. + */ + + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) + break; /* * If there is an error; offline the device since all * error recovery strategies would have already been -- cgit v1.2.3-59-g8ed1b From 40630f462824ee24bc00d692865c86c3828094e0 Mon Sep 17 00:00:00 2001 From: Long Li Date: Wed, 14 Dec 2016 18:46:03 -0800 Subject: scsi: storvsc: properly set residual data length on errors On I/O errors, the Windows driver doesn't set data_transfer_length on error conditions other than SRB_STATUS_DATA_OVERRUN. In these cases we need to set data_transfer_length to 0, indicating there is no data transferred. On SRB_STATUS_DATA_OVERRUN, data_transfer_length is set by the Windows driver to the actual data transferred. Reported-by: Shiva Krishna Signed-off-by: Long Li Reviewed-by: K. Y. Srinivasan Signed-off-by: K. Y. Srinivasan Cc: Signed-off-by: Martin K. 
Petersen --- drivers/scsi/storvsc_drv.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 3c92dc2a25a0..888e16ec8554 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -377,6 +377,7 @@ enum storvsc_request_type { #define SRB_STATUS_SUCCESS 0x01 #define SRB_STATUS_ABORTED 0x02 #define SRB_STATUS_ERROR 0x04 +#define SRB_STATUS_DATA_OVERRUN 0x12 #define SRB_STATUS(status) \ (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) @@ -996,6 +997,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, struct scsi_cmnd *scmnd = cmd_request->cmd; struct scsi_sense_hdr sense_hdr; struct vmscsi_request *vm_srb; + u32 data_transfer_length; struct Scsi_Host *host; u32 payload_sz = cmd_request->payload_sz; void *payload = cmd_request->payload; @@ -1003,6 +1005,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, host = stor_dev->host; vm_srb = &cmd_request->vstor_packet.vm_srb; + data_transfer_length = vm_srb->data_transfer_length; scmnd->result = vm_srb->scsi_status; @@ -1016,13 +1019,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, &sense_hdr); } - if (vm_srb->srb_status != SRB_STATUS_SUCCESS) + if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, sense_hdr.ascq); + /* + * The Windows driver set data_transfer_length on + * SRB_STATUS_DATA_OVERRUN. On other errors, this value + * is untouched. In these cases we set it to 0. + */ + if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN) + data_transfer_length = 0; + } scsi_set_resid(scmnd, - cmd_request->payload->range.len - - vm_srb->data_transfer_length); + cmd_request->payload->range.len - data_transfer_length); scmnd->scsi_done(scmnd); -- cgit v1.2.3-59-g8ed1b From e0165f20447c8ca1d367725ee94d8ec9f38ca275 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:20 -0800 Subject: scsi: lpfc: Clear the VendorVersion in the PLOGI/PLOGI ACC payload Clear the VendorVersion in the PLOGI/PLOGI ACC payload Vendor version info may have been set on fabric login. Before sending PLOGI payloads, ensure that it's cleared. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_els.c | 6 ++++++ drivers/scsi/lpfc/lpfc_hw.h | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 236e4e51d161..27f0cbb9b278 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1999,6 +1999,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", did, 0, 0); @@ -3988,6 +3991,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } else { memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); + + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 822654322e67..3b970d370600 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -360,6 +360,12 @@ struct csp { * Word 1 Bit 30 in PLOGI request is random offset */ #define virtual_fabric_support randomOffset /* Word 1, bit 30 */ +/* + * Word 1 Bit 29 in common service parameter is overloaded. + * Word 1 Bit 29 in FLOGI response is multiple NPort assignment + * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level + */ +#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */ #ifdef __BIG_ENDIAN_BITFIELD uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ -- cgit v1.2.3-59-g8ed1b From b2fd103b05feabbc86db726ae121da94639892d7 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:21 -0800 Subject: scsi: lpfc: Correct error in setting OS Driver Version with FW Correct error in setting OS Driver Version with FW. Prior length was too short. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4faa7672fc1d..f895cb9c89a3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -6306,7 +6306,8 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) LPFC_SLI4_MBX_EMBED); mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; - mbox->u.mqe.un.set_host_data.param_len = 8; + mbox->u.mqe.un.set_host_data.param_len = + LPFC_HOST_OS_DRIVER_VERSION_SIZE; snprintf(mbox->u.mqe.un.set_host_data.data, LPFC_HOST_OS_DRIVER_VERSION_SIZE, "Linux %s v"LPFC_DRIVER_VERSION, -- cgit v1.2.3-59-g8ed1b From 8c6a6f4076984ab15c33c2a52a10b681359f5a2a Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:22 -0800 Subject: scsi: lpfc: Deprecate lpfc_soft_wwn parameter Deprecate lpfc_soft_wwn parameter. No longer allow override of hw-assigned wwns Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc.h | 4 - drivers/scsi/lpfc/lpfc_attr.c | 216 ------------------------------------------ drivers/scsi/lpfc/lpfc_init.c | 16 +--- 3 files changed, 3 insertions(+), 233 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 8a20b4e86224..341b656e9d9d 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -729,8 +729,6 @@ struct lpfc_hba { uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; - uint64_t cfg_soft_wwnn; - uint64_t cfg_soft_wwpn; uint32_t cfg_hba_queue_depth; uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; @@ -835,8 +833,6 @@ struct lpfc_hba { #define VPD_PORT 0x8 /* valid vpd port data */ #define VPD_MASK 0xf /* mask for any vpd data */ - uint8_t soft_wwn_enable; - struct timer_list fcp_poll_timer; struct timer_list eratt_poll; uint32_t eratt_poll_interval; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c84775562c65..98c40a734a36 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1987,7 +1987,6 @@ static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, NULL); -static char *lpfc_soft_wwn_key = "C99G71SL8032A"; #define WWN_SZ 8 /** * lpfc_wwn_set - Convert string to the 8 byte WWN value. @@ -2031,216 +2030,6 @@ lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) } return 0; } -/** - * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid - * @dev: class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: containing the string lpfc_soft_wwn_key. - * @count: must be size of lpfc_soft_wwn_key. - * - * Returns: - * -EINVAL if the buffer does not contain lpfc_soft_wwn_key - * length of buf indicates success - **/ -static ssize_t -lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - unsigned int cnt = count; - - /* - * We're doing a simple sanity check for soft_wwpn setting. - * We require that the user write a specific key to enable - * the soft_wwpn attribute to be settable. Once the attribute - * is written, the enable key resets. If further updates are - * desired, the key must be written again to re-enable the - * attribute. - * - * The "key" is not secret - it is a hardcoded string shown - * here. The intent is to protect against the random user or - * application that is just writing attributes. - */ - - /* count may include a LF at end of string */ - if (buf[cnt-1] == '\n') - cnt--; - - if ((cnt != strlen(lpfc_soft_wwn_key)) || - (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0)) - return -EINVAL; - - phba->soft_wwn_enable = 1; - return count; -} -static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, - lpfc_soft_wwn_enable_store); - -/** - * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter - * @dev: class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: on return contains the wwpn in hexadecimal. - * - * Returns: size of formatted string. 
- **/ -static ssize_t -lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - - return snprintf(buf, PAGE_SIZE, "0x%llx\n", - (unsigned long long)phba->cfg_soft_wwpn); -} - -/** - * lpfc_soft_wwpn_store - Set the ww port name of the adapter - * @dev class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: contains the wwpn in hexadecimal. - * @count: number of wwpn bytes in buf - * - * Returns: - * -EACCES hba reset not enabled, adapter over temp - * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid - * -EIO error taking adapter offline or online - * value of count on success - **/ -static ssize_t -lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - struct completion online_compl; - int stat1 = 0, stat2 = 0; - unsigned int cnt = count; - u8 wwpn[WWN_SZ]; - int rc; - - if (!phba->cfg_enable_hba_reset) - return -EACCES; - spin_lock_irq(&phba->hbalock); - if (phba->over_temp_state == HBA_OVER_TEMP) { - spin_unlock_irq(&phba->hbalock); - return -EACCES; - } - spin_unlock_irq(&phba->hbalock); - /* count may include a LF at end of string */ - if (buf[cnt-1] == '\n') - cnt--; - - if (!phba->soft_wwn_enable) - return -EINVAL; - - /* lock setting wwpn, wwnn down */ - phba->soft_wwn_enable = 0; - - rc = lpfc_wwn_set(buf, cnt, wwpn); - if (!rc) { - /* not able to set wwpn, unlock it */ - phba->soft_wwn_enable = 1; - return rc; - } - - phba->cfg_soft_wwpn = wwn_to_u64(wwpn); - fc_host_port_name(shost) = phba->cfg_soft_wwpn; - if (phba->cfg_soft_wwnn) - fc_host_node_name(shost) = phba->cfg_soft_wwnn; - - dev_printk(KERN_NOTICE, &phba->pcidev->dev, - "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); - - stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (stat1) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0463 lpfc_soft_wwpn attribute set failed to " - "reinit adapter - %d\n", stat1); - init_completion(&online_compl); - rc = lpfc_workq_post_event(phba, &stat2, &online_compl, - LPFC_EVT_ONLINE); - if (rc == 0) - return -ENOMEM; - - wait_for_completion(&online_compl); - if (stat2) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0464 lpfc_soft_wwpn attribute set failed to " - "reinit adapter - %d\n", stat2); - return (stat1 || stat2) ? -EIO : count; -} -static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR, - lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); - -/** - * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter - * @dev: class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: on return contains the wwnn in hexadecimal. - * - * Returns: size of formatted string. - **/ -static ssize_t -lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - return snprintf(buf, PAGE_SIZE, "0x%llx\n", - (unsigned long long)phba->cfg_soft_wwnn); -} - -/** - * lpfc_soft_wwnn_store - sets the ww node name of the adapter - * @cdev: class device that is converted into a Scsi_host. - * @buf: contains the ww node name in hexadecimal. 
- * @count: number of wwnn bytes in buf. - * - * Returns: - * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid - * value of count on success - **/ -static ssize_t -lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - unsigned int cnt = count; - u8 wwnn[WWN_SZ]; - int rc; - - /* count may include a LF at end of string */ - if (buf[cnt-1] == '\n') - cnt--; - - if (!phba->soft_wwn_enable) - return -EINVAL; - - rc = lpfc_wwn_set(buf, cnt, wwnn); - if (!rc) { - /* Allow wwnn to be set many times, as long as the enable - * is set. However, once the wwpn is set, everything locks. - */ - return rc; - } - - phba->cfg_soft_wwnn = wwn_to_u64(wwnn); - - dev_printk(KERN_NOTICE, &phba->pcidev->dev, - "lpfc%d: soft_wwnn set. Value will take effect upon " - "setting of the soft_wwpn\n", phba->brd_no); - - return count; -} -static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR, - lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); /** * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for @@ -4750,9 +4539,6 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_io_channel, &dev_attr_lpfc_enable_bg, - &dev_attr_lpfc_soft_wwnn, - &dev_attr_lpfc_soft_wwpn, - &dev_attr_lpfc_soft_wwn_enable, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, &dev_attr_lpfc_EnableXLane, @@ -5765,8 +5551,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) else phba->cfg_poll = lpfc_poll; - phba->cfg_soft_wwnn = 0L; - phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 4776fd85514f..43d2b06bdc82 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -319,36 +319,26 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /** * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, - * cfg_soft_wwnn, cfg_soft_wwpn * @vport: pointer to lpfc vport data structure. * - * * Return codes * None. 
**/ void lpfc_update_vport_wwn(struct lpfc_vport *vport) { - /* If the soft name exists then update it using the service params */ - if (vport->phba->cfg_soft_wwnn) - u64_to_wwn(vport->phba->cfg_soft_wwnn, - vport->fc_sparam.nodeName.u.wwn); - if (vport->phba->cfg_soft_wwpn) - u64_to_wwn(vport->phba->cfg_soft_wwpn, - vport->fc_sparam.portName.u.wwn); - /* - * If the name is empty or there exists a soft name + * If the name is empty * then copy the service params name, otherwise use the fc name */ - if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) + if (vport->fc_nodename.u.wwn[0] == 0) memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); else memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); - if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn) + if (vport->fc_portname.u.wwn[0] == 0) memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); else -- cgit v1.2.3-59-g8ed1b From e6c6acc0e0223ddaf867628d420ee196349c6fae Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:23 -0800 Subject: scsi: lpfc: Correct issue leading to oops during link reset Correct issue leading to oops during link reset. Missing vport pointer. [mkp: fixed typo] Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f895cb9c89a3..6b75a8fced33 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10029,6 +10029,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt->ulpCommand = CMD_CLOSE_XRI_CN; abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; + abtsiocbp->vport = vport; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0339 Abort xri x%x, original iotag x%x, " -- cgit v1.2.3-59-g8ed1b From 6c9231f604c2575be24c96d38deb70f145172f92 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:24 -0800 Subject: scsi: lpfc: Correct host name in symbolic_name field Correct host name in symbolic_name field of nameserver registrations Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 98c40a734a36..f3250ad6b3eb 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4846,6 +4846,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport) * Dynamic FC Host Attributes Support */ +/** + * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host + * @shost: kernel scsi host pointer. + **/ +static void +lpfc_get_host_symbolic_name(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + + lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), + sizeof fc_host_symbolic_name(shost)); +} + /** * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id * @shost: kernel scsi host pointer. 
@@ -5383,6 +5396,8 @@ struct fc_function_template lpfc_transport_functions = { .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ @@ -5450,6 +5465,8 @@ struct fc_function_template lpfc_vport_transport_functions = { .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ -- cgit v1.2.3-59-g8ed1b From 104450eb08ca662e6b1d02da11aca9598e978f3e Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:25 -0800 Subject: scsi: lpfc: FCoE VPort enable-disable does not bring up the VPort FCoE VPort enable-disable does not bring up the VPort. VPI structure needed to be initialized before being re-registered. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_vport.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index c27f4b724547..e18bbc66e83b 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -537,6 +537,12 @@ enable_vport(struct fc_vport *fc_vport) spin_lock_irq(shost->host_lock); vport->load_flag |= FC_LOADING; + if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { + spin_unlock_irq(shost->host_lock); + lpfc_issue_init_vpi(vport); + goto out; + } + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); @@ -557,6 +563,8 @@ enable_vport(struct fc_vport *fc_vport) } else { lpfc_vport_set_state(vport, FC_VPORT_FAILED); } + +out: lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1827 Vport Enabled.\n"); return VPORT_OK; -- cgit v1.2.3-59-g8ed1b From b5749fe182a0fa1c39ea1b33c0691d436f269195 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:26 -0800 Subject: scsi: lpfc: Fix Xlane dynamic LUN set for LUN priority. Fix Xlane dynamic LUN set for LUN priority. Dynamic changing of the priority was not getting reflected on the LUN. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 23 +++++++++++++++-------- drivers/scsi/lpfc/lpfc_crtn.h | 7 ++++--- drivers/scsi/lpfc/lpfc_scsi.c | 18 ++++++++++++------ 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f3250ad6b3eb..c30fafe3bb31 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -2224,7 +2224,8 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, else phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; phba->cfg_oas_flags &= ~OAS_LUN_VALID; - phba->cfg_oas_priority = phba->cfg_XLanePriority; + if (phba->cfg_oas_priority == 0) + phba->cfg_oas_priority = phba->cfg_XLanePriority; phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; return count; } @@ -2350,7 +2351,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], rc = -ENOMEM; } else { lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, - (struct lpfc_name *)tgt_wwpn, lun); + (struct lpfc_name *)tgt_wwpn, lun, pri); } return rc; @@ -2374,7 +2375,8 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], */ static uint64_t lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], - uint8_t tgt_wwpn[], uint32_t *lun_status) + uint8_t tgt_wwpn[], uint32_t *lun_status, + uint32_t *lun_pri) { uint64_t found_lun; @@ -2387,7 +2389,7 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], &phba->sli4_hba.oas_next_lun, (struct lpfc_name *)vpt_wwpn, (struct lpfc_name *)tgt_wwpn, - &found_lun, lun_status)) + &found_lun, lun_status, lun_pri)) return found_lun; else return NOT_OAS_ENABLED_LUN; @@ -2459,7 +2461,8 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, phba->cfg_oas_tgt_wwpn, - &phba->cfg_oas_lun_status); + &phba->cfg_oas_lun_status, + &phba->cfg_oas_priority); if (oas_lun != NOT_OAS_ENABLED_LUN) phba->cfg_oas_flags |= OAS_LUN_VALID; @@ -2490,6 +2493,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; uint64_t scsi_lun; + uint32_t pri; ssize_t rc; if (!phba->cfg_fof) @@ -2507,17 +2511,20 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, if (sscanf(buf, "0x%llx", &scsi_lun) != 1) return -EINVAL; + pri = phba->cfg_oas_priority; + if (pri == 0) + pri = phba->cfg_XLanePriority; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " "priority 0x%x with oas state %d\n", wwn_to_u64(phba->cfg_oas_vpt_wwpn), wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, - phba->cfg_oas_priority, phba->cfg_oas_lun_state); + pri, phba->cfg_oas_lun_state); rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, phba->cfg_oas_tgt_wwpn, scsi_lun, - phba->cfg_oas_lun_state, - phba->cfg_oas_priority); + phba->cfg_oas_lun_state, pri); if (rc) return rc; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 15d2bfdf582d..309643a2c55c 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -480,7 +480,7 @@ void lpfc_sli4_offline_eratt(struct lpfc_hba *); struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, - uint64_t, bool); + uint64_t, uint32_t, bool); void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*); struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba 
*, struct list_head *list, @@ -489,9 +489,10 @@ struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *, bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, uint64_t, uint8_t); bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, - struct lpfc_name *, uint64_t); + struct lpfc_name *, uint64_t, uint8_t); bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, uint64_t *, struct lpfc_name *, - struct lpfc_name *, uint64_t *, uint32_t *); + struct lpfc_name *, uint64_t *, + uint32_t *, uint32_t *); int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox); void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index ad350d969bdc..19d349fc889f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -5452,7 +5452,9 @@ lpfc_slave_alloc(struct scsi_device *sdev) device_data = lpfc_create_device_data(phba, &vport->fc_portname, &target_wwpn, - sdev->lun, true); + sdev->lun, + phba->cfg_XLanePriority, + true); if (!device_data) return -ENOMEM; spin_lock_irqsave(&phba->devicelock, flags); @@ -5587,7 +5589,7 @@ lpfc_slave_destroy(struct scsi_device *sdev) struct lpfc_device_data* lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct lpfc_name *target_wwpn, uint64_t lun, - bool atomic_create) + uint32_t pri, bool atomic_create) { struct lpfc_device_data *lun_info; @@ -5614,7 +5616,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, sizeof(struct lpfc_name)); lun_info->device_id.lun = lun; lun_info->oas_enabled = false; - lun_info->priority = phba->cfg_XLanePriority; + lun_info->priority = pri; lun_info->available = false; return lun_info; } @@ -5716,7 +5718,8 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct lpfc_name *found_vport_wwpn, struct lpfc_name *found_target_wwpn, uint64_t *found_lun, - uint32_t *found_lun_status) + uint32_t *found_lun_status, + uint32_t *found_lun_pri) { unsigned long flags; @@ -5763,6 +5766,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, OAS_LUN_STATUS_EXISTS; else *found_lun_status = 0; + *found_lun_pri = lun_info->priority; if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) memset(vport_wwpn, 0x0, sizeof(struct lpfc_name)); @@ -5824,13 +5828,14 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, if (lun_info) { if (!lun_info->oas_enabled) lun_info->oas_enabled = true; + lun_info->priority = pri; spin_unlock_irqrestore(&phba->devicelock, flags); return true; } /* Create an lun info structure and add to list of luns */ lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, - false); + pri, false); if (lun_info) { lun_info->oas_enabled = true; lun_info->priority = pri; @@ -5864,7 +5869,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, **/ bool lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, - struct lpfc_name *target_wwpn, uint64_t lun) + struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) { struct lpfc_device_data *lun_info; @@ -5882,6 +5887,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, target_wwpn, lun); if (lun_info) { lun_info->oas_enabled = false; + lun_info->priority = pri; if (!lun_info->available) lpfc_delete_device_data(phba, lun_info); spin_unlock_irqrestore(&phba->devicelock, flags); -- cgit 
v1.2.3-59-g8ed1b From f2bf460cf5ef989f0a593d05932460326376d5f6 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:27 -0800 Subject: scsi: lpfc: Deprecate lpfc_prot_sg_seg_cnt parameter Deprecate lpfc_prot_sg_seg_cnt parameter. Eliminates driver from unnecessarily limiting DIF s/g list length. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc.h | 1 - drivers/scsi/lpfc/lpfc_attr.c | 10 ---------- 2 files changed, 11 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 341b656e9d9d..c2e6ffd2f372 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -727,7 +727,6 @@ struct lpfc_hba { uint32_t cfg_fcp_io_channel; uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; - uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; uint32_t cfg_hba_queue_depth; uint32_t cfg_enable_hba_reset; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c30fafe3bb31..8301c08b8768 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4465,14 +4465,6 @@ LPFC_ATTR(delay_discovery, 0, 0, 1, LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); -/* - * This parameter will be depricated, the driver cannot limit the - * protection data s/g list. - */ -LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, - LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, - "Max Protection Scatter Gather Segment Count"); - /* * lpfc_enable_mds_diags: Enable MDS Diagnostics * 0 = MDS Diagnostics disabled (default) @@ -4559,7 +4551,6 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_sg_seg_cnt, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, - &dev_attr_lpfc_prot_sg_seg_cnt, &dev_attr_lpfc_aer_support, &dev_attr_lpfc_aer_state_cleanup, &dev_attr_lpfc_sriov_nr_virtfn, @@ -5576,7 +5567,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_poll = lpfc_poll; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); - lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_aer_support_init(phba, lpfc_aer_support); -- cgit v1.2.3-59-g8ed1b From 2f07784f05c23b322863274e801c31aa64e46c71 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:29 -0800 Subject: scsi: lpfc: Correct oops on vport port resets Correct oops on vport port resets. Incorrect WQE type, thus the clearing code actually overstepped the WQE. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6b75a8fced33..7e11cbdb238c 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -17221,7 +17221,8 @@ lpfc_drain_txq(struct lpfc_hba *phba) unsigned long iflags = 0; char *fail_msg = NULL; struct lpfc_sglq *sglq; - union lpfc_wqe wqe; + union lpfc_wqe128 wqe128; + union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128; uint32_t txq_cnt = 0; spin_lock_irqsave(&pring->ring_lock, iflags); @@ -17260,9 +17261,9 @@ lpfc_drain_txq(struct lpfc_hba *phba) piocbq->sli4_xritag = sglq->sli4_xritag; if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) fail_msg = "to convert bpl to sgl"; - else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) + else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe)) fail_msg = "to convert iocb to wqe"; - else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) + else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe)) fail_msg = " - Wq is full"; else lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); -- cgit v1.2.3-59-g8ed1b From 6b3b3bdb83b4ad51252d21bb13596db879e51850 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:30 -0800 Subject: scsi: lpfc: Add missing memory barrier On loosely ordered memory systems (PPC for example), the WQE elements were being updated in memory, but not necessarily flushed before the separate doorbell was written to hw which would cause hw to dma the WQE element. Thus, the hardware occasionally received partially updated WQE data. Add the memory barrier after updating the WQE memory. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_sli.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 7e11cbdb238c..3a8470d55af5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -120,6 +120,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); + /* ensure WQE bcopy flushed before doorbell write */ + wmb(); /* Update the host index before invoking device */ host_index = q->host_index; -- cgit v1.2.3-59-g8ed1b From 4b089d18ed979d91424355b117070f2520f85aeb Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 19 Dec 2016 15:07:31 -0800 Subject: scsi: lpfc: lpfc version change to 11.2.0.4 lpfc version change to 11.2.0.4 Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 50bfc43ebcb0..0ee0623a354c 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.2.0.2" +#define LPFC_DRIVER_VERSION "11.2.0.4" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ -- cgit v1.2.3-59-g8ed1b From 93380123fbb5357d3ea6935efc9226df3c59099d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 16 Dec 2016 17:04:49 -0800 Subject: scsi: hpsa: use designated initializers Prepare to mark sensitive kernel structures for randomization by making sure they're using designated initializers. These were identified during allyesconfig builds of x86, arm, and arm64, with most initializer fixes extracted from grsecurity. Signed-off-by: Kees Cook Acked-by: Don Brace Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.h | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 64e98295b707..bf6cdc106654 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -578,38 +578,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) } static struct access_method SA5_access = { - SA5_submit_command, - SA5_intr_mask, - SA5_intr_pending, - SA5_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, }; static struct access_method SA5_ioaccel_mode1_access = { - SA5_submit_command, - SA5_performant_intr_mask, - SA5_ioaccel_mode1_intr_pending, - SA5_ioaccel_mode1_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_ioaccel_mode1_intr_pending, + .command_completed = SA5_ioaccel_mode1_completed, }; static struct access_method SA5_ioaccel_mode2_access = { - SA5_submit_command_ioaccel2, - SA5_performant_intr_mask, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command_ioaccel2, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; static struct access_method SA5_performant_access = { - SA5_submit_command, - SA5_performant_intr_mask, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; static struct access_method SA5_performant_access_no_read = { - SA5_submit_command_no_read, - SA5_performant_intr_mask, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command_no_read, + .set_intr_mask = SA5_performant_intr_mask, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; struct board_type { -- cgit v1.2.3-59-g8ed1b From 5f5fca6db3d3e6a2c1a2ae92cbecee50e1242a3a Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 16 Dec 2016 17:05:30 -0800 Subject: scsi: cciss: use designated initializers Prepare to mark sensitive kernel structures for randomization by making sure they're using designated initializers. These were identified during allyesconfig builds of x86, arm, and arm64, with most initializer fixes extracted from grsecurity. Signed-off-by: Kees Cook Signed-off-by: Martin K. 
Petersen --- drivers/block/cciss.h | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 7fda30e4a241..428766d1ce80 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h) } static struct access_method SA5_access = { - SA5_submit_command, - SA5_intr_mask, - SA5_fifo_full, - SA5_intr_pending, - SA5_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_intr_mask, + .fifo_full = SA5_fifo_full, + .intr_pending = SA5_intr_pending, + .command_completed = SA5_completed, }; static struct access_method SA5B_access = { - SA5_submit_command, - SA5B_intr_mask, - SA5_fifo_full, - SA5B_intr_pending, - SA5_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5B_intr_mask, + .fifo_full = SA5_fifo_full, + .intr_pending = SA5B_intr_pending, + .command_completed = SA5_completed, }; static struct access_method SA5_performant_access = { - SA5_submit_command, - SA5_performant_intr_mask, - SA5_fifo_full, - SA5_performant_intr_pending, - SA5_performant_completed, + .submit_command = SA5_submit_command, + .set_intr_mask = SA5_performant_intr_mask, + .fifo_full = SA5_fifo_full, + .intr_pending = SA5_performant_intr_pending, + .command_completed = SA5_performant_completed, }; struct board_type { -- cgit v1.2.3-59-g8ed1b From e0075478ca7c58c6ad6630310aaa3f64a25dcf31 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 15 Dec 2016 15:12:16 +0100 Subject: scsi: mptlan: Remove linux/miscdevice.h from mptlan.h This patch remove linux/miscdevice.h from mptlan.h since mptlan.h does not contain any miscdevice. The only user of it is mptctl.c which already include linux/miscdevice.h So no need to include it twice. Signed-off-by: Corentin Labbe Signed-off-by: Martin K. Petersen --- drivers/message/fusion/mptlan.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h index 8946e19dbfc8..8a24494f8c4d 100644 --- a/drivers/message/fusion/mptlan.h +++ b/drivers/message/fusion/mptlan.h @@ -65,7 +65,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3-59-g8ed1b From 19099dc393f3ca1fd9690f914e50278cf2aee78a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 16 Dec 2016 12:35:39 +0300 Subject: scsi: dpt_i2o: double free if adpt_i2o_online_hba() fails There are two places where adpt_i2o_online_hba() is called. Both callers call adpt_i2o_delete_hba(pHba) if adpt_i2o_online_hba() fails and since we also free it here that causes a double free bug. Signed-off-by: Dan Carpenter Signed-off-by: Martin K. 
Petersen --- drivers/scsi/dpt_i2o.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 5f75e638ec95..256dd6791fcc 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -2768,16 +2768,12 @@ static int adpt_i2o_activate_hba(adpt_hba* pHba) static int adpt_i2o_online_hba(adpt_hba* pHba) { - if (adpt_i2o_systab_send(pHba) < 0) { - adpt_i2o_delete_hba(pHba); + if (adpt_i2o_systab_send(pHba) < 0) return -1; - } /* In READY state */ - if (adpt_i2o_enable_hba(pHba) < 0) { - adpt_i2o_delete_hba(pHba); + if (adpt_i2o_enable_hba(pHba) < 0) return -1; - } /* In OPERATIONAL state */ return 0; -- cgit v1.2.3-59-g8ed1b From f3505013779646704f81b41c011ab089b26c3f3e Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:55:54 +0530 Subject: scsi: be2iscsi: Fix use of invalidate command table req Remove shared structure inv_tbl in phba for all sessions to post invalidation IOCTL. Always allocate and then free the table after use in reset handler. Abort handler needs just one instance so define it on stack. Add checks for BE_INVLDT_CMD_TBL_SZ to not exceed invalidation command table size in IOCTL. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be_main.c | 85 ++++++++++++++++++++++++----------------- drivers/scsi/be2iscsi/be_main.h | 16 ++++---- drivers/scsi/be2iscsi/be_mgmt.c | 12 +++--- drivers/scsi/be2iscsi/be_mgmt.h | 40 +++++++++---------- 4 files changed, 83 insertions(+), 70 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index b5112d6d7e73..3811a0d230f2 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -225,9 +225,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) struct beiscsi_conn *beiscsi_conn; struct beiscsi_hba *phba; struct iscsi_session *session; - struct invalidate_command_table *inv_tbl; + struct invldt_cmd_tbl inv_tbl; struct be_dma_mem nonemb_cmd; - unsigned int cid, tag, num_invalidate; + unsigned int cid, tag; int rc; cls_session = starget_to_session(scsi_target(sc->device)); @@ -247,35 +247,30 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) return SUCCESS; } spin_unlock_bh(&session->frwd_lock); - /* Invalidate WRB Posted for this Task */ - AMAP_SET_BITS(struct amap_iscsi_wrb, invld, - aborted_io_task->pwrb_handle->pwrb, - 1); conn = aborted_task->conn; beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; - + /* Invalidate WRB Posted for this Task */ + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + aborted_io_task->pwrb_handle->pwrb, + 1); /* invalidate iocb */ cid = beiscsi_conn->beiscsi_conn_cid; - inv_tbl = phba->inv_tbl; - memset(inv_tbl, 0x0, sizeof(*inv_tbl)); - inv_tbl->cid = cid; - inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; - num_invalidate = 1; - nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(struct invalidate_commands_params_in), - &nonemb_cmd.dma); + inv_tbl.cid = cid; + inv_tbl.icd = aborted_io_task->psgl_handle->sgl_index; + nonemb_cmd.size = sizeof(union be_invldt_cmds_params); + nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, + nonemb_cmd.size, + &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : Failed to allocate memory for" "mgmt_invalidate_icds\n"); return FAILED; } - nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); - tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, - cid, &nonemb_cmd); + 
tag = mgmt_invalidate_icds(phba, &inv_tbl, 1, cid, &nonemb_cmd); if (!tag) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, "BM_%d : mgmt_invalidate_icds could not be" @@ -303,10 +298,10 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) struct beiscsi_hba *phba; struct iscsi_session *session; struct iscsi_cls_session *cls_session; - struct invalidate_command_table *inv_tbl; + struct invldt_cmd_tbl *inv_tbl; struct be_dma_mem nonemb_cmd; - unsigned int cid, tag, i, num_invalidate; - int rc; + unsigned int cid, tag, i, nents; + int rc, more = 0; /* invalidate iocbs */ cls_session = starget_to_session(scsi_target(sc->device)); @@ -320,9 +315,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; cid = beiscsi_conn->beiscsi_conn_cid; - inv_tbl = phba->inv_tbl; - memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); - num_invalidate = 0; + + inv_tbl = kcalloc(BE_INVLDT_CMD_TBL_SZ, sizeof(*inv_tbl), GFP_KERNEL); + if (!inv_tbl) { + spin_unlock_bh(&session->frwd_lock); + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, + "BM_%d : invldt_cmd_tbl alloc failed\n"); + return FAILED; + } + nents = 0; for (i = 0; i < conn->session->cmds_max; i++) { abrt_task = conn->session->cmds[i]; abrt_io_task = abrt_task->dd_data; @@ -331,33 +332,47 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) if (sc->device->lun != abrt_task->sc->device->lun) continue; + /** + * Can't fit in more cmds? Normally this won't happen b'coz + * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ. + */ + if (nents == BE_INVLDT_CMD_TBL_SZ) { + more = 1; + break; + } /* Invalidate WRB Posted for this Task */ AMAP_SET_BITS(struct amap_iscsi_wrb, invld, abrt_io_task->pwrb_handle->pwrb, 1); - inv_tbl->cid = cid; - inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; - num_invalidate++; - inv_tbl++; + inv_tbl[nents].cid = cid; + inv_tbl[nents].icd = abrt_io_task->psgl_handle->sgl_index; + nents++; } spin_unlock_bh(&session->frwd_lock); - inv_tbl = phba->inv_tbl; - nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(struct invalidate_commands_params_in), - &nonemb_cmd.dma); + if (more) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, + "BM_%d : number of cmds exceeds size of invalidation table\n"); + kfree(inv_tbl); + return FAILED; + } + + nonemb_cmd.size = sizeof(union be_invldt_cmds_params); + nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, + nonemb_cmd.size, + &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : Failed to allocate memory for" "mgmt_invalidate_icds\n"); + kfree(inv_tbl); return FAILED; } - nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); - memset(nonemb_cmd.va, 0, nonemb_cmd.size); - tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, + tag = mgmt_invalidate_icds(phba, inv_tbl, nents, cid, &nonemb_cmd); + kfree(inv_tbl); if (!tag) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, "BM_%d : mgmt_invalidate_icds could not be" diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 6376657e45f7..f869e3734c02 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -57,7 +57,6 @@ #define BE2_IO_DEPTH 1024 #define BE2_MAX_SESSIONS 256 -#define BE2_CMDS_PER_CXN 128 #define BE2_TMFS 16 #define BE2_NOPOUT_REQ 16 #define BE2_SGE 32 @@ -72,8 +71,13 @@ #define BEISCSI_SGLIST_ELEMENTS 30 -#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ -#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors 
*/ +/** + * BE_INVLDT_CMD_TBL_SZ is 128 which is total number commands that can + * be invalidated at a time, consider it before changing the value of + * BEISCSI_CMD_PER_LUN. + */ +#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ +#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */ #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ @@ -272,11 +276,6 @@ struct hba_parameters { unsigned int num_sge; }; -struct invalidate_command_table { - unsigned short icd; - unsigned short cid; -} __packed; - #define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ (phwi_ctrlr->wrb_context[cri].ulp_num) struct hwi_wrb_context { @@ -430,7 +429,6 @@ struct beiscsi_hba { struct be_ctrl_info ctrl; unsigned int generation; unsigned int interface_handle; - struct invalidate_command_table inv_tbl[128]; struct be_aic_obj aic_obj[MAX_CPUS]; unsigned int attr_log_enable; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index ac05317bba7f..5f02e8db7df0 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -129,7 +129,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, } unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, - struct invalidate_command_table *inv_tbl, + struct invldt_cmd_tbl *inv_tbl, unsigned int num_invalidate, unsigned int cid, struct be_dma_mem *nonemb_cmd) @@ -137,9 +137,12 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_sge *sge; - struct invalidate_commands_params_in *req; + struct invldt_cmds_params_in *req; unsigned int i, tag; + if (num_invalidate > BE_INVLDT_CMD_TBL_SZ) + return 0; + mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { @@ -158,10 +161,9 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, req->ref_handle = 0; req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; for (i = 0; i < num_invalidate; i++) { - req->table[i].icd = inv_tbl->icd; - req->table[i].cid = inv_tbl->cid; + req->table[i].icd = inv_tbl[i].icd; + req->table[i].cid = inv_tbl[i].cid; req->icd_count++; - inv_tbl++; } sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index b897cfd57c72..6e19281b9d57 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -104,10 +104,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba, unsigned int mgmt_upload_connection(struct beiscsi_hba *phba, unsigned short cid, unsigned int upload_flag); -unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, - struct invalidate_command_table *inv_tbl, - unsigned int num_invalidate, unsigned int cid, - struct be_dma_mem *nonemb_cmd); unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba, struct bsg_job *job, @@ -134,24 +130,31 @@ union iscsi_invalidate_connection_params { struct iscsi_invalidate_connection_params_out response; } __packed; -struct invalidate_commands_params_in { +#define BE_INVLDT_CMD_TBL_SZ 128 +struct invldt_cmd_tbl { + unsigned short icd; + unsigned short cid; +} __packed; + +struct invldt_cmds_params_in { struct be_cmd_req_hdr hdr; unsigned int ref_handle; unsigned int icd_count; - struct invalidate_command_table table[128]; + struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ]; unsigned short cleanup_type; 
unsigned short unused; } __packed; -struct invalidate_commands_params_out { +struct invldt_cmds_params_out { + struct be_cmd_resp_hdr hdr; unsigned int ref_handle; unsigned int icd_count; - unsigned int icd_status[128]; + unsigned int icd_status[BE_INVLDT_CMD_TBL_SZ]; } __packed; -union invalidate_commands_params { - struct invalidate_commands_params_in request; - struct invalidate_commands_params_out response; +union be_invldt_cmds_params { + struct invldt_cmds_params_in request; + struct invldt_cmds_params_out response; } __packed; struct mgmt_hba_attributes { @@ -231,16 +234,6 @@ struct be_bsg_vendor_cmd { #define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws) -/* MGMT CMD flags */ - -#define MGMT_CMDH_FREE (1<<0) - -/* --- MGMT_ERROR_CODES --- */ -/* Error Codes returned in the status field of the CMD response header */ -#define MGMT_STATUS_SUCCESS 0 /* The CMD completed without errors */ -#define MGMT_STATUS_FAILED 1 /* Error status in the Status field of */ - /* the CMD_RESPONSE_HEADER */ - #define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\ pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\ bus_address.u.a32.address_lo; \ @@ -265,6 +258,11 @@ struct beiscsi_endpoint { u16 cid_vld; }; +unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, + struct invldt_cmd_tbl *inv_tbl, + unsigned int num_invalidate, unsigned int cid, + struct be_dma_mem *nonemb_cmd); + unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep, unsigned short cid, -- cgit v1.2.3-59-g8ed1b From 987132167f4bfb132cd56601f77d2bd5ba9cff4a Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:55:55 +0530 Subject: scsi: be2iscsi: Fix for crash in beiscsi_eh_device_reset System crashes when sg_reset is executed in a loop. CPU: 13 PID: 7073 Comm: sg_reset Tainted: G E 4.8.0-rc1+ #4 RIP: 0010:[] [] beiscsi_eh_device_reset+0x160/0x520 [be2iscsi] Call Trace: [] ? scsi_host_alloc_command+0x47/0xc0 [] scsi_try_bus_device_reset+0x2a/0x50 [] scsi_ioctl_reset+0x13e/0x260 [] scsi_ioctl+0x137/0x3d0 [] sg_ioctl+0x572/0xc20 [sg] [] do_vfs_ioctl+0xa7/0x5d0 The accesses to beiscsi_io_task is being protected in device reset handler with frwd_lock but the freeing of task can happen under back_lock. Hold the reference of iscsi_task till invalidation completes. This prevents use of ICD when invalidation of that ICD is being processed. Use frwd_lock for iscsi_tasks looping and back_lock to access beiscsi_io_task structures. Rewrite mgmt_invalidation_icds to handle allocation and freeing of IOCTL buffer in one place. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_main.c | 121 +++++++++++++++++----------------------- drivers/scsi/be2iscsi/be_mgmt.c | 107 ++++++++++++++++++++--------------- drivers/scsi/be2iscsi/be_mgmt.h | 8 +-- 3 files changed, 114 insertions(+), 122 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 3811a0d230f2..7feecc0b4903 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -226,8 +226,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) struct beiscsi_hba *phba; struct iscsi_session *session; struct invldt_cmd_tbl inv_tbl; - struct be_dma_mem nonemb_cmd; - unsigned int cid, tag; + unsigned int cid; int rc; cls_session = starget_to_session(scsi_target(sc->device)); @@ -259,64 +258,47 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) cid = beiscsi_conn->beiscsi_conn_cid; inv_tbl.cid = cid; inv_tbl.icd = aborted_io_task->psgl_handle->sgl_index; - nonemb_cmd.size = sizeof(union be_invldt_cmds_params); - nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, - nonemb_cmd.size, - &nonemb_cmd.dma); - if (nonemb_cmd.va == NULL) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, - "BM_%d : Failed to allocate memory for" - "mgmt_invalidate_icds\n"); - return FAILED; - } - - tag = mgmt_invalidate_icds(phba, &inv_tbl, 1, cid, &nonemb_cmd); - if (!tag) { + rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1); + if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, - "BM_%d : mgmt_invalidate_icds could not be" - "submitted\n"); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); - + "BM_%d : sc %p invalidation failed %d\n", + sc, rc); return FAILED; } - rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); - if (rc != -EBUSY) - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); - return iscsi_eh_abort(sc); } static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) { - struct iscsi_task *abrt_task; - struct beiscsi_io_task *abrt_io_task; - struct iscsi_conn *conn; + struct beiscsi_invldt_cmd_tbl { + struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ]; + struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ]; + } *inv_tbl; + struct iscsi_cls_session *cls_session; struct beiscsi_conn *beiscsi_conn; - struct beiscsi_hba *phba; + struct beiscsi_io_task *io_task; struct iscsi_session *session; - struct iscsi_cls_session *cls_session; - struct invldt_cmd_tbl *inv_tbl; - struct be_dma_mem nonemb_cmd; - unsigned int cid, tag, i, nents; + struct beiscsi_hba *phba; + struct iscsi_conn *conn; + struct iscsi_task *task; + unsigned int i, nents; int rc, more = 0; - /* invalidate iocbs */ cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; + spin_lock_bh(&session->frwd_lock); if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { spin_unlock_bh(&session->frwd_lock); return FAILED; } + conn = session->leadconn; beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; - cid = beiscsi_conn->beiscsi_conn_cid; - inv_tbl = kcalloc(BE_INVLDT_CMD_TBL_SZ, sizeof(*inv_tbl), GFP_KERNEL); + inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_KERNEL); if (!inv_tbl) { spin_unlock_bh(&session->frwd_lock); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, @@ -324,13 +306,14 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) return FAILED; } nents = 0; + /* take back_lock to prevent task from getting cleaned up under us */ + spin_lock(&session->back_lock); for (i = 0; i < conn->session->cmds_max; i++) { - abrt_task = 
conn->session->cmds[i]; - abrt_io_task = abrt_task->dd_data; - if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) + task = conn->session->cmds[i]; + if (!task->sc) continue; - if (sc->device->lun != abrt_task->sc->device->lun) + if (sc->device->lun != task->sc->device->lun) continue; /** * Can't fit in more cmds? Normally this won't happen b'coz @@ -341,52 +324,48 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) break; } - /* Invalidate WRB Posted for this Task */ + /* get a task ref till FW processes the req for the ICD used */ + __iscsi_get_task(task); + io_task = task->dd_data; + /* mark WRB invalid which have been not processed by FW yet */ AMAP_SET_BITS(struct amap_iscsi_wrb, invld, - abrt_io_task->pwrb_handle->pwrb, + io_task->pwrb_handle->pwrb, 1); - inv_tbl[nents].cid = cid; - inv_tbl[nents].icd = abrt_io_task->psgl_handle->sgl_index; + inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid; + inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index; + inv_tbl->task[nents] = task; nents++; } + spin_unlock_bh(&session->back_lock); spin_unlock_bh(&session->frwd_lock); + rc = SUCCESS; + if (!nents) + goto end_reset; + if (more) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : number of cmds exceeds size of invalidation table\n"); - kfree(inv_tbl); - return FAILED; + rc = FAILED; + goto end_reset; } - nonemb_cmd.size = sizeof(union be_invldt_cmds_params); - nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, - nonemb_cmd.size, - &nonemb_cmd.dma); - if (nonemb_cmd.va == NULL) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, - "BM_%d : Failed to allocate memory for" - "mgmt_invalidate_icds\n"); - kfree(inv_tbl); - return FAILED; - } - tag = mgmt_invalidate_icds(phba, inv_tbl, nents, - cid, &nonemb_cmd); - kfree(inv_tbl); - if (!tag) { + if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, - "BM_%d : mgmt_invalidate_icds could not be" - " submitted\n"); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); - return FAILED; + "BM_%d : cid %u scmds invalidation failed\n", + beiscsi_conn->beiscsi_conn_cid); + rc = FAILED; } - rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); - if (rc != -EBUSY) - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); - return iscsi_eh_device_reset(sc); +end_reset: + for (i = 0; i < nents; i++) + iscsi_put_task(inv_tbl->task[i]); + kfree(inv_tbl); + + if (rc == SUCCESS) + rc = iscsi_eh_device_reset(sc); + return rc; } /*------------------- PCI Driver operations and data ----------------- */ diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 5f02e8db7df0..110c0d076d9a 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -128,52 +128,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, return tag; } -unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, - struct invldt_cmd_tbl *inv_tbl, - unsigned int num_invalidate, unsigned int cid, - struct be_dma_mem *nonemb_cmd) - -{ - struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb; - struct be_sge *sge; - struct invldt_cmds_params_in *req; - unsigned int i, tag; - - if (num_invalidate > BE_INVLDT_CMD_TBL_SZ) - return 0; - - mutex_lock(&ctrl->mbox_lock); - wrb = alloc_mcc_wrb(phba, &tag); - if (!wrb) { - mutex_unlock(&ctrl->mbox_lock); - return 0; - } - - req = nonemb_cmd->va; - memset(req, 0, sizeof(*req)); - sge = nonembedded_sgl(wrb); - - 
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); - be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, - OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS, - sizeof(*req)); - req->ref_handle = 0; - req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; - for (i = 0; i < num_invalidate; i++) { - req->table[i].icd = inv_tbl[i].icd; - req->table[i].cid = inv_tbl[i].cid; - req->icd_count++; - } - sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); - sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); - sge->len = cpu_to_le32(nonemb_cmd->size); - - be_mcc_notify(phba, tag); - mutex_unlock(&ctrl->mbox_lock); - return tag; -} - unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep, unsigned short cid, @@ -1496,3 +1450,64 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, (params->dw[offsetof(struct amap_beiscsi_offload_params, exp_statsn) / 32] + 1)); } + +int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, + struct invldt_cmd_tbl *inv_tbl, + unsigned int nents) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct invldt_cmds_params_in *req; + struct be_dma_mem nonemb_cmd; + struct be_mcc_wrb *wrb; + unsigned int i, tag; + struct be_sge *sge; + int rc; + + if (!nents || nents > BE_INVLDT_CMD_TBL_SZ) + return -EINVAL; + + nonemb_cmd.size = sizeof(union be_invldt_cmds_params); + nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, + nonemb_cmd.size, + &nonemb_cmd.dma); + if (!nonemb_cmd.va) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, + "BM_%d : invldt_cmds_params alloc failed\n"); + return -ENOMEM; + } + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return -ENOMEM; + } + + req = nonemb_cmd.va; + be_wrb_hdr_prepare(wrb, nonemb_cmd.size, false, 1); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS, + sizeof(*req)); + req->ref_handle = 0; + req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; + for (i = 0; i < nents; i++) { + req->table[i].icd = inv_tbl[i].icd; + req->table[i].cid = inv_tbl[i].cid; + req->icd_count++; + } + sge = nonembedded_sgl(wrb); + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); + sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd.dma)); + sge->len = cpu_to_le32(nonemb_cmd.size); + + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return rc; +} diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 6e19281b9d57..f301d57320ec 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -258,16 +258,14 @@ struct beiscsi_endpoint { u16 cid_vld; }; -unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, - struct invldt_cmd_tbl *inv_tbl, - unsigned int num_invalidate, unsigned int cid, - struct be_dma_mem *nonemb_cmd); - unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep, unsigned short cid, unsigned short issue_reset, unsigned short savecfg_flag); +int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, + struct invldt_cmd_tbl *inv_tbl, + unsigned int nents); int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type); -- cgit v1.2.3-59-g8ed1b From 
faa0a22d54230ae9658c419e495fc1a469e191f3 Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:55:56 +0530 Subject: scsi: be2iscsi: Take iscsi_task ref in abort handler Hold the reference of iscsi_task till invalidation completes. This prevents use of ICD when invalidation of that ICD is being processed. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be_main.c | 45 +++++++++++++++++------------------------ 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 7feecc0b4903..260842a1f14e 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -218,47 +218,40 @@ static int beiscsi_slave_configure(struct scsi_device *sdev) static int beiscsi_eh_abort(struct scsi_cmnd *sc) { + struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr; struct iscsi_cls_session *cls_session; - struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; - struct beiscsi_io_task *aborted_io_task; - struct iscsi_conn *conn; + struct beiscsi_io_task *abrt_io_task; struct beiscsi_conn *beiscsi_conn; - struct beiscsi_hba *phba; struct iscsi_session *session; struct invldt_cmd_tbl inv_tbl; - unsigned int cid; + struct beiscsi_hba *phba; + struct iscsi_conn *conn; int rc; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; - spin_lock_bh(&session->frwd_lock); - if (!aborted_task || !aborted_task->sc) { - /* we raced */ - spin_unlock_bh(&session->frwd_lock); - return SUCCESS; - } - - aborted_io_task = aborted_task->dd_data; - if (!aborted_io_task->scsi_cmnd) { - /* raced or invalid command */ - spin_unlock_bh(&session->frwd_lock); + /* check if we raced, task just got cleaned up under us */ + spin_lock_bh(&session->back_lock); + if (!abrt_task || !abrt_task->sc) { + spin_unlock_bh(&session->back_lock); return SUCCESS; } - spin_unlock_bh(&session->frwd_lock); - - conn = aborted_task->conn; + /* get a task ref till FW processes the req for the ICD used */ + __iscsi_get_task(abrt_task); + abrt_io_task = abrt_task->dd_data; + conn = abrt_task->conn; beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; - /* Invalidate WRB Posted for this Task */ + /* mark WRB invalid which have been not processed by FW yet */ AMAP_SET_BITS(struct amap_iscsi_wrb, invld, - aborted_io_task->pwrb_handle->pwrb, - 1); - /* invalidate iocb */ - cid = beiscsi_conn->beiscsi_conn_cid; - inv_tbl.cid = cid; - inv_tbl.icd = aborted_io_task->psgl_handle->sgl_index; + abrt_io_task->pwrb_handle->pwrb, 1); + inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid; + inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index; + spin_unlock_bh(&session->back_lock); + rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1); + iscsi_put_task(abrt_task); if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, "BM_%d : sc %p invalidation failed %d\n", -- cgit v1.2.3-59-g8ed1b From 392b7d2f122210da594671606321223167a5a85c Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:55:57 +0530 Subject: scsi: be2iscsi: Set WRB invalid bit for SkyHawk invalid bit in WRB indicates to FW that IO was invalidated before WRB was fetched from host memory. For SkyHawk, this invalid bit in WRB is at a different offset. Use amap_iscsi_wrb_v2 to mark invalid bit for SkyHawk. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_main.c | 19 ++++++++++++++----- drivers/scsi/be2iscsi/be_main.h | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 260842a1f14e..b509acd33772 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -244,8 +244,13 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; /* mark WRB invalid which have been not processed by FW yet */ - AMAP_SET_BITS(struct amap_iscsi_wrb, invld, - abrt_io_task->pwrb_handle->pwrb, 1); + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + abrt_io_task->pwrb_handle->pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, + abrt_io_task->pwrb_handle->pwrb, 1); + } inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid; inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index; spin_unlock_bh(&session->back_lock); @@ -321,9 +326,13 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) __iscsi_get_task(task); io_task = task->dd_data; /* mark WRB invalid which have been not processed by FW yet */ - AMAP_SET_BITS(struct amap_iscsi_wrb, invld, - io_task->pwrb_handle->pwrb, - 1); + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + io_task->pwrb_handle->pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, + io_task->pwrb_handle->pwrb, 1); + } inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid; inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index f869e3734c02..8ba8c1215922 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -840,7 +840,7 @@ struct amap_iscsi_wrb_v2 { u8 diff_enbl; /* DWORD 11 */ u8 u_run; /* DWORD 11 */ u8 o_run; /* DWORD 11 */ - u8 invalid; /* DWORD 11 */ + u8 invld; /* DWORD 11 */ u8 dsp; /* DWORD 11 */ u8 dmsg; /* DWORD 11 */ u8 rsvd4; /* DWORD 11 */ -- cgit v1.2.3-59-g8ed1b From 3f7f62ee5b10de42b3ff1a33599fde4a2094960a Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:55:58 +0530 Subject: scsi: be2iscsi: Add checks to validate completions Added check in beiscsi_process_cq for pio_handle. pio_handle is cleared in beiscsi_put_wrb_handle. This catches any case where task gets cleaned up just before completion. Use back_lock before accessing pio_handle. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_main.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index b509acd33772..832d4f0fb33f 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -960,6 +960,10 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context, unsigned long flags; spin_lock_irqsave(&pwrb_context->wrb_lock, flags); + if (!pwrb_context->wrb_handles_available) { + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); + return NULL; + } pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; pwrb_context->wrb_handles_available--; if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) @@ -1010,6 +1014,7 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context, pwrb_context->free_index = 0; else pwrb_context->free_index++; + pwrb_handle->pio_handle = NULL; spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); } @@ -1220,6 +1225,7 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, uint16_t wrb_index, cid, cri_index; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle; + struct iscsi_session *session; struct iscsi_task *task; phwi_ctrlr = phba->phwi_ctrlr; @@ -1238,8 +1244,12 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, cri_index = BE_GET_CRI_FROM_CID(cid); pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; + session = beiscsi_conn->conn->session; + spin_lock_bh(&session->back_lock); task = pwrb_handle->pio_handle; - iscsi_put_task(task); + if (task) + __iscsi_put_task(task); + spin_unlock_bh(&session->back_lock); } static void @@ -1319,16 +1329,16 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, struct sol_cqe *psol) { + struct iscsi_conn *conn = beiscsi_conn->conn; + struct iscsi_session *session = conn->session; + struct common_sol_cqe csol_cqe = {0}; struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle; struct iscsi_wrb *pwrb = NULL; - struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; - unsigned int type; - struct iscsi_conn *conn = beiscsi_conn->conn; - struct iscsi_session *session = conn->session; - struct common_sol_cqe csol_cqe = {0}; uint16_t cri_index = 0; + uint8_t type; phwi_ctrlr = phba->phwi_ctrlr; @@ -1341,11 +1351,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, pwrb_handle = pwrb_context->pwrb_handle_basestd[ csol_cqe.wrb_index]; + spin_lock_bh(&session->back_lock); task = pwrb_handle->pio_handle; + if (!task) { + spin_unlock_bh(&session->back_lock); + return; + } pwrb = pwrb_handle->pwrb; type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; - spin_lock_bh(&session->back_lock); switch (type) { case HWH_TYPE_IO: case HWH_TYPE_IO_RD: -- cgit v1.2.3-59-g8ed1b From d7401055480d80599b6813fdf556d519ced4d71e Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:55:59 +0530 Subject: scsi: be2iscsi: Fix iSCSI cmd cleanup IOCTL Prepare the IOCTL with appropriate sizes of buffers of V0 and V1. Set missing chute number in V1 IOCTL. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_cmds.c | 33 ++++++++++++++++++--------------- drivers/scsi/be2iscsi/be_main.c | 3 +-- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index be65da2988fb..94aae458489b 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -1700,31 +1700,34 @@ int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp) struct be_ctrl_info *ctrl = &phba->ctrl; struct iscsi_cleanup_req_v1 *req_v1; struct iscsi_cleanup_req *req; + u16 hdr_ring_id, data_ring_id; struct be_mcc_wrb *wrb; int status; mutex_lock(&ctrl->mbox_lock); wrb = wrb_from_mbox(&ctrl->mbox_mem); - req = embedded_payload(wrb); - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); - be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, - OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); - /** - * TODO: Check with FW folks the chute value to be set. - * For now, use the ULP_MASK as the chute value. - */ + hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp); + data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp); if (is_chip_be2_be3r(phba)) { + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); req->chute = (1 << ulp); - req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp); - req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp); + /* BE2/BE3 FW creates 8-bit ring id */ + req->hdr_ring_id = hdr_ring_id; + req->data_ring_id = data_ring_id; } else { - req_v1 = (struct iscsi_cleanup_req_v1 *)req; + req_v1 = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0); + be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_CLEANUP, + sizeof(*req_v1)); req_v1->hdr.version = 1; - req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, - ulp)); - req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, - ulp)); + req_v1->chute = (1 << ulp); + req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id); + req_v1->data_ring_id = cpu_to_le16(data_ring_id); } status = be_mbox_notify(ctrl); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 832d4f0fb33f..1b5a07ff93ea 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -2747,7 +2747,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { - /* get async_ctx for each ULP */ + /* get async_ctx for each ULP */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); @@ -3814,7 +3814,6 @@ static int hwi_init_port(struct beiscsi_hba *phba) /** * Now that the default PDU rings have been created, * let EP know about it. - * Call beiscsi_cmd_iscsi_cleanup before posting? */ beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, ulp_num); -- cgit v1.2.3-59-g8ed1b From b7d98ca7fb476e9f9382445b3a2b973e6c0de3ab Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:56:00 +0530 Subject: scsi: be2iscsi: Remove redundant receive buffers posting This duplicate code got added during manual merging. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_main.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 1b5a07ff93ea..c6af6e5a23b8 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3859,14 +3859,6 @@ static int hwi_init_port(struct beiscsi_hba *phba) phwi_ctrlr->wrb_context[cri].cid] = async_arr_idx++; } - /** - * Now that the default PDU rings have been created, - * let EP know about it. - */ - beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, - ulp_num); - beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, - ulp_num); } } -- cgit v1.2.3-59-g8ed1b From fa1261c4b683828f1b012267aff5b9322fd9ab71 Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:56:01 +0530 Subject: scsi: be2iscsi: Remove unused struct members Fix errors reported in static analysis. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be.h | 3 -- drivers/scsi/be2iscsi/be_cmds.c | 8 +++--- drivers/scsi/be2iscsi/be_cmds.h | 17 ++++++------ drivers/scsi/be2iscsi/be_iscsi.c | 2 +- drivers/scsi/be2iscsi/be_main.c | 24 +++------------- drivers/scsi/be2iscsi/be_main.h | 22 --------------- drivers/scsi/be2iscsi/be_mgmt.c | 9 ++---- drivers/scsi/be2iscsi/be_mgmt.h | 60 ---------------------------------------- 8 files changed, 21 insertions(+), 124 deletions(-) diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index b1d0fdc5d5e1..ca9440fb2325 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -84,7 +84,6 @@ static inline void queue_tail_inc(struct be_queue_info *q) /*ISCSI */ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ - bool enable; u32 min_eqd; /* in usecs */ u32 max_eqd; /* in usecs */ u32 prev_eqd; /* in usecs */ @@ -94,8 +93,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ }; struct be_eq_obj { - bool todo_mcc_cq; - bool todo_cq; u32 cq_count; struct be_queue_info q; struct beiscsi_hba *phba; diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 94aae458489b..5d59e2630ce6 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -676,10 +676,10 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, bool embedded, u8 sge_cnt) { if (embedded) - wrb->embedded |= MCC_WRB_EMBEDDED_MASK; + wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK; else - wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << - MCC_WRB_SGE_CNT_SHIFT; + wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << + MCC_WRB_SGE_CNT_SHIFT; wrb->payload_length = payload_len; be_dws_cpu_to_le(wrb, 8); } @@ -1599,7 +1599,7 @@ int beiscsi_cmd_function_reset(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); - struct be_post_sgl_pages_req *req = embedded_payload(wrb); + struct be_post_sgl_pages_req *req; int status; mutex_lock(&ctrl->mbox_lock); diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 328fb5b973cd..1d40e83b0790 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -31,10 +31,16 @@ struct be_sge { __le32 len; }; -#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */ -#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */ struct be_mcc_wrb { - u32 embedded; /* dword 0 */ + u32 emb_sgecnt_special; /* dword 0 */ + /* bits 0 - embedded */ + /* bits 1 - 2 
reserved */ + /* bits 3 - 7 sge count */ + /* bits 8 - 23 reserved */ + /* bits 24 - 31 special */ +#define MCC_WRB_EMBEDDED_MASK 1 +#define MCC_WRB_SGE_CNT_SHIFT 3 +#define MCC_WRB_SGE_CNT_MASK 0x1F u32 payload_length; /* dword 1 */ u32 tag0; /* dword 2 */ u32 tag1; /* dword 3 */ @@ -1133,11 +1139,6 @@ struct tcp_connect_and_offload_out { } __packed; -struct be_mcc_wrb_context { - struct MCC_WRB *wrb; - int *users_final_status; -} __packed; - #define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */ #define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */ #define DB_DEF_PDU_REARM_SHIFT 14 diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index ba258217614e..0ec06256a1b8 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1114,7 +1114,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, nonemb_cmd.size = req_memsize; memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); - if (tag <= 0) { + if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Failed for cid=%d\n", beiscsi_ep->ep_cid); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index c6af6e5a23b8..a8e67c6186f8 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -67,8 +67,6 @@ beiscsi_##_name##_disp(struct device *dev,\ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct beiscsi_hba *phba = iscsi_host_priv(shost); \ - uint32_t param_val = 0; \ - param_val = phba->attr_##_name;\ return snprintf(buf, PAGE_SIZE, "%d\n",\ phba->attr_##_name);\ } @@ -642,7 +640,6 @@ static void beiscsi_get_params(struct beiscsi_hba *phba) phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; - phba->params.eq_timer = 64; phba->params.num_eq_entries = 1024; phba->params.num_cq_entries = 1024; phba->params.wrbs_per_cxn = 256; @@ -1335,7 +1332,6 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle; - struct iscsi_wrb *pwrb = NULL; struct iscsi_task *task; uint16_t cri_index = 0; uint8_t type; @@ -1357,7 +1353,6 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, spin_unlock_bh(&session->back_lock); return; } - pwrb = pwrb_handle->pwrb; type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; switch (type) { @@ -1721,13 +1716,12 @@ beiscsi_hdq_post_handles(struct beiscsi_hba *phba, struct list_head *hfree_list; struct phys_addr *pasync_sge; u32 ring_id, doorbell = 0; - u16 index, num_entries; u32 doorbell_offset; u16 prod = 0, cons; + u16 index; phwi_ctrlr = phba->phwi_ctrlr; pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); - num_entries = pasync_ctx->num_entries; if (header) { cons = pasync_ctx->async_header.free_entries; hfree_list = &pasync_ctx->async_header.free_list; @@ -2384,13 +2378,10 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) static void beiscsi_find_mem_req(struct beiscsi_hba *phba) { uint8_t mem_descr_index, ulp_num; - unsigned int num_cq_pages, num_async_pdu_buf_pages; + unsigned int num_async_pdu_buf_pages; unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; - num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ - sizeof(struct sol_cqe)); - phba->params.hwi_ws_sz = 
sizeof(struct hwi_controller); phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * @@ -3377,7 +3368,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr) { - unsigned int wrb_mem_index, offset, size, num_wrb_rings; + unsigned int num_wrb_rings; u64 pa_addr_lo; unsigned int idx, num, i, ulp_num; struct mem_array *pwrb_arr; @@ -3442,10 +3433,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, } for (i = 0; i < phba->params.cxns_per_ctrl; i++) { - wrb_mem_index = 0; - offset = 0; - size = 0; - if (ulp_count > 1) { ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; @@ -3673,7 +3660,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; - struct hd_async_context *pasync_ctx; int i, eq_for_mcc, ulp_num; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) @@ -3710,8 +3696,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba) q = &phwi_context->be_def_dataq[ulp_num]; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); - - pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; } } @@ -3937,7 +3921,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) static int beiscsi_init_controller(struct beiscsi_hba *phba) { - int ret = -ENOMEM; + int ret; ret = beiscsi_get_memory(phba); if (ret < 0) { diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 8ba8c1215922..73cc2f775291 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -243,19 +243,7 @@ struct hba_parameters { unsigned int num_cq_entries; unsigned int num_eq_entries; unsigned int wrbs_per_cxn; - unsigned int crashmode; - unsigned int hba_num; - - unsigned int mgmt_ws_sz; unsigned int hwi_ws_sz; - - unsigned int eto; - unsigned int ldto; - - unsigned int dbg_flags; - unsigned int num_cxn; - - unsigned int eq_timer; /** * These are calculated from other params. 
They're here * for debug purposes @@ -333,7 +321,6 @@ struct beiscsi_hba { struct be_bus_address pci_pa; /* CSR */ /* PCI representation of our HBA */ struct pci_dev *pcidev; - unsigned short asic_revision; unsigned int num_cpus; unsigned int nxt_cqid; struct msix_entry msix_entries[MAX_CPUS]; @@ -354,7 +341,6 @@ struct beiscsi_hba { spinlock_t io_sgl_lock; spinlock_t mgmt_sgl_lock; spinlock_t async_pdu_lock; - unsigned int age; struct list_head hba_queue; #define BE_MAX_SESSION 2048 #define BE_SET_CID_TO_CRI(cri_index, cid) \ @@ -523,10 +509,6 @@ struct beiscsi_io_task { struct scsi_cmnd *scsi_cmnd; int num_sg; struct hwi_wrb_context *pwrb_context; - unsigned int cmd_sn; - unsigned int flags; - unsigned short cid; - unsigned short header_len; itt_t libiscsi_itt; struct be_cmd_bhs *cmd_bhs; struct be_bus_address bhs_pa; @@ -1040,10 +1022,8 @@ struct hwi_controller { struct list_head io_sgl_list; struct list_head eh_sgl_list; struct sgl_handle *psgl_handle_base; - unsigned int wrb_mem_index; struct hwi_wrb_context *wrb_context; - struct mcc_wrb *pmcc_wrb_base; struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; struct be_ring default_pdu_data[BEISCSI_ULP_COUNT]; struct hwi_context_memory *phwi_ctxt; @@ -1060,9 +1040,7 @@ enum hwh_type_enum { }; struct wrb_handle { - enum hwh_type_enum type; unsigned short wrb_index; - struct iscsi_task *pio_handle; struct iscsi_wrb *pwrb; }; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 110c0d076d9a..8acacd5ec796 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -66,7 +66,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, struct bsg_job *job, struct be_dma_mem *nonemb_cmd) { - struct be_cmd_resp_hdr *resp; struct be_mcc_wrb *wrb; struct be_sge *mcc_sge; unsigned int tag = 0; @@ -76,7 +75,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, nonemb_cmd->size = job->request_payload.payload_len; memset(nonemb_cmd->va, 0, nonemb_cmd->size); - resp = nonemb_cmd->va; region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; @@ -1022,7 +1020,6 @@ unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba) unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_cmd_get_session_resp *resp; struct be_cmd_get_session_req *req; struct be_dma_mem *nonemb_cmd; struct be_mcc_wrb *wrb; @@ -1037,7 +1034,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) } nonemb_cmd = &phba->boot_struct.nonemb_cmd; - nonemb_cmd->size = sizeof(*resp); + nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp); nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, nonemb_cmd->size, &nonemb_cmd->dma); @@ -1052,7 +1049,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_SESSION_GET_A_SESSION, - sizeof(*resp)); + sizeof(struct be_cmd_get_session_resp)); req->session_handle = phba->boot_struct.s_handle; sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); @@ -1297,7 +1294,7 @@ beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); - return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n", + 
return snprintf(buf, PAGE_SIZE, "Port Identifier : %u\n", phba->fw_config.phys_port); } diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index f301d57320ec..308f1472f98a 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -36,66 +36,6 @@ #define PCICFG_UE_STATUS_MASK_LOW 0xA8 #define PCICFG_UE_STATUS_MASK_HI 0xAC -/** - * Pseudo amap definition in which each bit of the actual structure is defined - * as a byte: used to calculate offset/shift/mask of each field - */ -struct amap_mcc_sge { - u8 pa_lo[32]; /* dword 0 */ - u8 pa_hi[32]; /* dword 1 */ - u8 length[32]; /* DWORD 2 */ -} __packed; - -/** - * Pseudo amap definition in which each bit of the actual structure is defined - * as a byte: used to calculate offset/shift/mask of each field - */ -struct amap_mcc_wrb_payload { - union { - struct amap_mcc_sge sgl[19]; - u8 embedded[59 * 32]; /* DWORDS 57 to 115 */ - } u; -} __packed; - -/** - * Pseudo amap definition in which each bit of the actual structure is defined - * as a byte: used to calculate offset/shift/mask of each field - */ -struct amap_mcc_wrb { - u8 embedded; /* DWORD 0 */ - u8 rsvd0[2]; /* DWORD 0 */ - u8 sge_count[5]; /* DWORD 0 */ - u8 rsvd1[16]; /* DWORD 0 */ - u8 special[8]; /* DWORD 0 */ - u8 payload_length[32]; - u8 tag[64]; /* DWORD 2 */ - u8 rsvd2[32]; /* DWORD 4 */ - struct amap_mcc_wrb_payload payload; -}; - -struct mcc_sge { - u32 pa_lo; /* dword 0 */ - u32 pa_hi; /* dword 1 */ - u32 length; /* DWORD 2 */ -} __packed; - -struct mcc_wrb_payload { - union { - struct mcc_sge sgl[19]; - u32 embedded[59]; /* DWORDS 57 to 115 */ - } u; -} __packed; - -#define MCC_WRB_EMBEDDED_MASK 0x00000001 - -struct mcc_wrb { - u32 dw[0]; /* DWORD 0 */ - u32 payload_length; - u32 tag[2]; /* DWORD 2 */ - u32 rsvd2[1]; /* DWORD 4 */ - struct mcc_wrb_payload payload; -}; - int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr, struct beiscsi_endpoint *beiscsi_ep, -- cgit v1.2.3-59-g8ed1b From 29e80b7ce36ffcc3efd6062d87ddaf6c6f43a542 Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:56:02 +0530 Subject: scsi: be2iscsi: Remove wq_name from beiscsi_hba wq_name is used only to set WQ name when its being allocated. Remove it from beiscsi_hba structure and define locally. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_main.c | 9 +++++---- drivers/scsi/be2iscsi/be_main.h | 1 - 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index a8e67c6186f8..a68f1beda693 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5623,11 +5623,12 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev) static int beiscsi_dev_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { - struct beiscsi_hba *phba = NULL; - struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; + struct hwi_controller *phwi_ctrlr; + struct beiscsi_hba *phba = NULL; struct be_eq_obj *pbe_eq; unsigned int s_handle; + char wq_name[20]; int ret, i; ret = beiscsi_enable_pci(pcidev); @@ -5739,9 +5740,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; - snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", + snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq", phba->shost->host_no); - phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); + phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name); if (!phba->wq) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_dev_probe-" diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 73cc2f775291..30379de98dcb 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -410,7 +410,6 @@ struct beiscsi_hba { u8 port_name; u8 port_speed; char fw_ver_str[BEISCSI_VER_STRLEN]; - char wq_name[20]; struct workqueue_struct *wq; /* The actuak work queue */ struct be_ctrl_info ctrl; unsigned int generation; -- cgit v1.2.3-59-g8ed1b From 413f365657a8b9669bd0ba3628e9fde9ce63604e Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:56:03 +0530 Subject: scsi: be2iscsi: Add checks to validate CID alloc/free Set CID slot to 0xffff to indicate empty. Check if connection already exists in conn_table before binding. Check if endpoint already NULL before putting back CID. Break ep->conn link in free_ep to ignore completions after freeing. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be_iscsi.c | 163 ++++++++++++++++++++------------------- drivers/scsi/be2iscsi/be_main.c | 7 +- drivers/scsi/be2iscsi/be_main.h | 1 + 3 files changed, 87 insertions(+), 84 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 0ec06256a1b8..a4844578e357 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -165,33 +165,6 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid) return cls_conn; } -/** - * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table - * @beiscsi_conn: The pointer to beiscsi_conn structure - * @phba: The phba instance - * @cid: The cid to free - */ -static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, - struct beiscsi_conn *beiscsi_conn, - unsigned int cid) -{ - uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); - - if (phba->conn_table[cri_index]) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Connection table already occupied. 
Detected clash\n"); - - return -EINVAL; - } else { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, - "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n", - cri_index, beiscsi_conn); - - phba->conn_table[cri_index] = beiscsi_conn; - } - return 0; -} - /** * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection * @cls_session: pointer to iscsi cls session @@ -212,6 +185,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, struct hwi_wrb_context *pwrb_context; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; + uint16_t cri_index; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) @@ -229,20 +203,34 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, return -EEXIST; } - - pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID( - beiscsi_ep->ep_cid)]; + cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); + if (phba->conn_table[cri_index]) { + if (beiscsi_conn != phba->conn_table[cri_index] || + beiscsi_ep != phba->conn_table[cri_index]->ep) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : conn_table not empty at %u: cid %u conn %p:%p\n", + cri_index, + beiscsi_ep->ep_cid, + beiscsi_conn, + phba->conn_table[cri_index]); + return -EINVAL; + } + } beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; beiscsi_conn->ep = beiscsi_ep; beiscsi_ep->conn = beiscsi_conn; + /** + * Each connection is associated with a WRBQ kept in wrb_context. + * Store doorbell offset for transmit path. + */ + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset; - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, - "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n", - beiscsi_conn, conn, beiscsi_ep->ep_cid); - - return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid); + "BS_%d : cid %d phba->conn_table[%u]=%p\n", + beiscsi_ep->ep_cid, cri_index, beiscsi_conn); + phba->conn_table[cri_index] = beiscsi_conn; + return 0; } static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba) @@ -973,9 +961,9 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) */ static int beiscsi_get_cid(struct beiscsi_hba *phba) { - unsigned short cid = 0xFFFF, cid_from_ulp; - struct ulp_cid_info *cid_info = NULL; uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1; + unsigned short cid, cid_from_ulp; + struct ulp_cid_info *cid_info; /* Find the ULP which has more CID available */ cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ? @@ -984,20 +972,27 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba) BEISCSI_ULP1_AVLBL_CID(phba) : 0; cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ? BEISCSI_ULP0 : BEISCSI_ULP1; - - if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) { - cid_info = phba->cid_array_info[cid_from_ulp]; - if (!cid_info->avlbl_cids) - return cid; - - cid = cid_info->cid_array[cid_info->cid_alloc++]; - - if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT( - phba, cid_from_ulp)) - cid_info->cid_alloc = 0; - - cid_info->avlbl_cids--; + /** + * If iSCSI protocol is loaded only on ULP 0, and when cid_avlbl_ulp + * is ZERO for both, ULP 1 is returned. + * Check if ULP is loaded before getting new CID. 
+ */ + if (!test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) + return BE_INVALID_CID; + + cid_info = phba->cid_array_info[cid_from_ulp]; + cid = cid_info->cid_array[cid_info->cid_alloc]; + if (!cid_info->avlbl_cids || cid == BE_INVALID_CID) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : failed to get cid: available %u:%u\n", + cid_info->avlbl_cids, cid_info->cid_free); + return BE_INVALID_CID; } + /* empty the slot */ + cid_info->cid_array[cid_info->cid_alloc++] = BE_INVALID_CID; + if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(phba, cid_from_ulp)) + cid_info->cid_alloc = 0; + cid_info->avlbl_cids--; return cid; } @@ -1008,22 +1003,28 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba) */ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) { - uint16_t cid_post_ulp; - struct hwi_controller *phwi_ctrlr; - struct hwi_wrb_context *pwrb_context; - struct ulp_cid_info *cid_info = NULL; uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + struct ulp_cid_info *cid_info; + uint16_t cid_post_ulp; phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; cid_post_ulp = pwrb_context->ulp_num; cid_info = phba->cid_array_info[cid_post_ulp]; - cid_info->avlbl_cids++; - + /* fill only in empty slot */ + if (cid_info->cid_array[cid_info->cid_free] != BE_INVALID_CID) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : failed to put cid %u: available %u:%u\n", + cid, cid_info->avlbl_cids, cid_info->cid_free); + return; + } cid_info->cid_array[cid_info->cid_free++] = cid; if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp)) cid_info->cid_free = 0; + cid_info->avlbl_cids++; } /** @@ -1037,8 +1038,8 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) beiscsi_put_cid(phba, beiscsi_ep->ep_cid); beiscsi_ep->phba = NULL; - phba->ep_array[BE_GET_CRI_FROM_CID - (beiscsi_ep->ep_cid)] = NULL; + /* clear this to track freeing in beiscsi_ep_disconnect */ + phba->ep_array[BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid)] = NULL; /** * Check if any connection resource allocated by driver @@ -1049,6 +1050,11 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) return; beiscsi_conn = beiscsi_ep->conn; + /** + * Break ep->conn link here so that completions after + * this are ignored. 
+ */ + beiscsi_ep->conn = NULL; if (beiscsi_conn->login_in_progress) { beiscsi_free_mgmt_task_handles(beiscsi_conn, beiscsi_conn->task); @@ -1079,7 +1085,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : In beiscsi_open_conn\n"); beiscsi_ep->ep_cid = beiscsi_get_cid(phba); - if (beiscsi_ep->ep_cid == 0xFFFF) { + if (beiscsi_ep->ep_cid == BE_INVALID_CID) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : No free cid available\n"); return ret; @@ -1284,26 +1290,6 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) return ret; } -/** - * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table - * @phba: The phba instance - * @cid: The cid to free - */ -static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, - unsigned int cid) -{ - uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); - - if (phba->conn_table[cri_index]) - phba->conn_table[cri_index] = NULL; - else { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, - "BS_%d : Connection table Not occupied.\n"); - return -EINVAL; - } - return 0; -} - /** * beiscsi_ep_disconnect - Tears down the TCP connection * @ep: endpoint to be used @@ -1318,13 +1304,23 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) unsigned int tag; uint8_t mgmt_invalidate_flag, tcp_upload_flag; unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; + uint16_t cri_index; beiscsi_ep = ep->dd_data; phba = beiscsi_ep->phba; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, - "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n", + "BS_%d : In beiscsi_ep_disconnect for ep_cid = %u\n", beiscsi_ep->ep_cid); + cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); + if (!phba->ep_array[cri_index]) { + __beiscsi_log(phba, KERN_ERR, + "BS_%d : ep_array at %u cid %u empty\n", + cri_index, + beiscsi_ep->ep_cid); + return; + } + if (beiscsi_ep->conn) { beiscsi_conn = beiscsi_ep->conn; iscsi_suspend_queue(beiscsi_conn->conn); @@ -1356,7 +1352,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) free_ep: msleep(BEISCSI_LOGOUT_SYNC_DELAY); beiscsi_free_ep(beiscsi_ep); - beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); + if (!phba->conn_table[cri_index]) + __beiscsi_log(phba, KERN_ERR, + "BS_%d : conn_table empty at %u: cid %u\n", + cri_index, + beiscsi_ep->ep_cid); + phba->conn_table[cri_index] = NULL; iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); } diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index a68f1beda693..a8a23d61fd90 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4074,9 +4074,10 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) } /* Allocate memory for CID array */ - ptr_cid_info->cid_array = kzalloc(sizeof(void *) * - BEISCSI_GET_CID_COUNT(phba, - ulp_num), GFP_KERNEL); + ptr_cid_info->cid_array = + kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), + sizeof(*ptr_cid_info->cid_array), + GFP_KERNEL); if (!ptr_cid_info->cid_array) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory" diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 30379de98dcb..02a9b2d0d643 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -343,6 +343,7 @@ struct beiscsi_hba { spinlock_t async_pdu_lock; struct list_head hba_queue; #define BE_MAX_SESSION 2048 +#define BE_INVALID_CID 0xffff #define BE_SET_CID_TO_CRI(cri_index, cid) \ (phba->cid_to_cri_map[cid] = cri_index) #define BE_GET_CRI_FROM_CID(cid) 
(phba->cid_to_cri_map[cid]) -- cgit v1.2.3-59-g8ed1b From dd940972f36779577701f20965847e9b56b79a0e Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:56:04 +0530 Subject: scsi: be2iscsi: Reinit SGL handle, CID tables after TPE After TPE recovery, CID table needs to be repopulated as per CIDs in WRBQ creation responses. SGL handles table needs to be recreated for posting and its indices need to be resetted. This is achieved by calling beiscsi_cleanup_port when disabling and beiscsi_init_port in enabling port. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be_main.c | 61 ++++++++++++++--------------------------- 1 file changed, 20 insertions(+), 41 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index a8a23d61fd90..d96f706893a6 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3919,31 +3919,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) kfree(phba->phwi_ctrlr); } -static int beiscsi_init_controller(struct beiscsi_hba *phba) -{ - int ret; - - ret = beiscsi_get_memory(phba); - if (ret < 0) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : beiscsi_dev_probe -" - "Failed in beiscsi_alloc_memory\n"); - return ret; - } - - ret = hwi_init_controller(phba); - if (ret) - goto free_init; - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : Return success from beiscsi_init_controller"); - - return 0; - -free_init: - beiscsi_free_mem(phba); - return ret; -} - static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; @@ -4217,33 +4192,30 @@ static int beiscsi_init_port(struct beiscsi_hba *phba) { int ret; - ret = beiscsi_init_controller(phba); + ret = hwi_init_controller(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : beiscsi_dev_probe - Failed in" - "beiscsi_init_controller\n"); + "BM_%d : init controller failed\n"); return ret; } ret = beiscsi_init_sgl_handle(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : beiscsi_dev_probe - Failed in" - "beiscsi_init_sgl_handle\n"); - goto do_cleanup_ctrlr; + "BM_%d : init sgl handles failed\n"); + goto cleanup_port; } ret = hba_setup_cid_tbls(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Failed in hba_setup_cid_tbls\n"); + "BM_%d : setup CID table failed\n"); kfree(phba->io_sgl_hndl_base); kfree(phba->eh_sgl_hndl_base); - goto do_cleanup_ctrlr; + goto cleanup_port; } - return ret; -do_cleanup_ctrlr: +cleanup_port: hwi_cleanup_port(phba); return ret; } @@ -5403,10 +5375,10 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba) phba->shost->max_id = phba->params.cxns_per_ctrl; phba->shost->can_queue = phba->params.ios_per_ctrl; - ret = hwi_init_controller(phba); - if (ret) { + ret = beiscsi_init_port(phba); + if (ret < 0) { __beiscsi_log(phba, KERN_ERR, - "BM_%d : init controller failed %d\n", ret); + "BM_%d : init port failed\n"); goto disable_msix; } @@ -5512,6 +5484,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) cancel_work_sync(&pbe_eq->mcc_work); } hwi_cleanup_port(phba); + beiscsi_cleanup_port(phba); } static void beiscsi_sess_work(struct work_struct *work) @@ -5722,11 +5695,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, phba->shost->max_id = phba->params.cxns_per_ctrl; phba->shost->can_queue = phba->params.ios_per_ctrl; + ret = beiscsi_get_memory(phba); + 
if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : alloc host mem failed\n"); + goto free_port; + } + ret = beiscsi_init_port(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : beiscsi_dev_probe-" - "Failed in beiscsi_init_port\n"); + "BM_%d : init port failed\n"); + beiscsi_free_mem(phba); goto free_port; } @@ -5868,7 +5848,6 @@ static void beiscsi_remove(struct pci_dev *pcidev) /* free all resources */ destroy_workqueue(phba->wq); - beiscsi_cleanup_port(phba); beiscsi_free_mem(phba); /* ctrl uninit */ -- cgit v1.2.3-59-g8ed1b From 5fa7db2111dfcacfd4c986a5eb4d9568386154f4 Mon Sep 17 00:00:00 2001 From: Ketan Mukadam Date: Tue, 13 Dec 2016 15:56:05 +0530 Subject: scsi: be2iscsi: Add warning message for unsupported adapter Add a warning message to indicate obsolete/unsupported BE2 Adapter Family devices Signed-off-by: Ketan Mukadam Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be_main.c | 2 ++ drivers/scsi/be2iscsi/be_mgmt.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index d96f706893a6..6372613e7516 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5640,6 +5640,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, case OC_DEVICE_ID2: phba->generation = BE_GEN2; phba->iotask_fn = beiscsi_iotask; + dev_warn(&pcidev->dev, + "Obsolete/Unsupported BE2 Adapter Family\n"); break; case BE_DEVICE_ID2: case OC_DEVICE_ID3: diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 8acacd5ec796..2f6d5c2ac329 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1262,7 +1262,8 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, case BE_DEVICE_ID1: case OC_DEVICE_ID1: case OC_DEVICE_ID2: - return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n"); + return snprintf(buf, PAGE_SIZE, + "Obsolete/Unsupported BE2 Adapter Family\n"); break; case BE_DEVICE_ID2: case OC_DEVICE_ID3: -- cgit v1.2.3-59-g8ed1b From a5c1be7005e2992e3ae2ffa4e510e9bd4c2922e8 Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Tue, 13 Dec 2016 15:56:06 +0530 Subject: scsi: be2iscsi: Update driver version Version 11.2.1.0 Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be_main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 02a9b2d0d643..218857926566 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -36,7 +36,7 @@ #include #define DRV_NAME "be2iscsi" -#define BUILD_STR "11.2.0.0" +#define BUILD_STR "11.2.1.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" -- cgit v1.2.3-59-g8ed1b From 703e747a6b6fa51afa6632b6963569c654062de6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 16 Dec 2016 14:10:43 +0000 Subject: scsi: qedi: return via va_end to match corresponding va_start Although on most systems va_end is a no-op, it is good practice to use va_end on the function return path, especially since the va_start documenation states: "Each invocation of va_start() must be matched by a corresponding invocation of va_end() in the same function." 
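For illustration only (this sketch is not part of the patch, and the debug_enabled flag below is a hypothetical stand-in for the driver's qedi_dbg_log mask), the pattern the fix enforces is a single exit label so that every path out of the function reaches va_end():

#include <stdarg.h>
#include <stdio.h>

static int debug_enabled;	/* hypothetical stand-in for the logging mask */

static void dbg_warn(const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	if (!debug_enabled)
		goto ret;	/* skip the print, but still reach va_end() */
	vprintf(fmt, va);
ret:
	va_end(va);		/* matches the va_start() above on every path */
}
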
Found with static analysis by CoverityScan, CIDs 1389477-1389479 Signed-off-by: Colin Ian King Acked-by: Manish Rangankar Signed-off-by: Martin K. Petersen --- drivers/scsi/qedi/qedi_dbg.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c index 2bdedb9c39bc..8fd28b056f73 100644 --- a/drivers/scsi/qedi/qedi_dbg.c +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -52,7 +52,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, vaf.va = &va; if (!(qedi_dbg_log & QEDI_LOG_WARN)) - return; + goto ret; if (likely(qedi) && likely(qedi->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), @@ -60,6 +60,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, else pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); +ret: va_end(va); } @@ -80,7 +81,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, vaf.va = &va; if (!(qedi_dbg_log & QEDI_LOG_NOTICE)) - return; + goto ret; if (likely(qedi) && likely(qedi->pdev)) pr_notice("[%s]:[%s:%d]:%d: %pV", @@ -89,6 +90,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, else pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); +ret: va_end(va); } @@ -109,7 +111,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, vaf.va = &va; if (!(qedi_dbg_log & level)) - return; + goto ret; if (likely(qedi) && likely(qedi->pdev)) pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), @@ -117,6 +119,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, else pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); +ret: va_end(va); } -- cgit v1.2.3-59-g8ed1b From 364757e4682c0a046dc7da20d359a9a37dfff703 Mon Sep 17 00:00:00 2001 From: Cao jin Date: Mon, 19 Dec 2016 14:20:29 +0800 Subject: scsi: qla4xxx: comments correction Signed-off-by: Cao jin Acked-by: Nilesh Javali Signed-off-by: Martin K. Petersen --- drivers/scsi/qla4xxx/ql4_os.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 9fbb33fc90c7..ac52150d1569 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -9539,15 +9539,15 @@ exit_host_reset: * driver calls the following device driver's callbacks * * - Fatal Errors - link_reset - * - Non-Fatal Errors - driver's pci_error_detected() which + * - Non-Fatal Errors - driver's error_detected() which * returns CAN_RECOVER, NEED_RESET or DISCONNECT. * * PCI AER driver calls - * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled + * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() * returns RECOVERED or NEED_RESET if fw_hung * NEED_RESET - driver's slot_reset() * DISCONNECT - device is dead & cannot recover - * RECOVERED - driver's pci_resume() + * RECOVERED - driver's resume() */ static pci_ers_result_t qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) -- cgit v1.2.3-59-g8ed1b From 66cc820f9cbf12f1c6c84fb7392847b7fa5e76c4 Mon Sep 17 00:00:00 2001 From: Dolev Raviv Date: Thu, 22 Dec 2016 18:39:42 -0800 Subject: scsi: ufs: dump debug info during failures Inserts driver dumps for UFS Host Controller registers, Transfer Requests and Task Management Requests. The dumps will occur on driver initialization failure, ufshcd_abort() and on error handling path. Signed-off-by: Dolev Raviv Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 129 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/ufs/ufshci.h | 3 ++ 2 files changed, 132 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 20e5e5fb048c..6e2e0cd86ce3 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -94,6 +94,9 @@ _ret; \ }) +#define ufshcd_hex_dump(prefix_str, buf, len) \ +print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false) + static u32 ufs_query_desc_max_size[] = { QUERY_DESC_DEVICE_MAX_SIZE, QUERY_DESC_CONFIGURAION_MAX_SIZE, @@ -267,6 +270,77 @@ static inline void ufshcd_remove_non_printable(char *val) *val = ' '; } +static void ufshcd_print_host_regs(struct ufs_hba *hba) +{ + /* + * hex_dump reads its data without the readl macro. This might + * cause inconsistency issues on some platform, as the printed + * values may be from cache and not the most recent value. + * To know whether you are looking at an un-cached version verify + * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked + * during platform/pci probe function. + */ + ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE); + dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n", + hba->ufs_version, hba->capabilities); + dev_err(hba->dev, + "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n", + (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks); +} + +static +void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) +{ + struct ufshcd_lrb *lrbp; + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutrs) { + lrbp = &hba->lrb[tag]; + + dev_err(hba->dev, "UPIU[%d] - Transfer Request Descriptor\n", + tag); + ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr, + sizeof(struct utp_transfer_req_desc)); + dev_err(hba->dev, "UPIU[%d] - Request UPIU\n", tag); + ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr, + sizeof(struct utp_upiu_req)); + dev_err(hba->dev, "UPIU[%d] - Response UPIU\n", tag); + ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, + sizeof(struct utp_upiu_rsp)); + if (pr_prdt) { + int prdt_length = le16_to_cpu( + lrbp->utr_descriptor_ptr->prd_table_length); + + dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries\n", tag, + prdt_length); + ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, + sizeof(struct ufshcd_sg_entry) * + prdt_length); + } + } +} + +static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) +{ + struct utp_task_req_desc *tmrdp; + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutmrs) { + tmrdp = &hba->utmrdl_base_addr[tag]; + dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); + ufshcd_hex_dump("TM TRD: ", &tmrdp->header, + sizeof(struct request_desc_header)); + dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n", + tag); + ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu, + sizeof(struct utp_upiu_req)); + dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n", + tag); + ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu, + sizeof(struct utp_task_req_desc)); + } +} + /* * ufshcd_wait_for_register - wait for register value to change * @hba - per-adapter interface @@ -2391,6 +2465,32 @@ out: return -ENOMEM; } +/** + * ufshcd_print_pwr_info - print power params as saved in hba + * power info + * @hba: per-adapter instance + */ +static void ufshcd_print_pwr_info(struct ufs_hba *hba) +{ + static const char * const names[] = { + "INVALID MODE", + "FAST MODE", + "SLOW_MODE", + "INVALID MODE", + "FASTAUTO_MODE", + 
"SLOWAUTO_MODE", + "INVALID MODE", + }; + + dev_info(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", + __func__, + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, + names[hba->pwr_info.pwr_rx], + names[hba->pwr_info.pwr_tx], + hba->pwr_info.hs_rate); +} + /** * ufshcd_host_memory_configure - configure local reference block with * memory offsets @@ -2973,6 +3073,8 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, sizeof(struct ufs_pa_layer_attr)); } + ufshcd_print_pwr_info(hba); + return ret; } @@ -3656,6 +3758,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } /* end of switch */ + if (host_byte(result) != DID_OK) + ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); return result; } @@ -4327,6 +4431,22 @@ static void ufshcd_check_errors(struct ufs_hba *hba) scsi_block_requests(hba->host); hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED; + + /* dump controller state before resetting */ + if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) { + bool pr_prdt = !!(hba->saved_err & + SYSTEM_BUS_FATAL_ERROR); + + dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", + __func__, hba->saved_err, + hba->saved_uic_err); + + ufshcd_print_host_regs(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_tmrs(hba, hba->outstanding_tasks); + ufshcd_print_trs(hba, hba->outstanding_reqs, + pr_prdt); + } schedule_work(&hba->eh_work); } } @@ -4617,6 +4737,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) __func__, tag); } + /* Print Transfer Request of aborted task */ + dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); + scsi_print_command(hba->lrb[tag].cmd); + ufshcd_print_host_regs(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_trs(hba, 1 << tag, true); + lrbp = &hba->lrb[tag]; for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, @@ -5256,6 +5383,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) goto out; ufshcd_init_pwr_info(hba); + ufshcd_print_pwr_info(hba); /* set the default level for urgent bkops */ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; @@ -6795,6 +6923,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) err = ufshcd_hba_enable(hba); if (err) { dev_err(hba->dev, "Host controller enable failed\n"); + ufshcd_print_host_regs(hba); goto out_remove_scsi_host; } diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 8c5190e2e1c9..d14e9b965d1e 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -72,6 +72,9 @@ enum { REG_UIC_COMMAND_ARG_1 = 0x94, REG_UIC_COMMAND_ARG_2 = 0x98, REG_UIC_COMMAND_ARG_3 = 0x9C, + + UFSHCI_REG_SPACE_SIZE = 0xA0, + REG_UFS_CCAP = 0x100, REG_UFS_CRYPTOCAP = 0x104, -- cgit v1.2.3-59-g8ed1b From 7ff5ab4736333431311a4cbbfb574b155758b08c Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Thu, 22 Dec 2016 18:39:51 -0800 Subject: scsi: ufs: add tracing support This change adds the ftrace support for following: 1. UFS initialization time 2. Clock gating states 3. Clock scaling states 4. Power management APIs latency 5. BKOPs enable/disable Usage: echo 1 > /sys/kernel/debug/tracing/events/ufs/enable cat /sys/kernel/debug/tracing/trace_pipe Reviewed-by: Sahitya Tummala Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 85 ++++++++++++++++++--- include/trace/events/ufs.h | 185 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 258 insertions(+), 12 deletions(-) create mode 100644 include/trace/events/ufs.h diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 6e2e0cd86ce3..0b8c380392b9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -45,6 +45,9 @@ #include "ufs_quirks.h" #include "unipro.h" +#define CREATE_TRACE_POINTS +#include + #define UFSHCD_REQ_SENSE_SIZE 18 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ @@ -801,6 +804,8 @@ start: case REQ_CLKS_OFF: if (cancel_delayed_work(&hba->clk_gating.gate_work)) { hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); break; } /* @@ -811,6 +816,8 @@ start: case CLKS_OFF: scsi_block_requests(hba->host); hba->clk_gating.state = REQ_CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); schedule_work(&hba->clk_gating.ungate_work); /* * fall through to check if we should wait for this @@ -855,6 +862,8 @@ static void ufshcd_gate_work(struct work_struct *work) if (hba->clk_gating.is_suspended || (hba->clk_gating.state == REQ_CLKS_ON)) { hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); goto rel_lock; } @@ -870,6 +879,8 @@ static void ufshcd_gate_work(struct work_struct *work) if (ufshcd_can_hibern8_during_gating(hba)) { if (ufshcd_uic_hibern8_enter(hba)) { hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); goto out; } ufshcd_set_link_hibern8(hba); @@ -893,9 +904,11 @@ static void ufshcd_gate_work(struct work_struct *work) * new requests arriving before the current cancel work is done. 
*/ spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->clk_gating.state == REQ_CLKS_OFF) + if (hba->clk_gating.state == REQ_CLKS_OFF) { hba->clk_gating.state = CLKS_OFF; - + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); + } rel_lock: spin_unlock_irqrestore(hba->host->host_lock, flags); out: @@ -918,6 +931,7 @@ static void __ufshcd_release(struct ufs_hba *hba) return; hba->clk_gating.state = REQ_CLKS_OFF; + trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); schedule_delayed_work(&hba->clk_gating.gate_work, msecs_to_jiffies(hba->clk_gating.delay_ms)); } @@ -3932,6 +3946,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) } hba->auto_bkops_enabled = true; + trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); /* No need of URGENT_BKOPS exception from the device */ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); @@ -3982,6 +3997,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) } hba->auto_bkops_enabled = false; + trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); out: return err; } @@ -5377,6 +5393,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) static int ufshcd_probe_hba(struct ufs_hba *hba) { int ret; + ktime_t start = ktime_get(); ret = ufshcd_link_startup(hba); if (ret) @@ -5468,6 +5485,9 @@ out: ufshcd_hba_exit(hba); } + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->uic_link_state, hba->curr_dev_pwr_mode); return ret; } @@ -5817,11 +5837,14 @@ out: if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) clk_disable_unprepare(clki->clk); } - } else if (on) { + } else if (!ret && on) { spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); spin_unlock_irqrestore(hba->host->host_lock, flags); } + return ret; } @@ -6304,6 +6327,7 @@ disable_clks: __ufshcd_setup_clocks(hba, false, true); hba->clk_gating.state = CLKS_OFF; + trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); /* * Disable the host irq as host controller as there won't be any * host controller transaction expected till resume. @@ -6436,6 +6460,7 @@ out: int ufshcd_system_suspend(struct ufs_hba *hba) { int ret = 0; + ktime_t start = ktime_get(); if (!hba || !hba->is_powered) return 0; @@ -6462,6 +6487,9 @@ int ufshcd_system_suspend(struct ufs_hba *hba) ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); out: + trace_ufshcd_system_suspend(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->uic_link_state, hba->curr_dev_pwr_mode); if (!ret) hba->is_sys_suspended = true; return ret; @@ -6477,6 +6505,9 @@ EXPORT_SYMBOL(ufshcd_system_suspend); int ufshcd_system_resume(struct ufs_hba *hba) { + int ret = 0; + ktime_t start = ktime_get(); + if (!hba) return -EINVAL; @@ -6485,9 +6516,14 @@ int ufshcd_system_resume(struct ufs_hba *hba) * Let the runtime resume take care of resuming * if runtime suspended. 
*/ - return 0; - - return ufshcd_resume(hba, UFS_SYSTEM_PM); + goto out; + else + ret = ufshcd_resume(hba, UFS_SYSTEM_PM); +out: + trace_ufshcd_system_resume(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->uic_link_state, hba->curr_dev_pwr_mode); + return ret; } EXPORT_SYMBOL(ufshcd_system_resume); @@ -6501,13 +6537,21 @@ EXPORT_SYMBOL(ufshcd_system_resume); */ int ufshcd_runtime_suspend(struct ufs_hba *hba) { + int ret = 0; + ktime_t start = ktime_get(); + if (!hba) return -EINVAL; if (!hba->is_powered) - return 0; - - return ufshcd_suspend(hba, UFS_RUNTIME_PM); + goto out; + else + ret = ufshcd_suspend(hba, UFS_RUNTIME_PM); +out: + trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->uic_link_state, hba->curr_dev_pwr_mode); + return ret; } EXPORT_SYMBOL(ufshcd_runtime_suspend); @@ -6534,13 +6578,21 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend); */ int ufshcd_runtime_resume(struct ufs_hba *hba) { + int ret = 0; + ktime_t start = ktime_get(); + if (!hba) return -EINVAL; if (!hba->is_powered) - return 0; - - return ufshcd_resume(hba, UFS_RUNTIME_PM); + goto out; + else + ret = ufshcd_resume(hba, UFS_RUNTIME_PM); +out: + trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->uic_link_state, hba->curr_dev_pwr_mode); + return ret; } EXPORT_SYMBOL(ufshcd_runtime_resume); @@ -6684,6 +6736,11 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) clki->max_freq, ret); break; } + trace_ufshcd_clk_scaling(dev_name(hba->dev), + "scaled up", clki->name, + clki->curr_freq, + clki->max_freq); + clki->curr_freq = clki->max_freq; } else if (!scale_up && clki->min_freq) { @@ -6696,6 +6753,10 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) clki->min_freq, ret); break; } + trace_ufshcd_clk_scaling(dev_name(hba->dev), + "scaled down", clki->name, + clki->curr_freq, + clki->min_freq); clki->curr_freq = clki->min_freq; } } diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h new file mode 100644 index 000000000000..57743b6ed0b0 --- /dev/null +++ b/include/trace/events/ufs.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ufs + +#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_UFS_H + +#include + +#define UFS_LINK_STATES \ + EM(UIC_LINK_OFF_STATE) \ + EM(UIC_LINK_ACTIVE_STATE) \ + EMe(UIC_LINK_HIBERN8_STATE) + +#define UFS_PWR_MODES \ + EM(UFS_ACTIVE_PWR_MODE) \ + EM(UFS_SLEEP_PWR_MODE) \ + EMe(UFS_POWERDOWN_PWR_MODE) + +#define UFSCHD_CLK_GATING_STATES \ + EM(CLKS_OFF) \ + EM(CLKS_ON) \ + EM(REQ_CLKS_OFF) \ + EMe(REQ_CLKS_ON) + +/* Enums require being exported to userspace, for user tool parsing */ +#undef EM +#undef EMe +#define EM(a) TRACE_DEFINE_ENUM(a); +#define EMe(a) TRACE_DEFINE_ENUM(a); + +UFS_LINK_STATES; +UFS_PWR_MODES; +UFSCHD_CLK_GATING_STATES; + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. + */ +#undef EM +#undef EMe +#define EM(a) { a, #a }, +#define EMe(a) { a, #a } + +TRACE_EVENT(ufshcd_clk_gating, + + TP_PROTO(const char *dev_name, int state), + + TP_ARGS(dev_name, state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(int, state) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __entry->state = state; + ), + + TP_printk("%s: gating state changed to %s", + __get_str(dev_name), + __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES)) +); + +TRACE_EVENT(ufshcd_clk_scaling, + + TP_PROTO(const char *dev_name, const char *state, const char *clk, + u32 prev_state, u32 curr_state), + + TP_ARGS(dev_name, state, clk, prev_state, curr_state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(state, state) + __string(clk, clk) + __field(u32, prev_state) + __field(u32, curr_state) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __assign_str(state, state); + __assign_str(clk, clk); + __entry->prev_state = prev_state; + __entry->curr_state = curr_state; + ), + + TP_printk("%s: %s %s from %u to %u Hz", + __get_str(dev_name), __get_str(state), __get_str(clk), + __entry->prev_state, __entry->curr_state) +); + +TRACE_EVENT(ufshcd_auto_bkops_state, + + TP_PROTO(const char *dev_name, const char *state), + + TP_ARGS(dev_name, state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(state, state) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __assign_str(state, state); + ), + + TP_printk("%s: auto bkops - %s", + __get_str(dev_name), __get_str(state)) +); + +DECLARE_EVENT_CLASS(ufshcd_template, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + + TP_ARGS(dev_name, err, usecs, dev_state, link_state), + + TP_STRUCT__entry( + __field(s64, usecs) + __field(int, err) + __string(dev_name, dev_name) + __field(int, dev_state) + __field(int, link_state) + ), + + TP_fast_assign( + __entry->usecs = usecs; + __entry->err = err; + __assign_str(dev_name, dev_name); + __entry->dev_state = dev_state; + __entry->link_state = link_state; + ), + + TP_printk( + "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d", + __get_str(dev_name), + __entry->usecs, + __print_symbolic(__entry->dev_state, UFS_PWR_MODES), + __print_symbolic(__entry->link_state, UFS_LINK_STATES), + __entry->err + ) +); + +DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_system_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + 
TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_init, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); +#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include -- cgit v1.2.3-59-g8ed1b From e7d38257a40f880c6625fac33956d5e99dcd63a6 Mon Sep 17 00:00:00 2001 From: Dolev Raviv Date: Thu, 22 Dec 2016 18:40:07 -0800 Subject: scsi: ufs: fix multiple ufs spec violation When a command to a W-LU is timed out via scsi, error handling will treat it as any other LU and send commands such as START_STOP with wrong format or task abort. Those commands are illegal for W-LU according to the UFS spec. To solve it, when an error is recognized those steps are skipped and the last step, reset and restore process, is initiated. Signed-off-by: Dolev Raviv Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 0b8c380392b9..a6a4a4302ca9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -218,6 +218,7 @@ static struct ufs_dev_fix ufs_fixups[] = { static void ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); static int ufshcd_probe_hba(struct ufs_hba *hba); @@ -4730,6 +4731,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) host = cmd->device->host; hba = shost_priv(host); tag = cmd->request->tag; + lrbp = &hba->lrb[tag]; if (!ufshcd_valid_tag(hba, tag)) { dev_err(hba->dev, "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", @@ -4737,6 +4739,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) BUG(); } + /* + * Task abort to the device W-LUN is illegal. When this command + * will fail, due to spec violation, scsi err handling next step + * will be to send LU reset which, again, is a spec violation. + * To avoid these unnecessary/illegal step we skip to the last error + * handling stage: reset and restore. 
+ */ + if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) + return ufshcd_eh_host_reset_handler(cmd); + ufshcd_hold(hba, false); reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* If command is already aborted/completed, return SUCCESS */ @@ -4760,7 +4772,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) ufshcd_print_pwr_info(hba); ufshcd_print_trs(hba, 1 << tag, true); - lrbp = &hba->lrb[tag]; for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_QUERY_TASK, &resp); -- cgit v1.2.3-59-g8ed1b From b427411abb3d61c234cb7dd58bc9b9f92c0fd8cb Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Thu, 22 Dec 2016 18:40:39 -0800 Subject: scsi: ufs: Add sysfs node to dynamically control clock gating Provide an option to enable/disable clock gating during runtime. Write 1 or 0 to "clkgate_enable" sysfs node to enable/disable clock gating. Signed-off-by: Sahitya Tummala Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++- drivers/scsi/ufs/ufshcd.h | 4 ++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index a6a4a4302ca9..0da9e16317f5 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -970,6 +970,41 @@ static ssize_t ufshcd_clkgate_delay_store(struct device *dev, return count; } +static ssize_t ufshcd_clkgate_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled); +} + +static ssize_t ufshcd_clkgate_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long flags; + u32 value; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + value = !!value; + if (value == hba->clk_gating.is_enabled) + goto out; + + if (value) { + ufshcd_release(hba); + } else { + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.active_reqs++; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + + hba->clk_gating.is_enabled = value; +out: + return count; +} + static void ufshcd_init_clk_gating(struct ufs_hba *hba) { if (!ufshcd_is_clkgating_allowed(hba)) @@ -979,13 +1014,23 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba) INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); + hba->clk_gating.is_enabled = true; + hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; sysfs_attr_init(&hba->clk_gating.delay_attr.attr); hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; - hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR; + hba->clk_gating.delay_attr.attr.mode = 0644; if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); + + hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; + hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; + sysfs_attr_init(&hba->clk_gating.enable_attr.attr); + hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; + hba->clk_gating.enable_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); } static void ufshcd_exit_clk_gating(struct 
ufs_hba *hba) @@ -993,6 +1038,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) if (!ufshcd_is_clkgating_allowed(hba)) return; device_remove_file(hba->dev, &hba->clk_gating.delay_attr); + device_remove_file(hba->dev, &hba->clk_gating.enable_attr); cancel_work_sync(&hba->clk_gating.ungate_work); cancel_delayed_work_sync(&hba->clk_gating.gate_work); } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 08cd26ed2382..0882ba67ccb1 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -320,6 +320,8 @@ enum clk_gating_state { * @is_suspended: clk gating is suspended when set to 1 which can be used * during suspend/resume * @delay_attr: sysfs attribute to control delay_attr + * @enable_attr: sysfs attribute to enable/disable clock gating + * @is_enabled: Indicates the current status of clock gating * @active_reqs: number of requests that are pending and should be waited for * completion before gating clocks. */ @@ -330,6 +332,8 @@ struct ufs_clk_gating { unsigned long delay_ms; bool is_suspended; struct device_attribute delay_attr; + struct device_attribute enable_attr; + bool is_enabled; int active_reqs; }; -- cgit v1.2.3-59-g8ed1b From fcb0c4b08a18fe35c3adc594d3edc8e335ff7960 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Thu, 22 Dec 2016 18:40:50 -0800 Subject: scsi: ufs: Add sysfs node to dynamically control clock scaling Provide an option to enable/disable clock scaling during runtime. Write 1/0 to "clkscale_enable" sysfs node to enable/disable clock scaling. Signed-off-by: Sahitya Tummala Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 95 +++++++++++++++++++++++++++++++++++++++-------- drivers/scsi/ufs/ufshcd.h | 4 +- 2 files changed, 82 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 0da9e16317f5..4468ba059696 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -230,6 +230,9 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); +static void ufshcd_resume_clkscaling(struct ufs_hba *hba); +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *desired_pwr_mode); @@ -713,16 +716,58 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) { - if (ufshcd_is_clkscaling_enabled(hba)) { - devfreq_suspend_device(hba->devfreq); - hba->clk_scaling.window_start_t = 0; - } + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; } static void ufshcd_resume_clkscaling(struct ufs_hba *hba) { - if (ufshcd_is_clkscaling_enabled(hba)) - devfreq_resume_device(hba->devfreq); + devfreq_resume_device(hba->devfreq); +} + +static ssize_t ufshcd_clkscale_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed); +} + +static ssize_t ufshcd_clkscale_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) 
+{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int err; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + value = !!value; + if (value == hba->clk_scaling.is_allowed) + goto out; + + pm_runtime_get_sync(hba->dev); + ufshcd_hold(hba, false); + + if (value) { + ufshcd_resume_clkscaling(hba); + } else { + ufshcd_suspend_clkscaling(hba); + err = ufshcd_scale_clks(hba, true); + if (err) + dev_err(hba->dev, "%s: failed to scale clocks up %d\n", + __func__, err); + } + hba->clk_scaling.is_allowed = value; + + ufshcd_release(hba); + pm_runtime_put_sync(hba->dev); +out: + return count; } static void ufshcd_ungate_work(struct work_struct *work) @@ -758,7 +803,8 @@ static void ufshcd_ungate_work(struct work_struct *work) hba->clk_gating.is_suspended = false; } unblock_reqs: - ufshcd_resume_clkscaling(hba); + if (hba->clk_scaling.is_allowed) + ufshcd_resume_clkscaling(hba); scsi_unblock_requests(hba->host); } @@ -1046,7 +1092,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) /* Must be called with host lock acquired */ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { - if (!ufshcd_is_clkscaling_enabled(hba)) + if (!ufshcd_is_clkscaling_supported(hba)) return; if (!hba->clk_scaling.is_busy_started) { @@ -1059,7 +1105,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) { struct ufs_clk_scaling *scaling = &hba->clk_scaling; - if (!ufshcd_is_clkscaling_enabled(hba)) + if (!ufshcd_is_clkscaling_supported(hba)) return; if (!hba->outstanding_reqs && scaling->is_busy_started) { @@ -5526,12 +5572,15 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) pm_runtime_put_sync(hba->dev); } + /* Resume devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + ufshcd_resume_clkscaling(hba); + hba->clk_scaling.is_allowed = true; + } + if (!hba->is_init_prefetch) hba->is_init_prefetch = true; - /* Resume devfreq after UFS device is detected */ - ufshcd_resume_clkscaling(hba); - out: /* * If we failed to initialize the device or the device is not @@ -6484,7 +6533,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_urgent_bkops(hba); hba->clk_gating.is_suspended = false; - ufshcd_resume_clkscaling(hba); + if (hba->clk_scaling.is_allowed) + ufshcd_resume_clkscaling(hba); /* Schedule clock gating in case of no access to UFS device yet */ ufshcd_release(hba); @@ -6702,6 +6752,8 @@ void ufshcd_remove(struct ufs_hba *hba) ufshcd_hba_stop(hba, true); ufshcd_exit_clk_gating(hba); + if (ufshcd_is_clkscaling_supported(hba)) + device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); ufshcd_hba_exit(hba); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -6835,7 +6887,7 @@ static int ufshcd_devfreq_target(struct device *dev, bool release_clk_hold = false; unsigned long irq_flags; - if (!ufshcd_is_clkscaling_enabled(hba)) + if (!ufshcd_is_clkscaling_supported(hba)) return -EINVAL; spin_lock_irqsave(hba->host->host_lock, irq_flags); @@ -6883,7 +6935,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, struct ufs_clk_scaling *scaling = &hba->clk_scaling; unsigned long flags; - if (!ufshcd_is_clkscaling_enabled(hba)) + if (!ufshcd_is_clkscaling_supported(hba)) return -EINVAL; memset(stat, 0, sizeof(*stat)); @@ -6919,6 +6971,16 @@ static struct devfreq_dev_profile ufs_devfreq_profile = { .target = ufshcd_devfreq_target, .get_dev_status = ufshcd_devfreq_get_dev_status, }; +static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba) +{ + hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; + 
hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; + sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); + hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; + hba->clk_scaling.enable_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); +} /** * ufshcd_init - Driver initialization routine @@ -7045,7 +7107,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto out_remove_scsi_host; } - if (ufshcd_is_clkscaling_enabled(hba)) { + if (ufshcd_is_clkscaling_supported(hba)) { hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile, "simple_ondemand", NULL); if (IS_ERR(hba->devfreq)) { @@ -7056,6 +7118,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) } /* Suspend devfreq until the UFS device is detected */ ufshcd_suspend_clkscaling(hba); + ufshcd_clkscaling_init_sysfs(hba); } /* Hold auto suspend until async scan completes */ diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 0882ba67ccb1..bdd2284967f2 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -342,6 +342,8 @@ struct ufs_clk_scaling { bool is_busy_started; unsigned long tot_busy_t; unsigned long window_start_t; + struct device_attribute enable_attr; + bool is_allowed; }; /** @@ -580,7 +582,7 @@ static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba) { return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; } -static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba) +static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba) { return hba->caps & UFSHCD_CAP_CLK_SCALING; } -- cgit v1.2.3-59-g8ed1b From 09690d5a6ae1b7e4cb5ac429c311b99d09352c12 Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Thu, 22 Dec 2016 18:41:00 -0800 Subject: scsi: ufs: provide sysfs attribute to select the PM level This patch provides the sysfs attribute to choose the power management level for UFS runtime and system suspend. Reviewed-by: Sujit Reddy Thumma Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/ufs/ufshcd.h | 2 + 2 files changed, 146 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 4468ba059696..3036f606545d 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -686,6 +686,28 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 
0 : 1; } +static const char *ufschd_uic_link_state_to_string( + enum uic_link_state state) +{ + switch (state) { + case UIC_LINK_OFF_STATE: return "OFF"; + case UIC_LINK_ACTIVE_STATE: return "ACTIVE"; + case UIC_LINK_HIBERN8_STATE: return "HIBERN8"; + default: return "UNKNOWN"; + } +} + +static const char *ufschd_ufs_dev_pwr_mode_to_string( + enum ufs_dev_pwr_mode state) +{ + switch (state) { + case UFS_ACTIVE_PWR_MODE: return "ACTIVE"; + case UFS_SLEEP_PWR_MODE: return "SLEEP"; + case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN"; + default: return "UNKNOWN"; + } +} + u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) { /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ @@ -6709,6 +6731,127 @@ int ufshcd_runtime_idle(struct ufs_hba *hba) } EXPORT_SYMBOL(ufshcd_runtime_idle); +static inline ssize_t ufshcd_pm_lvl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count, + bool rpm) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long flags, value; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) + return -EINVAL; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (rpm) + hba->rpm_lvl = value; + else + hba->spm_lvl = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static ssize_t ufshcd_rpm_lvl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int curr_len; + u8 lvl; + + curr_len = snprintf(buf, PAGE_SIZE, + "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n", + hba->rpm_lvl, + ufschd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[hba->rpm_lvl].dev_state), + ufschd_uic_link_state_to_string( + ufs_pm_lvl_states[hba->rpm_lvl].link_state)); + + curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len), + "\nAll available Runtime PM levels info:\n"); + for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) + curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len), + "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n", + lvl, + ufschd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[lvl].dev_state), + ufschd_uic_link_state_to_string( + ufs_pm_lvl_states[lvl].link_state)); + + return curr_len; +} + +static ssize_t ufshcd_rpm_lvl_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return ufshcd_pm_lvl_store(dev, attr, buf, count, true); +} + +static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba) +{ + hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show; + hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store; + sysfs_attr_init(&hba->rpm_lvl_attr.attr); + hba->rpm_lvl_attr.attr.name = "rpm_lvl"; + hba->rpm_lvl_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->rpm_lvl_attr)) + dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n"); +} + +static ssize_t ufshcd_spm_lvl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int curr_len; + u8 lvl; + + curr_len = snprintf(buf, PAGE_SIZE, + "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n", + hba->spm_lvl, + ufschd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[hba->spm_lvl].dev_state), + ufschd_uic_link_state_to_string( + ufs_pm_lvl_states[hba->spm_lvl].link_state)); + + curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len), + "\nAll available System PM levels info:\n"); + for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) + curr_len += snprintf((buf + 
curr_len), (PAGE_SIZE - curr_len), + "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n", + lvl, + ufschd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[lvl].dev_state), + ufschd_uic_link_state_to_string( + ufs_pm_lvl_states[lvl].link_state)); + + return curr_len; +} + +static ssize_t ufshcd_spm_lvl_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return ufshcd_pm_lvl_store(dev, attr, buf, count, false); +} + +static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba) +{ + hba->spm_lvl_attr.show = ufshcd_spm_lvl_show; + hba->spm_lvl_attr.store = ufshcd_spm_lvl_store; + sysfs_attr_init(&hba->spm_lvl_attr.attr); + hba->spm_lvl_attr.attr.name = "spm_lvl"; + hba->spm_lvl_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->spm_lvl_attr)) + dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n"); +} + +static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba) +{ + ufshcd_add_rpm_lvl_sysfs_nodes(hba); + ufshcd_add_spm_lvl_sysfs_nodes(hba); +} + /** * ufshcd_shutdown - shutdown routine * @hba: per adapter instance @@ -7133,6 +7276,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_set_ufs_dev_active(hba); async_schedule(ufshcd_async_scan, hba); + ufshcd_add_sysfs_nodes(hba); return 0; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index bdd2284967f2..787323be0919 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -435,6 +435,8 @@ struct ufs_hba { enum ufs_pm_level rpm_lvl; /* Desired UFS power management level during system PM */ enum ufs_pm_level spm_lvl; + struct device_attribute rpm_lvl_attr; + struct device_attribute spm_lvl_attr; int pm_op_in_progress; struct ufshcd_lrb *lrb; -- cgit v1.2.3-59-g8ed1b From 0c8f75869ed8941fcbf21643fced7bc652f68f14 Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Thu, 22 Dec 2016 18:41:11 -0800 Subject: scsi: ufs: set default UFS power management level UFS device and link can be put in multiple different low power modes hence UFS driver supports multiple different low power modes. This change sets the default UFS power management level which should put the link hibernate state and device in sleep state. This default power management level gives good power savings with relatively less enter/exit latencies. Reviewed-by: Yaniv Gardi Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 3036f606545d..9c16e08c3742 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -191,6 +191,22 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) return ufs_pm_lvl_states[lvl].link_state; } +static inline enum ufs_pm_level +ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, + enum uic_link_state link_state) +{ + enum ufs_pm_level lvl; + + for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) { + if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) && + (ufs_pm_lvl_states[lvl].link_state == link_state)) + return lvl; + } + + /* if no match found, return the level 0 */ + return UFS_PM_LVL_0; +} + static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, @@ -7264,6 +7280,18 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_clkscaling_init_sysfs(hba); } + /* + * Set the default power management level for runtime and system PM. + * Default power saving mode is to keep UFS link in Hibern8 state + * and UFS device in sleep state. + */ + hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); -- cgit v1.2.3-59-g8ed1b From 4e768e7645ec4ffa92ee163643777b261ae97142 Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Thu, 22 Dec 2016 18:41:22 -0800 Subject: scsi: ufs: add capability to keep auto bkops always enabled UFS device requires to perform bkops (back ground operations) periodically but host can control (via auto-bkops parameter of device) when device can perform bkops based on its performance requirements. In general, host would like to enable the device's auto-bkops only when it's not doing any regular data transfer but sometimes device may not behave properly if host keeps the auto-bkops disabled. This change adds the capability to let the device auto-bkops always enabled except suspend. Reviewed-by: Sahitya Tummala Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 33 ++++++++++++++++++++++----------- drivers/scsi/ufs/ufshcd.h | 13 +++++++++++++ 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9c16e08c3742..10840306f663 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -4134,18 +4134,25 @@ out: } /** - * ufshcd_force_reset_auto_bkops - force enable of auto bkops + * ufshcd_force_reset_auto_bkops - force reset auto bkops state * @hba: per adapter instance * * After a device reset the device may toggle the BKOPS_EN flag * to default value. The s/w tracking variables should be updated - * as well. Do this by forcing enable of auto bkops. + * as well. This function would change the auto-bkops state based on + * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND. 
*/ -static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) +static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) { - hba->auto_bkops_enabled = false; - hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; - ufshcd_enable_auto_bkops(hba); + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { + hba->auto_bkops_enabled = false; + hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; + ufshcd_enable_auto_bkops(hba); + } else { + hba->auto_bkops_enabled = true; + hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; + ufshcd_disable_auto_bkops(hba); + } } static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) @@ -6564,11 +6571,15 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto set_old_link_state; } - /* - * If BKOPs operations are urgently needed at this moment then - * keep auto-bkops enabled or else disable it. - */ - ufshcd_urgent_bkops(hba); + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) + ufshcd_enable_auto_bkops(hba); + else + /* + * If BKOPs operations are urgently needed at this moment then + * keep auto-bkops enabled or else disable it. + */ + ufshcd_urgent_bkops(hba); + hba->clk_gating.is_suspended = false; if (hba->clk_scaling.is_allowed) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 787323be0919..f25d4684ef0e 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -566,6 +566,14 @@ struct ufs_hba { * CAUTION: Enabling this might reduce overall UFS throughput. */ #define UFSHCD_CAP_INTR_AGGR (1 << 4) + /* + * This capability allows the device auto-bkops to be always enabled + * except during suspend (both runtime and suspend). + * Enabling this capability means that device will always be allowed + * to do background operation when it's active but it might degrade + * the performance of ongoing read/write operations. + */ +#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -663,6 +671,11 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba) BUG_ON(!hba); return hba->priv; } +static inline bool ufshcd_keep_autobkops_enabled_except_suspend( + struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND; +} extern int ufshcd_runtime_suspend(struct ufs_hba *hba); extern int ufshcd_runtime_resume(struct ufs_hba *hba); -- cgit v1.2.3-59-g8ed1b From d2aebb9b269b3e094b2d2d03121c3863b683a25b Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Thu, 22 Dec 2016 18:41:33 -0800 Subject: scsi: ufs: fix setting init power mode Immediately after successful UFS link startup, UFS link power mode would be in PWM-G1, 1-lane, SLOW-AUTO mode. But currently we are doing few of the DME local/peer attributes access before setting the "hba->pwr_info" to default power mode. If we are doing link startup as part of error recovery then old power mode might be set to FAST mode and doing DME peer access (after link startup but before updating "hba->pwr_info" to default power mode) unintentionally tries to switch from FAST to FAST_AUTO mode (if UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE quirk is enabled). Above issue is fixed by setting the default power mode immediately after successful link startup. Reviewed-by: Sahitya Tummala Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 10840306f663..184106fa21da 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3512,6 +3512,10 @@ link_startup: goto link_startup; } + /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ + ufshcd_init_pwr_info(hba); + ufshcd_print_pwr_info(hba); + if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { ret = ufshcd_disable_device_tx_lcc(hba); if (ret) @@ -5547,9 +5551,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; - ufshcd_init_pwr_info(hba); - ufshcd_print_pwr_info(hba); - /* set the default level for urgent bkops */ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; hba->is_urgent_bkops_lvl_checked = false; -- cgit v1.2.3-59-g8ed1b From 911a0771b6fa7bac5eae753c17c87ecb77c77283 Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Thu, 22 Dec 2016 18:41:48 -0800 Subject: scsi: ufs: add time profiling support This patch adds the profiling support for some of the time critical operations like hibern8 enter/exit, clock gating & clock scaling. Reviewed-by: Venkat Gopalakrishnan Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 24 ++++++++++++++++++++++++ include/trace/events/ufs.h | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 184106fa21da..58c4df405528 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3021,11 +3021,14 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) { int ret; struct uic_command uic_cmd = {0}; + ktime_t start = ktime_get(); ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", + ktime_to_us(ktime_sub(ktime_get(), start)), ret); if (ret) { dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", @@ -3061,11 +3064,15 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) { struct uic_command uic_cmd = {0}; int ret; + ktime_t start = ktime_get(); ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", + ktime_to_us(ktime_sub(ktime_get(), start)), ret); + if (ret) { dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", __func__, ret); @@ -5950,6 +5957,8 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head; unsigned long flags; + ktime_t start = ktime_get(); + bool clk_state_changed = false; if (!head || list_empty(head)) goto out; @@ -5963,6 +5972,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) continue; + clk_state_changed = on ^ clki->enabled; if (on && !clki->enabled) { ret = clk_prepare_enable(clki->clk); if (ret) { @@ -5997,6 +6007,10 @@ out: spin_unlock_irqrestore(hba->host->host_lock, flags); } + if (clk_state_changed) + trace_ufshcd_profile_clk_gating(dev_name(hba->dev), + (on ? 
"on" : "off"), + ktime_to_us(ktime_sub(ktime_get(), start)), ret); return ret; } @@ -6996,6 +7010,8 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) int ret = 0; struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head; + ktime_t start = ktime_get(); + bool clk_state_changed = false; if (!head || list_empty(head)) goto out; @@ -7009,6 +7025,8 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) if (scale_up && clki->max_freq) { if (clki->curr_freq == clki->max_freq) continue; + + clk_state_changed = true; ret = clk_set_rate(clki->clk, clki->max_freq); if (ret) { dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", @@ -7026,6 +7044,8 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) } else if (!scale_up && clki->min_freq) { if (clki->curr_freq == clki->min_freq) continue; + + clk_state_changed = true; ret = clk_set_rate(clki->clk, clki->min_freq); if (ret) { dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", @@ -7047,6 +7067,10 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); out: + if (clk_state_changed) + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), + (scale_up ? "up" : "down"), + ktime_to_us(ktime_sub(ktime_get(), start)), ret); return ret; } diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 57743b6ed0b0..e9634dff51a8 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -123,6 +123,46 @@ TRACE_EVENT(ufshcd_auto_bkops_state, __get_str(dev_name), __get_str(state)) ); +DECLARE_EVENT_CLASS(ufshcd_profiling_template, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + + TP_ARGS(dev_name, profile_info, time_us, err), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(profile_info, profile_info) + __field(s64, time_us) + __field(int, err) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __assign_str(profile_info, profile_info); + __entry->time_us = time_us; + __entry->err = err; + ), + + TP_printk("%s: %s: took %lld usecs, err %d", + __get_str(dev_name), __get_str(profile_info), + __entry->time_us, __entry->err) +); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + DECLARE_EVENT_CLASS(ufshcd_template, TP_PROTO(const char *dev_name, int err, s64 usecs, int dev_state, int link_state), -- cgit v1.2.3-59-g8ed1b From 1a07f2d96eb9085396cbfb2d6d42d4003aa2934a Mon Sep 17 00:00:00 2001 From: Lee Susman Date: Thu, 22 Dec 2016 18:42:03 -0800 Subject: scsi: ufs: add trace event for ufs commands Use the ftrace infrastructure to conditionally trace ufs command events. New trace event is created, which samples the following ufs command data: - device name - optional identification string - task tag - doorbell register - number of transfer bytes - interrupt status register - request start LBA - command opcode Currently we only fully trace read(10) and write(10) commands. 
All other commands which pass through ufshcd_send_command() will be printed with "-1" in the lba and transfer_len fields. Usage: echo 1 > /sys/kernel/debug/tracing/events/ufs/enable cat /sys/kernel/debug/tracing/trace_pipe Signed-off-by: Lee Susman Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 42 +++++++++++++++++++++++++++++++++++++++++- include/trace/events/ufs.h | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 58c4df405528..2c88757eb701 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -293,6 +293,41 @@ static inline void ufshcd_remove_non_printable(char *val) *val = ' '; } +static void ufshcd_add_command_trace(struct ufs_hba *hba, + unsigned int tag, const char *str) +{ + sector_t lba = -1; + u8 opcode = 0; + u32 intr, doorbell; + struct ufshcd_lrb *lrbp; + int transfer_len = -1; + + if (!trace_ufshcd_command_enabled()) + return; + + lrbp = &hba->lrb[tag]; + + if (lrbp->cmd) { /* data phase exists */ + opcode = (u8)(*lrbp->cmd->cmnd); + if ((opcode == READ_10) || (opcode == WRITE_10)) { + /* + * Currently we only fully trace read(10) and write(10) + * commands + */ + if (lrbp->cmd->request && lrbp->cmd->request->bio) + lba = + lrbp->cmd->request->bio->bi_iter.bi_sector; + transfer_len = be32_to_cpu( + lrbp->ucd_req_ptr->sc.exp_data_transfer_len); + } + } + + intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + trace_ufshcd_command(dev_name(hba->dev), str, tag, + doorbell, transfer_len, intr, lba, opcode); +} + static void ufshcd_print_host_regs(struct ufs_hba *hba) { /* @@ -1166,6 +1201,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); + ufshcd_add_command_trace(hba, task_tag, "send"); } /** @@ -3955,6 +3991,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, lrbp = &hba->lrb[index]; cmd = lrbp->cmd; if (cmd) { + ufshcd_add_command_trace(hba, index, "complete"); result = ufshcd_transfer_rsp_status(hba, lrbp); scsi_dma_unmap(cmd); cmd->result = result; @@ -3966,8 +4003,11 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, __ufshcd_release(hba); } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { - if (hba->dev_cmd.complete) + if (hba->dev_cmd.complete) { + ufshcd_add_command_trace(hba, index, + "dev_complete"); complete(hba->dev_cmd.complete); + } } } diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index e9634dff51a8..bf6f82673492 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -219,6 +219,44 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init, TP_PROTO(const char *dev_name, int err, s64 usecs, int dev_state, int link_state), TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +TRACE_EVENT(ufshcd_command, + TP_PROTO(const char *dev_name, const char *str, unsigned int tag, + u32 doorbell, int transfer_len, u32 intr, u64 lba, + u8 opcode), + + TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(str, str) + __field(unsigned int, tag) + __field(u32, doorbell) + __field(int, transfer_len) + __field(u32, intr) + __field(u64, lba) + __field(u8, opcode) + ), + + TP_fast_assign( + 
__assign_str(dev_name, dev_name); + __assign_str(str, str); + __entry->tag = tag; + __entry->doorbell = doorbell; + __entry->transfer_len = transfer_len; + __entry->intr = intr; + __entry->lba = lba; + __entry->opcode = opcode; + ), + + TP_printk( + "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x", + __get_str(str), __get_str(dev_name), __entry->tag, + __entry->doorbell, __entry->transfer_len, + __entry->intr, __entry->lba, (u32)__entry->opcode + ) +); + #endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */ /* This part must be outside protection */ -- cgit v1.2.3-59-g8ed1b From ff8e20c66249210563aaea5382b08c94933f720d Mon Sep 17 00:00:00 2001 From: Dolev Raviv Date: Thu, 22 Dec 2016 18:42:18 -0800 Subject: scsi: ufs: Improve fatal error logs Errors such as UIC error, illegal OCS values, and others may require more information for debugging. Such information could be hibern8 events, events sequences, recoverable errors, error history, and more. This patch improves tracking of important errors and events in debug level to be enabled when debugging a such issues. It includes: * UIC error history * Successful hibern8 events * Successful command after hibern8 exit * Clk-freq info * Failed device command * Infrastructure for dumping host controller debug information Signed-off-by: Dolev Raviv Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 203 ++++++++++++++++++++++++++++++++++++---------- drivers/scsi/ufs/ufshcd.h | 47 +++++++++++ 2 files changed, 208 insertions(+), 42 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 2c88757eb701..be6322e38d74 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -328,6 +328,37 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, doorbell, transfer_len, intr, lba, opcode); } +static void ufshcd_print_clk_freqs(struct ufs_hba *hba) +{ + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + + if (!head || list_empty(head)) + return; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq && + clki->max_freq) + dev_err(hba->dev, "clk: %s, rate: %u\n", + clki->name, clki->curr_freq); + } +} + +static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, + struct ufs_uic_err_reg_hist *err_hist, char *err_name) +{ + int i; + + for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) { + int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH; + + if (err_hist->reg[p] == 0) + continue; + dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i, + err_hist->reg[p], ktime_to_us(err_hist->tstamp[p])); + } +} + static void ufshcd_print_host_regs(struct ufs_hba *hba) { /* @@ -344,6 +375,21 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba) dev_err(hba->dev, "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n", (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks); + dev_err(hba->dev, + "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n", + ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), + hba->ufs_stats.hibern8_exit_cnt); + + ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err"); + ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err"); + ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err"); + ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err"); + ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err"); + + ufshcd_print_clk_freqs(hba); + + if (hba->vops && 
hba->vops->dbg_register_dump) + hba->vops->dbg_register_dump(hba); } static @@ -355,22 +401,30 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) for_each_set_bit(tag, &bitmap, hba->nutrs) { lrbp = &hba->lrb[tag]; - dev_err(hba->dev, "UPIU[%d] - Transfer Request Descriptor\n", - tag); + dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", + tag, ktime_to_us(lrbp->issue_time_stamp)); + dev_err(hba->dev, + "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", + tag, (u64)lrbp->utrd_dma_addr); + ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr, sizeof(struct utp_transfer_req_desc)); - dev_err(hba->dev, "UPIU[%d] - Request UPIU\n", tag); + dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, + (u64)lrbp->ucd_req_dma_addr); ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr, sizeof(struct utp_upiu_req)); - dev_err(hba->dev, "UPIU[%d] - Response UPIU\n", tag); + dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, + (u64)lrbp->ucd_rsp_dma_addr); ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, sizeof(struct utp_upiu_rsp)); if (pr_prdt) { int prdt_length = le16_to_cpu( lrbp->utr_descriptor_ptr->prd_table_length); - dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries\n", tag, - prdt_length); + dev_err(hba->dev, + "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", + tag, prdt_length, + (u64)lrbp->ucd_prdt_dma_addr); ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, sizeof(struct ufshcd_sg_entry) * prdt_length); @@ -399,6 +453,32 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) } } +/** + * ufshcd_print_pwr_info - print power params as saved in hba + * power info + * @hba: per-adapter instance + */ +static void ufshcd_print_pwr_info(struct ufs_hba *hba) +{ + static const char * const names[] = { + "INVALID MODE", + "FAST MODE", + "SLOW_MODE", + "INVALID MODE", + "FASTAUTO_MODE", + "SLOWAUTO_MODE", + "INVALID MODE", + }; + + dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", + __func__, + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, + names[hba->pwr_info.pwr_rx], + names[hba->pwr_info.pwr_tx], + hba->pwr_info.hs_rate); +} + /* * ufshcd_wait_for_register - wait for register value to change * @hba - per-adapter interface @@ -1196,6 +1276,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { + hba->lrb[task_tag].issue_time_stamp = ktime_get(); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); @@ -1877,6 +1958,7 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) int resp; int err = 0; + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); switch (resp) { @@ -2646,32 +2728,6 @@ out: return -ENOMEM; } -/** - * ufshcd_print_pwr_info - print power params as saved in hba - * power info - * @hba: per-adapter instance - */ -static void ufshcd_print_pwr_info(struct ufs_hba *hba) -{ - static const char * const names[] = { - "INVALID MODE", - "FAST MODE", - "SLOW_MODE", - "INVALID MODE", - "FASTAUTO_MODE", - "SLOWAUTO_MODE", - "INVALID MODE", - }; - - dev_info(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", - __func__, - hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, - hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, - names[hba->pwr_info.pwr_rx], - 
names[hba->pwr_info.pwr_tx], - hba->pwr_info.hs_rate); -} - /** * ufshcd_host_memory_configure - configure local reference block with * memory offsets @@ -2734,12 +2790,19 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) } hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); + hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr + + (i * sizeof(struct utp_transfer_req_desc)); hba->lrb[i].ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i); + hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr; hba->lrb[i].ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; + hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr + + response_offset; hba->lrb[i].ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; + hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr + + prdt_offset; } } @@ -2763,7 +2826,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) ret = ufshcd_send_uic_cmd(hba, &uic_cmd); if (ret) - dev_err(hba->dev, + dev_dbg(hba->dev, "dme-link-startup: error code %d\n", ret); return ret; } @@ -3113,9 +3176,12 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", __func__, ret); ret = ufshcd_link_recovery(hba); - } else + } else { ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, POST_CHANGE); + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); + hba->ufs_stats.hibern8_exit_cnt++; + } return ret; } @@ -3885,7 +3951,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) switch (ocs) { case OCS_SUCCESS: result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); - + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); switch (result) { case UPIU_TRANSACTION_RESPONSE: /* @@ -3946,7 +4012,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) default: result |= DID_ERROR << 16; dev_err(hba->dev, - "OCS error from controller = %x\n", ocs); + "OCS error from controller = %x for tag %d\n", + ocs, lrbp->task_tag); + ufshcd_print_host_regs(hba); break; } /* end of switch */ @@ -4555,6 +4623,14 @@ out: pm_runtime_put_sync(hba->dev); } +static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist, + u32 reg) +{ + reg_hist->reg[reg_hist->pos] = reg; + reg_hist->tstamp[reg_hist->pos] = ktime_get(); + reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH; +} + /** * ufshcd_update_uic_error - check and set fatal UIC error flags. * @hba: per-adapter instance @@ -4567,15 +4643,20 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba) reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); /* Ignore LINERESET indication, as this is not an error */ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && - (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) + (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { /* * To know whether this error is fatal or not, DB timeout * must be checked but this error is handled separately. 
*/ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); + ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg); + } /* PA_INIT_ERROR is fatal and needs UIC reset */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); + if (reg) + ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg); + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; else if (hba->dev_quirks & @@ -4589,16 +4670,22 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba) /* UIC NL/TL/DME errors needs software retry */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); - if (reg) + if (reg) { + ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg); hba->uic_error |= UFSHCD_UIC_NL_ERROR; + } reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); - if (reg) + if (reg) { + ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg); hba->uic_error |= UFSHCD_UIC_TL_ERROR; + } reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); - if (reg) + if (reg) { + ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg); hba->uic_error |= UFSHCD_UIC_DME_ERROR; + } dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", __func__, hba->uic_error); @@ -4965,12 +5052,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) UFS_QUERY_TASK, &resp); if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { /* cmd pending in the device */ + dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", + __func__, tag); break; } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { /* * cmd not pending in the device, check if it is * in transition. */ + dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + __func__, tag); reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); if (reg & (1 << tag)) { /* sleep for max. 200us to stabilize */ @@ -4978,8 +5069,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) continue; } /* command completed already */ + dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", + __func__, tag); goto out; } else { + dev_err(hba->dev, + "%s: no response from device. tag = %d, err %d\n", + __func__, tag, err); if (!err) err = resp; /* service response error */ goto out; @@ -4994,14 +5090,20 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_ABORT_TASK, &resp); if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { - if (!err) + if (!err) { err = resp; /* service response error */ + dev_err(hba->dev, "%s: issued. 
tag = %d, err %d\n", + __func__, tag, err); + } goto out; } err = ufshcd_clear_cmd(hba, tag); - if (err) + if (err) { + dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", + __func__, tag, err); goto out; + } scsi_dma_unmap(cmd); @@ -5583,6 +5685,20 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) ufshcd_vops_apply_dev_quirks(hba); } +static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) +{ + int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist); + + hba->ufs_stats.hibern8_exit_cnt = 0; + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); + + memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size); + memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size); + memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size); + memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size); + memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size); +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance @@ -5602,6 +5718,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; hba->is_urgent_bkops_lvl_checked = false; + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + /* UniPro link is active now */ ufshcd_set_link_active(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index f25d4684ef0e..0ea49f982cac 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -152,6 +152,10 @@ struct ufs_pm_lvl_states { * @ucd_req_ptr: UCD address of the command * @ucd_rsp_ptr: Response UPIU address for this command * @ucd_prdt_ptr: PRDT address of the command + * @utrd_dma_addr: UTRD dma address for debug + * @ucd_prdt_dma_addr: PRDT dma address for debug + * @ucd_rsp_dma_addr: UPIU response dma address for debug + * @ucd_req_dma_addr: UPIU request dma address for debug * @cmd: pointer to SCSI command * @sense_buffer: pointer to sense buffer address of the SCSI command * @sense_bufflen: Length of the sense buffer @@ -160,6 +164,7 @@ struct ufs_pm_lvl_states { * @task_tag: Task tag of the command * @lun: LUN of the command * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) + * @issue_time_stamp: time stamp for debug purposes */ struct ufshcd_lrb { struct utp_transfer_req_desc *utr_descriptor_ptr; @@ -167,6 +172,11 @@ struct ufshcd_lrb { struct utp_upiu_rsp *ucd_rsp_ptr; struct ufshcd_sg_entry *ucd_prdt_ptr; + dma_addr_t utrd_dma_addr; + dma_addr_t ucd_req_dma_addr; + dma_addr_t ucd_rsp_dma_addr; + dma_addr_t ucd_prdt_dma_addr; + struct scsi_cmnd *cmd; u8 *sense_buffer; unsigned int sense_bufflen; @@ -176,6 +186,7 @@ struct ufshcd_lrb { int task_tag; u8 lun; /* UPIU LUN id field is only 8-bit wide */ bool intr_cmd; + ktime_t issue_time_stamp; }; /** @@ -355,6 +366,41 @@ struct ufs_init_prefetch { u32 icc_level; }; +#define UIC_ERR_REG_HIST_LENGTH 8 +/** + * struct ufs_uic_err_reg_hist - keeps history of uic errors + * @pos: index to indicate cyclic buffer position + * @reg: cyclic buffer for registers value + * @tstamp: cyclic buffer for time stamp + */ +struct ufs_uic_err_reg_hist { + int pos; + u32 reg[UIC_ERR_REG_HIST_LENGTH]; + ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH]; +}; + +/** + * struct ufs_stats - keeps usage/err statistics + * @hibern8_exit_cnt: Counter to keep track of number of exits, + * reset this after link-startup. + * @last_hibern8_exit_tstamp: Set time after the hibern8 exit. + * Clear after the first successful command completion. 
+ * @pa_err: tracks pa-uic errors + * @dl_err: tracks dl-uic errors + * @nl_err: tracks nl-uic errors + * @tl_err: tracks tl-uic errors + * @dme_err: tracks dme errors + */ +struct ufs_stats { + u32 hibern8_exit_cnt; + ktime_t last_hibern8_exit_tstamp; + struct ufs_uic_err_reg_hist pa_err; + struct ufs_uic_err_reg_hist dl_err; + struct ufs_uic_err_reg_hist nl_err; + struct ufs_uic_err_reg_hist tl_err; + struct ufs_uic_err_reg_hist dme_err; +}; + /** * struct ufs_hba - per adapter private structure * @mmio_base: UFSHCI base register address @@ -531,6 +577,7 @@ struct ufs_hba { u32 uic_error; u32 saved_err; u32 saved_uic_err; + struct ufs_stats ufs_stats; /* Device management request data */ struct ufs_dev_cmd dev_cmd; -- cgit v1.2.3-59-g8ed1b From ab3dabb3e8cf077850f20610f73a0def1fed10cb Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 2 Jan 2017 11:04:58 -0300 Subject: scsi: ufs-qcom: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/scsi/ufs/ufs-qcom.ko | grep alias $ After this patch: $ modinfo drivers/scsi/ufs/ufs-qcom.ko | grep alias alias: of:N*T*Cqcom,ufshcC* alias: of:N*T*Cqcom,ufshc Signed-off-by: Javier Martinez Canillas Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufs-qcom.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index abe617372661..5ff8a6bf6fd3 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1692,6 +1692,7 @@ static const struct of_device_id ufs_qcom_of_match[] = { { .compatible = "qcom,ufshc"}, {}, }; +MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); static const struct dev_pm_ops ufs_qcom_pm_ops = { .suspend = ufshcd_pltfrm_suspend, -- cgit v1.2.3-59-g8ed1b From d177c408113e5d8b664b2ae34e5cd7b27c686a12 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 3 Jan 2017 20:24:48 +0800 Subject: scsi: hisi_sas: service v2 hw CQ ISR with tasklet Currently the all the slot processing for the completion queue is done in ISR context. It is judged that the slot processing can take a long time, especially when a SATA NCQ completes (upto 32 slots). So, as a solution, defer the bulk of the ISR processing to tasklet context. Each CQ will have its down tasklet. Signed-off-by: John Garry Reviewed-by: Xiang Chen Reviewed-by: Zhangfei Gao Tested-by: Hanjun Guo Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas.h | 1 + drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index c0cd505a9ef7..9216deaa3ff5 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -95,6 +95,7 @@ struct hisi_sas_port { struct hisi_sas_cq { struct hisi_hba *hisi_hba; + struct tasklet_struct tasklet; int rd_point; int id; }; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index b934aec1eebb..e50626020b84 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2481,20 +2481,17 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) return IRQ_HANDLED; } -static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) +static void cq_tasklet_v2_hw(unsigned long val) { - struct hisi_sas_cq *cq = p; + struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; struct hisi_sas_itct *itct; struct hisi_sas_complete_v2_hdr *complete_queue; - u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id; + u32 rd_point = cq->rd_point, wr_point, dev_id; int queue = cq->id; complete_queue = hisi_hba->complete_hdr[queue]; - irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); - - hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); @@ -2545,6 +2542,18 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); +} + +static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + int queue = cq->id; + + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); + + tasklet_schedule(&cq->tasklet); + return IRQ_HANDLED; } @@ -2726,6 +2735,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) for (i = 0; i < hisi_hba->queue_count; i++) { int idx = i + 96; /* First cq interrupt is irq96 */ + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + struct tasklet_struct *t = &cq->tasklet; irq = irq_map[idx]; if (!irq) { @@ -2742,6 +2753,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) irq, rc); return -ENOENT; } + tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); } return 0; -- cgit v1.2.3-59-g8ed1b From 64d63187325884b96f684f1d839ec5d5ce231b92 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 3 Jan 2017 20:24:49 +0800 Subject: scsi: hisi_sas: lock sensitive regions when servicing CQ interrupt There is a bug in the current driver in that certain hisi_hba and port structure elements which we access when servicing the CQ interrupt do not use thread-safe accesses; these include hisi_sas_port linked-list of active slots (hisi_sas_port.entry), bitmap of currently allocated IPTT (in hisi_hba.slot_index_tags), and completion queue read pointer. As a solution, lock these elements with the hisi_hba.lock. Signed-off-by: John Garry Reviewed-by: Xiang Chen Reviewed-by: Zhangfei Gao Tested-by: Hanjun Guo Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 2 ++ drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 8a1be0ba8a22..854fbeaade3e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1596,6 +1596,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p) hisi_hba->complete_hdr[queue]; u32 irq_value, rd_point = cq->rd_point, wr_point; + spin_lock(&hisi_hba->lock); irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); @@ -1628,6 +1629,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + spin_unlock(&hisi_hba->lock); return IRQ_HANDLED; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index e50626020b84..69b0f06d1a96 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2493,6 +2493,7 @@ static void cq_tasklet_v2_hw(unsigned long val) complete_queue = hisi_hba->complete_hdr[queue]; + spin_lock(&hisi_hba->lock); wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); @@ -2542,6 +2543,7 @@ static void cq_tasklet_v2_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + spin_unlock(&hisi_hba->lock); } static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) -- cgit v1.2.3-59-g8ed1b From da7b66e720dcfb6754d6e3310a7ef315009b13db Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 3 Jan 2017 20:24:50 +0800 Subject: scsi: hisi_sas: lock sensitive region in hisi_sas_slot_abort() When we call hisi_sas_slot_task_free() we should grab the hisi_hba.lock, as hisi_sas_slot_task_free() accesses common hisi_hba elements. Function hisi_sas_slot_abort() is missing this, so add it. Signed-off-by: John Garry Reviewed-by: Zhangfei Gao Tested-by: Hanjun Guo Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index d50e9cfefd24..22dba0143bdc 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -146,6 +146,7 @@ static void hisi_sas_slot_abort(struct work_struct *work) struct scsi_lun lun; struct device *dev = &hisi_hba->pdev->dev; int tag = abort_slot->idx; + unsigned long flags; if (!(task->task_proto & SAS_PROTOCOL_SSP)) { dev_err(dev, "cannot abort slot for non-ssp task\n"); @@ -159,7 +160,9 @@ static void hisi_sas_slot_abort(struct work_struct *work) hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); out: /* Do cleanup for this task */ + spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_task_free(hisi_hba, task, abort_slot); + spin_unlock_irqrestore(&hisi_hba->lock, flags); if (task->task_done) task->task_done(task); if (sas_dev) -- cgit v1.2.3-59-g8ed1b From 352e5fd105982a9ea47d00c7256d6a2b7bb44d89 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 30 Dec 2016 06:57:47 -0800 Subject: scsi: lpfc: Reinstate lpfc_soft_wwn parameter The lpfc 11.2.0.4 patch set deprecated, by removing, the lpfc_soft_wwn parameter support. 
This patch reinstates support, but adds a warning in the enablement of the feature that indicates Broadcom (Emulex) does not support the feature. Signed-off-by: James Smart Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc.h | 4 + drivers/scsi/lpfc/lpfc_attr.c | 223 ++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/lpfc/lpfc_init.c | 16 ++- 3 files changed, 240 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index c2e6ffd2f372..6593b073c524 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -728,6 +728,8 @@ struct lpfc_hba { uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; + uint64_t cfg_soft_wwnn; + uint64_t cfg_soft_wwpn; uint32_t cfg_hba_queue_depth; uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; @@ -832,6 +834,8 @@ struct lpfc_hba { #define VPD_PORT 0x8 /* valid vpd port data */ #define VPD_MASK 0xf /* mask for any vpd data */ + uint8_t soft_wwn_enable; + struct timer_list fcp_poll_timer; struct timer_list eratt_poll; uint32_t eratt_poll_interval; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 8301c08b8768..6c104d79abe7 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1987,6 +1987,7 @@ static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, NULL); +static char *lpfc_soft_wwn_key = "C99G71SL8032A"; #define WWN_SZ 8 /** * lpfc_wwn_set - Convert string to the 8 byte WWN value. @@ -2030,6 +2031,223 @@ lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) } return 0; } +/** + * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the string lpfc_soft_wwn_key. + * @count: must be size of lpfc_soft_wwn_key. + * + * Returns: + * -EINVAL if the buffer does not contain lpfc_soft_wwn_key + * length of buf indicates success + **/ +static ssize_t +lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + unsigned int cnt = count; + + /* + * We're doing a simple sanity check for soft_wwpn setting. + * We require that the user write a specific key to enable + * the soft_wwpn attribute to be settable. Once the attribute + * is written, the enable key resets. If further updates are + * desired, the key must be written again to re-enable the + * attribute. + * + * The "key" is not secret - it is a hardcoded string shown + * here. The intent is to protect against the random user or + * application that is just writing attributes. 
+ */ + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + if ((cnt != strlen(lpfc_soft_wwn_key)) || + (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0)) + return -EINVAL; + + phba->soft_wwn_enable = 1; + + dev_printk(KERN_WARNING, &phba->pcidev->dev, + "lpfc%d: soft_wwpn assignment has been enabled.\n", + phba->brd_no); + dev_printk(KERN_WARNING, &phba->pcidev->dev, + " The soft_wwpn feature is not supported by Broadcom."); + + return count; +} +static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, + lpfc_soft_wwn_enable_store); + +/** + * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the wwpn in hexadecimal. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "0x%llx\n", + (unsigned long long)phba->cfg_soft_wwpn); +} + +/** + * lpfc_soft_wwpn_store - Set the ww port name of the adapter + * @dev class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: contains the wwpn in hexadecimal. + * @count: number of wwpn bytes in buf + * + * Returns: + * -EACCES hba reset not enabled, adapter over temp + * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid + * -EIO error taking adapter offline or online + * value of count on success + **/ +static ssize_t +lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct completion online_compl; + int stat1 = 0, stat2 = 0; + unsigned int cnt = count; + u8 wwpn[WWN_SZ]; + int rc; + + if (!phba->cfg_enable_hba_reset) + return -EACCES; + spin_lock_irq(&phba->hbalock); + if (phba->over_temp_state == HBA_OVER_TEMP) { + spin_unlock_irq(&phba->hbalock); + return -EACCES; + } + spin_unlock_irq(&phba->hbalock); + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + if (!phba->soft_wwn_enable) + return -EINVAL; + + /* lock setting wwpn, wwnn down */ + phba->soft_wwn_enable = 0; + + rc = lpfc_wwn_set(buf, cnt, wwpn); + if (!rc) { + /* not able to set wwpn, unlock it */ + phba->soft_wwn_enable = 1; + return rc; + } + + phba->cfg_soft_wwpn = wwn_to_u64(wwpn); + fc_host_port_name(shost) = phba->cfg_soft_wwpn; + if (phba->cfg_soft_wwnn) + fc_host_node_name(shost) = phba->cfg_soft_wwnn; + + dev_printk(KERN_NOTICE, &phba->pcidev->dev, + "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); + + stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + if (stat1) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0463 lpfc_soft_wwpn attribute set failed to " + "reinit adapter - %d\n", stat1); + init_completion(&online_compl); + rc = lpfc_workq_post_event(phba, &stat2, &online_compl, + LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + + wait_for_completion(&online_compl); + if (stat2) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0464 lpfc_soft_wwpn attribute set failed to " + "reinit adapter - %d\n", stat2); + return (stat1 || stat2) ? 
-EIO : count; +} +static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR, + lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); + +/** + * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: on return contains the wwnn in hexadecimal. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + return snprintf(buf, PAGE_SIZE, "0x%llx\n", + (unsigned long long)phba->cfg_soft_wwnn); +} + +/** + * lpfc_soft_wwnn_store - sets the ww node name of the adapter + * @cdev: class device that is converted into a Scsi_host. + * @buf: contains the ww node name in hexadecimal. + * @count: number of wwnn bytes in buf. + * + * Returns: + * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid + * value of count on success + **/ +static ssize_t +lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + u8 wwnn[WWN_SZ]; + int rc; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + if (!phba->soft_wwn_enable) + return -EINVAL; + + rc = lpfc_wwn_set(buf, cnt, wwnn); + if (!rc) { + /* Allow wwnn to be set many times, as long as the enable + * is set. However, once the wwpn is set, everything locks. + */ + return rc; + } + + phba->cfg_soft_wwnn = wwn_to_u64(wwnn); + + dev_printk(KERN_NOTICE, &phba->pcidev->dev, + "lpfc%d: soft_wwnn set. Value will take effect upon " + "setting of the soft_wwpn\n", phba->brd_no); + + return count; +} +static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR, + lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); /** * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for @@ -4538,6 +4756,9 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_io_channel, &dev_attr_lpfc_enable_bg, + &dev_attr_lpfc_soft_wwnn, + &dev_attr_lpfc_soft_wwpn, + &dev_attr_lpfc_soft_wwn_enable, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, &dev_attr_lpfc_EnableXLane, @@ -5566,6 +5787,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) else phba->cfg_poll = lpfc_poll; + phba->cfg_soft_wwnn = 0L; + phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 43d2b06bdc82..4776fd85514f 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -319,26 +319,36 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /** * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, + * cfg_soft_wwnn, cfg_soft_wwpn * @vport: pointer to lpfc vport data structure. * + * * Return codes * None. 
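 * (Editor's note: in the body below the soft values are first copied into fc_sparam and then, because a soft name is set, from fc_sparam into fc_nodename/fc_portname — so a configured soft WWNN/WWPN takes precedence over the names read back from the adapter.)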
**/ void lpfc_update_vport_wwn(struct lpfc_vport *vport) { + /* If the soft name exists then update it using the service params */ + if (vport->phba->cfg_soft_wwnn) + u64_to_wwn(vport->phba->cfg_soft_wwnn, + vport->fc_sparam.nodeName.u.wwn); + if (vport->phba->cfg_soft_wwpn) + u64_to_wwn(vport->phba->cfg_soft_wwpn, + vport->fc_sparam.portName.u.wwn); + /* - * If the name is empty + * If the name is empty or there exists a soft name * then copy the service params name, otherwise use the fc name */ - if (vport->fc_nodename.u.wwn[0] == 0) + if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); else memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); - if (vport->fc_portname.u.wwn[0] == 0) + if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn) memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); else -- cgit v1.2.3-59-g8ed1b From e2e8f619adaaa1b31c3739640beb0bd72ac87f4d Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Mon, 26 Dec 2016 14:23:09 +0100 Subject: scsi: qla2xxx: silence -Wformat-security warning qla24xx_enable_msix() calls scnprintf() with a non-literal format string. This makes clang report -Wformat-security warnings when compiling this function: drivers/scsi/qla2xxx/qla_isr.c:3083:7: error: format string is not a string literal (potentially insecure) [-Werror,-Wformat-security] msix_entries[i].name); ^~~~~~~~~~~~~~~~~~~~ drivers/scsi/qla2xxx/qla_isr.c:3083:7: note: treat the string as an argument to avoid this msix_entries[i].name); ^ "%s", drivers/scsi/qla2xxx/qla_isr.c:3119:7: error: format string is not a string literal (potentially insecure) [-Werror,-Wformat-security] msix_entries[QLA_ATIO_VECTOR].name); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/scsi/qla2xxx/qla_isr.c:3119:7: note: treat the string as an argument to avoid this msix_entries[QLA_ATIO_VECTOR].name); ^ "%s", Even though msix_entries[...].name are initialized as literal strings with no % character and are never modified, introduce a "%s" format parameter in order to silence this -Wformat-security warning and make clang able to detect at compile time real bugs related to string formatting. [mkp: typo] Signed-off-by: Nicolas Iooss Reviewed-by: Bart Van Assche Acked-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_isr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 5093ca9b02ec..474b415217df 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3080,7 +3080,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->handle = rsp; rsp->msix = qentry; scnprintf(qentry->name, sizeof(qentry->name), - msix_entries[i].name); + "%s", msix_entries[i].name); if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, @@ -3116,7 +3116,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) rsp->msix = qentry; qentry->handle = rsp; scnprintf(qentry->name, sizeof(qentry->name), - msix_entries[QLA_ATIO_VECTOR].name); + "%s", msix_entries[QLA_ATIO_VECTOR].name); qentry->in_use = 1; ret = request_irq(qentry->vector, msix_entries[QLA_ATIO_VECTOR].handler, -- cgit v1.2.3-59-g8ed1b From 44a8f954449c604b7b0a73a0316a8c4ed3487784 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Mon, 26 Dec 2016 14:23:10 +0100 Subject: scsi: qla2xxx: make msix_entries const msix_entries and qla82xx_msix_entries arrays are never modified in drivers/scsi/qla2xxx/qla_isr.c. Move their contents to read-only data. Signed-off-by: Nicolas Iooss Reviewed-by: Bart Van Assche Acked-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_isr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 474b415217df..b9c113e47346 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3003,14 +3003,14 @@ struct qla_init_msix_entry { irq_handler_t handler; }; -static struct qla_init_msix_entry msix_entries[] = { +static const struct qla_init_msix_entry msix_entries[] = { { "qla2xxx (default)", qla24xx_msix_default }, { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q }, }; -static struct qla_init_msix_entry qla82xx_msix_entries[] = { +static const struct qla_init_msix_entry qla82xx_msix_entries[] = { { "qla2xxx (default)", qla82xx_msix_default }, { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, }; @@ -3284,7 +3284,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, struct qla_msix_entry *msix, int vector_type) { - struct qla_init_msix_entry *intr = &msix_entries[vector_type]; + const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); int ret; -- cgit v1.2.3-59-g8ed1b From 577419f70463378c224d4b92e31b22c2877c4389 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 29 Dec 2016 22:20:38 +0000 Subject: scsi: qla2xxx: rename {vendor|hba}_indentifer to {vendor|hba}_identifer Rename the vendor_indentifer and hba_indentifer fields to correct spelling. Signed-off-by: Colin Ian King Acked-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 4 ++-- drivers/scsi/qla2xxx/qla_gs.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index f7df01b76714..ef699b66b685 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2247,7 +2247,7 @@ struct ct_fdmiv2_hba_attr { uint32_t num_ports; uint8_t fabric_name[WWN_SIZE]; uint8_t bios_name[32]; - uint8_t vendor_indentifer[8]; + uint8_t vendor_identifier[8]; } a; }; @@ -2422,7 +2422,7 @@ struct ct_sns_req { } rsnn_nn; struct { - uint8_t hba_indentifier[8]; + uint8_t hba_identifier[8]; } ghat; struct { diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 94e8a8592f69..ee3df8794806 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1939,15 +1939,15 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha) /* Vendor Identifier */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER); - snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer), + snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier), "%s", "QLGC"); - alen = strlen(eiter->a.vendor_indentifer); + alen = strlen(eiter->a.vendor_identifier); alen += 4 - (alen & 3); eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; ql_dbg(ql_dbg_disc, vha, 0x20b1, - "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer); + "Vendor Identifier = %s.\n", eiter->a.vendor_identifier); /* Update MS request size. */ qla2x00_update_ms_fdmi_iocb(vha, size + 16); -- cgit v1.2.3-59-g8ed1b From 984dc46c5715fec4329861f93f30c76b8d680e8d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 8 Jan 2017 10:41:15 +0100 Subject: scsi: bfa: remove bfa_fcs_mod_s Just call the functions directly instead of obsfucating the call chain. This was in reply to a patch from Kees Cook to constify the function pointer struct bfa_fcs_mod_s, but it turns out there is no reason to have this indirection at all. Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/bfa/bfa_fcs.c | 181 ++++++++++++--------------------------------- drivers/scsi/bfa/bfa_fcs.h | 4 - 2 files changed, 46 insertions(+), 139 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 1e7e139d71ea..4aa61e20e82d 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -27,24 +27,6 @@ BFA_TRC_FILE(FCS, FCS); -/* - * FCS sub-modules - */ -struct bfa_fcs_mod_s { - void (*attach) (struct bfa_fcs_s *fcs); - void (*modinit) (struct bfa_fcs_s *fcs); - void (*modexit) (struct bfa_fcs_s *fcs); -}; - -#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } - -static struct bfa_fcs_mod_s fcs_modules[] = { - { bfa_fcs_port_attach, NULL, NULL }, - { bfa_fcs_uf_attach, NULL, NULL }, - { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, - bfa_fcs_fabric_modexit }, -}; - /* * fcs_api BFA FCS API */ @@ -58,52 +40,19 @@ bfa_fcs_exit_comp(void *fcs_cbarg) complete(&bfad->comp); } - - /* - * fcs_api BFA FCS API - */ - -/* - * fcs attach -- called once to initialize data structures at driver attach time + * fcs initialization, called once after bfa initialization is complete */ void -bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, - bfa_boolean_t min_cfg) +bfa_fcs_init(struct bfa_fcs_s *fcs) { - int i; - struct bfa_fcs_mod_s *mod; - - fcs->bfa = bfa; - fcs->bfad = bfad; - fcs->min_cfg = min_cfg; - fcs->num_rport_logins = 0; - - bfa->fcs = BFA_TRUE; - fcbuild_init(); - - for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { - mod = &fcs_modules[i]; - if (mod->attach) - mod->attach(fcs); - } + bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); + bfa_trc(fcs, 0); } /* - * fcs initialization, called once after bfa initialization is complete + * fcs_api BFA FCS API */ -void -bfa_fcs_init(struct bfa_fcs_s *fcs) -{ - int i; - struct bfa_fcs_mod_s *mod; - - for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { - mod = &fcs_modules[i]; - if (mod->modinit) - mod->modinit(fcs); - } -} /* * FCS update cfg - reset the pwwn/nwwn of fabric base logical port @@ -180,26 +129,14 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, void bfa_fcs_exit(struct bfa_fcs_s *fcs) { - struct bfa_fcs_mod_s *mod; - int nmods, i; - bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); - - nmods = ARRAY_SIZE(fcs_modules); - - for (i = 0; i < nmods; i++) { - - mod = &fcs_modules[i]; - if (mod->modexit) { - bfa_wc_up(&fcs->wc); - mod->modexit(fcs); - } - } - + bfa_wc_up(&fcs->wc); + bfa_trc(fcs, 0); + bfa_lps_delete(fcs->fabric.lps); + bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_DELETE); bfa_wc_wait(&fcs->wc); } - /* * Fabric module implementation. */ @@ -1127,62 +1064,6 @@ bfa_fcs_fabric_stop_comp(void *cbarg) * fcs_fabric_public fabric public functions */ -/* - * Attach time initialization. - */ -void -bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) -{ - struct bfa_fcs_fabric_s *fabric; - - fabric = &fcs->fabric; - memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); - - /* - * Initialize base fabric. - */ - fabric->fcs = fcs; - INIT_LIST_HEAD(&fabric->vport_q); - INIT_LIST_HEAD(&fabric->vf_q); - fabric->lps = bfa_lps_alloc(fcs->bfa); - WARN_ON(!fabric->lps); - - /* - * Initialize fabric delete completion handler. Fabric deletion is - * complete when the last vport delete is complete. 
- */ - bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); - bfa_wc_up(&fabric->wc); /* For the base port */ - - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); - bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); -} - -void -bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) -{ - bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); - bfa_trc(fcs, 0); -} - -/* - * Module cleanup - */ -void -bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) -{ - struct bfa_fcs_fabric_s *fabric; - - bfa_trc(fcs, 0); - - /* - * Cleanup base fabric. - */ - fabric = &fcs->fabric; - bfa_lps_delete(fabric->lps); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); -} - /* * Fabric module stop -- stop FCS actions */ @@ -1633,12 +1514,6 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) } } -void -bfa_fcs_port_attach(struct bfa_fcs_s *fcs) -{ - bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); -} - /* * BFA FCS UF ( Unsolicited Frames) */ @@ -1706,8 +1581,44 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) bfa_uf_free(uf); } +/* + * fcs attach -- called once to initialize data structures at driver attach time + */ void -bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) +bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, + bfa_boolean_t min_cfg) { + struct bfa_fcs_fabric_s *fabric = &fcs->fabric; + + fcs->bfa = bfa; + fcs->bfad = bfad; + fcs->min_cfg = min_cfg; + fcs->num_rport_logins = 0; + + bfa->fcs = BFA_TRUE; + fcbuild_init(); + + bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); + + memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); + + /* + * Initialize base fabric. + */ + fabric->fcs = fcs; + INIT_LIST_HEAD(&fabric->vport_q); + INIT_LIST_HEAD(&fabric->vf_q); + fabric->lps = bfa_lps_alloc(fcs->bfa); + WARN_ON(!fabric->lps); + + /* + * Initialize fabric delete completion handler. Fabric deletion is + * complete when the last vport delete is complete. 
+ */ + bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); + bfa_wc_up(&fabric->wc); /* For the base port */ + + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); + bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); } diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 0f797a55d504..e60f72b766ea 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -808,9 +808,7 @@ void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports); /* * fabric protected interface functions */ -void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric); void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric); void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, @@ -827,8 +825,6 @@ void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric); void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, wwn_t fabric_name); u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); -void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); -void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs); void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); -- cgit v1.2.3-59-g8ed1b From eab5c1503b604216e352151618cd78d5806dee1a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 18 Nov 2016 07:28:16 +0100 Subject: scsi: pmcraid: switch to pci_alloc_irq_vectors Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/pmcraid.c | 92 ++++++++++++++++++++++---------------------------- drivers/scsi/pmcraid.h | 1 - 2 files changed, 41 insertions(+), 52 deletions(-) diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 337982cf3d63..49e70a383afa 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4587,16 +4587,14 @@ static void pmcraid_tasklet_function(unsigned long instance) static void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance) { + struct pci_dev *pdev = pinstance->pdev; int i; for (i = 0; i < pinstance->num_hrrq; i++) - free_irq(pinstance->hrrq_vector[i].vector, - &(pinstance->hrrq_vector[i])); + free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); - if (pinstance->interrupt_mode) { - pci_disable_msix(pinstance->pdev); - pinstance->interrupt_mode = 0; - } + pinstance->interrupt_mode = 0; + pci_free_irq_vectors(pdev); } /** @@ -4609,60 +4607,52 @@ void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance) static int pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) { - int rc; struct pci_dev *pdev = pinstance->pdev; + unsigned int irq_flag = PCI_IRQ_LEGACY, flag; + int num_hrrq, rc, i; + irq_handler_t isr; - if ((pmcraid_enable_msix) && - (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) { - int num_hrrq = PMCRAID_NUM_MSIX_VECTORS; - struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS]; - int i; - for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++) - entries[i].entry = i; - - num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq); - if (num_hrrq < 0) - goto pmcraid_isr_legacy; - - for (i = 0; i < num_hrrq; i++) { - pinstance->hrrq_vector[i].hrrq_id = i; - pinstance->hrrq_vector[i].drv_inst = pinstance; - pinstance->hrrq_vector[i].vector = entries[i].vector; - rc = request_irq(pinstance->hrrq_vector[i].vector, - 
pmcraid_isr_msix, 0, - PMCRAID_DRIVER_NAME, - &(pinstance->hrrq_vector[i])); - - if (rc) { - int j; - for (j = 0; j < i; j++) - free_irq(entries[j].vector, - &(pinstance->hrrq_vector[j])); - pci_disable_msix(pdev); - goto pmcraid_isr_legacy; - } - } + if (pmcraid_enable_msix) + irq_flag |= PCI_IRQ_MSIX; - pinstance->num_hrrq = num_hrrq; + num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS, + irq_flag); + if (num_hrrq < 0) + return num_hrrq; + + if (pdev->msix_enabled) { + flag = 0; + isr = pmcraid_isr_msix; + } else { + flag = IRQF_SHARED; + isr = pmcraid_isr; + } + + for (i = 0; i < num_hrrq; i++) { + struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i]; + + vec->hrrq_id = i; + vec->drv_inst = pinstance; + rc = request_irq(pci_irq_vector(pdev, i), isr, flag, + PMCRAID_DRIVER_NAME, vec); + if (rc) + goto out_unwind; + } + + pinstance->num_hrrq = num_hrrq; + if (pdev->msix_enabled) { pinstance->interrupt_mode = 1; iowrite32(DOORBELL_INTR_MODE_MSIX, pinstance->int_regs.host_ioa_interrupt_reg); ioread32(pinstance->int_regs.host_ioa_interrupt_reg); - goto pmcraid_isr_out; } -pmcraid_isr_legacy: - /* If MSI-X registration failed fallback to legacy mode, where - * only one hrrq entry will be used - */ - pinstance->hrrq_vector[0].hrrq_id = 0; - pinstance->hrrq_vector[0].drv_inst = pinstance; - pinstance->hrrq_vector[0].vector = pdev->irq; - pinstance->num_hrrq = 1; - - rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED, - PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]); -pmcraid_isr_out: + return 0; + +out_unwind: + while (--i > 0) + free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); + pci_free_irq_vectors(pdev); return rc; } diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index e1d150f3fd4d..568b18a2f47d 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -628,7 +628,6 @@ struct pmcraid_interrupts { /* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */ struct pmcraid_isr_param { struct pmcraid_instance *drv_inst; - u16 vector; /* allocated msi-x vector */ u8 hrrq_id; /* hrrq entry index */ }; -- cgit v1.2.3-59-g8ed1b From b6f0ec3621d73bc2976a4f2ee2bf9d02ecfd16b6 Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Tue, 3 Jan 2017 16:01:40 -0800 Subject: scsi: esas2r: Fix format string type mistakes This adds the missing __printf attribute which allows compile time format string checking (and will be used by the coming initify gcc plugin). Additionally, this fixes the warnings exposed by the attribute. Signed-off-by: Emese Revfy [kees: split scsi/acpi, merged attr and fix, new commit messages] Signed-off-by: Kees Cook Signed-off-by: Martin K. 
Petersen --- drivers/scsi/esas2r/esas2r_init.c | 2 +- drivers/scsi/esas2r/esas2r_ioctl.c | 2 +- drivers/scsi/esas2r/esas2r_log.h | 4 ++-- drivers/scsi/esas2r/esas2r_main.c | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index d6e53aee2295..6432a50b26d8 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -237,7 +237,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a) flags |= IRQF_SHARED; esas2r_log(ESAS2R_LOG_INFO, - "esas2r_claim_interrupts irq=%d (%p, %s, %x)", + "esas2r_claim_interrupts irq=%d (%p, %s, %lx)", a->pcid->irq, a, a->name, flags); if (request_irq(a->pcid->irq, diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index 3e8483410f61..b35ed3829421 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -1301,7 +1301,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL); if (ioctl == NULL) { esas2r_log(ESAS2R_LOG_WARN, - "ioctl_handler kzalloc failed for %d bytes", + "ioctl_handler kzalloc failed for %zu bytes", sizeof(struct atto_express_ioctl)); return -ENOMEM; } diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h index 7b6397bb5b94..75b9d23cd736 100644 --- a/drivers/scsi/esas2r/esas2r_log.h +++ b/drivers/scsi/esas2r/esas2r_log.h @@ -61,8 +61,8 @@ enum { #endif }; -int esas2r_log(const long level, const char *format, ...); -int esas2r_log_dev(const long level, +__printf(2, 3) int esas2r_log(const long level, const char *format, ...); +__printf(3, 4) int esas2r_log_dev(const long level, const struct device *dev, const char *format, ...); diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 5092c821d088..f2e9d8aa979c 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -198,7 +198,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj, GFP_KERNEL); if (a->local_atto_ioctl == NULL) { esas2r_log(ESAS2R_LOG_WARN, - "write_hw kzalloc failed for %d bytes", + "write_hw kzalloc failed for %zu bytes", sizeof(struct atto_ioctl)); return -ENOMEM; } @@ -1186,7 +1186,7 @@ retry: } else { esas2r_log(ESAS2R_LOG_CRIT, "unable to allocate a request for a " - "device reset (%d:%d)!", + "device reset (%d:%llu)!", cmd->device->id, cmd->device->lun); } -- cgit v1.2.3-59-g8ed1b From 26cf9155bf6b7deeaafe8fc8f233fca5e5398c92 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Thu, 5 Jan 2017 10:45:09 +0200 Subject: scsi: ufs: ufshcd_query_descriptor_retry should be static Fix the following compilation warning: drivers/scsi/ufs/ufshcd.c:2076:5: warning: no previous prototype for ufshcd_query_descriptor_retry [-Wmissing-prototypes] Also do not export the function, it should not be used out of ufs context. Signed-off-by: Tomas Winkler Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index be6322e38d74..5e95e2b42008 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2410,9 +2410,11 @@ out: * The buf_len parameter will contain, on return, the length parameter * received on the response. 
*/ -int ufshcd_query_descriptor_retry(struct ufs_hba *hba, - enum query_opcode opcode, enum desc_idn idn, u8 index, - u8 selector, u8 *desc_buf, int *buf_len) +static int ufshcd_query_descriptor_retry(struct ufs_hba *hba, + enum query_opcode opcode, + enum desc_idn idn, u8 index, + u8 selector, + u8 *desc_buf, int *buf_len) { int err; int retries; @@ -2426,7 +2428,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, return err; } -EXPORT_SYMBOL(ufshcd_query_descriptor_retry); /** * ufshcd_read_desc_param - read the specified descriptor parameter -- cgit v1.2.3-59-g8ed1b From 8209b6d54e7e2f8b305da831c78629b65d144f91 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Thu, 5 Jan 2017 10:45:10 +0200 Subject: scsi: ufs: unexport descritpor reading functions Unexport ufshcd_read_device_desc and ufshcd_read_string_desc there is no really possibility to calling them directly outside of UFS context. Signed-off-by: Tomas Winkler Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 9 ++++----- drivers/scsi/ufs/ufshcd.h | 7 ------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 5e95e2b42008..b9b653d332e0 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2545,11 +2545,10 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba, return err; } -int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) { return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); } -EXPORT_SYMBOL(ufshcd_read_device_desc); /** * ufshcd_read_string_desc - read string descriptor @@ -2561,8 +2560,9 @@ EXPORT_SYMBOL(ufshcd_read_device_desc); * * Return 0 in case of success, non-zero otherwise */ -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, - u32 size, bool ascii) +#define ASCII_STD true +static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, + u8 *buf, u32 size, bool ascii) { int err = 0; @@ -2618,7 +2618,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, out: return err; } -EXPORT_SYMBOL(ufshcd_read_string_desc); /** * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 0ea49f982cac..7ffcde21975b 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -781,8 +781,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); } -int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size); - static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info) { return (pwr_info->pwr_rx == FAST_MODE || @@ -791,11 +789,6 @@ static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info) pwr_info->pwr_tx == FASTAUTO_MODE); } -#define ASCII_STD true - -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, - u32 size, bool ascii); - /* Expose Query-Request API */ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); -- cgit v1.2.3-59-g8ed1b From d79713f911918e43576338e33396c7f889f5d272 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Thu, 5 Jan 2017 10:45:11 +0200 Subject: scsi: ufs: ufshcd_get_max_icc_level fix endianity handling Reading big endian value from a buffer requires explicit cast. 
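A minimal illustrative sketch of the pattern this fix adopts (the helper name and buffer layout below are made up for illustration, not taken from the patch): cast the buffer position to __be16 * and use be16_to_cpup(), so the big-endian annotation survives and sparse stays quiet:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Illustrative only: read the i-th big-endian 16-bit field from a raw byte buffer. */
    static u16 read_be16_field(const u8 *buf, unsigned int i)
    {
    	return be16_to_cpup((const __be16 *)&buf[2 * i]);
    }

Casting to a plain (u16 *) and byte-swapping the dereferenced value with be16_to_cpu() discards the endianness annotation, which is exactly what sparse warns about.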
Fix sparse warning: drivers/scsi/ufs/ufshcd.c:4825:24: warning: cast to restricted __be16 Signed-off-by: Tomas Winkler Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index b9b653d332e0..99731a062f80 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5266,7 +5266,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) u16 unit; for (i = start_scan; i >= 0; i--) { - data = be16_to_cpu(*((u16 *)(buff + 2*i))); + data = be16_to_cpup((__be16 *)&buff[2 * i]); unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET; curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; -- cgit v1.2.3-59-g8ed1b From 93fdd5ac64bbe80dac6416f048405362d7ef0945 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Thu, 5 Jan 2017 10:45:12 +0200 Subject: scsi: ufs: refactor device descriptor reading Pull device descriptor reading out of ufs quirk so it can be used also for other purposes. Revamp the fixup setup: 1. Rename ufs_device_info to ufs_dev_desc as very similar name ufs_dev_info is already in use. 2. Make the handlers static as they are not used out of the ufshdc.c file. [mkp: applied by hand] Signed-off-by: Tomas Winkler Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufs.h | 12 ++++++++++++ drivers/scsi/ufs/ufs_quirks.h | 28 ++++++---------------------- drivers/scsi/ufs/ufshcd.c | 40 +++++++++++++++++++--------------------- 3 files changed, 37 insertions(+), 43 deletions(-) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 8e6709a3fb6b..318e4a1f76c9 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -523,4 +523,16 @@ struct ufs_dev_info { bool is_lu_power_on_wp; }; +#define MAX_MODEL_LEN 16 +/** + * ufs_dev_desc - ufs device details from the device descriptor + * + * @wmanufacturerid: card details + * @model: card model + */ +struct ufs_dev_desc { + u16 wmanufacturerid; + char model[MAX_MODEL_LEN + 1]; +}; + #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 08b799d4efcc..71f73d1d1ad1 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -21,41 +21,28 @@ #define UFS_ANY_VENDOR 0xFFFF #define UFS_ANY_MODEL "ANY_MODEL" -#define MAX_MODEL_LEN 16 - #define UFS_VENDOR_TOSHIBA 0x198 #define UFS_VENDOR_SAMSUNG 0x1CE #define UFS_VENDOR_SKHYNIX 0x1AD -/** - * ufs_device_info - ufs device details - * @wmanufacturerid: card details - * @model: card model - */ -struct ufs_device_info { - u16 wmanufacturerid; - char model[MAX_MODEL_LEN + 1]; -}; - /** * ufs_dev_fix - ufs device quirk info * @card: ufs card details * @quirk: device quirk */ struct ufs_dev_fix { - struct ufs_device_info card; + struct ufs_dev_desc card; unsigned int quirk; }; #define END_FIX { { 0 }, 0 } /* add specific device quirk */ -#define UFS_FIX(_vendor, _model, _quirk) \ - { \ - .card.wmanufacturerid = (_vendor),\ - .card.model = (_model), \ - .quirk = (_quirk), \ - } +#define UFS_FIX(_vendor, _model, _quirk) { \ + .card.wmanufacturerid = (_vendor),\ + .card.model = (_model), \ + .quirk = (_quirk), \ +} /* * If UFS device is having issue in processing LCC (Line Control @@ -144,7 +131,4 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) -struct ufs_hba; -void ufs_advertise_fixup_device(struct ufs_hba *hba); - #endif /* UFS_QUIRKS_H_ */ diff --git 
a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 99731a062f80..dfad19ff166c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5452,8 +5452,8 @@ out: return ret; } -static int ufs_get_device_info(struct ufs_hba *hba, - struct ufs_device_info *card_data) +static int ufs_get_device_desc(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) { int err; u8 model_index; @@ -5472,7 +5472,7 @@ static int ufs_get_device_info(struct ufs_hba *hba, * getting vendor (manufacturerID) and Bank Index in big endian * format */ - card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; @@ -5486,36 +5486,26 @@ static int ufs_get_device_info(struct ufs_hba *hba, } str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; - strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), + strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], MAX_MODEL_LEN)); /* Null terminate the model string */ - card_data->model[MAX_MODEL_LEN] = '\0'; + dev_desc->model[MAX_MODEL_LEN] = '\0'; out: return err; } -void ufs_advertise_fixup_device(struct ufs_hba *hba) +static void ufs_fixup_device_setup(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) { - int err; struct ufs_dev_fix *f; - struct ufs_device_info card_data; - - card_data.wmanufacturerid = 0; - - err = ufs_get_device_info(hba, &card_data); - if (err) { - dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", - __func__, err); - return; - } for (f = ufs_fixups; f->quirk; f++) { - if (((f->card.wmanufacturerid == card_data.wmanufacturerid) || - (f->card.wmanufacturerid == UFS_ANY_VENDOR)) && - (STR_PRFX_EQUAL(f->card.model, card_data.model) || + if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || + f->card.wmanufacturerid == UFS_ANY_VENDOR) && + (STR_PRFX_EQUAL(f->card.model, dev_desc->model) || !strcmp(f->card.model, UFS_ANY_MODEL))) hba->dev_quirks |= f->quirk; } @@ -5707,6 +5697,7 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) */ static int ufshcd_probe_hba(struct ufs_hba *hba) { + struct ufs_dev_desc card = {0}; int ret; ktime_t start = ktime_get(); @@ -5732,7 +5723,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; - ufs_advertise_fixup_device(hba); + ret = ufs_get_device_desc(hba, &card); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", + __func__, ret); + goto out; + } + + ufs_fixup_device_setup(hba, &card); ufshcd_tune_unipro_params(hba); ret = ufshcd_set_vccq_rail_unused(hba, -- cgit v1.2.3-59-g8ed1b From 47069a81b6312380f163f711ff879bd7a9d0da2a Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Tue, 10 Jan 2017 20:16:43 +0800 Subject: scsi: remove useless acpi functions in the header file commit f1bc1e4c44b1 ("ata: acpi: rework the ata acpi bind support") removed scsi_register_acpi_bus_type() and scsi_unregister_acpi_bus_type(), but forgot to remove them in the header file, do it now. Signed-off-by: Hanjun Guo Reviewed-by: John Garry Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- include/scsi/scsi.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 8ec7c30e35af..a1e1930b7a87 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -29,16 +29,6 @@ enum scsi_timeouts { */ #define SCAN_WILD_CARD ~0 -#ifdef CONFIG_ACPI -struct acpi_bus_type; - -extern int -scsi_register_acpi_bus_type(struct acpi_bus_type *bus); - -extern void -scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus); -#endif - /** scsi_status_is_good - check the status return. * * @status: the status passed up from the driver (including host and -- cgit v1.2.3-59-g8ed1b From 45f4f2eb3da3cbff02c3d77c784c81320c733056 Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:43 -0500 Subject: scsi: megaraid_sas: Add new pci device Ids for SAS3.5 Generic Megaraid Controllers This patch contains new pci device ids for SAS3.5 Generic Megaraid Controllers Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 12 +++++++++--- drivers/scsi/megaraid/megaraid_sas_base.c | 14 +++++++++++++- drivers/scsi/megaraid/megaraid_sas_fusion.c | 30 ++++++++++++++++++++++------- 3 files changed, 45 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index fdd519c1dd57..cb82195a8be1 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -56,6 +56,11 @@ #define PCI_DEVICE_ID_LSI_INTRUDER_24 0x00cf #define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052 #define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053 +#define PCI_DEVICE_ID_LSI_VENTURA 0x0014 +#define PCI_DEVICE_ID_LSI_HARPOON 0x0016 +#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017 +#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B +#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C /* * Intel HBA SSDIDs @@ -100,7 +105,7 @@ */ /* - * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for + * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for * protocol between the software and firmware. 
Commands are issued using * "message frames" */ @@ -1435,7 +1440,7 @@ enum FW_BOOT_CONTEXT { * register set for both 1068 and 1078 controllers * structure extended for 1078 registers */ - + struct megasas_register_set { u32 doorbell; /*0000h*/ u32 fusion_seq_offset; /*0004h*/ @@ -1478,7 +1483,7 @@ struct megasas_register_set { u32 inbound_high_queue_port ; /*00C4h*/ - u32 reserved_5; /*00C8h*/ + u32 inbound_single_queue_port; /*00C8h*/ u32 res_6[11]; /*CCh*/ u32 host_diag; u32 seq_offset; @@ -2142,6 +2147,7 @@ struct megasas_instance { u8 is_rdpq; bool dev_handle; bool fw_sync_cache_support; + bool is_ventura; }; struct MR_LD_VF_MAP { u32 size; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d5cf15eb8c5e..e00b3dece088 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -155,6 +155,12 @@ static struct pci_device_id megasas_pci_table[] = { /* Intruder 24 port*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, + /* VENTURA */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, {} }; @@ -5714,6 +5720,12 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->pdev = pdev; switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_VENTURA: + case PCI_DEVICE_ID_LSI_HARPOON: + case PCI_DEVICE_ID_LSI_TOMCAT: + case PCI_DEVICE_ID_LSI_VENTURA_4PORT: + case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: + instance->is_ventura = true; case PCI_DEVICE_ID_LSI_FUSION: case PCI_DEVICE_ID_LSI_PLASMA: case PCI_DEVICE_ID_LSI_INVADER: @@ -5738,7 +5750,7 @@ static int megasas_probe_one(struct pci_dev *pdev, if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) fusion->adapter_type = THUNDERBOLT_SERIES; - else + else if (!instance->is_ventura) fusion->adapter_type = INVADER_SERIES; } break; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 24778ba4b6e8..8d7a39782512 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -189,15 +189,29 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, */ static void megasas_fire_cmd_fusion(struct megasas_instance *instance, - union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, bool is_32bit) { + struct megasas_register_set __iomem *regs = instance->reg_set; + unsigned long flags; + + if (is_32bit) + writel(le32_to_cpu(req_desc->u.low), + &(regs)->inbound_single_queue_port); + else if (instance->is_ventura) { + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc->u.low), + &(regs)->inbound_low_queue_port); + writel(le32_to_cpu(req_desc->u.high), + &(regs)->inbound_high_queue_port); + mmiowb(); + spin_unlock_irqrestore(&instance->hba_lock, flags); + } else { #if defined(writeq) && defined(CONFIG_64BIT) u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | le32_to_cpu(req_desc->u.low)); writeq(req_data, &instance->reg_set->inbound_low_queue_port); #else - unsigned long flags; spin_lock_irqsave(&instance->hba_lock, 
flags); writel(le32_to_cpu(req_desc->u.low), @@ -207,6 +221,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, mmiowb(); spin_unlock_irqrestore(&instance->hba_lock, flags); #endif + } } /** @@ -850,7 +865,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - megasas_fire_cmd_fusion(instance, &req_desc); + megasas_fire_cmd_fusion(instance, &req_desc, false); wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); @@ -2224,7 +2239,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, */ atomic_inc(&instance->fw_outstanding); - megasas_fire_cmd_fusion(instance, req_desc); + megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); return 0; } @@ -2595,7 +2610,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance, return DCMD_NOT_FIRED; } - megasas_fire_cmd_fusion(instance, req_desc); + megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); return DCMD_SUCCESS; } @@ -2888,7 +2903,8 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance) cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))) && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); if (refire_cmd) - megasas_fire_cmd_fusion(instance, req_desc); + megasas_fire_cmd_fusion(instance, req_desc, + instance->is_ventura); else megasas_return_cmd(instance, cmd_mfi); } @@ -3067,7 +3083,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, mr_request->tmReqFlags.isTMForLD = 1; init_completion(&cmd_fusion->done); - megasas_fire_cmd_fusion(instance, req_desc); + megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ); -- cgit v1.2.3-59-g8ed1b From 2493c67e518c772a573c3b1ad02e7ced5b53f6ca Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:44 -0500 Subject: scsi: megaraid_sas: 128 MSIX Support SAS3.5 Generic Megaraid based Controllers will have the support for 128 MSI-X vectors, resulting in the need to support 128 reply queues Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 1 + drivers/scsi/megaraid/megaraid_sas_base.c | 25 ++++++++++++++++++------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 4 ++-- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index cb82195a8be1..36aac88571fc 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2148,6 +2148,7 @@ struct megasas_instance { bool dev_handle; bool fw_sync_cache_support; bool is_ventura; + bool msix_combined; }; struct MR_LD_VF_MAP { u32 size; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e00b3dece088..6801a449d236 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5072,13 +5072,7 @@ static int megasas_init_fw(struct megasas_instance *instance) goto fail_ready_state; } - /* - * MSI-X host index 0 is common for all adapter. - * It is used for all MPT based Adapters. 
- */ - instance->reply_post_host_index_addr[0] = - (u32 __iomem *)((u8 __iomem *)instance->reg_set + - MPI2_REPLY_POST_HOST_INDEX_OFFSET); + /* Check if MSI-X is supported while in ready state */ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & @@ -5098,6 +5092,9 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; + if (instance->msix_vectors > 16) + instance->msix_combined = true; + if (rdpq_enable) instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; @@ -5131,6 +5128,20 @@ static int megasas_init_fw(struct megasas_instance *instance) else instance->msix_vectors = 0; } + /* + * MSI-X host index 0 is common for all adapter. + * It is used for all MPT based Adapters. + */ + if (instance->msix_combined) { + instance->reply_post_host_index_addr[0] = + (u32 *)((u8 *)instance->reg_set + + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); + } else { + instance->reply_post_host_index_addr[0] = + (u32 *)((u8 *)instance->reg_set + + MPI2_REPLY_POST_HOST_INDEX_OFFSET); + } + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); if (i < 0) goto fail_setup_irqs; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 8d7a39782512..413e20308871 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2391,7 +2391,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) * pending to be completed */ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { - if (fusion->adapter_type == INVADER_SERIES) + if (instance->msix_combined) writel(((MSIxIndex & 0x7) << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[MSIxIndex/8]); @@ -2407,7 +2407,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) return IRQ_NONE; wmb(); - if (fusion->adapter_type == INVADER_SERIES) + if (instance->msix_combined) writel(((MSIxIndex & 0x7) << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[MSIxIndex/8]); -- cgit v1.2.3-59-g8ed1b From 45d446038c7b93c40b2fe5ba0e95380f19e0493e Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:45 -0500 Subject: scsi: megaraid_sas: EEDP Escape Mode Support for SAS3.5 Generic Megaraid Controllers An UNMAP command on a PI formatted device will leave the Logical Block Application Tag and Logical Block Reference Tag as all F's (for those LBAs that are unmapped). To avoid IO errors if those LBAs are subsequently read before they are written with valid tag fields, the MPI SCSI IO requests need to set the EEDPFlags element EEDP Escape Mode field, Bits [7:6] appropriately. A value of 2 should be set to disable all PI checks if the Logical Block Application Tag is 0xFFFF for PI types 1 and 2. A value of 3 should be set to disable all PI checks if the Logical Block Application Tag is 0xFFFF and the Logical Block Reference Tag is 0xFFFFFFFF for PI type 3. Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 1 + drivers/scsi/megaraid/megaraid_sas_fusion.h | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 413e20308871..fe69c4a0eb22 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1589,6 +1589,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | + MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); } else { io_request->EEDPFlags = cpu_to_le16( diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index e3bee04c1eb1..9d22adea5861 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -175,6 +175,8 @@ enum REGION_TYPE { #define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +/* EEDP escape mode */ +#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) #define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ #define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03) -- cgit v1.2.3-59-g8ed1b From fdd84e2514b0157219720cf8f3f55757938a39cd Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:46 -0500 Subject: scsi: megaraid_sas: SAS3.5 Generic Megaraid Controllers Stream Detection and IO Coalescing Detect sequential Write IOs and pass the hint that it is part of sequential stream to help HBA Firmware do the Full Stripe Writes. For read IOs on certain RAID volumes like Read Ahead volumes,this will help driver to send it to Firmware even if the IOs can potentially be sent to hardware directly (called fast path) bypassing firmware. Design: 8 streams are maintained per RAID volume as per the combined firmware/driver design. When there is no stream detected the LRU stream is used for next potential stream and LRU/MRU map is updated to make this as MRU stream. Every time a stream is detected the MRU map is updated to make the current stream as MRU stream. Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 1 + drivers/scsi/megaraid/megaraid_sas_base.c | 43 +++++++- drivers/scsi/megaraid/megaraid_sas_fp.c | 2 + drivers/scsi/megaraid/megaraid_sas_fusion.c | 165 +++++++++++++++++++++++----- drivers/scsi/megaraid/megaraid_sas_fusion.h | 117 +++++++++++++++++++- 5 files changed, 297 insertions(+), 31 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 36aac88571fc..3d86bc65222d 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2070,6 +2070,7 @@ struct megasas_instance { /* used to sync fire the cmd to fw */ spinlock_t hba_lock; /* used to synch producer, consumer ptrs in dpc */ + spinlock_t stream_lock; spinlock_t completion_lock; struct dma_pool *frame_dma_pool; struct dma_pool *sense_dma_pool; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 6801a449d236..b2b1d29ec942 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5001,7 +5001,7 @@ static int megasas_init_fw(struct megasas_instance *instance) struct megasas_register_set __iomem *reg_set; struct megasas_ctrl_info *ctrl_info = NULL; unsigned long bar_list; - int i, loop, fw_msix_count = 0; + int i, j, loop, fw_msix_count = 0; struct IOV_111 *iovPtr; struct fusion_context *fusion; @@ -5194,6 +5194,36 @@ static int megasas_init_fw(struct megasas_instance *instance) } memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + + /* stream detection initialization */ + if (instance->is_ventura) { + fusion->stream_detect_by_ld = + kzalloc(sizeof(struct LD_STREAM_DETECT *) + * MAX_LOGICAL_DRIVES_EXT, + GFP_KERNEL); + if (!fusion->stream_detect_by_ld) { + dev_err(&instance->pdev->dev, + "unable to allocate stream detection for pool of LDs\n"); + goto fail_get_ld_pd_list; + } + for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { + fusion->stream_detect_by_ld[i] = + kmalloc(sizeof(struct LD_STREAM_DETECT), + GFP_KERNEL); + if (!fusion->stream_detect_by_ld[i]) { + dev_err(&instance->pdev->dev, + "unable to allocate stream detect by LD\n "); + for (j = 0; j < i; ++j) + kfree(fusion->stream_detect_by_ld[j]); + kfree(fusion->stream_detect_by_ld); + fusion->stream_detect_by_ld = NULL; + goto fail_get_ld_pd_list; + } + fusion->stream_detect_by_ld[i]->mru_bit_map + = MR_STREAM_BITMAP; + } + } + if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) megasas_get_ld_list(instance); @@ -5313,6 +5343,8 @@ static int megasas_init_fw(struct megasas_instance *instance) return 0; +fail_get_ld_pd_list: + instance->instancet->disable_intr(instance); fail_get_pd_list: instance->instancet->disable_intr(instance); fail_init_adapter: @@ -5846,6 +5878,7 @@ static int megasas_probe_one(struct pci_dev *pdev, spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); + spin_lock_init(&instance->stream_lock); spin_lock_init(&instance->completion_lock); mutex_init(&instance->reset_mutex); @@ -6353,6 +6386,14 @@ skip_firing_dcmds: if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); + if (instance->is_ventura) { + for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) + kfree(fusion->stream_detect_by_ld[i]); + kfree(fusion->stream_detect_by_ld); + fusion->stream_detect_by_ld = NULL; + } + + if (instance->ctrl_context) { megasas_release_fusion(instance); pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c 
b/drivers/scsi/megaraid/megaraid_sas_fp.c index f237d0003df3..a4e213be69d2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -935,6 +935,8 @@ MR_BuildRaidContext(struct megasas_instance *instance, ld = MR_TargetIdToLdGet(ldTgtId, map); raid = MR_LdRaidGet(ld, map); + /*check read ahead bit*/ + io_info->ra_capable = raid->capability.ra_capable; /* * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index fe69c4a0eb22..974fb3c37ad4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1703,6 +1703,90 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, } } +/** + * megasas_stream_detect - stream detection on read and and write IOs + * @instance: Adapter soft state + * @cmd: Command to be prepared + * @io_info: IO Request info + * + */ + +/** stream detection on read and and write IOs */ +static void megasas_stream_detect(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, + struct IO_REQUEST_INFO *io_info) +{ + struct fusion_context *fusion = instance->ctrl_context; + u32 device_id = io_info->ldTgtId; + struct LD_STREAM_DETECT *current_ld_sd + = fusion->stream_detect_by_ld[device_id]; + u32 *track_stream = ¤t_ld_sd->mru_bit_map, stream_num; + u32 shifted_values, unshifted_values; + u32 index_value_mask, shifted_values_mask; + int i; + bool is_read_ahead = false; + struct STREAM_DETECT *current_sd; + /* find possible stream */ + for (i = 0; i < MAX_STREAMS_TRACKED; ++i) { + stream_num = + (*track_stream >> (i * BITS_PER_INDEX_STREAM)) & + STREAM_MASK; + current_sd = ¤t_ld_sd->stream_track[stream_num]; + /* if we found a stream, update the raid + * context and also update the mruBitMap + */ + /* boundary condition */ + if ((current_sd->next_seq_lba) && + (io_info->ldStartBlock >= current_sd->next_seq_lba) && + (io_info->ldStartBlock <= (current_sd->next_seq_lba+32)) && + (current_sd->is_read == io_info->isRead)) { + + if ((io_info->ldStartBlock != current_sd->next_seq_lba) + && ((!io_info->isRead) || (!is_read_ahead))) + /* + * Once the API availible we need to change this. 
+ * At this point we are not allowing any gap + */ + continue; + + cmd->io_request->RaidContext.raid_context_g35.stream_detected = true; + current_sd->next_seq_lba = + io_info->ldStartBlock + io_info->numBlocks; + /* + * update the mruBitMap LRU + */ + shifted_values_mask = + (1 << i * BITS_PER_INDEX_STREAM) - 1; + shifted_values = ((*track_stream & shifted_values_mask) + << BITS_PER_INDEX_STREAM); + index_value_mask = + STREAM_MASK << i * BITS_PER_INDEX_STREAM; + unshifted_values = + *track_stream & ~(shifted_values_mask | + index_value_mask); + *track_stream = + unshifted_values | shifted_values | stream_num; + return; + + } + + } + /* + * if we did not find any stream, create a new one + * from the least recently used + */ + stream_num = + (*track_stream >> ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & + STREAM_MASK; + current_sd = ¤t_ld_sd->stream_track[stream_num]; + current_sd->is_read = io_info->isRead; + current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks; + *track_stream = + (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num); + return; + +} + /** * megasas_build_ldio_fusion - Prepares IOs to devices * @instance: Adapter soft state @@ -1725,15 +1809,17 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct fusion_context *fusion; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; u8 *raidLUN; + unsigned long spinlock_flags; device_id = MEGASAS_DEV_INDEX(scp); fusion = instance->ctrl_context; io_request = cmd->io_request; - io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); - io_request->RaidContext.status = 0; - io_request->RaidContext.exStatus = 0; + io_request->RaidContext.raid_context.VirtualDiskTgtId = + cpu_to_le16(device_id); + io_request->RaidContext.raid_context.status = 0; + io_request->RaidContext.raid_context.exStatus = 0; req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; @@ -1804,11 +1890,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { - io_request->RaidContext.regLockFlags = 0; + io_request->RaidContext.raid_context.regLockFlags = 0; fp_possible = 0; } else { if (MR_BuildRaidContext(instance, &io_info, - &io_request->RaidContext, + &io_request->RaidContext.raid_context, local_map_ptr, &raidLUN)) fp_possible = io_info.fpOkForIo; } @@ -1819,6 +1905,18 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? 
raw_smp_processor_id() % instance->msix_vectors : 0; + if (instance->is_ventura) { + spin_lock_irqsave(&instance->stream_lock, spinlock_flags); + megasas_stream_detect(instance, cmd, &io_info); + spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags); + /* In ventura if stream detected for a read and it is read ahead + * capable make this IO as LDIO + */ + if (io_request->RaidContext.raid_context_g35.stream_detected && + io_info.isRead && io_info.ra_capable) + fp_possible = false; + } + if (fp_possible) { megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, local_map_ptr, start_lba_lo); @@ -1827,15 +1925,16 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { - if (io_request->RaidContext.regLockFlags == + if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->RaidContext.Type = MPI2_TYPE_CUDA; - io_request->RaidContext.nseg = 0x1; + io_request->RaidContext.raid_context.Type + = MPI2_TYPE_CUDA; + io_request->RaidContext.raid_context.nseg = 0x1; io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); - io_request->RaidContext.regLockFlags |= + io_request->RaidContext.raid_context.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); } @@ -1862,22 +1961,24 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, /* populate the LUN field */ memcpy(io_request->LUN, raidLUN, 8); } else { - io_request->RaidContext.timeoutValue = + io_request->RaidContext.raid_context.timeoutValue = cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { if (io_info.do_fp_rlbypass || - (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)) + (io_request->RaidContext.raid_context.regLockFlags + == REGION_TYPE_UNUSED)) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->RaidContext.Type = MPI2_TYPE_CUDA; - io_request->RaidContext.regLockFlags |= + io_request->RaidContext.raid_context.Type + = MPI2_TYPE_CUDA; + io_request->RaidContext.raid_context.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE); - io_request->RaidContext.nseg = 0x1; + io_request->RaidContext.raid_context.nseg = 0x1; } io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); @@ -1913,7 +2014,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); /* get RAID_Context pointer */ - pRAID_Context = &io_request->RaidContext; + pRAID_Context = &io_request->RaidContext.raid_context; /* Check with FW team */ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); pRAID_Context->regLockRowLBA = 0; @@ -2000,7 +2101,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, io_request = cmd->io_request; /* get RAID_Context pointer */ - pRAID_Context = &io_request->RaidContext; + pRAID_Context = &io_request->RaidContext.raid_context; pRAID_Context->regLockFlags = 0; pRAID_Context->regLockRowLBA = 0; 
pRAID_Context->regLockLength = 0; @@ -2094,9 +2195,9 @@ megasas_build_io_fusion(struct megasas_instance *instance, io_request->Control = 0; io_request->EEDPBlockSize = 0; io_request->ChainOffset = 0; - io_request->RaidContext.RAIDFlags = 0; - io_request->RaidContext.Type = 0; - io_request->RaidContext.nseg = 0; + io_request->RaidContext.raid_context.RAIDFlags = 0; + io_request->RaidContext.raid_context.Type = 0; + io_request->RaidContext.raid_context.nseg = 0; memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); /* @@ -2143,8 +2244,8 @@ megasas_build_io_fusion(struct megasas_instance *instance, /* numSGE store lower 8 bit of sge_count. * numSGEExt store higher 8 bit of sge_count */ - io_request->RaidContext.numSGE = sge_count; - io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8); + io_request->RaidContext.raid_context.numSGE = sge_count; + io_request->RaidContext.raid_context.numSGEExt = (u8)(sge_count >> 8); io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); @@ -2303,8 +2404,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) cmd_fusion->scmd->SCp.ptr = NULL; scmd_local = cmd_fusion->scmd; - status = scsi_io_req->RaidContext.status; - extStatus = scsi_io_req->RaidContext.exStatus; + status = scsi_io_req->RaidContext.raid_context.status; + extStatus = scsi_io_req->RaidContext.raid_context.exStatus; switch (scsi_io_req->Function) { case MPI2_FUNCTION_SCSI_TASK_MGMT: @@ -2337,8 +2438,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ /* Map the FW Cmd Status */ map_cmd_status(cmd_fusion, status, extStatus); - scsi_io_req->RaidContext.status = 0; - scsi_io_req->RaidContext.exStatus = 0; + scsi_io_req->RaidContext.raid_context.status = 0; + scsi_io_req->RaidContext.raid_context.exStatus = 0; if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) atomic_dec(&instance->ldio_outstanding); megasas_return_cmd_fusion(instance, cmd_fusion); @@ -2905,7 +3006,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance) && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); if (refire_cmd) megasas_fire_cmd_fusion(instance, req_desc, - instance->is_ventura); + instance->is_ventura); else megasas_return_cmd(instance, cmd_mfi); } @@ -3394,7 +3495,7 @@ int megasas_check_mpio_paths(struct megasas_instance *instance, /* Core fusion reset function */ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) { - int retval = SUCCESS, i, convert = 0; + int retval = SUCCESS, i, j, convert = 0; struct megasas_instance *instance; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion; @@ -3559,6 +3660,16 @@ transition_to_ready: shost_for_each_device(sdev, shost) megasas_update_sdev_properties(sdev); + /* reset stream detection array */ + if (instance->is_ventura) { + for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { + memset(fusion->stream_detect_by_ld[j], + 0, sizeof(struct LD_STREAM_DETECT)); + fusion->stream_detect_by_ld[j]->mru_bit_map + = MR_STREAM_BITMAP; + } + } + clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 9d22adea5861..3909a26a5d31 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -133,12 +133,95 @@ struct RAID_CONTEXT { u8 resvd2; }; +/* + * Raid Context structure which describes ventura MegaRAID specific + * IO Paramenters ,This resides 
at offset 0x60 where the SGL normally + * starts in MPT IO Frames + */ +struct RAID_CONTEXT_G35 { +#if defined(__BIG_ENDIAN_BITFIELD) + u16 resvd0:8; + u16 nseg:4; + u16 type:4; +#else + u16 type:4; /* 0x00 */ + u16 nseg:4; /* 0x00 */ + u16 resvd0:8; +#endif + u16 timeout_value; /* 0x02 -0x03 */ + union { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u16 set_divert:4; + u16 cpu_sel:4; + u16 log:1; + u16 rw:1; + u16 sbs:1; + u16 sqn:1; + u16 fwn:1; + u16 c2f:1; + u16 sld:1; + u16 reserved:1; +#else + u16 reserved:1; + u16 sld:1; + u16 c2f:1; + u16 fwn:1; + u16 sqn:1; + u16 sbs:1; + u16 rw:1; + u16 log:1; + u16 cpu_sel:4; + u16 set_divert:4; +#endif + } bits; + u16 s; + } routing_flags; /* 0x04 -0x05 routing flags */ + u16 virtual_disk_tgt_id; /* 0x06 -0x07 */ + u64 reg_lock_row_lba; /* 0x08 - 0x0F */ + u32 reg_lock_length; /* 0x10 - 0x13 */ + union { + u16 next_lmid; /* 0x14 - 0x15 */ + u16 peer_smid; /* used for the raid 1/10 fp writes */ + } smid; + u8 ex_status; /* 0x16 : OUT */ + u8 status; /* 0x17 status */ + u8 RAIDFlags; /* 0x18 resvd[7:6], ioSubType[5:4], + * resvd[3:1], preferredCpu[0] + */ + u8 span_arm; /* 0x1C span[7:5], arm[4:0] */ + u16 config_seq_num; /* 0x1A -0x1B */ +#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */ + u16 stream_detected:1; + u16 reserved:3; + u16 num_sge:12; +#else + u16 num_sge:12; + u16 reserved:3; + u16 stream_detected:1; +#endif + u8 resvd2[2]; /* 0x1E-0x1F */ +}; + +union RAID_CONTEXT_UNION { + struct RAID_CONTEXT raid_context; + struct RAID_CONTEXT_G35 raid_context_g35; +}; + #define RAID_CTX_SPANARM_ARM_SHIFT (0) #define RAID_CTX_SPANARM_ARM_MASK (0x1f) #define RAID_CTX_SPANARM_SPAN_SHIFT (5) #define RAID_CTX_SPANARM_SPAN_MASK (0xE0) +/* number of bits per index in U32 TrackStream */ +#define BITS_PER_INDEX_STREAM 4 +#define INVALID_STREAM_NUM 16 +#define MR_STREAM_BITMAP 0x76543210 +#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1) +#define ZERO_LAST_STREAM 0x0fffffff +#define MAX_STREAMS_TRACKED 8 + /* * define region lock types */ @@ -409,7 +492,7 @@ struct MPI2_RAID_SCSI_IO_REQUEST { u8 LUN[8]; /* 0x34 */ __le32 Control; /* 0x3C */ union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ - struct RAID_CONTEXT RaidContext; /* 0x60 */ + union RAID_CONTEXT_UNION RaidContext; /* 0x60 */ union MPI2_SGE_IO_UNION SGL; /* 0x80 */ }; @@ -656,11 +739,13 @@ struct MR_LD_RAID { u32 encryptionType:8; u32 pdPiMode:4; u32 ldPiMode:4; - u32 reserved5:3; + u32 reserved5:2; + u32 ra_capable:1; u32 fpCapable:1; #else u32 fpCapable:1; - u32 reserved5:3; + u32 ra_capable:1; + u32 reserved5:2; u32 ldPiMode:4; u32 pdPiMode:4; u32 encryptionType:8; @@ -745,6 +830,7 @@ struct IO_REQUEST_INFO { u64 start_row; u8 span_arm; /* span[7:5], arm[4:0] */ u8 pd_after_lb; + bool ra_capable; }; struct MR_LD_TARGET_SYNC { @@ -930,6 +1016,30 @@ struct MR_PD_CFG_SEQ_NUM_SYNC { struct MR_PD_CFG_SEQ seq[1]; } __packed; +/* stream detection */ +struct STREAM_DETECT { + u64 next_seq_lba; /* next LBA to match sequential access */ + struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */ + struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */ + u32 count_cmds_in_stream; /* count of host commands in this stream */ + u16 num_sges_in_group; /* total number of SGEs in grouped IOs */ + u8 is_read; /* SCSI OpCode for this stream */ + u8 group_depth; /* total number of host commands in group */ + /* TRUE if cannot add any more commands to this group */ + bool group_flush; + u8 reserved[7]; /* pad to 64-bit alignment */ +}; + +struct LD_STREAM_DETECT { + bool write_back; 
/* TRUE if WB, FALSE if WT */ + bool fp_write_enabled; + bool members_ssds; + bool fp_cache_bypass_capable; + u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indicies */ + /* this is the array of stream detect structures (one per stream) */ + struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED]; +}; + struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { u64 RDPQBaseAddress; u32 Reserved1; @@ -983,6 +1093,7 @@ struct fusion_context { struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT]; LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; u8 adapter_type; + struct LD_STREAM_DETECT **stream_detect_by_ld; }; union desc_value { -- cgit v1.2.3-59-g8ed1b From 69c337c0f8d74d71e085efa8869be9fc51e5962b Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:47 -0500 Subject: scsi: megaraid_sas: SAS3.5 Generic Megaraid Controllers Fast Path for RAID 1/10 Writes To improve RAID 1/10 Write performance, OS drivers need to issue the required Write IOs as Fast Path IOs (after the appropriate checks allowing Fast Path to be used) to the appropriate physical drives (translated from the OS logical IO) and wait for all Write IOs to complete. Design: A write IO on RAID volume will be examined if it can be sent in Fast Path based on IO size and starting LBA and ending LBA falling on to a Physical Drive boundary. If the underlying RAID volume is a RAID 1/10, driver issues two fast path write IOs one for each corresponding physical drive after computing the corresponding start LBA for each physical drive. Both write IOs will have the same payload and are posted to HW such that replies land in the same reply queue. If there are no resources available for sending two IOs, driver will send the original IO from SCSI layer to RAID volume through the Firmware. Based on PCI bandwidth and write payload, every second this feature is enabled/disabled. When both IOs are completed by HW, the resources will be released and SCSI IO completion handler will be called. Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 1 + drivers/scsi/megaraid/megaraid_sas_fp.c | 31 ++- drivers/scsi/megaraid/megaraid_sas_fusion.c | 335 ++++++++++++++++++++++++---- drivers/scsi/megaraid/megaraid_sas_fusion.h | 15 +- 4 files changed, 329 insertions(+), 53 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 3d86bc65222d..a96889c34db0 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2056,6 +2056,7 @@ struct megasas_instance { u16 max_num_sge; u16 max_fw_cmds; + u16 max_mpt_cmds; u16 max_mfi_cmds; u16 max_scsi_cmds; u16 ldio_threshold; diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index a4e213be69d2..eb9ff444c099 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -737,7 +737,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); - u32 pd, arRef; + u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; u8 retval = TRUE; @@ -772,9 +772,16 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); - else { + /* get second pd also for raid 1/10 fast path writes*/ + if (raid->level == 1) { + r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); + if (r1_alt_pd != MR_PD_INVALID) + io_info->r1_alt_dev_handle = + MR_PdDevHandleGet(r1_alt_pd, map); + } + } else { *pDevHandle = cpu_to_le16(MR_PD_INVALID); if ((raid->level >= 5) && ((fusion->adapter_type == THUNDERBOLT_SERIES) || @@ -819,7 +826,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); - u32 pd, arRef; + u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; u8 retval = TRUE; @@ -867,10 +874,17 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { /* Get dev handle from Pd. */ *pDevHandle = MR_PdDevHandleGet(pd, map); - else { + /* get second pd also for raid 1/10 fast path writes*/ + if (raid->level == 1) { + r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); + if (r1_alt_pd != MR_PD_INVALID) + io_info->r1_alt_dev_handle = + MR_PdDevHandleGet(r1_alt_pd, map); + } + } else { /* set dev handle as invalid. 
*/ *pDevHandle = cpu_to_le16(MR_PD_INVALID); if ((raid->level >= 5) && @@ -1126,6 +1140,11 @@ MR_BuildRaidContext(struct megasas_instance *instance, /* If IO on an invalid Pd, then FP is not possible.*/ if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID)) io_info->fpOkForIo = FALSE; + /* set raid 1/10 fast path write capable bit in io_info */ + if (io_info->fpOkForIo && + (io_info->r1_alt_dev_handle != MR_PD_INVALID) && + (raid->level == 1) && !isRead) + io_info->is_raid_1_fp_write = 1; return retval; } else if (isRead) { uint stripIdx; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 974fb3c37ad4..792f26918ed6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -270,7 +270,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c instance->ldio_threshold = ldio_threshold; if (!instance->is_rdpq) - instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); + instance->max_fw_cmds = + min_t(u16, instance->max_fw_cmds, 1024); if (reset_devices) instance->max_fw_cmds = min(instance->max_fw_cmds, @@ -286,7 +287,14 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS); instance->cur_can_queue = instance->max_scsi_cmds; + instance->host->can_queue = instance->cur_can_queue; } + + if (instance->is_ventura) + instance->max_mpt_cmds = + instance->max_fw_cmds * RAID_1_10_RMW_CMDS; + else + instance->max_mpt_cmds = instance->max_fw_cmds; } /** * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool @@ -300,7 +308,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) struct megasas_cmd_fusion *cmd; /* SG, Sense */ - for (i = 0; i < instance->max_fw_cmds; i++) { + for (i = 0; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; if (cmd) { if (cmd->sg_frame) @@ -344,7 +352,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) /* cmd_list */ - for (i = 0; i < instance->max_fw_cmds; i++) + for (i = 0; i < instance->max_mpt_cmds; i++) kfree(fusion->cmd_list[i]); kfree(fusion->cmd_list); @@ -396,33 +404,49 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) return -ENOMEM; } } + + /* create sense buffer for the raid 1/10 fp */ + for (i = max_cmd; i < instance->max_mpt_cmds; i++) { + cmd = fusion->cmd_list[i]; + cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, + GFP_KERNEL, &cmd->sense_phys_addr); + if (!cmd->sense) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + return 0; } int megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) { - u32 max_cmd, i; + u32 max_mpt_cmd, i; struct fusion_context *fusion; fusion = instance->ctrl_context; - max_cmd = instance->max_fw_cmds; + max_mpt_cmd = instance->max_mpt_cmds; /* * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. * Allocate the dynamic array first and then allocate individual * commands. 
*/ - fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd, - GFP_KERNEL); + fusion->cmd_list = + kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd, + GFP_KERNEL); if (!fusion->cmd_list) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } - for (i = 0; i < max_cmd; i++) { + + + for (i = 0; i < max_mpt_cmd; i++) { fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), GFP_KERNEL); if (!fusion->cmd_list[i]) { @@ -657,13 +681,14 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) */ /* SMID 0 is reserved. Set SMID/index from 1 */ - for (i = 0; i < instance->max_fw_cmds; i++) { + for (i = 0; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); cmd->index = i + 1; cmd->scmd = NULL; - cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ? + cmd->sync_cmd_idx = + (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ? (i - instance->max_scsi_cmds) : (u32)ULONG_MAX; /* Set to Invalid */ cmd->instance = instance; @@ -673,6 +698,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; + cmd->is_raid_1_fp_write = 0; } if (megasas_create_sg_sense_fusion(instance)) @@ -1262,12 +1288,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16); fusion->request_alloc_sz = - sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; + sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds; fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *(fusion->reply_q_depth); fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + - (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * - (max_cmd + 1)); /* Extra 1 for SMID 0 */ + (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */ scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2); /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, @@ -1403,42 +1429,43 @@ fail_alloc_mfi_cmds: */ void -map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status) +map_cmd_status(struct fusion_context *fusion, + struct scsi_cmnd *scmd, u8 status, u8 ext_status, + u32 data_length, u8 *sense) { switch (status) { case MFI_STAT_OK: - cmd->scmd->result = DID_OK << 16; + scmd->result = DID_OK << 16; break; case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - cmd->scmd->result = (DID_ERROR << 16) | ext_status; + scmd->result = (DID_ERROR << 16) | ext_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: - cmd->scmd->result = (DID_OK << 16) | ext_status; + scmd->result = (DID_OK << 16) | ext_status; if (ext_status == SAM_STAT_CHECK_CONDITION) { - memset(cmd->scmd->sense_buffer, 0, + memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - memcpy(cmd->scmd->sense_buffer, cmd->sense, + memcpy(scmd->sense_buffer, sense, SCSI_SENSE_BUFFERSIZE); - cmd->scmd->result |= DRIVER_SENSE << 24; + scmd->result |= DRIVER_SENSE << 24; } break; case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND: - cmd->scmd->result = DID_BAD_TARGET << 16; + scmd->result = DID_BAD_TARGET << 16; break; case MFI_STAT_CONFIG_SEQ_MISMATCH: - cmd->scmd->result = DID_IMM_RETRY << 16; + scmd->result = DID_IMM_RETRY << 16; break; default: - dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", 
status); - cmd->scmd->result = DID_ERROR << 16; + scmd->result = DID_ERROR << 16; break; } } @@ -1881,6 +1908,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; + io_info.r1_alt_dev_handle = MR_PD_INVALID; io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) @@ -1949,6 +1977,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, } else scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; + cmd->is_raid_1_fp_write = io_info.is_raid_1_fp_write; + if (io_info.is_raid_1_fp_write) + cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; + if ((raidLUN[0] == 1) && (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { instance->dev_handle = !(instance->dev_handle); @@ -2272,19 +2304,118 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) u8 *p; struct fusion_context *fusion; - if (index >= instance->max_fw_cmds) { + if (index >= instance->max_mpt_cmds) { dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for " "descriptor for scsi%d\n", index, instance->host->host_no); return NULL; } fusion = instance->ctrl_context; - p = fusion->req_frames_desc - +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index; + p = fusion->req_frames_desc + + sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index; return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; } +/* + * megasas_fpio_to_ldio- + * This function converts an fp io to ldio + */ + +void megasas_fpio_to_ldio(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, struct scsi_cmnd *scmd) +{ + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + cmd->request_desc->SCSIIO.RequestFlags = + (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO + << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + cmd->io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; + cmd->io_request->DevHandle = cpu_to_le16(MEGASAS_DEV_INDEX(scmd)); + + /*remove FAST PATH ENABLE bit in IoFlags */ + cmd->io_request->IoFlags &= + cpu_to_le16(~MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); + + /* if the numSGE > max_sge_in_main_sge set the chain offset*/ + if (cmd->io_request->RaidContext.raid_context_g35.num_sge > + fusion->max_sge_in_main_msg) + cmd->io_request->ChainOffset = fusion->chain_offset_io_request; + memcpy(cmd->io_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + cmd->io_request->CDB.EEDP32.PrimaryReferenceTag = 0; + cmd->io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; + cmd->io_request->EEDPFlags = 0; + cmd->io_request->Control = 0; + cmd->io_request->EEDPBlockSize = 0; + cmd->is_raid_1_fp_write = 0; +} + +/* megasas_prepate_secondRaid1_IO + * It prepares the raid 1 second IO + */ +void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, + struct megasas_cmd_fusion *r1_cmd) +{ + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + req_desc = cmd->request_desc; + if (r1_cmd) { + /* copy the io request frame as well + * as 8 SGEs data for r1 command + */ + memcpy(r1_cmd->io_request, cmd->io_request, + sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); + memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL, + (fusion->max_sge_in_main_msg * + sizeof(union MPI2_SGE_IO_UNION))); + /*sense buffer is different for r1 command*/ + r1_cmd->io_request->SenseBufferLowAddress = + 
cpu_to_le32(r1_cmd->sense_phys_addr); + r1_cmd->scmd = cmd->scmd; + req_desc2 = + megasas_get_request_descriptor(instance, r1_cmd->index-1); + if (req_desc2) { + req_desc2->Words = 0; + r1_cmd->request_desc = req_desc2; + req_desc2->SCSIIO.SMID = + cpu_to_le16(r1_cmd->index); + req_desc2->SCSIIO.RequestFlags = + req_desc->SCSIIO.RequestFlags; + r1_cmd->is_raid_1_fp_write = 1; + r1_cmd->request_desc->SCSIIO.DevHandle = + cmd->r1_alt_dev_handle; + r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle; + cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = + cpu_to_le16(r1_cmd->index); + r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = + cpu_to_le16(cmd->index); + /* MSIxIndex of both commands request + * descriptors should be same + */ + r1_cmd->request_desc->SCSIIO.MSIxIndex = + cmd->request_desc->SCSIIO.MSIxIndex; + /*span arm is different for r1 cmd*/ + r1_cmd->io_request->RaidContext.raid_context_g35.span_arm = + cmd->io_request->RaidContext.raid_context_g35.span_arm + 1; + } else { + megasas_return_cmd_fusion(instance, r1_cmd); + dev_info(&instance->pdev->dev, + "unable to get request descriptor, firing as normal IO\n"); + atomic_dec(&instance->fw_outstanding); + megasas_fpio_to_ldio(instance, cmd, cmd->scmd); + } + } else { + dev_info(&instance->pdev->dev, + "unable to get command, firing as normal IO\n"); + atomic_dec(&instance->fw_outstanding); + megasas_fpio_to_ldio(instance, cmd, cmd->scmd); + } +} + /** * megasas_build_and_issue_cmd_fusion -Main routine for building and * issuing non IOCTL cmd @@ -2295,7 +2426,7 @@ static u32 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, struct scsi_cmnd *scmd) { - struct megasas_cmd_fusion *cmd; + struct megasas_cmd_fusion *cmd, *r1_cmd = NULL; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; u32 index; struct fusion_context *fusion; @@ -2310,13 +2441,27 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, return SCSI_MLQUEUE_DEVICE_BUSY; } + if (atomic_inc_return(&instance->fw_outstanding) > + instance->host->can_queue) { + dev_err(&instance->pdev->dev, "Throttle IOs beyond Controller queue depth\n"); + atomic_dec(&instance->fw_outstanding); + return SCSI_MLQUEUE_HOST_BUSY; + } + cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); + if (!cmd) { + atomic_dec(&instance->fw_outstanding); + return SCSI_MLQUEUE_HOST_BUSY; + } + index = cmd->index; req_desc = megasas_get_request_descriptor(instance, index-1); - if (!req_desc) + if (!req_desc) { + atomic_dec(&instance->fw_outstanding); return SCSI_MLQUEUE_HOST_BUSY; + } req_desc->Words = 0; cmd->request_desc = req_desc; @@ -2325,6 +2470,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, megasas_return_cmd_fusion(instance, cmd); dev_err(&instance->pdev->dev, "Error building command\n"); cmd->request_desc = NULL; + atomic_dec(&instance->fw_outstanding); return SCSI_MLQUEUE_HOST_BUSY; } @@ -2335,14 +2481,39 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, cmd->io_request->ChainOffset != 0xF) dev_err(&instance->pdev->dev, "The chain offset value is not " "correct : %x\n", cmd->io_request->ChainOffset); + /* + * if it is raid 1/10 fp write capable. + * try to get second command from pool and construct it. 
+ * From FW, it has confirmed that lba values of two PDs + * corresponds to single R1/10 LD are always same + * + */ + /* driver side count always should be less than max_fw_cmds + * to get new command + */ + if (cmd->is_raid_1_fp_write && + atomic_inc_return(&instance->fw_outstanding) > + (instance->host->can_queue)) { + megasas_fpio_to_ldio(instance, cmd, cmd->scmd); + atomic_dec(&instance->fw_outstanding); + } else if (cmd->is_raid_1_fp_write) { + r1_cmd = megasas_get_cmd_fusion(instance, + (scmd->request->tag + instance->max_fw_cmds)); + megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); + } + /* * Issue the command to the FW */ - atomic_inc(&instance->fw_outstanding); megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); + if (r1_cmd) + megasas_fire_cmd_fusion(instance, r1_cmd->request_desc, + instance->is_ventura); + + return 0; } @@ -2359,10 +2530,10 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; struct fusion_context *fusion; struct megasas_cmd *cmd_mfi; - struct megasas_cmd_fusion *cmd_fusion; + struct megasas_cmd_fusion *cmd_fusion, *r1_cmd = NULL; u16 smid, num_completed; - u8 reply_descript_type; - u32 status, extStatus, device_id; + u8 reply_descript_type, *sense; + u32 status, extStatus, device_id, data_length; union desc_value d_val; struct LD_LOAD_BALANCE_INFO *lbinfo; int threshold_reply_count = 0; @@ -2392,6 +2563,15 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) while (d_val.u.low != cpu_to_le32(UINT_MAX) && d_val.u.high != cpu_to_le32(UINT_MAX)) { + /* + * Ensure that the peer command is NULL here in case a + * command has completed but the R1 FP Write peer has + * not completed yet.If not null, it's possible that + * another thread will complete the peer + * command and should not. 
+ */ + r1_cmd = NULL; + smid = le16_to_cpu(reply_desc->SMID); cmd_fusion = fusion->cmd_list[smid - 1]; @@ -2406,6 +2586,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) scmd_local = cmd_fusion->scmd; status = scsi_io_req->RaidContext.raid_context.status; extStatus = scsi_io_req->RaidContext.raid_context.exStatus; + sense = cmd_fusion->sense; + data_length = scsi_io_req->DataLength; switch (scsi_io_req->Function) { case MPI2_FUNCTION_SCSI_TASK_MGMT: @@ -2422,12 +2604,28 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) /* Update load balancing info */ device_id = MEGASAS_DEV_INDEX(scmd_local); lbinfo = &fusion->load_balance_info[device_id]; - if (cmd_fusion->scmd->SCp.Status & - MEGASAS_LOAD_BALANCE_FLAG) { + /* + * check for the raid 1/10 fast path writes + */ + if (!cmd_fusion->is_raid_1_fp_write && + (cmd_fusion->scmd->SCp.Status & + MEGASAS_LOAD_BALANCE_FLAG)) { atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; + } else if (cmd_fusion->is_raid_1_fp_write) { + /* get peer command */ + if (cmd_fusion->index < instance->max_fw_cmds) + r1_cmd = fusion->cmd_list[(cmd_fusion->index + + instance->max_fw_cmds)-1]; + else { + r1_cmd = + fusion->cmd_list[(cmd_fusion->index - + instance->max_fw_cmds)-1]; + } + cmd_fusion->cmd_completed = true; } + if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { if (megasas_dbg_lvl == 5) @@ -2437,14 +2635,48 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) /* Fall thru and complete IO */ case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ /* Map the FW Cmd Status */ - map_cmd_status(cmd_fusion, status, extStatus); - scsi_io_req->RaidContext.raid_context.status = 0; - scsi_io_req->RaidContext.raid_context.exStatus = 0; - if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) - atomic_dec(&instance->ldio_outstanding); - megasas_return_cmd_fusion(instance, cmd_fusion); - scsi_dma_unmap(scmd_local); - scmd_local->scsi_done(scmd_local); + /* + * check for the raid 1/10 fast path writes + */ + if (r1_cmd && r1_cmd->is_raid_1_fp_write + && r1_cmd->cmd_completed) { + /* + * if the peer Raid 1/10 fast path failed, + * mark IO as failed to the scsi layer. 
+ * over write the current status by the failed + * status makes sure that if any one of + * command fails,return fail status to + * scsi layer + */ + if (r1_cmd->io_request->RaidContext.raid_context.status != + MFI_STAT_OK) { + status = + r1_cmd->io_request->RaidContext.raid_context.status; + extStatus = + r1_cmd->io_request->RaidContext.raid_context.exStatus; + data_length = + r1_cmd->io_request->DataLength; + sense = r1_cmd->sense; + } + r1_cmd->io_request->RaidContext.raid_context.status = 0; + r1_cmd->io_request->RaidContext.raid_context.exStatus = 0; + cmd_fusion->is_raid_1_fp_write = 0; + r1_cmd->is_raid_1_fp_write = 0; + r1_cmd->cmd_completed = false; + cmd_fusion->cmd_completed = false; + megasas_return_cmd_fusion(instance, r1_cmd); + } + if (!cmd_fusion->is_raid_1_fp_write) { + map_cmd_status(fusion, scmd_local, status, + extStatus, data_length, sense); + scsi_io_req->RaidContext.raid_context.status + = 0; + scsi_io_req->RaidContext.raid_context.exStatus + = 0; + megasas_return_cmd_fusion(instance, cmd_fusion); + scsi_dma_unmap(scmd_local); + scmd_local->scsi_done(scmd_local); + } atomic_dec(&instance->fw_outstanding); break; @@ -3497,7 +3729,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) { int retval = SUCCESS, i, j, convert = 0; struct megasas_instance *instance; - struct megasas_cmd_fusion *cmd_fusion; + struct megasas_cmd_fusion *cmd_fusion, *mpt_cmd_fusion; struct fusion_context *fusion; u32 abs_state, status_reg, reset_adapter; u32 io_timeout_in_crash_mode = 0; @@ -3572,6 +3804,18 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) /* Now return commands back to the OS */ for (i = 0 ; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; + /*check for extra commands issued by driver*/ + if (instance->is_ventura) { + cmd_fusion->is_raid_1_fp_write = 0; + cmd_fusion->cmd_completed = false; + mpt_cmd_fusion = + fusion->cmd_list[i + instance->max_fw_cmds]; + mpt_cmd_fusion->is_raid_1_fp_write = 0; + mpt_cmd_fusion->cmd_completed = false; + if (mpt_cmd_fusion->scmd) + megasas_return_cmd_fusion(instance, + mpt_cmd_fusion); + } scmd_local = cmd_fusion->scmd; if (cmd_fusion->scmd) { scmd_local->result = @@ -3582,10 +3826,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); scmd_local->scsi_done(scmd_local); - atomic_dec(&instance->fw_outstanding); } } + atomic_set(&instance->fw_outstanding, 0); + status_reg = instance->instancet->read_fw_status_reg( instance->reg_set); abs_state = status_reg & MFI_STATE_MASK; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 3909a26a5d31..3a5af3bb9989 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -94,6 +94,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FP_CMD_LEN 16 #define MEGASAS_FUSION_IN_RESET 0 #define THRESHOLD_REPLY_COUNT 50 +#define RAID_1_10_RMW_CMDS 3 #define JBOD_MAPS_COUNT 2 enum MR_FUSION_ADAPTER_TYPE { @@ -728,7 +729,9 @@ struct MR_SPAN_BLOCK_INFO { struct MR_LD_RAID { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved4:5; + u32 reserved4:3; + u32 fp_cache_bypass_capable:1; + u32 fp_rmw_capable:1; u32 fpBypassRegionLock:1; u32 tmCapable:1; u32 fpNonRWCapable:1; @@ -756,7 +759,9 @@ struct MR_LD_RAID { u32 fpNonRWCapable:1; u32 tmCapable:1; u32 fpBypassRegionLock:1; - u32 reserved4:5; + u32 fp_rmw_capable:1; + u32 fp_cache_bypass_capable:1; + u32 reserved4:3; 
#endif } capability; __le32 reserved6; @@ -830,6 +835,8 @@ struct IO_REQUEST_INFO { u64 start_row; u8 span_arm; /* span[7:5], arm[4:0] */ u8 pd_after_lb; + u16 r1_alt_dev_handle; /* raid 1/10 only */ + bool is_raid_1_fp_write; bool ra_capable; }; @@ -883,6 +890,10 @@ struct megasas_cmd_fusion { u32 index; u8 pd_r1_lb; struct completion done; + bool is_raid_1_fp_write; + u16 r1_alt_dev_handle; /* raid 1/10 only*/ + bool cmd_completed; /* raid 1/10 fp writes status holder */ + }; struct LD_LOAD_BALANCE_INFO { -- cgit v1.2.3-59-g8ed1b From d889344e4e59eb962894ab3b64042dc37a2d8b39 Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:48 -0500 Subject: scsi: megaraid_sas: Dynamic Raid Map Changes for SAS3.5 Generic Megaraid Controllers SAS3.5 Generic Megaraid Controllers FW will support new dynamic RaidMap to have different sizes for different number of supported VDs. Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 7 + drivers/scsi/megaraid/megaraid_sas_base.c | 60 ++++-- drivers/scsi/megaraid/megaraid_sas_fp.c | 301 ++++++++++++++++++++++++---- drivers/scsi/megaraid/megaraid_sas_fusion.c | 225 ++++++++++++++++----- drivers/scsi/megaraid/megaraid_sas_fusion.h | 240 ++++++++++++++++++---- 5 files changed, 695 insertions(+), 138 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index a96889c34db0..6ddf994a25b7 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1434,6 +1434,12 @@ enum FW_BOOT_CONTEXT { #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 #define MR_MAX_MSIX_REG_ARRAY 16 #define MR_RDPQ_MODE_OFFSET 0X00800000 + +#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16 +#define MR_MAX_RAID_MAP_SIZE_MASK 0x1FF +#define MR_MIN_MAP_SIZE 0x10000 +/* 64k */ + #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 /* @@ -2151,6 +2157,7 @@ struct megasas_instance { bool fw_sync_cache_support; bool is_ventura; bool msix_combined; + u16 max_raid_mapsize; }; struct MR_LD_VF_MAP { u32 size; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b2b1d29ec942..fd6ddde43d03 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4424,8 +4424,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) static void megasas_update_ext_vd_details(struct megasas_instance *instance) { struct fusion_context *fusion; - u32 old_map_sz; - u32 new_map_sz; + u32 ventura_map_sz = 0; fusion = instance->ctrl_context; /* For MFI based controllers return dummy success */ @@ -4455,21 +4454,38 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance) instance->supportmax256vd ? 
"Extended VD(240 VD)firmware" : "Legacy(64 VD) firmware"); - old_map_sz = sizeof(struct MR_FW_RAID_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) * - (instance->fw_supported_vd_count - 1)); - new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); - fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) * - (instance->drv_supported_vd_count - 1)); - - fusion->max_map_sz = max(old_map_sz, new_map_sz); + if (instance->max_raid_mapsize) { + ventura_map_sz = instance->max_raid_mapsize * + MR_MIN_MAP_SIZE; /* 64k */ + fusion->current_map_sz = ventura_map_sz; + fusion->max_map_sz = ventura_map_sz; + } else { + fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + + (sizeof(struct MR_LD_SPAN_MAP) * + (instance->fw_supported_vd_count - 1)); + fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); + fusion->max_map_sz = + max(fusion->old_map_sz, fusion->new_map_sz); - if (instance->supportmax256vd) - fusion->current_map_sz = new_map_sz; - else - fusion->current_map_sz = old_map_sz; + if (instance->supportmax256vd) + fusion->current_map_sz = fusion->new_map_sz; + else + fusion->current_map_sz = fusion->old_map_sz; + } + /* irrespective of FW raid maps, driver raid map is constant */ + fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); + +#if VD_EXT_DEBUG + dev_info(&instance->pdev->dev, "instance->max_raid_mapsize 0x%x\n ", + instance->max_raid_mapsize); + dev_info(&instance->pdev->dev, "new_map_sz = 0x%x, old_map_sz = 0x%x\n", + fusion->new_map_sz, fusion->old_map_sz); + dev_info(&instance->pdev->dev, "ventura_map_sz = 0x%x, current_map_sz = 0x%x\n", + ventura_map_sz, fusion->current_map_sz); + dev_info(&instance->pdev->dev, "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx\n", + fusion->drv_map_sz, sizeof(struct MR_DRV_RAID_MAP_ALL)); +#endif } /** @@ -4996,7 +5012,7 @@ static int megasas_init_fw(struct megasas_instance *instance) { u32 max_sectors_1; u32 max_sectors_2; - u32 tmp_sectors, msix_enable, scratch_pad_2; + u32 tmp_sectors, msix_enable, scratch_pad_2, scratch_pad_3; resource_size_t base_addr; struct megasas_register_set __iomem *reg_set; struct megasas_ctrl_info *ctrl_info = NULL; @@ -5072,7 +5088,17 @@ static int megasas_init_fw(struct megasas_instance *instance) goto fail_ready_state; } - + if (instance->is_ventura) { + scratch_pad_3 = + readl(&instance->reg_set->outbound_scratch_pad_3); +#if VD_EXT_DEBUG + dev_info(&instance->pdev->dev, "scratch_pad3 0x%x\n", + scratch_pad_3); +#endif + instance->max_raid_mapsize = ((scratch_pad_3 >> + MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & + MR_MAX_RAID_MAP_SIZE_MASK); + } /* Check if MSI-X is supported while in ready state */ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index eb9ff444c099..f1384b01b3d3 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -179,18 +179,204 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) struct fusion_context *fusion = instance->ctrl_context; struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; struct MR_FW_RAID_MAP *pFwRaidMap = NULL; - int i; + int i, j; u16 ld_count; + struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn; + struct MR_FW_RAID_MAP_EXT *fw_map_ext; + struct MR_RAID_MAP_DESC_TABLE *desc_table; struct MR_DRV_RAID_MAP_ALL *drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; + void *raid_map_data = NULL; + + memset(drv_map, 0, fusion->drv_map_sz); + 
memset(pDrvRaidMap->ldTgtIdToLd, + 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN)); + + if (instance->max_raid_mapsize) { + fw_map_dyn = fusion->ld_map[(instance->map_id & 1)]; +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, "raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x\n", + le32_to_cpu(fw_map_dyn->raid_map_size), + le32_to_cpu(fw_map_dyn->desc_table_offset)); + dev_dbg(&instance->pdev->dev, "descTableSize 0x%x descTableNumElements 0x%x\n", + le32_to_cpu(fw_map_dyn->desc_table_size), + le32_to_cpu(fw_map_dyn->desc_table_num_elements)); + dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n", + drv_map, fw_map_dyn->ld_count); +#endif + desc_table = + (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset)); + if (desc_table != fw_map_dyn->raid_map_desc_table) + dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n", + desc_table, fw_map_dyn->raid_map_desc_table); + + ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count); + pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); + pDrvRaidMap->fpPdIoTimeoutSec = + fw_map_dyn->fp_pd_io_timeout_sec; + pDrvRaidMap->totalSize = sizeof(struct MR_DRV_RAID_MAP_ALL); + /* point to actual data starting point*/ + raid_map_data = (void *)fw_map_dyn + + le32_to_cpu(fw_map_dyn->desc_table_offset) + + le32_to_cpu(fw_map_dyn->desc_table_size); + + for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) { + +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, "desc table %p\n", + desc_table); + dev_dbg(&instance->pdev->dev, "raidmap type %d, raidmapOffset 0x%x\n", + desc_table->raid_map_desc_type, + desc_table->raid_map_desc_offset); + dev_dbg(&instance->pdev->dev, "raid map number of elements 0%x, raidmapsize 0x%x\n", + desc_table->raid_map_desc_elements, + desc_table->raid_map_desc_buffer_size); +#endif + switch (le32_to_cpu(desc_table->raid_map_desc_type)) { + case RAID_MAP_DESC_TYPE_DEVHDL_INFO: + fw_map_dyn->dev_hndl_info = + (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, "devHndlInfo address %p\n", + fw_map_dyn->dev_hndl_info); +#endif + memcpy(pDrvRaidMap->devHndlInfo, + fw_map_dyn->dev_hndl_info, + sizeof(struct MR_DEV_HANDLE_INFO) * + le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + case RAID_MAP_DESC_TYPE_TGTID_INFO: + fw_map_dyn->ld_tgt_id_to_ld = + (u16 *) (raid_map_data + + le32_to_cpu(desc_table->raid_map_desc_offset)); +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, "ldTgtIdToLd address %p\n", + fw_map_dyn->ld_tgt_id_to_ld); +#endif + for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { + pDrvRaidMap->ldTgtIdToLd[j] = + fw_map_dyn->ld_tgt_id_to_ld[j]; +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, " %d drv ldTgtIdToLd %d\n", + j, pDrvRaidMap->ldTgtIdToLd[j]); +#endif + } + break; + case RAID_MAP_DESC_TYPE_ARRAY_INFO: + fw_map_dyn->ar_map_info = + (struct MR_ARRAY_INFO *) + (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, "arMapInfo address %p\n", + fw_map_dyn->ar_map_info); +#endif + + memcpy(pDrvRaidMap->arMapInfo, + fw_map_dyn->ar_map_info, + sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + case RAID_MAP_DESC_TYPE_SPAN_INFO: + fw_map_dyn->ld_span_map = + (struct MR_LD_SPAN_MAP *) + (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); + memcpy(pDrvRaidMap->ldSpanMap, + fw_map_dyn->ld_span_map, + 
sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements)); +#if VD_EXT_DEBUG + dev_dbg(&instance->pdev->dev, "ldSpanMap address %p\n", + fw_map_dyn->ld_span_map); + dev_dbg(&instance->pdev->dev, "MR_LD_SPAN_MAP size 0x%lx\n", + sizeof(struct MR_LD_SPAN_MAP)); + for (j = 0; j < ld_count; j++) { + dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x\n", + j, j, fw_map_dyn->ld_span_map[j].ldRaid.targetId); + dev_dbg(&instance->pdev->dev, "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", + j, fw_map_dyn->ld_span_map[j].ldRaid.seqNum); + dev_dbg(&instance->pdev->dev, "fw_map_dyn->ld_span_map[%d].ldRaid.rowSize 0x%x\n", + j, (u32)fw_map_dyn->ld_span_map[j].ldRaid.rowSize); + + dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n", + j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId); + dev_dbg(&instance->pdev->dev, "DrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", + j, pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum); + dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n", + j, (u32)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize); + + dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n", + instance->unique_id, drv_map); + dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p/%p\n", + pDrvRaidMap, + &fw_map_dyn->ld_span_map[j].ldRaid, + &pDrvRaidMap->ldSpanMap[j].ldRaid); + } +#endif + break; + default: + dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n", + fw_map_dyn->desc_table_num_elements); + } + ++desc_table; + } + + } else if (instance->supportmax256vd) { + fw_map_ext = + (struct MR_FW_RAID_MAP_EXT *) fusion->ld_map[(instance->map_id & 1)]; + ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); + if (ld_count > MAX_LOGICAL_DRIVES_EXT) { + dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n"); + return; + } +#if VD_EXT_DEBUG + for (i = 0; i < ld_count; i++) { + dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :Index 0x%x\n", + instance->unique_id, i); + dev_dbg(&instance->pdev->dev, "Target Id 0x%x\n", + fw_map_ext->ldSpanMap[i].ldRaid.targetId); + dev_dbg(&instance->pdev->dev, "Seq Num 0x%x Size 0/%llx\n", + fw_map_ext->ldSpanMap[i].ldRaid.seqNum, + fw_map_ext->ldSpanMap[i].ldRaid.size); + } +#endif + + pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); + pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec; + for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) + pDrvRaidMap->ldTgtIdToLd[i] = + (u16)fw_map_ext->ldTgtIdToLd[i]; + memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, + sizeof(struct MR_LD_SPAN_MAP) * ld_count); +#if VD_EXT_DEBUG + for (i = 0; i < ld_count; i++) { + dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x\n", + i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId); + dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", + i, fw_map_ext->ldSpanMap[i].ldRaid.seqNum); + dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.rowSize 0x%x\n", + i, (u32)fw_map_ext->ldSpanMap[i].ldRaid.rowSize); + + dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n", + i, i, pDrvRaidMap->ldSpanMap[i].ldRaid.targetId); + dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", + i, pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum); + dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n", + i, 
(u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize); + + dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n", + instance->unique_id, drv_map); + dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p %p\n", + pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid, + &pDrvRaidMap->ldSpanMap[i].ldRaid); + } +#endif + memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo, + sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT); + memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo, + sizeof(struct MR_DEV_HANDLE_INFO) * + MAX_RAIDMAP_PHYSICAL_DEVICES); - if (instance->supportmax256vd) { - memcpy(fusion->ld_drv_map[instance->map_id & 1], - fusion->ld_map[instance->map_id & 1], - fusion->current_map_sz); /* New Raid map will not set totalSize, so keep expected value * for legacy code in ValidateMapInfo */ @@ -213,16 +399,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) } #endif - memset(drv_map, 0, fusion->drv_map_sz); pDrvRaidMap->totalSize = pFwRaidMap->totalSize; pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) pDrvRaidMap->ldTgtIdToLd[i] = (u8)pFwRaidMap->ldTgtIdToLd[i]; - for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS); - i < MAX_LOGICAL_DRIVES_EXT; i++) - pDrvRaidMap->ldTgtIdToLd[i] = 0xff; for (i = 0; i < ld_count; i++) { pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; #if VD_EXT_DEBUG @@ -279,7 +461,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) lbInfo = fusion->load_balance_info; ldSpanInfo = fusion->log_to_span; - if (instance->supportmax256vd) + if (instance->max_raid_mapsize) + expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL); + else if (instance->supportmax256vd) expected_size = sizeof(struct MR_FW_RAID_MAP_EXT); else expected_size = @@ -287,8 +471,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount))); if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { - dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n", - (unsigned int) expected_size); + dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x", + le32_to_cpu(pDrvRaidMap->totalSize)); + dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n", + (unsigned int) expected_size); dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), le32_to_cpu(pDrvRaidMap->totalSize)); @@ -787,7 +973,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) - pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; + pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { physArm = physArm + 1; pd = MR_ArPdGet(arRef, physArm, map); @@ -797,9 +983,16 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); - pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | - physArm; - io_info->span_arm = pRAID_Context->spanArm; + if (instance->is_ventura) { + ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + 
} else { + pRAID_Context->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->span_arm; + } return retval; } @@ -891,7 +1084,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) - pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; + pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { /* Get alternate Pd. */ physArm = physArm + 1; @@ -903,9 +1096,16 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); - pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | - physArm; - io_info->span_arm = pRAID_Context->spanArm; + if (instance->is_ventura) { + ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + } else { + pRAID_Context->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->span_arm; + } return retval; } @@ -1109,20 +1309,20 @@ MR_BuildRaidContext(struct megasas_instance *instance, regSize += stripSize; } - pRAID_Context->timeoutValue = + pRAID_Context->timeout_value = cpu_to_le16(raid->fpIoTimeoutForLd ? raid->fpIoTimeoutForLd : map->raidMap.fpPdIoTimeoutSec); if (fusion->adapter_type == INVADER_SERIES) - pRAID_Context->regLockFlags = (isRead) ? + pRAID_Context->reg_lock_flags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite; - else - pRAID_Context->regLockFlags = (isRead) ? + else if (!instance->is_ventura) + pRAID_Context->reg_lock_flags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; - pRAID_Context->VirtualDiskTgtId = raid->targetId; - pRAID_Context->regLockRowLBA = cpu_to_le64(regStart); - pRAID_Context->regLockLength = cpu_to_le32(regSize); - pRAID_Context->configSeqNum = raid->seqNum; + pRAID_Context->virtual_disk_tgt_id = raid->targetId; + pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart); + pRAID_Context->reg_lock_length = cpu_to_le32(regSize); + pRAID_Context->config_seq_num = raid->seqNum; /* save pointer to raid->LUN array */ *raidLUN = raid->LUN; @@ -1140,6 +1340,13 @@ MR_BuildRaidContext(struct megasas_instance *instance, /* If IO on an invalid Pd, then FP is not possible.*/ if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID)) io_info->fpOkForIo = FALSE; + /* if FP possible, set the SLUD bit in + * regLockFlags for ventura + */ + else if ((instance->is_ventura) && (!isRead) && + (raid->writeMode == MR_RL_WRITE_BACK_MODE) && + (raid->capability.fp_cache_bypass_capable)) + ((struct RAID_CONTEXT_G35 *) pRAID_Context)->routing_flags.bits.sld = 1; /* set raid 1/10 fast path write capable bit in io_info */ if (io_info->fpOkForIo && (io_info->r1_alt_dev_handle != MR_PD_INVALID) && @@ -1319,6 +1526,7 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, struct fusion_context *fusion; struct MR_LD_RAID *raid; struct MR_DRV_RAID_MAP_ALL *drv_map; + u16 pd1_dev_handle; u16 pend0, pend1, ld; u64 diff0, diff1; u8 bestArm, pd0, pd1, span, arm; @@ -1344,23 +1552,36 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ? 
(arm + 1 - span_row_size) : arm + 1, drv_map); - /* get the pending cmds for the data and mirror arms */ - pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); - pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); + /* Get PD1 Dev Handle */ + + pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map); + + if (pd1_dev_handle == MR_PD_INVALID) { + bestArm = arm; + } else { + /* get the pending cmds for the data and mirror arms */ + pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); + pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); - /* Determine the disk whose head is nearer to the req. block */ - diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); - diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); - bestArm = (diff0 <= diff1 ? arm : arm ^ 1); + /* Determine the disk whose head is nearer to the req. block */ + diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); + diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); + bestArm = (diff0 <= diff1 ? arm : arm ^ 1); - if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || + /* Make balance count from 16 to 4 to + * keep driver in sync with Firmware + */ + if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) - bestArm ^= 1; + bestArm ^= 1; + + /* Update the last accessed block on the correct pd */ + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; + io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1; + } - /* Update the last accessed block on the correct pd */ - io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1; lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1; - io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; #if SPAN_DEBUG if (arm != bestArm) dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance " diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 792f26918ed6..29e883fa3ead 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1829,7 +1829,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd) { u8 fp_possible; - u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; + u32 start_lba_lo, start_lba_hi, device_id, datalength = 0, ld; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct IO_REQUEST_INFO io_info; @@ -1837,16 +1837,18 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct MR_DRV_RAID_MAP_ALL *local_map_ptr; u8 *raidLUN; unsigned long spinlock_flags; + union RAID_CONTEXT_UNION *praid_context; + struct MR_LD_RAID *raid; device_id = MEGASAS_DEV_INDEX(scp); fusion = instance->ctrl_context; io_request = cmd->io_request; - io_request->RaidContext.raid_context.VirtualDiskTgtId = + io_request->RaidContext.raid_context.virtual_disk_tgt_id = cpu_to_le16(device_id); io_request->RaidContext.raid_context.status = 0; - io_request->RaidContext.raid_context.exStatus = 0; + io_request->RaidContext.raid_context.ex_status = 0; req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; @@ -1915,10 +1917,12 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.isRead = 1; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + raid = MR_LdRaidGet(ld, local_map_ptr); if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { - 
io_request->RaidContext.raid_context.regLockFlags = 0; + io_request->RaidContext.raid_context.reg_lock_flags = 0; fp_possible = 0; } else { if (MR_BuildRaidContext(instance, &io_info, @@ -1945,6 +1949,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, fp_possible = false; } + praid_context = &io_request->RaidContext; + if (fp_possible) { megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, local_map_ptr, start_lba_lo); @@ -1953,18 +1959,25 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { - if (io_request->RaidContext.raid_context.regLockFlags == + if (io_request->RaidContext.raid_context.reg_lock_flags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->RaidContext.raid_context.Type + io_request->RaidContext.raid_context.type = MPI2_TYPE_CUDA; io_request->RaidContext.raid_context.nseg = 0x1; io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); - io_request->RaidContext.raid_context.regLockFlags |= + io_request->RaidContext.raid_context.reg_lock_flags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); + } else if (instance->is_ventura) { + io_request->RaidContext.raid_context_g35.type + = MPI2_TYPE_CUDA; + io_request->RaidContext.raid_context_g35.nseg = 0x1; + io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1; + io_request->IoFlags |= + cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); } if ((fusion->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) { @@ -1974,6 +1987,13 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, &io_info); scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; cmd->pd_r1_lb = io_info.pd_after_lb; + if (instance->is_ventura) + io_request->RaidContext.raid_context_g35.span_arm + = io_info.span_arm; + else + io_request->RaidContext.raid_context.span_arm + = io_info.span_arm; + } else scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; @@ -1992,28 +2012,98 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->DevHandle = io_info.devHandle; /* populate the LUN field */ memcpy(io_request->LUN, raidLUN, 8); + if (instance->is_ventura) { + if (io_info.isRead) { + if ((raid->cpuAffinity.pdRead.cpu0) && + (raid->cpuAffinity.pdRead.cpu1)) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.pdRead.cpu1) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_1; + else + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_0; + } else { + if ((raid->cpuAffinity.pdWrite.cpu0) + && (raid->cpuAffinity.pdWrite.cpu1)) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.pdWrite.cpu1) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_1; + else + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_0; + if (praid_context->raid_context_g35.routing_flags.bits.sld) { + praid_context->raid_context_g35.raid_flags + = (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + } + } + } } else { - io_request->RaidContext.raid_context.timeoutValue = + io_request->RaidContext.raid_context.timeout_value = 
cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { if (io_info.do_fp_rlbypass || - (io_request->RaidContext.raid_context.regLockFlags + (io_request->RaidContext.raid_context.reg_lock_flags == REGION_TYPE_UNUSED)) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->RaidContext.raid_context.Type + io_request->RaidContext.raid_context.type = MPI2_TYPE_CUDA; - io_request->RaidContext.raid_context.regLockFlags |= + io_request->RaidContext.raid_context.reg_lock_flags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE); io_request->RaidContext.raid_context.nseg = 0x1; + } else if (instance->is_ventura) { + io_request->RaidContext.raid_context_g35.type + = MPI2_TYPE_CUDA; + io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1; + io_request->RaidContext.raid_context_g35.nseg = 0x1; } io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); + + if (instance->is_ventura) { + if (io_info.isRead) { + if ((raid->cpuAffinity.ldRead.cpu0) + && (raid->cpuAffinity.ldRead.cpu1)) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldRead.cpu1) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_1; + else + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_0; + } else { + if ((raid->cpuAffinity.ldWrite.cpu0) && + (raid->cpuAffinity.ldWrite.cpu1)) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldWrite.cpu1) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_1; + else + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_0; + + if (io_request->RaidContext.raid_context_g35.stream_detected + && (raid->level == 5) && + (raid->writeMode == MR_RL_WRITE_THROUGH_MODE)) { + if (praid_context->raid_context_g35.routing_flags.bits.cpu_sel + == MR_RAID_CTX_CPUSEL_FCFS) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_0; + } + } + } } /* Not FP */ } @@ -2048,9 +2138,9 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, /* get RAID_Context pointer */ pRAID_Context = &io_request->RaidContext.raid_context; /* Check with FW team */ - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); - pRAID_Context->regLockRowLBA = 0; - pRAID_Context->regLockLength = 0; + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->reg_lock_row_lba = 0; + pRAID_Context->reg_lock_length = 0; if (fusion->fast_path_io && ( device_id < instance->fw_supported_vd_count)) { @@ -2069,7 +2159,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); io_request->LUN[1] = scmd->device->lun; - pRAID_Context->timeoutValue = + pRAID_Context->timeout_value = cpu_to_le16 (scmd->request->timeout / HZ); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << @@ -2077,9 +2167,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, } else { /* set RAID context values */ - pRAID_Context->configSeqNum = raid->seqNum; 
- pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; - pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); + pRAID_Context->config_seq_num = raid->seqNum; + if (!instance->is_ventura) + pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; + pRAID_Context->timeout_value = + cpu_to_le16(raid->fpIoTimeoutForLd); /* get the DevHandle for the PD (since this is fpNonRWCapable, this is a single disk RAID0) */ @@ -2134,12 +2226,12 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, io_request = cmd->io_request; /* get RAID_Context pointer */ pRAID_Context = &io_request->RaidContext.raid_context; - pRAID_Context->regLockFlags = 0; - pRAID_Context->regLockRowLBA = 0; - pRAID_Context->regLockLength = 0; + pRAID_Context->reg_lock_flags = 0; + pRAID_Context->reg_lock_row_lba = 0; + pRAID_Context->reg_lock_length = 0; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); io_request->LUN[1] = scmd->device->lun; - pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD + pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; /* If FW supports PD sequence number */ @@ -2148,24 +2240,27 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, /* TgtId must be incremented by 255 as jbod seq number is index * below raid map */ - pRAID_Context->VirtualDiskTgtId = + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); - pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum; + pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; io_request->DevHandle = pd_sync->seq[pd_index].devHandle; - pRAID_Context->regLockFlags |= + if (instance->is_ventura) + io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1; + else + pRAID_Context->reg_lock_flags |= (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); - pRAID_Context->Type = MPI2_TYPE_CUDA; + pRAID_Context->type = MPI2_TYPE_CUDA; pRAID_Context->nseg = 0x1; } else if (fusion->fast_path_io) { - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); - pRAID_Context->configSeqNum = 0; + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->config_seq_num = 0; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DevHandle = local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; } else { /* Want to send all IO via FW path */ - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); - pRAID_Context->configSeqNum = 0; + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->config_seq_num = 0; io_request->DevHandle = cpu_to_le16(0xFFFF); } @@ -2181,14 +2276,14 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); + pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); } else { /* system pd Fast Path */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; timeout_limit = (scmd->device->type == TYPE_DISK) ? 255 : 0xFFFF; - pRAID_Context->timeoutValue = + pRAID_Context->timeout_value = cpu_to_le16((os_timeout_value > timeout_limit) ? 
timeout_limit : os_timeout_value); if (fusion->adapter_type == INVADER_SERIES) @@ -2227,8 +2322,8 @@ megasas_build_io_fusion(struct megasas_instance *instance, io_request->Control = 0; io_request->EEDPBlockSize = 0; io_request->ChainOffset = 0; - io_request->RaidContext.raid_context.RAIDFlags = 0; - io_request->RaidContext.raid_context.Type = 0; + io_request->RaidContext.raid_context.raid_flags = 0; + io_request->RaidContext.raid_context.type = 0; io_request->RaidContext.raid_context.nseg = 0; memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); @@ -2273,11 +2368,16 @@ megasas_build_io_fusion(struct megasas_instance *instance, return 1; } - /* numSGE store lower 8 bit of sge_count. - * numSGEExt store higher 8 bit of sge_count - */ - io_request->RaidContext.raid_context.numSGE = sge_count; - io_request->RaidContext.raid_context.numSGEExt = (u8)(sge_count >> 8); + if (instance->is_ventura) + io_request->RaidContext.raid_context_g35.num_sge = sge_count; + else { + /* numSGE store lower 8 bit of sge_count. + * numSGEExt store higher 8 bit of sge_count + */ + io_request->RaidContext.raid_context.num_sge = sge_count; + io_request->RaidContext.raid_context.num_sge_ext = + (u8)(sge_count >> 8); + } io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); @@ -2326,6 +2426,10 @@ void megasas_fpio_to_ldio(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd, struct scsi_cmnd *scmd) { struct fusion_context *fusion; + union RAID_CONTEXT_UNION *praid_context; + struct MR_LD_RAID *raid; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; + u32 device_id, ld; fusion = instance->ctrl_context; cmd->request_desc->SCSIIO.RequestFlags = @@ -2349,6 +2453,35 @@ void megasas_fpio_to_ldio(struct megasas_instance *instance, cmd->io_request->Control = 0; cmd->io_request->EEDPBlockSize = 0; cmd->is_raid_1_fp_write = 0; + + device_id = MEGASAS_DEV_INDEX(cmd->scmd); + local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + raid = MR_LdRaidGet(ld, local_map_ptr); + praid_context = &cmd->io_request->RaidContext; + if (cmd->scmd->sc_data_direction == PCI_DMA_FROMDEVICE) { + if ((raid->cpuAffinity.ldRead.cpu0) + && (raid->cpuAffinity.ldRead.cpu1)) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldRead.cpu1) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_1; + else + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_0; + } else { + if ((raid->cpuAffinity.ldWrite.cpu0) + && (raid->cpuAffinity.ldWrite.cpu1)) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldWrite.cpu1) + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_1; + else + praid_context->raid_context_g35.routing_flags.bits.cpu_sel + = MR_RAID_CTX_CPUSEL_0; + } } /* megasas_prepate_secondRaid1_IO @@ -2585,7 +2718,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) scmd_local = cmd_fusion->scmd; status = scsi_io_req->RaidContext.raid_context.status; - extStatus = scsi_io_req->RaidContext.raid_context.exStatus; + extStatus = scsi_io_req->RaidContext.raid_context.ex_status; sense = cmd_fusion->sense; data_length = scsi_io_req->DataLength; @@ -2653,13 +2786,13 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) status = r1_cmd->io_request->RaidContext.raid_context.status; extStatus = - 
r1_cmd->io_request->RaidContext.raid_context.exStatus; + r1_cmd->io_request->RaidContext.raid_context.ex_status; data_length = r1_cmd->io_request->DataLength; sense = r1_cmd->sense; } r1_cmd->io_request->RaidContext.raid_context.status = 0; - r1_cmd->io_request->RaidContext.raid_context.exStatus = 0; + r1_cmd->io_request->RaidContext.raid_context.ex_status = 0; cmd_fusion->is_raid_1_fp_write = 0; r1_cmd->is_raid_1_fp_write = 0; r1_cmd->cmd_completed = false; @@ -2669,10 +2802,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) if (!cmd_fusion->is_raid_1_fp_write) { map_cmd_status(fusion, scmd_local, status, extStatus, data_length, sense); - scsi_io_req->RaidContext.raid_context.status - = 0; - scsi_io_req->RaidContext.raid_context.exStatus - = 0; + scsi_io_req->RaidContext.raid_context.status = 0; + scsi_io_req->RaidContext.raid_context.ex_status = 0; megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); scmd_local->scsi_done(scmd_local); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 3a5af3bb9989..4adb0d490af0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -59,6 +59,8 @@ #define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10 #define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80 #define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8 +#define MR_RL_WRITE_THROUGH_MODE 0x00 +#define MR_RL_WRITE_BACK_MODE 0x01 /* T10 PI defines */ #define MR_PROT_INFO_TYPE_CONTROLLER 0x8 @@ -81,6 +83,11 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4, + MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6, + MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7 }; /* @@ -109,29 +116,29 @@ enum MR_FUSION_ADAPTER_TYPE { struct RAID_CONTEXT { #if defined(__BIG_ENDIAN_BITFIELD) - u8 nseg:4; - u8 Type:4; + u8 nseg:4; + u8 type:4; #else - u8 Type:4; - u8 nseg:4; + u8 type:4; + u8 nseg:4; #endif - u8 resvd0; - __le16 timeoutValue; - u8 regLockFlags; - u8 resvd1; - __le16 VirtualDiskTgtId; - __le64 regLockRowLBA; - __le32 regLockLength; - __le16 nextLMId; - u8 exStatus; - u8 status; - u8 RAIDFlags; - u8 numSGE; - __le16 configSeqNum; - u8 spanArm; - u8 priority; - u8 numSGEExt; - u8 resvd2; + u8 resvd0; + __le16 timeout_value; + u8 reg_lock_flags; + u8 resvd1; + __le16 virtual_disk_tgt_id; + __le64 reg_lock_row_lba; + __le32 reg_lock_length; + __le16 next_lmid; + u8 ex_status; + u8 status; + u8 raid_flags; + u8 num_sge; + __le16 config_seq_num; + u8 span_arm; + u8 priority; + u8 num_sge_ext; + u8 resvd2; }; /* @@ -187,7 +194,7 @@ struct RAID_CONTEXT_G35 { } smid; u8 ex_status; /* 0x16 : OUT */ u8 status; /* 0x17 status */ - u8 RAIDFlags; /* 0x18 resvd[7:6], ioSubType[5:4], + u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4], * resvd[3:1], preferredCpu[0] */ u8 span_arm; /* 0x1C span[7:5], arm[4:0] */ @@ -672,14 +679,17 @@ struct MPI2_IOC_INIT_REQUEST { #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) #define MAX_LOGICAL_DRIVES 64 #define MAX_LOGICAL_DRIVES_EXT 256 +#define MAX_LOGICAL_DRIVES_DYN 512 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) #define MAX_ARRAYS 128 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) #define MAX_ARRAYS_EXT 256 #define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT) +#define MAX_API_ARRAYS_DYN 512 #define MAX_PHYSICAL_DEVICES 256 #define 
MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) +#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/ @@ -726,12 +736,56 @@ struct MR_SPAN_BLOCK_INFO { struct MR_SPAN_INFO block_span_info; }; +#define MR_RAID_CTX_CPUSEL_0 0 +#define MR_RAID_CTX_CPUSEL_1 1 +#define MR_RAID_CTX_CPUSEL_2 2 +#define MR_RAID_CTX_CPUSEL_3 3 +#define MR_RAID_CTX_CPUSEL_FCFS 0xF + +struct MR_CPU_AFFINITY_MASK { + union { + struct { +#ifndef MFI_BIG_ENDIAN + u8 hw_path:1; + u8 cpu0:1; + u8 cpu1:1; + u8 cpu2:1; + u8 cpu3:1; + u8 reserved:3; +#else + u8 reserved:3; + u8 cpu3:1; + u8 cpu2:1; + u8 cpu1:1; + u8 cpu0:1; + u8 hw_path:1; +#endif + }; + u8 core_mask; + }; +}; + +struct MR_IO_AFFINITY { + union { + struct { + struct MR_CPU_AFFINITY_MASK pdRead; + struct MR_CPU_AFFINITY_MASK pdWrite; + struct MR_CPU_AFFINITY_MASK ldRead; + struct MR_CPU_AFFINITY_MASK ldWrite; + }; + u32 word; + }; + u8 maxCores; /* Total cores + HW Path in ROC */ + u8 reserved[3]; +}; + struct MR_LD_RAID { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved4:3; - u32 fp_cache_bypass_capable:1; - u32 fp_rmw_capable:1; + u32 reserved4:2; + u32 fp_cache_bypass_capable:1; + u32 fp_rmw_capable:1; + u32 disable_coalescing:1; u32 fpBypassRegionLock:1; u32 tmCapable:1; u32 fpNonRWCapable:1; @@ -759,9 +813,10 @@ struct MR_LD_RAID { u32 fpNonRWCapable:1; u32 tmCapable:1; u32 fpBypassRegionLock:1; - u32 fp_rmw_capable:1; - u32 fp_cache_bypass_capable:1; - u32 reserved4:3; + u32 disable_coalescing:1; + u32 fp_rmw_capable:1; + u32 fp_cache_bypass_capable:1; + u32 reserved4:2; #endif } capability; __le32 reserved6; @@ -788,7 +843,36 @@ struct MR_LD_RAID { u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ - u8 reserved3[0x80-0x2D]; /* 0x2D */ + /* Ox2D This LD accept priority boost of this type */ + u8 ld_accept_priority_type; + u8 reserved2[2]; /* 0x2E - 0x2F */ + /* 0x30 - 0x33, Logical block size for the LD */ + u32 logical_block_length; + struct { +#ifndef MFI_BIG_ENDIAN + /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */ + u32 ld_pi_exp:4; + /* 0x34, LOGICAL BLOCKS PER PHYSICAL + * BLOCK EXPONENT from READ CAPACITY 16 + */ + u32 ld_logical_block_exp:4; + u32 reserved1:24; /* 0x34 */ +#else + u32 reserved1:24; /* 0x34 */ + /* 0x34, LOGICAL BLOCKS PER PHYSICAL + * BLOCK EXPONENT from READ CAPACITY 16 + */ + u32 ld_logical_block_exp:4; + /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */ + u32 ld_pi_exp:4; +#endif + }; /* 0x34 - 0x37 */ + /* 0x38 - 0x3f, This will determine which + * core will process LD IO and PD IO. + */ + struct MR_IO_AFFINITY cpuAffinity; + /* Bit definiations are specified by MR_IO_AFFINITY */ + u8 reserved3[0x80-0x40]; /* 0x40 - 0x7f */ }; struct MR_LD_SPAN_MAP { @@ -846,6 +930,91 @@ struct MR_LD_TARGET_SYNC { __le16 seqNum; }; +/* + * RAID Map descriptor Types. 
+ * Each element should uniquely identify one data structure in the RAID map + */ +enum MR_RAID_MAP_DESC_TYPE { + /* MR_DEV_HANDLE_INFO data */ + RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0, + /* target to Ld num Index map */ + RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1, + /* MR_ARRAY_INFO data */ + RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2, + /* MR_LD_SPAN_MAP data */ + RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3, + RAID_MAP_DESC_TYPE_COUNT, +}; + +/* + * This table defines the offset, size and num elements of each descriptor + * type in the RAID Map buffer + */ +struct MR_RAID_MAP_DESC_TABLE { + /* Raid map descriptor type */ + u32 raid_map_desc_type; + /* Offset into the RAID map buffer where + * descriptor data is saved + */ + u32 raid_map_desc_offset; + /* total size of the + * descriptor buffer + */ + u32 raid_map_desc_buffer_size; + /* Number of elements contained in the + * descriptor buffer + */ + u32 raid_map_desc_elements; +}; + +/* + * Dynamic Raid Map Structure. + */ +struct MR_FW_RAID_MAP_DYNAMIC { + u32 raid_map_size; /* total size of RAID Map structure */ + u32 desc_table_offset;/* Offset of desc table into RAID map*/ + u32 desc_table_size; /* Total Size of desc table */ + /* Total Number of elements in the desc table */ + u32 desc_table_num_elements; + u64 reserved1; + u32 reserved2[3]; /*future use */ + /* timeout value used by driver in FP IOs */ + u8 fp_pd_io_timeout_sec; + u8 reserved3[3]; + /* when this seqNum increments, driver needs to + * release RMW buffers asap + */ + u32 rmw_fp_seq_num; + u16 ld_count; /* count of lds. */ + u16 ar_count; /* count of arrays */ + u16 span_count; /* count of spans */ + u16 reserved4[3]; +/* + * The below structure of pointers is only to be used by the driver. + * This is added in the API to reduce the amount of code changes + * needed in the driver to support the dynamic RAID map. Firmware should + * not update these pointers while preparing the raid map. + */ + union { + struct { + struct MR_DEV_HANDLE_INFO *dev_hndl_info; + u16 *ld_tgt_id_to_ld; + struct MR_ARRAY_INFO *ar_map_info; + struct MR_LD_SPAN_MAP *ld_span_map; + }; + u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT]; + }; +/* + * RAID Map descriptor table defines the layout of data in the RAID Map. + * The size of the descriptor table itself could change. + */ + /* Variable Size descriptor Table. 
*/ + struct MR_RAID_MAP_DESC_TABLE + raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT]; + /* Variable Size buffer containing all data */ + u32 raid_map_desc_data[1]; +}; /* Dynamically sized RAID Map structure */ + #define IEEE_SGE_FLAGS_ADDR_MASK (0x03) #define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) #define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) @@ -955,9 +1124,10 @@ struct MR_DRV_RAID_MAP { __le16 spanCount; __le16 reserve3; - struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; - u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; - struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; + struct MR_DEV_HANDLE_INFO + devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN]; + u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN]; + struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN]; struct MR_LD_SPAN_MAP ldSpanMap[1]; }; @@ -969,7 +1139,7 @@ struct MR_DRV_RAID_MAP { struct MR_DRV_RAID_MAP_ALL { struct MR_DRV_RAID_MAP raidMap; - struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1]; + struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1]; } __packed; @@ -1088,7 +1258,7 @@ struct fusion_context { u8 chain_offset_io_request; u8 chain_offset_mfi_pthru; - struct MR_FW_RAID_MAP_ALL *ld_map[2]; + struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2]; dma_addr_t ld_map_phys[2]; /*Non dma-able memory. Driver local copy.*/ @@ -1096,6 +1266,8 @@ struct fusion_context { u32 max_map_sz; u32 current_map_sz; + u32 old_map_sz; + u32 new_map_sz; u32 drv_map_sz; u32 drv_map_pages; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT]; -- cgit v1.2.3-59-g8ed1b From 9581ebebbe351d99579e8701e238c2771ccdae93 Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:49 -0500 Subject: scsi: megaraid_sas: Add the Support for SAS3.5 Generic Megaraid Controllers Capabilities The Megaraid driver has to support the SAS3.5 Generic Megaraid Controllers Firmware functionality. Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 53 ++++++++++++++--------------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 19 ++++++----- drivers/scsi/megaraid/megaraid_sas_fusion.h | 1 + 3 files changed, 37 insertions(+), 36 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index fd6ddde43d03..19bc3501f28f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5042,34 +5042,29 @@ static int megasas_init_fw(struct megasas_instance *instance) reg_set = instance->reg_set; - switch (instance->pdev->device) { - case PCI_DEVICE_ID_LSI_FUSION: - case PCI_DEVICE_ID_LSI_PLASMA: - case PCI_DEVICE_ID_LSI_INVADER: - case PCI_DEVICE_ID_LSI_FURY: - case PCI_DEVICE_ID_LSI_INTRUDER: - case PCI_DEVICE_ID_LSI_INTRUDER_24: - case PCI_DEVICE_ID_LSI_CUTLASS_52: - case PCI_DEVICE_ID_LSI_CUTLASS_53: + if (fusion) instance->instancet = &megasas_instance_template_fusion; - break; - case PCI_DEVICE_ID_LSI_SAS1078R: - case PCI_DEVICE_ID_LSI_SAS1078DE: - instance->instancet = &megasas_instance_template_ppc; - break; - case PCI_DEVICE_ID_LSI_SAS1078GEN2: - case PCI_DEVICE_ID_LSI_SAS0079GEN2: - instance->instancet = &megasas_instance_template_gen2; - break; - case PCI_DEVICE_ID_LSI_SAS0073SKINNY: - case PCI_DEVICE_ID_LSI_SAS0071SKINNY: - instance->instancet = &megasas_instance_template_skinny; - break; - case PCI_DEVICE_ID_LSI_SAS1064R: - case PCI_DEVICE_ID_DELL_PERC5: - default: - instance->instancet = &megasas_instance_template_xscale; - break; + else { + switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_SAS1078R: + case PCI_DEVICE_ID_LSI_SAS1078DE: + instance->instancet = &megasas_instance_template_ppc; + break; + case PCI_DEVICE_ID_LSI_SAS1078GEN2: + case PCI_DEVICE_ID_LSI_SAS0079GEN2: + instance->instancet = &megasas_instance_template_gen2; + break; + case PCI_DEVICE_ID_LSI_SAS0073SKINNY: + case PCI_DEVICE_ID_LSI_SAS0071SKINNY: + instance->instancet = &megasas_instance_template_skinny; + break; + case PCI_DEVICE_ID_LSI_SAS1064R: + case PCI_DEVICE_ID_DELL_PERC5: + default: + instance->instancet = &megasas_instance_template_xscale; + instance->pd_list_not_supported = 1; + break; + } } if (megasas_transition_to_ready(instance, 0)) { @@ -5819,7 +5814,9 @@ static int megasas_probe_one(struct pci_dev *pdev, if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) fusion->adapter_type = THUNDERBOLT_SERIES; - else if (!instance->is_ventura) + else if (instance->is_ventura) + fusion->adapter_type = VENTURA_SERIES; + else fusion->adapter_type = INVADER_SERIES; } break; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 29e883fa3ead..ac424ba59ceb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -244,7 +244,10 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c reg_set = instance->reg_set; - cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; + /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */ + if (!instance->is_ventura) + cur_max_fw_cmds = + readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; if (dual_qdepth_disable || !cur_max_fw_cmds) cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; @@ -837,7 +840,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) drv_ops = (MFI_CAPABILITIES *) 
&(init_frame->driver_operations); /* driver support Extended MSIX */ - if (fusion->adapter_type == INVADER_SERIES) + if (fusion->adapter_type >= INVADER_SERIES) drv_ops->mfi_capabilities.support_additional_msix = 1; /* driver supports HA / Remote LUN over Fast Path interface */ drv_ops->mfi_capabilities.support_fp_remote_lun = 1; @@ -1491,7 +1494,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, fusion = instance->ctrl_context; - if (fusion->adapter_type == INVADER_SERIES) { + if (fusion->adapter_type >= INVADER_SERIES) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; @@ -1508,7 +1511,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); sgl_ptr->Flags = 0; - if (fusion->adapter_type == INVADER_SERIES) + if (fusion->adapter_type >= INVADER_SERIES) if (i == sge_count - 1) sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; sgl_ptr++; @@ -1519,7 +1522,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, (sge_count > fusion->max_sge_in_main_msg)) { struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; - if (fusion->adapter_type == INVADER_SERIES) { + if (fusion->adapter_type >= INVADER_SERIES) { if ((le16_to_cpu(cmd->io_request->IoFlags) & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) @@ -1535,7 +1538,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, sg_chain = sgl_ptr; /* Prepare chain element */ sg_chain->NextChainOffset = 0; - if (fusion->adapter_type == INVADER_SERIES) + if (fusion->adapter_type >= INVADER_SERIES) sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; else sg_chain->Flags = @@ -2286,7 +2289,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, pRAID_Context->timeout_value = cpu_to_le16((os_timeout_value > timeout_limit) ? timeout_limit : os_timeout_value); - if (fusion->adapter_type == INVADER_SERIES) + if (fusion->adapter_type >= INVADER_SERIES) io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); @@ -2998,7 +3001,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, io_req = cmd->io_request; - if (fusion->adapter_type == INVADER_SERIES) { + if (fusion->adapter_type >= INVADER_SERIES) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 4adb0d490af0..a9bc9c0a1cb9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -107,6 +107,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { enum MR_FUSION_ADAPTER_TYPE { THUNDERBOLT_SERIES = 0, INVADER_SERIES = 1, + VENTURA_SERIES = 2, }; /* -- cgit v1.2.3-59-g8ed1b From 3e5eadb1a881bea2e3fa41f5ae7cdbfa36222d37 Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:50 -0500 Subject: scsi: megaraid_sas: Enable or Disable Fast path based on the PCI Threshold Bandwidth Large SEQ IO workload should sent as non fast path commands Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 8 +++++ drivers/scsi/megaraid/megaraid_sas_base.c | 48 +++++++++++++++++++++++++++++ drivers/scsi/megaraid/megaraid_sas_fp.c | 7 +++++ drivers/scsi/megaraid/megaraid_sas_fusion.c | 16 ++++++---- drivers/scsi/megaraid/megaraid_sas_fusion.h | 2 +- 5 files changed, 74 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 6ddf994a25b7..0696903e28fd 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT { #define MFI_1068_FW_HANDSHAKE_OFFSET 0x64 #define MFI_1068_FW_READY 0xDDDD0000 +#define MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL HZ + #define MR_MAX_REPLY_QUEUES_OFFSET 0X0000001F #define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 @@ -2101,6 +2103,10 @@ struct megasas_instance { atomic_t ldio_outstanding; atomic_t fw_reset_no_pci_access; + atomic64_t bytes_wrote; /* used for raid1 fast path enable or disable */ + atomic_t r1_write_fp_capable; + + struct megasas_instance_template *instancet; struct tasklet_struct isr_tasklet; struct work_struct work_init; @@ -2142,6 +2148,7 @@ struct megasas_instance { long reset_flags; struct mutex reset_mutex; struct timer_list sriov_heartbeat_timer; + struct timer_list r1_fp_hold_timer; char skip_heartbeat_timer_del; u8 requestorId; char PlasmaFW111; @@ -2158,6 +2165,7 @@ struct megasas_instance { bool is_ventura; bool msix_combined; u16 max_raid_mapsize; + u64 pci_threshold_bandwidth; /* used to control the fp writes */ }; struct MR_LD_VF_MAP { u32 size; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 19bc3501f28f..eba107898c7f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1940,6 +1940,9 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance) } /* Complete outstanding ioctls when adapter is killed */ megasas_complete_outstanding_ioctls(instance); + if (instance->is_ventura) + del_timer_sync(&instance->r1_fp_hold_timer); + } /** @@ -2438,6 +2441,24 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr) } } +/*Handler for disabling/enabling raid 1 fast paths*/ +void megasas_change_r1_fp_status(unsigned long instance_addr) +{ + struct megasas_instance *instance = + (struct megasas_instance *)instance_addr; + if (atomic64_read(&instance->bytes_wrote) >= + instance->pci_threshold_bandwidth) { + + atomic64_set(&instance->bytes_wrote, 0); + atomic_set(&instance->r1_write_fp_capable, 0); + } else { + atomic64_set(&instance->bytes_wrote, 0); + atomic_set(&instance->r1_write_fp_capable, 1); + } + mod_timer(&instance->r1_fp_hold_timer, + jiffies + MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); +} + /** * megasas_wait_for_outstanding - Wait for all outstanding cmds * @instance: Adapter soft state @@ -5362,6 +5383,17 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->skip_heartbeat_timer_del = 1; } + if (instance->is_ventura) { + atomic64_set(&instance->bytes_wrote, 0); + atomic_set(&instance->r1_write_fp_capable, 1); + megasas_start_timer(instance, + &instance->r1_fp_hold_timer, + megasas_change_r1_fp_status, + MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); + dev_info(&instance->pdev->dev, "starting the raid 1 fp timer with interval %d\n", + MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); + } + return 0; fail_get_ld_pd_list: @@ -6152,6 +6184,9 @@ 
megasas_suspend(struct pci_dev *pdev, pm_message_t state) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); + if (instance->is_ventura) + del_timer_sync(&instance->r1_fp_hold_timer); + megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); @@ -6278,6 +6313,16 @@ megasas_resume(struct pci_dev *pdev) megasas_setup_jbod_map(instance); instance->unload = 0; + if (instance->is_ventura) { + atomic64_set(&instance->bytes_wrote, 0); + atomic_set(&instance->r1_write_fp_capable, 1); + megasas_start_timer(instance, + &instance->r1_fp_hold_timer, + megasas_change_r1_fp_status, + MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); + } + + /* * Initiate AEN (Asynchronous Event Notification) */ @@ -6366,6 +6411,9 @@ static void megasas_detach_one(struct pci_dev *pdev) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); + if (instance->is_ventura) + del_timer_sync(&instance->r1_fp_hold_timer); + if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index f1384b01b3d3..322a72b593e3 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -197,6 +197,9 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) if (instance->max_raid_mapsize) { fw_map_dyn = fusion->ld_map[(instance->map_id & 1)]; + if (fw_map_dyn->pci_threshold_bandwidth) + instance->pci_threshold_bandwidth = + le64_to_cpu(fw_map_dyn->pci_threshold_bandwidth); #if VD_EXT_DEBUG dev_dbg(&instance->pdev->dev, "raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x\n", le32_to_cpu(fw_map_dyn->raid_map_size), @@ -204,6 +207,8 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) dev_dbg(&instance->pdev->dev, "descTableSize 0x%x descTableNumElements 0x%x\n", le32_to_cpu(fw_map_dyn->desc_table_size), le32_to_cpu(fw_map_dyn->desc_table_num_elements)); + dev_dbg(&instance->pdev->dev, "PCIThreasholdBandwidth %llu\n", + instance->pci_threshold_bandwidth); dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n", drv_map, fw_map_dyn->ld_count); #endif @@ -434,6 +439,8 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) sizeof(struct MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); } + if (instance->is_ventura && !instance->pci_threshold_bandwidth) + instance->pci_threshold_bandwidth = ULLONG_MAX; } /* diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ac424ba59ceb..4d655e456709 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -95,6 +95,7 @@ extern unsigned int resetwaittime; extern unsigned int dual_qdepth_disable; static void megasas_free_rdpq_fusion(struct megasas_instance *instance); static void megasas_free_reply_fusion(struct megasas_instance *instance); +void megasas_change_r1_fp_status(unsigned long instance_addr); @@ -2628,8 +2629,9 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, * to get new command */ if (cmd->is_raid_1_fp_write && - atomic_inc_return(&instance->fw_outstanding) > - (instance->host->can_queue)) { + (atomic_inc_return(&instance->fw_outstanding) > + (instance->host->can_queue) || + (!atomic_read(&instance->r1_write_fp_capable)))) { megasas_fpio_to_ldio(instance, cmd, cmd->scmd); 
atomic_dec(&instance->fw_outstanding); } else if (cmd->is_raid_1_fp_write) { @@ -2638,17 +2640,19 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); } - /* * Issue the command to the FW */ + if (scmd->sc_data_direction == PCI_DMA_TODEVICE && instance->is_ventura) + atomic64_add(scsi_bufflen(scmd), &instance->bytes_wrote); megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); - if (r1_cmd) + if (r1_cmd) { + atomic64_add(scsi_bufflen(scmd), &instance->bytes_wrote); megasas_fire_cmd_fusion(instance, r1_cmd->request_desc, - instance->is_ventura); - + instance->is_ventura); + } return 0; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index a9bc9c0a1cb9..391aae6e27a4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -977,7 +977,7 @@ struct MR_FW_RAID_MAP_DYNAMIC { u32 desc_table_size; /* Total Size of desc table */ /* Total Number of elements in the desc table */ u32 desc_table_num_elements; - u64 reserved1; + u64 pci_threshold_bandwidth; u32 reserved2[3]; /*future use */ /* timeout value used by driver in FP IOs */ u8 fp_pd_io_timeout_sec; -- cgit v1.2.3-59-g8ed1b From b71b49c209facf8fec3778142ae5e45bb6ca4afc Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:51 -0500 Subject: scsi: megaraid_sas: ldio_outstanding variable is not decremented in completion path ldio outstanding variable needs to be decremented in io completion path for iMR dual queue depth Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 4d655e456709..705102f5b3a2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2580,7 +2580,6 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, if (atomic_inc_return(&instance->fw_outstanding) > instance->host->can_queue) { - dev_err(&instance->pdev->dev, "Throttle IOs beyond Controller queue depth\n"); atomic_dec(&instance->fw_outstanding); return SCSI_MLQUEUE_HOST_BUSY; } @@ -2811,6 +2810,9 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) extStatus, data_length, sense); scsi_io_req->RaidContext.raid_context.status = 0; scsi_io_req->RaidContext.raid_context.ex_status = 0; + if (instance->ldio_threshold + && megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + atomic_dec(&instance->ldio_outstanding); megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); scmd_local->scsi_done(scmd_local); @@ -3959,7 +3961,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) scmd_local->result = megasas_check_mpio_paths(instance, scmd_local); - if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + if (instance->ldio_threshold && + megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) atomic_dec(&instance->ldio_outstanding); megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); -- cgit v1.2.3-59-g8ed1b From ede7c3ce82dc4001bbab33dddebab8c089f309e0 Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:52 -0500 Subject: scsi: megaraid_sas: Implement the PD Map support for SAS3.5 Generic Megaraid Controllers Update Linux driver to 
use new pdTargetId field for JBOD target ID Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 105 +++++++++++++++++++++------- drivers/scsi/megaraid/megaraid_sas_base.c | 3 + drivers/scsi/megaraid/megaraid_sas_fusion.c | 6 ++ drivers/scsi/megaraid/megaraid_sas_fusion.h | 3 +- 4 files changed, 89 insertions(+), 28 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 0696903e28fd..21c48e1aa52d 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1317,7 +1317,55 @@ struct megasas_ctrl_info { #endif } adapterOperations3; - u8 pad[0x800-0x7EC]; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:7; + /* Indicates whether the CPLD image is part of + * the package and stored in flash + */ + u8 cpld_in_flash:1; +#else + u8 cpld_in_flash:1; + u8 reserved:7; +#endif + u8 reserved1[3]; + /* Null terminated string. Has the version + * information if cpld_in_flash = FALSE + */ + u8 userCodeDefinition[12]; + } cpld; /* Valid only if upgradableCPLD is TRUE */ + + struct { + #if defined(__BIG_ENDIAN_BITFIELD) + u16 reserved:8; + u16 fw_swaps_bbu_vpd_info:1; + u16 support_pd_map_target_id:1; + u16 support_ses_ctrl_in_multipathcfg:1; + u16 image_upload_supported:1; + u16 support_encrypted_mfc:1; + u16 supported_enc_algo:1; + u16 support_ibutton_less:1; + u16 ctrl_info_ext_supported:1; + #else + + u16 ctrl_info_ext_supported:1; + u16 support_ibutton_less:1; + u16 supported_enc_algo:1; + u16 support_encrypted_mfc:1; + u16 image_upload_supported:1; + /* FW supports LUN based association and target port based */ + u16 support_ses_ctrl_in_multipathcfg:1; + /* association for the SES device connected in multipath mode */ + /* FW defines Jbod target Id within MR_PD_CFG_SEQ */ + u16 support_pd_map_target_id:1; + /* FW swaps relevant fields in MR_BBU_VPD_INFO_FIXED to + * provide the data in little endian order + */ + u16 fw_swaps_bbu_vpd_info:1; + u16 reserved:8; + #endif + } adapter_operations4; + u8 pad[0x800-0x7FE]; /* 0x7FE pad to 2K for expansion */ } __packed; /* @@ -1557,33 +1605,35 @@ union megasas_sgl_frame { typedef union _MFI_CAPABILITIES { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved:20; - u32 support_qd_throttling:1; - u32 support_fp_rlbypass:1; - u32 support_vfid_in_ioframe:1; - u32 support_ext_io_size:1; - u32 support_ext_queue_depth:1; - u32 security_protocol_cmds_fw:1; - u32 support_core_affinity:1; - u32 support_ndrive_r1_lb:1; - u32 support_max_255lds:1; - u32 support_fastpath_wb:1; - u32 support_additional_msix:1; - u32 support_fp_remote_lun:1; + u32 reserved:19; + u32 support_pd_map_target_id:1; + u32 support_qd_throttling:1; + u32 support_fp_rlbypass:1; + u32 support_vfid_in_ioframe:1; + u32 support_ext_io_size:1; + u32 support_ext_queue_depth:1; + u32 security_protocol_cmds_fw:1; + u32 support_core_affinity:1; + u32 support_ndrive_r1_lb:1; + u32 support_max_255lds:1; + u32 support_fastpath_wb:1; + u32 support_additional_msix:1; + u32 support_fp_remote_lun:1; #else - u32 support_fp_remote_lun:1; - u32 support_additional_msix:1; - u32 support_fastpath_wb:1; - u32 support_max_255lds:1; - u32 support_ndrive_r1_lb:1; - u32 support_core_affinity:1; - u32 security_protocol_cmds_fw:1; - u32 support_ext_queue_depth:1; - u32 support_ext_io_size:1; - u32 support_vfid_in_ioframe:1; - u32 support_fp_rlbypass:1; - u32 support_qd_throttling:1; - u32 reserved:20; + u32 support_fp_remote_lun:1; + 
u32 support_additional_msix:1; + u32 support_fastpath_wb:1; + u32 support_max_255lds:1; + u32 support_ndrive_r1_lb:1; + u32 support_core_affinity:1; + u32 security_protocol_cmds_fw:1; + u32 support_ext_queue_depth:1; + u32 support_ext_io_size:1; + u32 support_vfid_in_ioframe:1; + u32 support_fp_rlbypass:1; + u32 support_qd_throttling:1; + u32 support_pd_map_target_id:1; + u32 reserved:19; #endif } mfi_capabilities; __le32 reg; @@ -2052,6 +2102,7 @@ struct megasas_instance { u32 crash_dump_drv_support; u32 crash_dump_app_support; u32 secure_jbod_support; + u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */ bool use_seqnum_jbod_fp; /* Added for PD sequence */ spinlock_t crashdump_lock; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index eba107898c7f..70891a76ca86 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4576,6 +4576,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance) le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); + le16_to_cpus((u16 *)&ctrl_info->adapter_operations4); /* Update the latest Ext VD info. * From Init path, store current firmware details. @@ -4585,6 +4586,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance) megasas_update_ext_vd_details(instance); instance->use_seqnum_jbod_fp = ctrl_info->adapterOperations3.useSeqNumJbodFP; + instance->support_morethan256jbod = + ctrl_info->adapter_operations4.support_pd_map_target_id; /*Check whether controller is iMR or MR */ instance->is_imr = (ctrl_info->memory_size ? 0 : 1); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 705102f5b3a2..9a9c84fb91b1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -858,6 +858,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) drv_ops->mfi_capabilities.support_ext_queue_depth = 1; drv_ops->mfi_capabilities.support_qd_throttling = 1; + drv_ops->mfi_capabilities.support_pd_map_target_id = 1; /* Convert capability to LE32 */ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); @@ -2244,6 +2245,11 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, /* TgtId must be incremented by 255 as jbod seq number is index * below raid map */ + /* More than 256 PD/JBOD support for Ventura */ + if (instance->support_morethan256jbod) + pRAID_Context->virtual_disk_tgt_id = + pd_sync->seq[pd_index].pd_target_id; + else pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 391aae6e27a4..3addd0cb2e99 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -1189,7 +1189,8 @@ struct MR_PD_CFG_SEQ { u8 reserved:7; #endif } capability; - u8 reserved[3]; + u8 reserved; + u16 pd_target_id; } __packed; struct MR_PD_CFG_SEQ_NUM_SYNC { -- cgit v1.2.3-59-g8ed1b From 223e4b93e61f7538681632bfb19edd4f27a0c319 Mon Sep 17 00:00:00 2001 From: Sasikumar Chandrasekaran Date: Tue, 10 Jan 2017 18:20:53 -0500 Subject: scsi: megaraid_sas: driver version upgrade Upgrade driver version. 
Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 21c48e1aa52d..ba9fbb71eb35 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.812.07.00-rc1" -#define MEGASAS_RELDATE "August 22, 2016" +#define MEGASAS_VERSION "07.700.00.00-rc1" +#define MEGASAS_RELDATE "November 29, 2016" /* * Device IDs -- cgit v1.2.3-59-g8ed1b From 2e48e3491189c40dc9ea9d4a53412d2b66c87555 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 18 Nov 2016 07:02:38 +0100 Subject: scsi: vmw_pvscsi: switch to pci_alloc_irq_vectors And simplify the interrupt handler by splitting the INTx case that needs to deal with shared interrupts into a separate helper. [mkp: typo fixage] Signed-off-by: Christoph Hellwig Acked-by: Jim Gill Signed-off-by: Martin K. Petersen --- drivers/scsi/vmw_pvscsi.c | 104 +++++++++++++++++----------------------------- drivers/scsi/vmw_pvscsi.h | 5 --- 2 files changed, 38 insertions(+), 71 deletions(-) diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 15ca09cd16f3..ef474a748744 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -68,10 +68,7 @@ struct pvscsi_ctx { struct pvscsi_adapter { char *mmioBase; - unsigned int irq; u8 rev; - bool use_msi; - bool use_msix; bool use_msg; bool use_req_threshold; @@ -1161,30 +1158,26 @@ static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter, static irqreturn_t pvscsi_isr(int irq, void *devp) { struct pvscsi_adapter *adapter = devp; - int handled; - - if (adapter->use_msi || adapter->use_msix) - handled = true; - else { - u32 val = pvscsi_read_intr_status(adapter); - handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0; - if (handled) - pvscsi_write_intr_status(devp, val); - } - - if (handled) { - unsigned long flags; + unsigned long flags; - spin_lock_irqsave(&adapter->hw_lock, flags); + spin_lock_irqsave(&adapter->hw_lock, flags); + pvscsi_process_completion_ring(adapter); + if (adapter->use_msg && pvscsi_msg_pending(adapter)) + queue_work(adapter->workqueue, &adapter->work); + spin_unlock_irqrestore(&adapter->hw_lock, flags); - pvscsi_process_completion_ring(adapter); - if (adapter->use_msg && pvscsi_msg_pending(adapter)) - queue_work(adapter->workqueue, &adapter->work); + return IRQ_HANDLED; +} - spin_unlock_irqrestore(&adapter->hw_lock, flags); - } +static irqreturn_t pvscsi_shared_isr(int irq, void *devp) +{ + struct pvscsi_adapter *adapter = devp; + u32 val = pvscsi_read_intr_status(adapter); - return IRQ_RETVAL(handled); + if (!(val & PVSCSI_INTR_ALL_SUPPORTED)) + return IRQ_NONE; + pvscsi_write_intr_status(devp, val); + return pvscsi_isr(irq, devp); } static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) @@ -1196,34 +1189,10 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); } -static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, - unsigned int *irq) -{ - struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; - int ret; - - ret = pci_enable_msix_exact(adapter->dev, &entry, 1); - if (ret) - return ret; - - *irq = entry.vector; - - return 0; -} - static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) { - if 
(adapter->irq) { - free_irq(adapter->irq, adapter); - adapter->irq = 0; - } - if (adapter->use_msi) { - pci_disable_msi(adapter->dev); - adapter->use_msi = 0; - } else if (adapter->use_msix) { - pci_disable_msix(adapter->dev); - adapter->use_msix = 0; - } + free_irq(pci_irq_vector(adapter->dev, 0), adapter); + pci_free_irq_vectors(adapter->dev); } static void pvscsi_release_resources(struct pvscsi_adapter *adapter) @@ -1359,11 +1328,11 @@ exit: static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY; struct pvscsi_adapter *adapter; struct pvscsi_adapter adapter_temp; struct Scsi_Host *host = NULL; unsigned int i; - unsigned long flags = 0; int error; u32 max_id; @@ -1512,30 +1481,33 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_reset_adapter; } - if (!pvscsi_disable_msix && - pvscsi_setup_msix(adapter, &adapter->irq) == 0) { - printk(KERN_INFO "vmw_pvscsi: using MSI-X\n"); - adapter->use_msix = 1; - } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) { - printk(KERN_INFO "vmw_pvscsi: using MSI\n"); - adapter->use_msi = 1; - adapter->irq = pdev->irq; - } else { - printk(KERN_INFO "vmw_pvscsi: using INTx\n"); - adapter->irq = pdev->irq; - flags = IRQF_SHARED; - } + if (pvscsi_disable_msix) + irq_flag &= ~PCI_IRQ_MSIX; + if (pvscsi_disable_msi) + irq_flag &= ~PCI_IRQ_MSI; + + error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag); + if (error) + goto out_reset_adapter; adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n", adapter->use_req_threshold ? "en" : "dis"); - error = request_irq(adapter->irq, pvscsi_isr, flags, - "vmw_pvscsi", adapter); + if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) { + printk(KERN_INFO "vmw_pvscsi: using MSI%s\n", + adapter->dev->msix_enabled ? "-X" : ""); + error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr, + 0, "vmw_pvscsi", adapter); + } else { + printk(KERN_INFO "vmw_pvscsi: using INTx\n"); + error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr, + IRQF_SHARED, "vmw_pvscsi", adapter); + } + if (error) { printk(KERN_ERR "vmw_pvscsi: unable to request IRQ: %d\n", error); - adapter->irq = 0; goto out_reset_adapter; } diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index d41292ef85f2..75966d3f326e 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -422,11 +422,6 @@ struct PVSCSIConfigPageController { */ #define PVSCSI_MAX_INTRS 24 -/* - * Enumeration of supported MSI-X vectors - */ -#define PVSCSI_VECTOR_COMPLETION 0 - /* * Misc constants for the rings. */ -- cgit v1.2.3-59-g8ed1b From 73eba2be9203c0eef37df6939ebba4d34227f284 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Tue, 10 Jan 2017 16:48:25 -0800 Subject: scsi: ufs: fix arguments order some trace calls Colin Ian King reported that with commit 7ff5ab473633 ("scsi: ufs: add tracing support") static analysis is reporting that we may have swapped arguments on calls to: trace_ufshcd_runtime_resume, trace_ufshcd_runtime_suspend, trace_ufshcd_system_suspend, trace_ufshcd_system_resume, and trace_ufshcd_init Where: hba->uic_link_state is passed to dev_state hba->curr_dev_pwr_mode is passed to link_state This wasn't intentional so it's a bug. This change fixed this bug. Reported-by: Colin Ian King Signed-off-by: Subhash Jadavani Acked-by: Colin Ian King Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index dfad19ff166c..a70bf066f277 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5803,7 +5803,7 @@ out: trace_ufshcd_init(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), - hba->uic_link_state, hba->curr_dev_pwr_mode); + hba->curr_dev_pwr_mode, hba->uic_link_state); return ret; } @@ -6817,7 +6817,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba) out: trace_ufshcd_system_suspend(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), - hba->uic_link_state, hba->curr_dev_pwr_mode); + hba->curr_dev_pwr_mode, hba->uic_link_state); if (!ret) hba->is_sys_suspended = true; return ret; @@ -6850,7 +6850,7 @@ int ufshcd_system_resume(struct ufs_hba *hba) out: trace_ufshcd_system_resume(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), - hba->uic_link_state, hba->curr_dev_pwr_mode); + hba->curr_dev_pwr_mode, hba->uic_link_state); return ret; } EXPORT_SYMBOL(ufshcd_system_resume); @@ -6878,7 +6878,7 @@ int ufshcd_runtime_suspend(struct ufs_hba *hba) out: trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), - hba->uic_link_state, hba->curr_dev_pwr_mode); + hba->curr_dev_pwr_mode, hba->uic_link_state); return ret; } EXPORT_SYMBOL(ufshcd_runtime_suspend); @@ -6919,7 +6919,7 @@ int ufshcd_runtime_resume(struct ufs_hba *hba) out: trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), - hba->uic_link_state, hba->curr_dev_pwr_mode); + hba->curr_dev_pwr_mode, hba->uic_link_state); return ret; } EXPORT_SYMBOL(ufshcd_runtime_resume); -- cgit v1.2.3-59-g8ed1b From 9c7d1ee5f13a7130f6d3df307ec010e9e003fa98 Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 11 Jan 2017 19:19:08 -0600 Subject: scsi: cxlflash: Refactor context reset to share reset logic As staging for supporting hardware with different context reset registers but a similar reset procedure, refactor the existing context reset routine to move the reset logic to a common routine. This will allow hardware with a different reset register to leverage existing code. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/main.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index b17ebf6d0a7e..a990efb27197 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -188,10 +188,11 @@ static void cmd_complete(struct afu_cmd *cmd) } /** - * context_reset_ioarrin() - reset command owner context via IOARRIN register + * context_reset() - reset command owner context via specified register * @cmd: AFU command that timed out. + * @reset_reg: MMIO register to perform reset. 
*/ -static void context_reset_ioarrin(struct afu_cmd *cmd) +static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg) { int nretry = 0; u64 rrin = 0x1; @@ -201,9 +202,9 @@ static void context_reset_ioarrin(struct afu_cmd *cmd) pr_debug("%s: cmd=%p\n", __func__, cmd); - writeq_be(rrin, &afu->host_map->ioarrin); + writeq_be(rrin, reset_reg); do { - rrin = readq_be(&afu->host_map->ioarrin); + rrin = readq_be(reset_reg); if (rrin != 0x1) break; /* Double delay each time */ @@ -214,6 +215,17 @@ static void context_reset_ioarrin(struct afu_cmd *cmd) __func__, rrin, nretry); } +/** + * context_reset_ioarrin() - reset command owner context via IOARRIN register + * @cmd: AFU command that timed out. + */ +static void context_reset_ioarrin(struct afu_cmd *cmd) +{ + struct afu *afu = cmd->parent; + + context_reset(cmd, &afu->host_map->ioarrin); +} + /** * send_cmd_ioarrin() - sends an AFU command via IOARRIN register * @afu: AFU associated with the host. -- cgit v1.2.3-59-g8ed1b From 696d0b0c715360ce28fedd3c8b009d3771a5ddeb Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 11 Jan 2017 19:19:33 -0600 Subject: scsi: cxlflash: Support SQ Command Mode The SISLite specification outlines a new queuing model to improve over the MMIO-based IOARRIN model that exists today. This new model uses a submission queue that exists in host memory and is shared with the device. Each entry in the queue is an IOARCB that describes a transfer request. When requests are submitted, IOARCBs ('current' position tracked in host software) are populated and the submission queue tail pointer is then updated via MMIO to make the device aware of the requests. Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/common.h | 30 +++++++++++- drivers/scsi/cxlflash/main.c | 98 ++++++++++++++++++++++++++++++++++++-- drivers/scsi/cxlflash/sislite.h | 19 +++++++- drivers/scsi/cxlflash/superpipe.c | 18 +++++-- include/uapi/scsi/cxlflash_ioctl.h | 1 + 5 files changed, 153 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 0e9de5d62da2..dee865735ac0 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -54,6 +54,9 @@ extern const struct file_operations cxlflash_cxl_fops; /* RRQ for master issued cmds */ #define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS +/* SQ for master issued cmds */ +#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS + static inline void check_sizes(void) { @@ -155,8 +158,8 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) struct afu { /* Stuff requiring alignment go first. */ - - u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ + struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */ + u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ /* Beware of alignment till here. 
Preferably introduce new * fields after this point @@ -174,6 +177,12 @@ struct afu { struct kref mapcount; ctx_hndl_t ctx_hndl; /* master's context handle */ + + atomic_t hsq_credits; + spinlock_t hsq_slock; + struct sisl_ioarcb *hsq_start; + struct sisl_ioarcb *hsq_end; + struct sisl_ioarcb *hsq_curr; u64 *hrrq_start; u64 *hrrq_end; u64 *hrrq_curr; @@ -191,6 +200,23 @@ struct afu { }; +static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode) +{ + u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT; + + return afu_cap & cmd_mode; +} + +static inline bool afu_is_sq_cmd_mode(struct afu *afu) +{ + return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE); +} + +static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu) +{ + return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE); +} + static inline u64 lun_to_lunid(u64 lun) { __be64 lun_id; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index a990efb27197..d2bac4b7b85f 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -226,6 +226,17 @@ static void context_reset_ioarrin(struct afu_cmd *cmd) context_reset(cmd, &afu->host_map->ioarrin); } +/** + * context_reset_sq() - reset command owner context w/ SQ Context Reset register + * @cmd: AFU command that timed out. + */ +static void context_reset_sq(struct afu_cmd *cmd) +{ + struct afu *afu = cmd->parent; + + context_reset(cmd, &afu->host_map->sq_ctx_reset); +} + /** * send_cmd_ioarrin() - sends an AFU command via IOARRIN register * @afu: AFU associated with the host. @@ -268,6 +279,49 @@ out: return rc; } +/** + * send_cmd_sq() - sends an AFU command via SQ ring + * @afu: AFU associated with the host. + * @cmd: AFU command to send. + * + * Return: + * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure + */ +static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd) +{ + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + int rc = 0; + int newval; + ulong lock_flags; + + newval = atomic_dec_if_positive(&afu->hsq_credits); + if (newval <= 0) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + cmd->rcb.ioasa = &cmd->sa; + + spin_lock_irqsave(&afu->hsq_slock, lock_flags); + + *afu->hsq_curr = cmd->rcb; + if (afu->hsq_curr < afu->hsq_end) + afu->hsq_curr++; + else + afu->hsq_curr = afu->hsq_start; + writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail); + + spin_unlock_irqrestore(&afu->hsq_slock, lock_flags); +out: + dev_dbg(dev, "%s: cmd=%p len=%d ea=%p ioasa=%p rc=%d curr=%p " + "head=%016llX tail=%016llX\n", __func__, cmd, cmd->rcb.data_len, + (void *)cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr, + readq_be(&afu->host_map->sq_head), + readq_be(&afu->host_map->sq_tail)); + return rc; +} + /** * wait_resp() - polls for a response or timeout to a sent AFU command * @afu: AFU associated with the host. @@ -739,7 +793,7 @@ static int alloc_mem(struct cxlflash_cfg *cfg) int rc = 0; struct device *dev = &cfg->dev->dev; - /* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */ + /* AFU is ~28k, i.e. 
only one 64k page or up to seven 4k pages */ cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(sizeof(struct afu))); if (unlikely(!cfg->afu)) { @@ -1127,6 +1181,8 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data) { struct afu *afu = (struct afu *)data; struct afu_cmd *cmd; + struct sisl_ioasa *ioasa; + struct sisl_ioarcb *ioarcb; bool toggle = afu->toggle; u64 entry, *hrrq_start = afu->hrrq_start, @@ -1140,7 +1196,16 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data) if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) break; - cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT); + entry &= ~SISL_RESP_HANDLE_T_BIT; + + if (afu_is_sq_cmd_mode(afu)) { + ioasa = (struct sisl_ioasa *)entry; + cmd = container_of(ioasa, struct afu_cmd, sa); + } else { + ioarcb = (struct sisl_ioarcb *)entry; + cmd = container_of(ioarcb, struct afu_cmd, rcb); + } + cmd_complete(cmd); /* Advance to next entry or wrap and flip the toggle bit */ @@ -1150,6 +1215,8 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data) hrrq_curr = hrrq_start; toggle ^= SISL_RESP_HANDLE_T_BIT; } + + atomic_inc(&afu->hsq_credits); } afu->hrrq_curr = hrrq_curr; @@ -1402,10 +1469,15 @@ static int init_global(struct cxlflash_cfg *cfg) pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]); - /* Set up RRQ in AFU for master issued cmds */ + /* Set up RRQ and SQ in AFU for master issued cmds */ writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); + if (afu_is_sq_cmd_mode(afu)) { + writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start); + writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end); + } + /* AFU configuration */ reg = readq_be(&afu->afu_map->global.regs.afu_config); reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; @@ -1480,6 +1552,17 @@ static int start_afu(struct cxlflash_cfg *cfg) afu->hrrq_curr = afu->hrrq_start; afu->toggle = 1; + /* Initialize SQ */ + if (afu_is_sq_cmd_mode(afu)) { + memset(&afu->sq, 0, sizeof(afu->sq)); + afu->hsq_start = &afu->sq[0]; + afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1]; + afu->hsq_curr = afu->hsq_start; + + spin_lock_init(&afu->hsq_slock); + atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1); + } + rc = init_global(cfg); pr_debug("%s: returning rc=%d\n", __func__, rc); @@ -1641,8 +1724,13 @@ static int init_afu(struct cxlflash_cfg *cfg) goto err2; } - afu->send_cmd = send_cmd_ioarrin; - afu->context_reset = context_reset_ioarrin; + if (afu_is_sq_cmd_mode(afu)) { + afu->send_cmd = send_cmd_sq; + afu->context_reset = context_reset_sq; + } else { + afu->send_cmd = send_cmd_ioarrin; + afu->context_reset = context_reset_ioarrin; + } pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__, afu->version, afu->interface_version); diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index 1a2d09c148b3..a6e48a893fef 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -72,7 +72,10 @@ struct sisl_ioarcb { u16 timeout; /* in units specified by req_flags */ u32 rsvd1; u8 cdb[16]; /* must be in big endian */ - u64 reserved; /* Reserved area */ + union { + u64 reserved; /* Reserved for IOARRIN mode */ + struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */ + }; } __packed; struct sisl_rc { @@ -260,6 +263,11 @@ struct sisl_host_map { __be64 cmd_room; __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ __be64 mbox_w; /* restricted use */ + __be64 sq_start; /* Submission Queue (R/W): write sequence and */ + __be64 
sq_end; /* inclusion semantics are the same as RRQ */ + __be64 sq_head; /* Submission Queue Head (R): for debugging */ + __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */ + __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */ }; /* per context provisioning & control MMIO */ @@ -348,6 +356,15 @@ struct sisl_global_regs { __be64 rsvd[0xf8]; __le64 afu_version; __be64 interface_version; +#define SISL_INTVER_CAP_SHIFT 16 +#define SISL_INTVER_MAJ_SHIFT 8 +#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL +#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL +#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL +#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL +#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL +#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL +#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL }; #define CXLFLASH_NUM_FC_PORTS 2 diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index 9636970d9611..42674ae6f4dd 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -1287,6 +1287,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, int rc = 0; u32 perms; int ctxid = -1; + u64 flags = 0UL; u64 rctxid = 0UL; struct file *file = NULL; @@ -1426,10 +1427,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, out_attach: if (fd != -1) - attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD; - else - attach->hdr.return_flags = 0; + flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD; + if (afu_is_sq_cmd_mode(afu)) + flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE; + attach->hdr.return_flags = flags; attach->context_id = ctxi->ctxid; attach->block_size = gli->blk_len; attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea); @@ -1617,6 +1619,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, struct afu *afu = cfg->afu; struct ctx_info *ctxi = NULL; struct mutex *mutex = &cfg->ctx_recovery_mutex; + u64 flags; u64 ctxid = DECODE_CTXID(recover->context_id), rctxid = recover->context_id; long reg; @@ -1672,11 +1675,16 @@ retry_recover: } ctxi->err_recovery_active = false; + + flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD | + DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; + if (afu_is_sq_cmd_mode(afu)) + flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE; + + recover->hdr.return_flags = flags; recover->context_id = ctxi->ctxid; recover->adap_fd = new_adap_fd; recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); - recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD | - DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; goto out; } diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h index 6bf1f8a022b1..e9fdc12ad984 100644 --- a/include/uapi/scsi/cxlflash_ioctl.h +++ b/include/uapi/scsi/cxlflash_ioctl.h @@ -40,6 +40,7 @@ struct dk_cxlflash_hdr { */ #define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL #define DK_CXLFLASH_APP_CLOSE_ADAP_FD 0x0000000000000002ULL +#define DK_CXLFLASH_CONTEXT_SQ_CMD_MODE 0x0000000000000004ULL /* * General Notes: -- cgit v1.2.3-59-g8ed1b From fb67d44dfbdf85d984b9b40284e90636a3a7b21d Mon Sep 17 00:00:00 2001 From: "Matthew R. Ochs" Date: Wed, 11 Jan 2017 19:19:47 -0600 Subject: scsi: cxlflash: Cleanup prints The usage of prints within the cxlflash driver is inconsistent. This hinders debug and makes the driver source and log output appear sloppy. 
The following cleanups help unify the prints within cxlflash: - move all prints to dev-* where possible - transition all hex prints to lowercase - standardize variable prints in debug output - derive pointers in a consistent manner - change int to bool where appropriate - remove superfluous data from prints and print statements that do not make sense Signed-off-by: Matthew R. Ochs Signed-off-by: Uma Krishnan Reviewed-by: Andrew Donnellan Signed-off-by: Martin K. Petersen --- drivers/scsi/cxlflash/lunmgt.c | 31 ++-- drivers/scsi/cxlflash/main.c | 319 ++++++++++++++++++-------------------- drivers/scsi/cxlflash/superpipe.c | 165 ++++++++++---------- drivers/scsi/cxlflash/vlun.c | 169 ++++++++++---------- 4 files changed, 346 insertions(+), 338 deletions(-) diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c index 6c318db90c85..0efed177cc8b 100644 --- a/drivers/scsi/cxlflash/lunmgt.c +++ b/drivers/scsi/cxlflash/lunmgt.c @@ -32,11 +32,13 @@ */ static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid) { + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct llun_info *lli = NULL; lli = kzalloc(sizeof(*lli), GFP_KERNEL); if (unlikely(!lli)) { - pr_err("%s: could not allocate lli\n", __func__); + dev_err(dev, "%s: could not allocate lli\n", __func__); goto out; } @@ -58,11 +60,13 @@ out: */ static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid) { + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct glun_info *gli = NULL; gli = kzalloc(sizeof(*gli), GFP_KERNEL); if (unlikely(!gli)) { - pr_err("%s: could not allocate gli\n", __func__); + dev_err(dev, "%s: could not allocate gli\n", __func__); goto out; } @@ -129,10 +133,10 @@ static struct glun_info *lookup_global(u8 *wwid) */ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid) { + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct llun_info *lli = NULL; struct glun_info *gli = NULL; - struct Scsi_Host *shost = sdev->host; - struct cxlflash_cfg *cfg = shost_priv(shost); if (unlikely(!wwid)) goto out; @@ -165,7 +169,7 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid) list_add(&gli->list, &global.gluns); out: - pr_debug("%s: returning %p\n", __func__, lli); + dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli); return lli; } @@ -225,17 +229,18 @@ void cxlflash_term_global_luns(void) int cxlflash_manage_lun(struct scsi_device *sdev, struct dk_cxlflash_manage_lun *manage) { - int rc = 0; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct llun_info *lli = NULL; + int rc = 0; u64 flags = manage->hdr.flags; u32 chan = sdev->channel; mutex_lock(&global.mutex); lli = find_and_create_lun(sdev, manage->wwid); - pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n", - __func__, get_unaligned_be64(&manage->wwid[0]), - get_unaligned_be64(&manage->wwid[8]), - manage->hdr.flags, lli); + dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n", + __func__, get_unaligned_be64(&manage->wwid[0]), + get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli); if (unlikely(!lli)) { rc = -ENOMEM; goto out; @@ -265,11 +270,11 @@ int cxlflash_manage_lun(struct scsi_device *sdev, } } - pr_debug("%s: port_sel = %08X chan = %u lun_id = %016llX\n", __func__, - lli->port_sel, chan, lli->lun_id[chan]); + dev_dbg(dev, "%s: port_sel=%08x 
chan=%u lun_id=%016llx\n", + __func__, lli->port_sel, chan, lli->lun_id[chan]); out: mutex_unlock(&global.mutex); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index d2bac4b7b85f..ab38bca5df2b 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -43,6 +43,9 @@ MODULE_LICENSE("GPL"); */ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) { + struct afu *afu = cmd->parent; + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; struct sisl_ioarcb *ioarcb; struct sisl_ioasa *ioasa; u32 resid; @@ -56,21 +59,20 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) { resid = ioasa->resid; scsi_set_resid(scp, resid); - pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n", - __func__, cmd, scp, resid); + dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n", + __func__, cmd, scp, resid); } if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) { - pr_debug("%s: cmd underrun cmd = %p scp = %p\n", - __func__, cmd, scp); + dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n", + __func__, cmd, scp); scp->result = (DID_ERROR << 16); } - pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d " - "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n", - __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc, - ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra, - ioasa->fc_extra); + dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x " + "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__, + ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc, + ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra); if (ioasa->rc.scsi_rc) { /* We have a SCSI status */ @@ -159,6 +161,7 @@ static void cmd_complete(struct afu_cmd *cmd) ulong lock_flags; struct afu *afu = cmd->parent; struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; bool cmd_is_tmf; if (cmd->scp) { @@ -170,9 +173,8 @@ static void cmd_complete(struct afu_cmd *cmd) cmd_is_tmf = cmd->cmd_tmf; - pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X " - "ioasc=%d\n", __func__, scp, scp->result, - cmd->sa.ioasc); + dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n", + __func__, scp, scp->result, cmd->sa.ioasc); scsi_dma_unmap(scp); scp->scsi_done(scp); @@ -200,7 +202,7 @@ static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg) struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; - pr_debug("%s: cmd=%p\n", __func__, cmd); + dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd); writeq_be(rrin, reset_reg); do { @@ -211,7 +213,7 @@ static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg) udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); - dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n", + dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n", __func__, rrin, nretry); } @@ -274,8 +276,8 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin); out: spin_unlock_irqrestore(&afu->rrin_slock, lock_flags); - pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd, - cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc); + dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__, + cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc); return rc; } @@ -314,9 +316,9 @@ static int send_cmd_sq(struct afu *afu, 
struct afu_cmd *cmd) spin_unlock_irqrestore(&afu->hsq_slock, lock_flags); out: - dev_dbg(dev, "%s: cmd=%p len=%d ea=%p ioasa=%p rc=%d curr=%p " - "head=%016llX tail=%016llX\n", __func__, cmd, cmd->rcb.data_len, - (void *)cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr, + dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p " + "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len, + cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr, readq_be(&afu->host_map->sq_head), readq_be(&afu->host_map->sq_tail)); return rc; @@ -332,6 +334,8 @@ out: */ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) { + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; int rc = 0; ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000); @@ -342,10 +346,8 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) } if (unlikely(cmd->sa.ioasc != 0)) { - pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, " - "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0], - cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc, - cmd->sa.rc.fc_rc); + dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n", + __func__, cmd->rcb.cdb[0], cmd->sa.ioasc); rc = -1; } @@ -364,8 +366,7 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) { u32 port_sel = scp->device->channel + 1; - struct Scsi_Host *host = scp->device->host; - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(scp->device->host); struct afu_cmd *cmd = sc_to_afucz(scp); struct device *dev = &cfg->dev->dev; ulong lock_flags; @@ -410,7 +411,7 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) to); if (!to) { cfg->tmf_active = false; - dev_err(dev, "%s: TMF timed out!\n", __func__); + dev_err(dev, "%s: TMF timed out\n", __func__); rc = -1; } spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); @@ -448,7 +449,7 @@ static const char *cxlflash_driver_info(struct Scsi_Host *host) */ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(host); struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; struct afu_cmd *cmd = sc_to_afucz(scp); @@ -461,7 +462,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) int kref_got = 0; dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu " - "cdb=(%08X-%08X-%08X-%08X)\n", + "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, scp->device->channel, scp->device->id, scp->device->lun, get_unaligned_be32(&((u32 *)scp->cmnd)[0]), @@ -483,11 +484,11 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) switch (cfg->state) { case STATE_RESET: - dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__); + dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; case STATE_FAILTERM: - dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__); + dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__); scp->result = (DID_NO_CONNECT << 16); scp->scsi_done(scp); rc = 0; @@ -502,7 +503,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) if (likely(sg)) { nseg = scsi_dma_map(scp); if (unlikely(nseg < 0)) { - dev_err(dev, "%s: Fail DMA map!\n", __func__); + dev_err(dev, "%s: Fail DMA map\n", __func__); rc = 
SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -531,7 +532,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) out: if (kref_got) kref_put(&afu->mapcount, afu_unmap); - pr_devel("%s: returning rc=%d\n", __func__, rc); return rc; } @@ -651,6 +651,8 @@ static void term_mc(struct cxlflash_cfg *cfg) */ static void term_afu(struct cxlflash_cfg *cfg) { + struct device *dev = &cfg->dev->dev; + /* * Tear down is carefully orchestrated to ensure * no interrupts can come in when the problem state @@ -666,7 +668,7 @@ static void term_afu(struct cxlflash_cfg *cfg) term_mc(cfg); - pr_debug("%s: returning\n", __func__); + dev_dbg(dev, "%s: returning\n", __func__); } /** @@ -693,8 +695,7 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) return; if (!afu || !afu->afu_map) { - dev_dbg(dev, "%s: The problem state area is not mapped\n", - __func__); + dev_dbg(dev, "%s: Problem state area not mapped\n", __func__); return; } @@ -736,10 +737,11 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) static void cxlflash_remove(struct pci_dev *pdev) { struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); + struct device *dev = &pdev->dev; ulong lock_flags; if (!pci_is_enabled(pdev)) { - pr_debug("%s: Device is disabled\n", __func__); + dev_dbg(dev, "%s: Device is disabled\n", __func__); return; } @@ -775,7 +777,7 @@ static void cxlflash_remove(struct pci_dev *pdev) break; } - pr_debug("%s: returning\n", __func__); + dev_dbg(dev, "%s: returning\n", __func__); } /** @@ -817,6 +819,7 @@ out: static int init_pci(struct cxlflash_cfg *cfg) { struct pci_dev *pdev = cfg->dev; + struct device *dev = &cfg->dev->dev; int rc = 0; rc = pci_enable_device(pdev); @@ -827,15 +830,14 @@ static int init_pci(struct cxlflash_cfg *cfg) } if (rc) { - dev_err(&pdev->dev, "%s: Cannot enable adapter\n", - __func__); + dev_err(dev, "%s: Cannot enable adapter\n", __func__); cxlflash_wait_for_pci_err_recovery(cfg); goto out; } } out: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -848,19 +850,19 @@ out: static int init_scsi(struct cxlflash_cfg *cfg) { struct pci_dev *pdev = cfg->dev; + struct device *dev = &cfg->dev->dev; int rc = 0; rc = scsi_add_host(cfg->host, &pdev->dev); if (rc) { - dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n", - __func__, rc); + dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc); goto out; } scsi_scan_host(cfg->host); out: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -910,16 +912,12 @@ static void set_port_offline(__be64 __iomem *fc_regs) * Return: * TRUE (1) when the specified port is online * FALSE (0) when the specified port fails to come online after timeout - * -EINVAL when @delay_us is less than 1000 */ -static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) +static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) { u64 status; - if (delay_us < 1000) { - pr_err("%s: invalid delay specified %d\n", __func__, delay_us); - return -EINVAL; - } + WARN_ON(delay_us < 1000); do { msleep(delay_us / 1000); @@ -943,16 +941,12 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) * Return: * TRUE (1) when the specified port is offline * FALSE (0) when the specified port fails to go offline after timeout - * -EINVAL when @delay_us is less than 1000 */ -static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 
nretry) +static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) { u64 status; - if (delay_us < 1000) { - pr_err("%s: invalid delay specified %d\n", __func__, delay_us); - return -EINVAL; - } + WARN_ON(delay_us < 1000); do { msleep(delay_us / 1000); @@ -981,11 +975,14 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, u64 wwpn) { + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; + set_port_offline(fc_regs); if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, FC_PORT_STATUS_RETRY_CNT)) { - pr_debug("%s: wait on port %d to go offline timed out\n", - __func__, port); + dev_dbg(dev, "%s: wait on port %d to go offline timed out\n", + __func__, port); } writeq_be(wwpn, &fc_regs[FC_PNAME / 8]); @@ -993,8 +990,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, set_port_online(fc_regs); if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, FC_PORT_STATUS_RETRY_CNT)) { - pr_debug("%s: wait on port %d to go online timed out\n", - __func__, port); + dev_dbg(dev, "%s: wait on port %d to go online timed out\n", + __func__, port); } } @@ -1013,6 +1010,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, */ static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) { + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; u64 port_sel; /* first switch the AFU to the other links, if any */ @@ -1024,21 +1023,21 @@ static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) set_port_offline(fc_regs); if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, FC_PORT_STATUS_RETRY_CNT)) - pr_err("%s: wait on port %d to go offline timed out\n", - __func__, port); + dev_err(dev, "%s: wait on port %d to go offline timed out\n", + __func__, port); set_port_online(fc_regs); if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, FC_PORT_STATUS_RETRY_CNT)) - pr_err("%s: wait on port %d to go online timed out\n", - __func__, port); + dev_err(dev, "%s: wait on port %d to go online timed out\n", + __func__, port); /* switch back to include this port */ port_sel |= (1ULL << port); writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); - pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel); + dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel); } /* @@ -1148,6 +1147,8 @@ static void afu_err_intr_init(struct afu *afu) static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) { struct afu *afu = (struct afu *)data; + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; u64 reg; u64 reg_unmasked; @@ -1155,18 +1156,17 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) reg_unmasked = (reg & SISL_ISTATUS_UNMASK); if (reg_unmasked == 0UL) { - pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n", - __func__, (u64)afu, reg); + dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n", + __func__, reg); goto cxlflash_sync_err_irq_exit; } - pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n", - __func__, (u64)afu, reg); + dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", + __func__, reg); writeq_be(reg_unmasked, &afu->host_map->intr_clear); cxlflash_sync_err_irq_exit: - pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED); return IRQ_HANDLED; } @@ 
-1248,7 +1248,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) reg_unmasked = (reg & SISL_ASTATUS_UNMASK); if (reg_unmasked == 0) { - dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n", + dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", __func__, reg); goto out; } @@ -1264,7 +1264,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) port = info->port; - dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n", + dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", __func__, port, info->desc, readq_be(&global->fc_regs[port][FC_STATUS / 8])); @@ -1289,7 +1289,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) * should be the same and tracing one is sufficient. */ - dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n", + dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", __func__, port, reg); writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]); @@ -1304,7 +1304,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) } out: - dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu); return IRQ_HANDLED; } @@ -1316,13 +1315,14 @@ out: */ static int start_context(struct cxlflash_cfg *cfg) { + struct device *dev = &cfg->dev->dev; int rc = 0; rc = cxl_start_context(cfg->mcctx, cfg->afu->work.work_element_descriptor, NULL); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -1335,7 +1335,8 @@ static int start_context(struct cxlflash_cfg *cfg) */ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) { - struct pci_dev *dev = cfg->dev; + struct device *dev = &cfg->dev->dev; + struct pci_dev *pdev = cfg->dev; int rc = 0; int ro_start, ro_size, i, j, k; ssize_t vpd_size; @@ -1344,10 +1345,10 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" }; /* Get the VPD data from the device */ - vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data)); + vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); if (unlikely(vpd_size <= 0)) { - dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n", - __func__, vpd_size); + dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", + __func__, vpd_size); rc = -ENODEV; goto out; } @@ -1356,8 +1357,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); if (unlikely(ro_start < 0)) { - dev_err(&dev->dev, "%s: VPD Read-only data not found\n", - __func__); + dev_err(dev, "%s: VPD Read-only data not found\n", __func__); rc = -ENODEV; goto out; } @@ -1367,8 +1367,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) j = ro_size; i = ro_start + PCI_VPD_LRDT_TAG_SIZE; if (unlikely((i + j) > vpd_size)) { - pr_debug("%s: Might need to read more VPD (%d > %ld)\n", - __func__, (i + j), vpd_size); + dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n", + __func__, (i + j), vpd_size); ro_size = vpd_size - i; } @@ -1386,8 +1386,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); if (unlikely(i < 0)) { - dev_err(&dev->dev, "%s: Port %d WWPN not found " - "in VPD\n", __func__, k); + dev_err(dev, "%s: Port %d WWPN not found in VPD\n", + __func__, k); rc = -ENODEV; goto out; } @@ -1395,9 +1395,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) j = pci_vpd_info_field_size(&vpd_data[i]); i += PCI_VPD_INFO_FLD_HDR_SIZE; if (unlikely((i + j > 
vpd_size) || (j != WWPN_LEN))) { - dev_err(&dev->dev, "%s: Port %d WWPN incomplete or " - "VPD corrupt\n", - __func__, k); + dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", + __func__, k); rc = -ENODEV; goto out; } @@ -1405,15 +1404,15 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); if (unlikely(rc)) { - dev_err(&dev->dev, "%s: Fail to convert port %d WWPN " - "to integer\n", __func__, k); + dev_err(dev, "%s: WWPN conversion failed for port %d\n", + __func__, k); rc = -ENODEV; goto out; } } out: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -1467,7 +1466,8 @@ static int init_global(struct cxlflash_cfg *cfg) goto out; } - pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]); + dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n", + __func__, wwpn[0], wwpn[1]); /* Set up RRQ and SQ in AFU for master issued cmds */ writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); @@ -1527,7 +1527,6 @@ static int init_global(struct cxlflash_cfg *cfg) &afu->ctrl_map->ctx_cap); /* Initialize heartbeat */ afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); - out: return rc; } @@ -1539,6 +1538,7 @@ out: static int start_afu(struct cxlflash_cfg *cfg) { struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; int rc = 0; init_pcr(cfg); @@ -1565,7 +1565,7 @@ static int start_afu(struct cxlflash_cfg *cfg) rc = init_global(cfg); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -1585,7 +1585,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, rc = cxl_allocate_afu_irqs(ctx, 3); if (unlikely(rc)) { - dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", + dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", __func__, rc); level = UNDO_NOOP; goto out; @@ -1594,8 +1594,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, "SISL_MSI_SYNC_ERROR"); if (unlikely(rc <= 0)) { - dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n", - __func__); + dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); level = FREE_IRQ; goto out; } @@ -1603,8 +1602,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, "SISL_MSI_RRQ_UPDATED"); if (unlikely(rc <= 0)) { - dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n", - __func__); + dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); level = UNMAP_ONE; goto out; } @@ -1612,8 +1610,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, "SISL_MSI_ASYNC_ERROR"); if (unlikely(rc <= 0)) { - dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n", - __func__); + dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); level = UNMAP_TWO; goto out; } @@ -1647,15 +1644,13 @@ static int init_mc(struct cxlflash_cfg *cfg) /* During initialization reset the AFU to start from a clean slate */ rc = cxl_afu_reset(cfg->mcctx); if (unlikely(rc)) { - dev_err(dev, "%s: initial AFU reset failed rc=%d\n", - __func__, rc); + dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc); goto ret; } level = init_intr(cfg, ctx); if (unlikely(level)) { - dev_err(dev, "%s: setting up interrupts failed rc=%d\n", - __func__, rc); + dev_err(dev, "%s: interrupt 
init failed rc=%d\n", __func__, rc); goto out; } @@ -1670,7 +1665,7 @@ static int init_mc(struct cxlflash_cfg *cfg) goto out; } ret: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; out: term_intr(cfg, level); @@ -1697,7 +1692,7 @@ static int init_afu(struct cxlflash_cfg *cfg) rc = init_mc(cfg); if (rc) { - dev_err(dev, "%s: call to init_mc failed, rc=%d!\n", + dev_err(dev, "%s: init_mc failed rc=%d\n", __func__, rc); goto out; } @@ -1705,7 +1700,7 @@ static int init_afu(struct cxlflash_cfg *cfg) /* Map the entire MMIO space of the AFU */ afu->afu_map = cxl_psa_map(cfg->mcctx); if (!afu->afu_map) { - dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__); + dev_err(dev, "%s: cxl_psa_map failed\n", __func__); rc = -ENOMEM; goto err1; } @@ -1717,8 +1712,8 @@ static int init_afu(struct cxlflash_cfg *cfg) afu->interface_version = readq_be(&afu->afu_map->global.regs.interface_version); if ((afu->interface_version + 1) == 0) { - pr_err("Back level AFU, please upgrade. AFU version %s " - "interface version 0x%llx\n", afu->version, + dev_err(dev, "Back level AFU, please upgrade. AFU version %s " + "interface version %016llx\n", afu->version, afu->interface_version); rc = -EINVAL; goto err2; @@ -1732,13 +1727,12 @@ static int init_afu(struct cxlflash_cfg *cfg) afu->context_reset = context_reset_ioarrin; } - pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__, - afu->version, afu->interface_version); + dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, + afu->version, afu->interface_version); rc = start_afu(cfg); if (rc) { - dev_err(dev, "%s: call to start_afu failed, rc=%d!\n", - __func__, rc); + dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); goto err2; } @@ -1749,7 +1743,7 @@ static int init_afu(struct cxlflash_cfg *cfg) /* Restore the LUN mappings */ cxlflash_restore_luntable(cfg); out: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; err2: @@ -1793,7 +1787,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, static DEFINE_MUTEX(sync_active); if (cfg->state != STATE_NORMAL) { - pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state); + dev_dbg(dev, "%s: Sync not required state=%u\n", + __func__, cfg->state); return 0; } @@ -1810,7 +1805,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, init_completion(&cmd->cevent); cmd->parent = afu; - pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); + dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; cmd->rcb.ctx_id = afu->ctx_hndl; @@ -1835,7 +1830,7 @@ out: atomic_dec(&afu->cmds_active); mutex_unlock(&sync_active); kfree(buf); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -1847,16 +1842,17 @@ out: */ static int afu_reset(struct cxlflash_cfg *cfg) { + struct device *dev = &cfg->dev->dev; int rc = 0; + /* Stop the context before the reset. 
Since the context is * no longer available restart it after the reset is complete */ - term_afu(cfg); rc = init_afu(cfg); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -1885,18 +1881,18 @@ static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) { int rc = SUCCESS; struct Scsi_Host *host = scp->device->host; - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(host); + struct device *dev = &cfg->dev->dev; struct afu *afu = cfg->afu; int rcr = 0; - pr_debug("%s: (scp=%p) %d/%d/%d/%llu " - "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, - host->host_no, scp->device->channel, - scp->device->id, scp->device->lun, - get_unaligned_be32(&((u32 *)scp->cmnd)[0]), - get_unaligned_be32(&((u32 *)scp->cmnd)[1]), - get_unaligned_be32(&((u32 *)scp->cmnd)[2]), - get_unaligned_be32(&((u32 *)scp->cmnd)[3])); + dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " + "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, + scp->device->channel, scp->device->id, scp->device->lun, + get_unaligned_be32(&((u32 *)scp->cmnd)[0]), + get_unaligned_be32(&((u32 *)scp->cmnd)[1]), + get_unaligned_be32(&((u32 *)scp->cmnd)[2]), + get_unaligned_be32(&((u32 *)scp->cmnd)[3])); retry: switch (cfg->state) { @@ -1913,7 +1909,7 @@ retry: break; } - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -1935,16 +1931,16 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) int rc = SUCCESS; int rcr = 0; struct Scsi_Host *host = scp->device->host; - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(host); + struct device *dev = &cfg->dev->dev; - pr_debug("%s: (scp=%p) %d/%d/%d/%llu " - "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, - host->host_no, scp->device->channel, - scp->device->id, scp->device->lun, - get_unaligned_be32(&((u32 *)scp->cmnd)[0]), - get_unaligned_be32(&((u32 *)scp->cmnd)[1]), - get_unaligned_be32(&((u32 *)scp->cmnd)[2]), - get_unaligned_be32(&((u32 *)scp->cmnd)[3])); + dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " + "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, + scp->device->channel, scp->device->id, scp->device->lun, + get_unaligned_be32(&((u32 *)scp->cmnd)[0]), + get_unaligned_be32(&((u32 *)scp->cmnd)[1]), + get_unaligned_be32(&((u32 *)scp->cmnd)[2]), + get_unaligned_be32(&((u32 *)scp->cmnd)[3])); switch (cfg->state) { case STATE_NORMAL: @@ -1970,7 +1966,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) break; } - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -2036,8 +2032,7 @@ static ssize_t port0_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); struct afu *afu = cfg->afu; return cxlflash_show_port_status(0, afu, buf); @@ -2055,8 +2050,7 @@ static ssize_t port1_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); struct afu *afu = cfg->afu; return cxlflash_show_port_status(1, afu, buf); @@ -2073,8 +2067,7 @@ static ssize_t port1_show(struct device 
*dev, static ssize_t lun_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); struct afu *afu = cfg->afu; return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); @@ -2107,7 +2100,7 @@ static ssize_t lun_mode_store(struct device *dev, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; + struct cxlflash_cfg *cfg = shost_priv(shost); struct afu *afu = cfg->afu; int rc; u32 lun_mode; @@ -2169,7 +2162,7 @@ static ssize_t cxlflash_show_port_lun_table(u32 port, for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, - "%03d: %016llX\n", i, readq_be(&fc_port[i])); + "%03d: %016llx\n", i, readq_be(&fc_port[i])); return bytes; } @@ -2185,8 +2178,7 @@ static ssize_t port0_lun_table_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); struct afu *afu = cfg->afu; return cxlflash_show_port_lun_table(0, afu, buf); @@ -2204,8 +2196,7 @@ static ssize_t port1_lun_table_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(dev); - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); struct afu *afu = cfg->afu; return cxlflash_show_port_lun_table(1, afu, buf); @@ -2365,6 +2356,7 @@ static int cxlflash_probe(struct pci_dev *pdev, { struct Scsi_Host *host; struct cxlflash_cfg *cfg = NULL; + struct device *dev = &pdev->dev; struct dev_dependent_vals *ddv; int rc = 0; @@ -2376,8 +2368,7 @@ static int cxlflash_probe(struct pci_dev *pdev, host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); if (!host) { - dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n", - __func__); + dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); rc = -ENOMEM; goto out; } @@ -2388,12 +2379,11 @@ static int cxlflash_probe(struct pci_dev *pdev, host->unique_id = host->host_no; host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; - cfg = (struct cxlflash_cfg *)host->hostdata; + cfg = shost_priv(host); cfg->host = host; rc = alloc_mem(cfg); if (rc) { - dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n", - __func__); + dev_err(dev, "%s: alloc_mem failed\n", __func__); rc = -ENOMEM; scsi_host_put(cfg->host); goto out; @@ -2434,30 +2424,27 @@ static int cxlflash_probe(struct pci_dev *pdev, rc = init_pci(cfg); if (rc) { - dev_err(&pdev->dev, "%s: call to init_pci " - "failed rc=%d!\n", __func__, rc); + dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc); goto out_remove; } cfg->init_state = INIT_STATE_PCI; rc = init_afu(cfg); if (rc) { - dev_err(&pdev->dev, "%s: call to init_afu " - "failed rc=%d!\n", __func__, rc); + dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); goto out_remove; } cfg->init_state = INIT_STATE_AFU; rc = init_scsi(cfg); if (rc) { - dev_err(&pdev->dev, "%s: call to init_scsi " - "failed rc=%d!\n", __func__, rc); + dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc); goto out_remove; } cfg->init_state = INIT_STATE_SCSI; out: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 
return rc; out_remove: @@ -2495,7 +2482,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, drain_ioctls(cfg); rc = cxlflash_mark_contexts_error(cfg); if (unlikely(rc)) - dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", + dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", __func__, rc); term_afu(cfg); return PCI_ERS_RESULT_NEED_RESET; @@ -2529,7 +2516,7 @@ static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) rc = init_afu(cfg); if (unlikely(rc)) { - dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc); + dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); return PCI_ERS_RESULT_DISCONNECT; } @@ -2577,8 +2564,6 @@ static struct pci_driver cxlflash_driver = { */ static int __init init_cxlflash(void) { - pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME); - cxlflash_list_init(); return pci_register_driver(&cxlflash_driver); diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index 42674ae6f4dd..90869cee2b20 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -212,7 +212,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid, } out: - dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u " + dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u " "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid, ctx_ctrl); @@ -260,7 +260,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) writeq_be(val, &ctrl_map->ctx_cap); val = readq_be(&ctrl_map->ctx_cap); if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) { - dev_err(dev, "%s: ctx may be closed val=%016llX\n", + dev_err(dev, "%s: ctx may be closed val=%016llx\n", __func__, val); rc = -EAGAIN; goto out; @@ -302,7 +302,7 @@ out: */ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct glun_info *gli = lli->parent; u8 *cmd_buf = NULL; @@ -326,7 +326,7 @@ retry: scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */ put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]); - dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__, + dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__, retry_cnt ? "re" : "", scsi_cmd[0]); /* Drop the ioctl read semahpore across lengthy call */ @@ -336,7 +336,7 @@ retry: down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); if (rc) { - dev_err(dev, "%s: Failed state! result=0x08%X\n", + dev_err(dev, "%s: Failed state result=%08x\n", __func__, result); rc = -ENODEV; goto out; @@ -378,7 +378,7 @@ retry: } if (result) { - dev_err(dev, "%s: command failed, result=0x%x\n", + dev_err(dev, "%s: command failed, result=%08x\n", __func__, result); rc = -EIO; goto out; @@ -415,29 +415,32 @@ out: struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, struct llun_info *lli) { + struct cxlflash_cfg *cfg = ctxi->cfg; + struct device *dev = &cfg->dev->dev; struct sisl_rht_entry *rhte = NULL; if (unlikely(!ctxi->rht_start)) { - pr_debug("%s: Context does not have allocated RHT!\n", + dev_dbg(dev, "%s: Context does not have allocated RHT\n", __func__); goto out; } if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) { - pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl); + dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", + __func__, rhndl); goto out; } if (unlikely(ctxi->rht_lun[rhndl] != lli)) { - pr_debug("%s: Bad resource handle LUN! 
(%d)\n", - __func__, rhndl); + dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n", + __func__, rhndl); goto out; } rhte = &ctxi->rht_start[rhndl]; if (unlikely(rhte->nmask == 0)) { - pr_debug("%s: Unopened resource handle! (%d)\n", - __func__, rhndl); + dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n", + __func__, rhndl); rhte = NULL; goto out; } @@ -456,6 +459,8 @@ out: struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, struct llun_info *lli) { + struct cxlflash_cfg *cfg = ctxi->cfg; + struct device *dev = &cfg->dev->dev; struct sisl_rht_entry *rhte = NULL; int i; @@ -470,7 +475,7 @@ struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, if (likely(rhte)) ctxi->rht_lun[i] = lli; - pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i); + dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i); return rhte; } @@ -547,7 +552,7 @@ int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked) if (gli->mode == MODE_NONE) gli->mode = mode; else if (gli->mode != mode) { - pr_debug("%s: LUN operating in mode %d, requested mode %d\n", + pr_debug("%s: gli_mode=%d requested_mode=%d\n", __func__, gli->mode, mode); rc = -EINVAL; goto out; @@ -605,7 +610,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi, struct dk_cxlflash_release *release) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; @@ -622,13 +627,13 @@ int _cxlflash_disk_release(struct scsi_device *sdev, struct sisl_rht_entry *rhte; struct sisl_rht_entry_f1 *rhte_f1; - dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n", + dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n", __func__, ctxid, release->rsrc_handle, gli->mode, gli->users); if (!ctxi) { ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context! (%llu)\n", + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); rc = -EINVAL; goto out; @@ -639,7 +644,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev, rhte = get_rhte(ctxi, rhndl, lli); if (unlikely(!rhte)) { - dev_dbg(dev, "%s: Bad resource handle! 
(%d)\n", + dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", __func__, rhndl); rc = -EINVAL; goto out; @@ -758,13 +763,13 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg) lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL); ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL); if (unlikely(!ctxi || !lli || !ws)) { - dev_err(dev, "%s: Unable to allocate context!\n", __func__); + dev_err(dev, "%s: Unable to allocate context\n", __func__); goto err; } rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL); if (unlikely(!rhte)) { - dev_err(dev, "%s: Unable to allocate RHT!\n", __func__); + dev_err(dev, "%s: Unable to allocate RHT\n", __func__); goto err; } @@ -858,7 +863,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev, struct ctx_info *ctxi, struct dk_cxlflash_detach *detach) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct lun_access *lun_access, *t; @@ -875,7 +880,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev, if (!ctxi) { ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context! (%llu)\n", + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); rc = -EINVAL; goto out; @@ -964,7 +969,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file) ctxid = cxl_process_element(ctx); if (unlikely(ctxid < 0)) { - dev_err(dev, "%s: Context %p was closed! (%d)\n", + dev_err(dev, "%s: Context %p was closed ctxid=%d\n", __func__, ctx, ctxid); goto out; } @@ -973,18 +978,18 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file) if (unlikely(!ctxi)) { ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE); if (!ctxi) { - dev_dbg(dev, "%s: Context %d already free!\n", + dev_dbg(dev, "%s: ctxid=%d already free\n", __func__, ctxid); goto out_release; } - dev_dbg(dev, "%s: Another process owns context %d!\n", + dev_dbg(dev, "%s: Another process owns ctxid=%d\n", __func__, ctxid); put_context(ctxi); goto out; } - dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid); + dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid); detach.context_id = ctxi->ctxid; list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) @@ -1011,17 +1016,20 @@ static void unmap_context(struct ctx_info *ctxi) /** * get_err_page() - obtains and allocates the error notification page + * @cfg: Internal structure associated with the host. * * Return: error notification page on success, NULL on failure */ -static struct page *get_err_page(void) +static struct page *get_err_page(struct cxlflash_cfg *cfg) { struct page *err_page = global.err_page; + struct device *dev = &cfg->dev->dev; if (unlikely(!err_page)) { err_page = alloc_page(GFP_KERNEL); if (unlikely(!err_page)) { - pr_err("%s: Unable to allocate err_page!\n", __func__); + dev_err(dev, "%s: Unable to allocate err_page\n", + __func__); goto out; } @@ -1039,7 +1047,7 @@ static struct page *get_err_page(void) } out: - pr_debug("%s: returning err_page=%p\n", __func__, err_page); + dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page); return err_page; } @@ -1074,14 +1082,14 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ctxid = cxl_process_element(ctx); if (unlikely(ctxid < 0)) { - dev_err(dev, "%s: Context %p was closed! 
(%d)\n", + dev_err(dev, "%s: Context %p was closed ctxid=%d\n", __func__, ctx, ctxid); goto err; } ctxi = get_context(cfg, ctxid, file, ctrl); if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); + dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid); goto err; } @@ -1091,13 +1099,12 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); rc = ctxi->cxl_mmap_vmops->fault(vma, vmf); } else { - dev_dbg(dev, "%s: err recovery active, use err_page!\n", + dev_dbg(dev, "%s: err recovery active, use err_page\n", __func__); - err_page = get_err_page(); + err_page = get_err_page(cfg); if (unlikely(!err_page)) { - dev_err(dev, "%s: Could not obtain error page!\n", - __func__); + dev_err(dev, "%s: Could not get err_page\n", __func__); rc = VM_FAULT_RETRY; goto out; } @@ -1147,7 +1154,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) ctxid = cxl_process_element(ctx); if (unlikely(ctxid < 0)) { - dev_err(dev, "%s: Context %p was closed! (%d)\n", + dev_err(dev, "%s: Context %p was closed ctxid=%d\n", __func__, ctx, ctxid); rc = -EIO; goto out; @@ -1155,7 +1162,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) ctxi = get_context(cfg, ctxid, file, ctrl); if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); + dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid); rc = -EIO; goto out; } @@ -1251,7 +1258,7 @@ retry: break; goto retry; case STATE_FAILTERM: - dev_dbg(dev, "%s: Failed/Terminating!\n", __func__); + dev_dbg(dev, "%s: Failed/Terminating\n", __func__); rc = -ENODEV; break; default: @@ -1276,7 +1283,7 @@ retry: static int cxlflash_disk_attach(struct scsi_device *sdev, struct dk_cxlflash_attach *attach) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct afu *afu = cfg->afu; struct llun_info *lli = sdev->hostdata; @@ -1303,24 +1310,24 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, } if (gli->max_lba == 0) { - dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n", + dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n", __func__, lli->lun_id[sdev->channel]); rc = read_cap16(sdev, lli); if (rc) { - dev_err(dev, "%s: Invalid device! (%d)\n", + dev_err(dev, "%s: Invalid device rc=%d\n", __func__, rc); rc = -ENODEV; goto out; } - dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba); - dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len); + dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba); + dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len); } if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) { rctxid = attach->context_id; ctxi = get_context(cfg, rctxid, NULL, 0); if (!ctxi) { - dev_dbg(dev, "%s: Bad context! 
(%016llX)\n", + dev_dbg(dev, "%s: Bad context rctxid=%016llx\n", __func__, rctxid); rc = -EINVAL; goto out; @@ -1328,7 +1335,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, list_for_each_entry(lun_access, &ctxi->luns, list) if (lun_access->lli == lli) { - dev_dbg(dev, "%s: Already attached!\n", + dev_dbg(dev, "%s: Already attached\n", __func__); rc = -EINVAL; goto out; @@ -1337,13 +1344,13 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, rc = scsi_device_get(sdev); if (unlikely(rc)) { - dev_err(dev, "%s: Unable to get sdev reference!\n", __func__); + dev_err(dev, "%s: Unable to get sdev reference\n", __func__); goto out; } lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL); if (unlikely(!lun_access)) { - dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__); + dev_err(dev, "%s: Unable to allocate lun_access\n", __func__); rc = -ENOMEM; goto err; } @@ -1353,7 +1360,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, /* Non-NULL context indicates reuse (another context reference) */ if (ctxi) { - dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n", + dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n", __func__, rctxid); kref_get(&ctxi->kref); list_add(&lun_access->list, &ctxi->luns); @@ -1362,7 +1369,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, ctxi = create_context(cfg); if (unlikely(!ctxi)) { - dev_err(dev, "%s: Failed to create context! (%d)\n", + dev_err(dev, "%s: Failed to create context ctxid=%d\n", __func__, ctxid); goto err; } @@ -1388,7 +1395,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, ctxid = cxl_process_element(ctx); if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { - dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); + dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); rc = -EPERM; goto err; } @@ -1522,7 +1529,7 @@ static int recover_context(struct cxlflash_cfg *cfg, ctxid = cxl_process_element(ctx); if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { - dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); + dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); rc = -EPERM; goto err2; } @@ -1613,7 +1620,7 @@ err1: static int cxlflash_afu_recover(struct scsi_device *sdev, struct dk_cxlflash_recover_afu *recover) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct afu *afu = cfg->afu; @@ -1635,19 +1642,19 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, goto out; rc = check_state(cfg); if (rc) { - dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc); + dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc); rc = -ENODEV; goto out; } - dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", + dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n", __func__, recover->reason, rctxid); retry: /* Ensure that this process is attached to the context */ ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context! 
(%llu)\n", __func__, ctxid); + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); rc = -EINVAL; goto out; } @@ -1656,12 +1663,12 @@ retry: retry_recover: rc = recover_context(cfg, ctxi, &new_adap_fd); if (unlikely(rc)) { - dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n", + dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n", __func__, ctxid, rc); if ((rc == -ENODEV) && ((atomic_read(&cfg->recovery_threads) > 1) || (lretry--))) { - dev_dbg(dev, "%s: Going to try again!\n", + dev_dbg(dev, "%s: Going to try again\n", __func__); mutex_unlock(mutex); msleep(100); @@ -1707,7 +1714,7 @@ retry_recover: goto retry; } - dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__); + dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__); out: if (likely(ctxi)) put_context(ctxi); @@ -1726,7 +1733,7 @@ out: static int process_sense(struct scsi_device *sdev, struct dk_cxlflash_verify *verify) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; @@ -1737,7 +1744,7 @@ static int process_sense(struct scsi_device *sdev, rc = scsi_normalize_sense((const u8 *)&verify->sense_data, DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr); if (!rc) { - dev_err(dev, "%s: Failed to normalize sense data!\n", __func__); + dev_err(dev, "%s: Failed to normalize sense data\n", __func__); rc = -EINVAL; goto out; } @@ -1793,7 +1800,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev, { int rc = 0; struct ctx_info *ctxi = NULL; - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; @@ -1803,20 +1810,20 @@ static int cxlflash_disk_verify(struct scsi_device *sdev, rctxid = verify->context_id; u64 last_lba = 0; - dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, " - "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle, + dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, " + "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle, verify->hint, verify->hdr.flags); ctxi = get_context(cfg, rctxid, lli, 0); if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); rc = -EINVAL; goto out; } rhte = get_rhte(ctxi, rhndl, lli); if (unlikely(!rhte)) { - dev_dbg(dev, "%s: Bad resource handle! 
(%d)\n", + dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n", __func__, rhndl); rc = -EINVAL; goto out; @@ -1863,7 +1870,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev, out: if (likely(ctxi)) put_context(ctxi); - dev_dbg(dev, "%s: returning rc=%d llba=%llX\n", + dev_dbg(dev, "%s: returning rc=%d llba=%llx\n", __func__, rc, verify->last_lba); return rc; } @@ -1915,7 +1922,7 @@ static char *decode_ioctl(int cmd) */ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct afu *afu = cfg->afu; struct llun_info *lli = sdev->hostdata; @@ -1935,25 +1942,25 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) struct ctx_info *ctxi = NULL; struct sisl_rht_entry *rhte = NULL; - pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size); + dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size); rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false); if (unlikely(rc)) { - dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n", - __func__); + dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__); goto out; } ctxi = get_context(cfg, rctxid, lli, 0); if (unlikely(!ctxi)) { - dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); rc = -EINVAL; goto err1; } rhte = rhte_checkout(ctxi, lli); if (unlikely(!rhte)) { - dev_dbg(dev, "%s: too many opens for this context\n", __func__); + dev_dbg(dev, "%s: Too many opens ctxid=%lld\n", + __func__, ctxid); rc = -EMFILE; /* too many opens */ goto err1; } @@ -1971,7 +1978,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) out: if (likely(ctxi)) put_context(ctxi); - dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n", + dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n", __func__, rsrc_handle, rc, last_lba); return rc; @@ -1993,7 +2000,7 @@ err1: */ static int ioctl_common(struct scsi_device *sdev, int cmd) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; int rc = 0; @@ -2010,7 +2017,7 @@ static int ioctl_common(struct scsi_device *sdev, int cmd) case DK_CXLFLASH_VLUN_RESIZE: case DK_CXLFLASH_RELEASE: case DK_CXLFLASH_DETACH: - dev_dbg(dev, "%s: Command override! (%d)\n", + dev_dbg(dev, "%s: Command override rc=%d\n", __func__, rc); rc = 0; break; @@ -2040,7 +2047,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { typedef int (*sioctl) (struct scsi_device *, void *); - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct afu *afu = cfg->afu; struct dk_cxlflash_hdr *hdr; @@ -2119,7 +2126,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) } if (unlikely(copy_from_user(&buf, arg, size))) { - dev_err(dev, "%s: copy_from_user() fail! 
" + dev_err(dev, "%s: copy_from_user() fail " "size=%lu cmd=%d (%s) arg=%p\n", __func__, size, cmd, decode_ioctl(cmd), arg); rc = -EFAULT; @@ -2135,7 +2142,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) } if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) { - dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__); + dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__); rc = -EINVAL; goto cxlflash_ioctl_exit; } @@ -2143,7 +2150,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) rc = do_ioctl(sdev, (void *)&buf); if (likely(!rc)) if (unlikely(copy_to_user(arg, &buf, size))) { - dev_err(dev, "%s: copy_to_user() fail! " + dev_err(dev, "%s: copy_to_user() fail " "size=%lu cmd=%d (%s) arg=%p\n", __func__, size, cmd, decode_ioctl(cmd), arg); rc = -EFAULT; diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index 90c5d7f5278e..8fcc804dbef9 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -66,8 +66,8 @@ static int ba_init(struct ba_lun *ba_lun) int last_word_underflow = 0; u64 *lam; - pr_debug("%s: Initializing LUN: lun_id = %llX, " - "ba_lun->lsize = %lX, ba_lun->au_size = %lX\n", + pr_debug("%s: Initializing LUN: lun_id=%016llx " + "ba_lun->lsize=%lx ba_lun->au_size=%lX\n", __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size); /* Calculate bit map size */ @@ -80,7 +80,7 @@ static int ba_init(struct ba_lun *ba_lun) /* Allocate lun information container */ bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL); if (unlikely(!bali)) { - pr_err("%s: Failed to allocate lun_info for lun_id %llX\n", + pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n", __func__, ba_lun->lun_id); return -ENOMEM; } @@ -96,7 +96,7 @@ static int ba_init(struct ba_lun *ba_lun) GFP_KERNEL); if (unlikely(!bali->lun_alloc_map)) { pr_err("%s: Failed to allocate lun allocation map: " - "lun_id = %llX\n", __func__, ba_lun->lun_id); + "lun_id=%016llx\n", __func__, ba_lun->lun_id); kfree(bali); return -ENOMEM; } @@ -125,7 +125,7 @@ static int ba_init(struct ba_lun *ba_lun) bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)), GFP_KERNEL); if (unlikely(!bali->aun_clone_map)) { - pr_err("%s: Failed to allocate clone map: lun_id = %llX\n", + pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n", __func__, ba_lun->lun_id); kfree(bali->lun_alloc_map); kfree(bali); @@ -136,7 +136,7 @@ static int ba_init(struct ba_lun *ba_lun) ba_lun->ba_lun_handle = bali; pr_debug("%s: Successfully initialized the LUN: " - "lun_id = %llX, bitmap size = %X, free_aun_cnt = %llX\n", + "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n", __func__, ba_lun->lun_id, bali->lun_bmap_size, bali->free_aun_cnt); return 0; @@ -165,10 +165,9 @@ static int find_free_range(u32 low, num_bits = (sizeof(*lam) * BITS_PER_BYTE); bit_pos = find_first_bit(lam, num_bits); - pr_devel("%s: Found free bit %llX in LUN " - "map entry %llX at bitmap index = %X\n", - __func__, bit_pos, bali->lun_alloc_map[i], - i); + pr_devel("%s: Found free bit %llu in LUN " + "map entry %016llx at bitmap index = %d\n", + __func__, bit_pos, bali->lun_alloc_map[i], i); *bit_word = i; bali->free_aun_cnt--; @@ -194,11 +193,11 @@ static u64 ba_alloc(struct ba_lun *ba_lun) bali = ba_lun->ba_lun_handle; pr_debug("%s: Received block allocation request: " - "lun_id = %llX, free_aun_cnt = %llX\n", + "lun_id=%016llx free_aun_cnt=%llx\n", __func__, ba_lun->lun_id, bali->free_aun_cnt); if (bali->free_aun_cnt == 0) { - pr_debug("%s: No space left on 
LUN: lun_id = %llX\n", + pr_debug("%s: No space left on LUN: lun_id=%016llx\n", __func__, ba_lun->lun_id); return -1ULL; } @@ -212,7 +211,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun) bali, &bit_word); if (bit_pos == -1) { pr_debug("%s: Could not find an allocation unit on LUN:" - " lun_id = %llX\n", __func__, ba_lun->lun_id); + " lun_id=%016llx\n", __func__, ba_lun->lun_id); return -1ULL; } } @@ -223,8 +222,8 @@ static u64 ba_alloc(struct ba_lun *ba_lun) else bali->free_curr_idx = bit_word; - pr_debug("%s: Allocating AU number %llX, on lun_id %llX, " - "free_aun_cnt = %llX\n", __func__, + pr_debug("%s: Allocating AU number=%llx lun_id=%016llx " + "free_aun_cnt=%llx\n", __func__, ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id, bali->free_aun_cnt); @@ -266,18 +265,18 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free) bali = ba_lun->ba_lun_handle; if (validate_alloc(bali, to_free)) { - pr_debug("%s: The AUN %llX is not allocated on lun_id %llX\n", + pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n", __func__, to_free, ba_lun->lun_id); return -1; } - pr_debug("%s: Received a request to free AU %llX on lun_id %llX, " - "free_aun_cnt = %llX\n", __func__, to_free, ba_lun->lun_id, + pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx " + "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id, bali->free_aun_cnt); if (bali->aun_clone_map[to_free] > 0) { - pr_debug("%s: AUN %llX on lun_id %llX has been cloned. Clone " - "count = %X\n", __func__, to_free, ba_lun->lun_id, + pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n", + __func__, to_free, ba_lun->lun_id, bali->aun_clone_map[to_free]); bali->aun_clone_map[to_free]--; return 0; @@ -294,8 +293,8 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free) else if (idx > bali->free_high_idx) bali->free_high_idx = idx; - pr_debug("%s: Successfully freed AU at bit_pos %X, bit map index %X on " - "lun_id %llX, free_aun_cnt = %llX\n", __func__, bit_pos, idx, + pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x " + "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx, ba_lun->lun_id, bali->free_aun_cnt); return 0; @@ -313,16 +312,16 @@ static int ba_clone(struct ba_lun *ba_lun, u64 to_clone) struct ba_lun_info *bali = ba_lun->ba_lun_handle; if (validate_alloc(bali, to_clone)) { - pr_debug("%s: AUN %llX is not allocated on lun_id %llX\n", + pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n", __func__, to_clone, ba_lun->lun_id); return -1; } - pr_debug("%s: Received a request to clone AUN %llX on lun_id %llX\n", + pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n", __func__, to_clone, ba_lun->lun_id); if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) { - pr_debug("%s: AUN %llX on lun_id %llX hit max clones already\n", + pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n", __func__, to_clone, ba_lun->lun_id); return -1; } @@ -433,7 +432,7 @@ static int write_same16(struct scsi_device *sdev, u64 offset = lba; int left = nblks; u32 to = sdev->request_queue->rq_timeout; - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); @@ -459,7 +458,7 @@ static int write_same16(struct scsi_device *sdev, down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); if (rc) { - dev_err(dev, "%s: Failed state! 
result=0x08%X\n", + dev_err(dev, "%s: Failed state result=%08x\n", __func__, result); rc = -ENODEV; goto out; @@ -467,7 +466,7 @@ static int write_same16(struct scsi_device *sdev, if (result) { dev_err_ratelimited(dev, "%s: command failed for " - "offset %lld result=0x%x\n", + "offset=%lld result=%08x\n", __func__, offset, result); rc = -EIO; goto out; @@ -480,7 +479,7 @@ out: kfree(cmd_buf); kfree(scsi_cmd); kfree(sense_buf); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -508,6 +507,8 @@ static int grow_lxt(struct afu *afu, struct sisl_rht_entry *rhte, u64 *new_size) { + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; @@ -527,7 +528,8 @@ static int grow_lxt(struct afu *afu, mutex_lock(&blka->mutex); av_size = ba_space(&blka->ba_lun); if (unlikely(av_size <= 0)) { - pr_debug("%s: ba_space error: av_size %d\n", __func__, av_size); + dev_dbg(dev, "%s: ba_space error av_size=%d\n", + __func__, av_size); mutex_unlock(&blka->mutex); rc = -ENOSPC; goto out; @@ -568,8 +570,8 @@ static int grow_lxt(struct afu *afu, */ aun = ba_alloc(&blka->ba_lun); if ((aun == -1ULL) || (aun >= blka->nchunk)) - pr_debug("%s: ba_alloc error: allocated chunk# %llX, " - "max %llX\n", __func__, aun, blka->nchunk - 1); + dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu " + "max=%llu\n", __func__, aun, blka->nchunk - 1); /* select both ports, use r/w perms from RHT */ lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) | @@ -599,7 +601,7 @@ static int grow_lxt(struct afu *afu, kfree(lxt_old); *new_size = my_new_size; out: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -621,6 +623,8 @@ static int shrink_lxt(struct afu *afu, struct ctx_info *ctxi, u64 *new_size) { + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct sisl_lxt_entry *lxt, *lxt_old; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; @@ -706,7 +710,7 @@ static int shrink_lxt(struct afu *afu, kfree(lxt_old); *new_size = my_new_size; out: - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -728,7 +732,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi, struct dk_cxlflash_resize *resize) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; struct afu *afu = cfg->afu; @@ -751,13 +756,13 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev, nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len; new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE); - pr_debug("%s: ctxid=%llu rhndl=0x%llx, req_size=0x%llx," - "new_size=%llx\n", __func__, ctxid, resize->rsrc_handle, - resize->req_size, new_size); + dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n", + __func__, ctxid, resize->rsrc_handle, resize->req_size, + new_size); if (unlikely(gli->mode != MODE_VIRTUAL)) { - pr_debug("%s: LUN mode does not support resize! 
(%d)\n", - __func__, gli->mode); + dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n", + __func__, gli->mode); rc = -EINVAL; goto out; @@ -766,7 +771,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev, if (!ctxi) { ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); if (unlikely(!ctxi)) { - pr_debug("%s: Bad context! (%llu)\n", __func__, ctxid); + dev_dbg(dev, "%s: Bad context ctxid=%llu\n", + __func__, ctxid); rc = -EINVAL; goto out; } @@ -776,7 +782,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev, rhte = get_rhte(ctxi, rhndl, lli); if (unlikely(!rhte)) { - pr_debug("%s: Bad resource handle! (%u)\n", __func__, rhndl); + dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n", + __func__, rhndl); rc = -EINVAL; goto out; } @@ -794,8 +801,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev, out: if (put_ctx) put_context(ctxi); - pr_debug("%s: resized to %lld returning rc=%d\n", - __func__, resize->last_lba, rc); + dev_dbg(dev, "%s: resized to %llu returning rc=%d\n", + __func__, resize->last_lba, rc); return rc; } @@ -815,6 +822,7 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg) u32 chan; u32 lind; struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; struct sisl_global_map __iomem *agm = &afu->afu_map->global; mutex_lock(&global.mutex); @@ -828,15 +836,15 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg) if (lli->port_sel == BOTH_PORTS) { writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]); writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]); - pr_debug("%s: Virtual LUN on slot %d id0=%llx, " - "id1=%llx\n", __func__, lind, - lli->lun_id[0], lli->lun_id[1]); + dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx " + "id1=%llx\n", __func__, lind, + lli->lun_id[0], lli->lun_id[1]); } else { chan = PORT2CHAN(lli->port_sel); writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]); - pr_debug("%s: Virtual LUN on slot %d chan=%d, " - "id=%llx\n", __func__, lind, chan, - lli->lun_id[chan]); + dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d " + "id=%llx\n", __func__, lind, chan, + lli->lun_id[chan]); } } @@ -860,6 +868,7 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli) u32 lind; int rc = 0; struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; struct sisl_global_map __iomem *agm = &afu->afu_map->global; mutex_lock(&global.mutex); @@ -882,8 +891,8 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli) writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]); writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]); cfg->promote_lun_index++; - pr_debug("%s: Virtual LUN on slot %d id0=%llx, id1=%llx\n", - __func__, lind, lli->lun_id[0], lli->lun_id[1]); + dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx id1=%llx\n", + __func__, lind, lli->lun_id[0], lli->lun_id[1]); } else { /* * If this LUN is visible only from one port, we will put @@ -898,14 +907,14 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli) lind = lli->lun_index = cfg->last_lun_index[chan]; writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]); cfg->last_lun_index[chan]--; - pr_debug("%s: Virtual LUN on slot %d chan=%d, id=%llx\n", - __func__, lind, chan, lli->lun_id[chan]); + dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d id=%llx\n", + __func__, lind, chan, lli->lun_id[chan]); } lli->in_table = true; out: mutex_unlock(&global.mutex); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } @@ -923,7 +932,7 @@ out: */ int 
cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; @@ -942,14 +951,14 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) struct ctx_info *ctxi = NULL; struct sisl_rht_entry *rhte = NULL; - pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size); + dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size); /* Setup the LUNs block allocator on first call */ mutex_lock(&gli->mutex); if (gli->mode == MODE_NONE) { rc = init_vlun(lli); if (rc) { - dev_err(dev, "%s: call to init_vlun failed rc=%d!\n", + dev_err(dev, "%s: init_vlun failed rc=%d\n", __func__, rc); rc = -ENOMEM; goto err0; @@ -958,29 +967,28 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true); if (unlikely(rc)) { - dev_err(dev, "%s: Failed to attach to LUN! (VIRTUAL)\n", - __func__); + dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__); goto err0; } mutex_unlock(&gli->mutex); rc = init_luntable(cfg, lli); if (rc) { - dev_err(dev, "%s: call to init_luntable failed rc=%d!\n", - __func__, rc); + dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc); goto err1; } ctxi = get_context(cfg, rctxid, lli, 0); if (unlikely(!ctxi)) { - dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); + dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid); rc = -EINVAL; goto err1; } rhte = rhte_checkout(ctxi, lli); if (unlikely(!rhte)) { - dev_err(dev, "%s: too many opens for this context\n", __func__); + dev_err(dev, "%s: too many opens ctxid=%llu\n", + __func__, ctxid); rc = -EMFILE; /* too many opens */ goto err1; } @@ -996,7 +1004,7 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) resize.rsrc_handle = rsrc_handle; rc = _cxlflash_vlun_resize(sdev, ctxi, &resize); if (rc) { - dev_err(dev, "%s: resize failed rc %d\n", __func__, rc); + dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc); goto err2; } last_lba = resize.last_lba; @@ -1013,8 +1021,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) out: if (likely(ctxi)) put_context(ctxi); - pr_debug("%s: returning handle 0x%llx rc=%d llba %lld\n", - __func__, rsrc_handle, rc, last_lba); + dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n", + __func__, rsrc_handle, rc, last_lba); return rc; err2: @@ -1047,6 +1055,8 @@ static int clone_lxt(struct afu *afu, struct sisl_rht_entry *rhte, struct sisl_rht_entry *rhte_src) { + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; struct sisl_lxt_entry *lxt; u32 ngrps; u64 aun; /* chunk# allocated by block allocator */ @@ -1101,7 +1111,7 @@ static int clone_lxt(struct afu *afu, cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); - pr_debug("%s: returning\n", __func__); + dev_dbg(dev, "%s: returning\n", __func__); return 0; } @@ -1120,7 +1130,8 @@ static int clone_lxt(struct afu *afu, int cxlflash_disk_clone(struct scsi_device *sdev, struct dk_cxlflash_clone *clone) { - struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; + struct cxlflash_cfg *cfg = shost_priv(sdev->host); + struct device *dev = &cfg->dev->dev; struct llun_info *lli = sdev->hostdata; struct glun_info *gli = lli->parent; struct blka *blka = &gli->blka; @@ -1140,8 +1151,8 @@ int cxlflash_disk_clone(struct 
scsi_device *sdev, bool found; LIST_HEAD(sidecar); - pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n", - __func__, ctxid_src, ctxid_dst); + dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n", + __func__, ctxid_src, ctxid_dst); /* Do not clone yourself */ if (unlikely(rctxid_src == rctxid_dst)) { @@ -1151,16 +1162,16 @@ int cxlflash_disk_clone(struct scsi_device *sdev, if (unlikely(gli->mode != MODE_VIRTUAL)) { rc = -EINVAL; - pr_debug("%s: Clone not supported on physical LUNs! (%d)\n", - __func__, gli->mode); + dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n", + __func__, gli->mode); goto out; } ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE); ctxi_dst = get_context(cfg, rctxid_dst, lli, 0); if (unlikely(!ctxi_src || !ctxi_dst)) { - pr_debug("%s: Bad context! (%llu,%llu)\n", __func__, - ctxid_src, ctxid_dst); + dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n", + __func__, ctxid_src, ctxid_dst); rc = -EINVAL; goto out; } @@ -1185,8 +1196,8 @@ int cxlflash_disk_clone(struct scsi_device *sdev, lun_access_dst = kzalloc(sizeof(*lun_access_dst), GFP_KERNEL); if (unlikely(!lun_access_dst)) { - pr_err("%s: Unable to allocate lun_access!\n", - __func__); + dev_err(dev, "%s: lun_access allocation fail\n", + __func__); rc = -ENOMEM; goto out; } @@ -1197,7 +1208,7 @@ int cxlflash_disk_clone(struct scsi_device *sdev, } if (unlikely(!ctxi_src->rht_out)) { - pr_debug("%s: Nothing to clone!\n", __func__); + dev_dbg(dev, "%s: Nothing to clone\n", __func__); goto out_success; } @@ -1256,7 +1267,7 @@ out: put_context(ctxi_src); if (ctxi_dst) put_context(ctxi_dst); - pr_debug("%s: returning rc=%d\n", __func__, rc); + dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; err: -- cgit v1.2.3-59-g8ed1b From 0df5bef739601f18bffc0d256ae451f239a826bd Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 11 Jan 2017 19:20:03 -0600 Subject: scsi: cxlflash: Cancel scheduled workers before stopping AFU When processing an AFU asynchronous interrupt, if the action results in an operation that requires off level processing (a link reset for example), the worker thread is scheduled. In the meantime a reset event (i.e.: EEH) could unmap the AFU to recover. This results in an Oops when the worker thread tries to access the AFU mapping. [c000000f17e03b90] d000000007cd5978 cxlflash_worker_thread+0x268/0x550 [c000000f17e03c40] c00000000011883c process_one_work+0x1dc/0x680 [c000000f17e03ce0] c000000000118e80 worker_thread+0x1a0/0x520 [c000000f17e03d80] c000000000126174 kthread+0xf4/0x100 [c000000f17e03e30] c00000000000a47c ret_from_kernel_thread+0x5c/0xe0 In an effort to avoid this, a mapcount was introduced in commit b45cdbaf9f7f ("cxlflash: Resolve oops in wait_port_offline") but due to the race condition described above, this solution is incomplete. In order to fully resolve this problem and to simplify things, this commit removes the mapcount solution. Instead, the scheduled worker thread is cancelled after interrupts have been disabled and prior to the mapping being freed. Fixes: b45cdbaf9f7f ("cxlflash: Resolve oops in wait_port_offline") Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. 
Petersen --- drivers/scsi/cxlflash/common.h | 2 -- drivers/scsi/cxlflash/main.c | 34 ++++++---------------------------- 2 files changed, 6 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index dee865735ac0..d11dcc59ff46 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -174,8 +174,6 @@ struct afu { struct sisl_host_map __iomem *host_map; /* MC host map */ struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */ - struct kref mapcount; - ctx_hndl_t ctx_hndl; /* master's context handle */ atomic_t hsq_credits; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index ab38bca5df2b..7069639e92bc 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -419,16 +419,6 @@ out: return rc; } -static void afu_unmap(struct kref *ref) -{ - struct afu *afu = container_of(ref, struct afu, mapcount); - - if (likely(afu->afu_map)) { - cxl_psa_unmap((void __iomem *)afu->afu_map); - afu->afu_map = NULL; - } -} - /** * cxlflash_driver_info() - information handler for this host driver * @host: SCSI host associated with device. @@ -459,7 +449,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) ulong lock_flags; int nseg = 0; int rc = 0; - int kref_got = 0; dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu " "cdb=(%08x-%08x-%08x-%08x)\n", @@ -497,9 +486,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) break; } - kref_get(&cfg->afu->mapcount); - kref_got = 1; - if (likely(sg)) { nseg = scsi_dma_map(scp); if (unlikely(nseg < 0)) { @@ -530,8 +516,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) if (unlikely(rc)) scsi_dma_unmap(scp); out: - if (kref_got) - kref_put(&afu->mapcount, afu_unmap); return rc; } @@ -569,13 +553,15 @@ static void free_mem(struct cxlflash_cfg *cfg) * * Safe to call with AFU in a partially allocated/initialized state. * - * Waits for any active internal AFU commands to timeout and then unmaps - * the MMIO space. + * Cancels scheduled worker threads, waits for any active internal AFU + * commands to timeout and then unmaps the MMIO space. 
*/ static void stop_afu(struct cxlflash_cfg *cfg) { struct afu *afu = cfg->afu; + cancel_work_sync(&cfg->work_q); + if (likely(afu)) { while (atomic_read(&afu->cmds_active)) ssleep(1); @@ -583,7 +569,6 @@ static void stop_afu(struct cxlflash_cfg *cfg) cxl_psa_unmap((void __iomem *)afu->afu_map); afu->afu_map = NULL; } - kref_put(&afu->mapcount, afu_unmap); } } @@ -767,7 +752,6 @@ static void cxlflash_remove(struct pci_dev *pdev) scsi_remove_host(cfg->host); /* fall through */ case INIT_STATE_AFU: - cancel_work_sync(&cfg->work_q); term_afu(cfg); case INIT_STATE_PCI: pci_disable_device(pdev); @@ -1277,7 +1261,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) __func__, port); cfg->lr_state = LINK_RESET_REQUIRED; cfg->lr_port = port; - kref_get(&cfg->afu->mapcount); schedule_work(&cfg->work_q); } @@ -1298,7 +1281,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) if (info->action & SCAN_HOST) { atomic_inc(&cfg->scan_host_needed); - kref_get(&cfg->afu->mapcount); schedule_work(&cfg->work_q); } } @@ -1704,7 +1686,6 @@ static int init_afu(struct cxlflash_cfg *cfg) rc = -ENOMEM; goto err1; } - kref_init(&afu->mapcount); /* No byte reverse on reading afu_version or string will be backwards */ reg = readq(&afu->afu_map->global.regs.afu_version); @@ -1716,7 +1697,7 @@ static int init_afu(struct cxlflash_cfg *cfg) "interface version %016llx\n", afu->version, afu->interface_version); rc = -EINVAL; - goto err2; + goto err1; } if (afu_is_sq_cmd_mode(afu)) { @@ -1733,7 +1714,7 @@ static int init_afu(struct cxlflash_cfg *cfg) rc = start_afu(cfg); if (rc) { dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); - goto err2; + goto err1; } afu_err_intr_init(cfg->afu); @@ -1746,8 +1727,6 @@ out: dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; -err2: - kref_put(&afu->mapcount, afu_unmap); err1: term_intr(cfg, UNMAP_THREE); term_mc(cfg); @@ -2341,7 +2320,6 @@ static void cxlflash_worker_thread(struct work_struct *work) if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) scsi_scan_host(cfg->host); - kref_put(&afu->mapcount, afu_unmap); } /** -- cgit v1.2.3-59-g8ed1b From 2c0f83f328fcb48da03401e1ecb2b34492671a9a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 11 Jan 2017 14:26:52 +0100 Subject: scsi: qla4xxx: remove two unused MSI-X related #defines Spotted while preparing qla2xxx changes as the symbols exist in both drivers (sigh..). Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/qla4xxx/ql4_def.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index aeebefb1e9f8..fc233717355f 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -408,9 +408,6 @@ struct qla4_8xxx_legacy_intr_set { }; /* MSI-X Support */ - -#define QLA_MSIX_DEFAULT 0 -#define QLA_MSIX_RSP_Q 1 #define QLA_MSIX_ENTRIES 2 /* -- cgit v1.2.3-59-g8ed1b From e01ea5e2a3f2a127ace8b671a830a5c789f07510 Mon Sep 17 00:00:00 2001 From: Shyam Saini Date: Sat, 24 Dec 2016 16:21:07 +0530 Subject: scsi: lpfc: Replace BUG() with BUG_ON() Replace BUG() with BUG_ON() using coccinelle. Signed-off-by: Shyam Saini Acked-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_els.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 27f0cbb9b278..ede14f1810d2 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -8855,8 +8855,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, { struct ls_rjt stat; - if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC) - BUG(); + BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); switch (rspiocb->iocb.ulpStatus) { case IOSTAT_NPORT_RJT: -- cgit v1.2.3-59-g8ed1b From f253473699b2e308e48a6e2eacb4391e045410c9 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 12 Jan 2017 14:21:32 +0000 Subject: scsi: be2iscsi: Use GFP_ATOMIC under spin lock A spin lock is taken here so we should use GFP_ATOMIC. Fixes: 987132167f4b ("scsi: be2iscsi: Fix for crash in beiscsi_eh_device_reset") Signed-off-by: Wei Yongjun Reviewed-by: Jitendra Bhivare Signed-off-by: Martin K. Petersen --- drivers/scsi/be2iscsi/be_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 6372613e7516..c9b9daa91091 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -294,7 +294,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; - inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_KERNEL); + inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC); if (!inv_tbl) { spin_unlock_bh(&session->frwd_lock); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, -- cgit v1.2.3-59-g8ed1b From f2a3313d65f0bf6d3fefe8fa40e0208fe003e59e Mon Sep 17 00:00:00 2001 From: John Pittman Date: Thu, 12 Jan 2017 16:17:20 -0500 Subject: scsi: sd: Cleaned up comment references to @sdp argument explanation. In sd.c there are two comment references to 'struct scsi_device *sdp' as an argument. One of the references has a typo and the other should be a reference to 'struct device *dev' instead. Fixed by correcting the typo in the first and changing the explanation in the second. Signed-off-by: John Pittman Signed-off-by: Martin K. Petersen --- drivers/scsi/sd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b1933041da39..52105d8bce5b 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -703,7 +703,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) /** * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device - * @sdp: scsi device to operate one + * @sdp: scsi device to operate on * @rq: Request to prepare * * Will issue either UNMAP or WRITE SAME(16) depending on preference @@ -3192,7 +3192,7 @@ static int sd_probe(struct device *dev) * sd_remove - called whenever a scsi disk (previously recognized by * sd_probe) is detached from the system. It is called (potentially * multiple times) during sd module unload. - * @sdp: pointer to mid level scsi device object + * @dev: pointer to device object * * Note: this function is invoked from the scsi mid-level. * This function potentially frees up a device name (e.g. /dev/sdc) -- cgit v1.2.3-59-g8ed1b From e2934ed183b94cff7626a517877ecc9bbe82cdc8 Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 17 Jan 2017 12:31:56 -0800 Subject: scsi: lpfc: Fix lpfc_wwn_set return code check When I reversed the patch to re-add the lpfc_soft_wwn parameter feature, it re-added code that had a long-standing bug. 
(that's what I get I guess :) As Dan Carpenter pointed out - error checks looked at wrong polarity. 0 is success, -errno is failure. Updated checks. Signed-off-by: James Smart Reported-by: Dan Carpenter Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 6c104d79abe7..50cf402dea29 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -2150,7 +2150,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, phba->soft_wwn_enable = 0; rc = lpfc_wwn_set(buf, cnt, wwpn); - if (!rc) { + if (rc) { /* not able to set wwpn, unlock it */ phba->soft_wwn_enable = 1; return rc; @@ -2231,7 +2231,7 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, return -EINVAL; rc = lpfc_wwn_set(buf, cnt, wwnn); - if (!rc) { + if (rc) { /* Allow wwnn to be set many times, as long as the enable * is set. However, once the wwpn is set, everything locks. */ -- cgit v1.2.3-59-g8ed1b From f2f89c32a0f1512856ffb4569bfdf2f20e42fcab Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Fri, 20 Jan 2017 20:45:18 +0800 Subject: scsi: hisi_sas: workaround v2 hw SATA IO timeout issue The v2 SAS controller needs more time to detect channel idle and send setup link request than SATA disk does, so it is difficult for the SAS controller to setup an STP link. Therefore it may cause some IO timeouts. We need to periodically configure the SAS controller so it doesn't receive STP setup requests from SATA disks for a while, so IO can be sent during this period. Signed-off-by: Xiang Chen Signed-off-by: John Garry Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 75 +++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 69b0f06d1a96..6c787ebdebee 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -215,6 +215,7 @@ #define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define CON_CONTROL (PORT_BASE + 0x118) #define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) #define CHL_INT0 (PORT_BASE + 0x1b4) #define CHL_INT0_HOTPLUG_TOUT_OFF 0 @@ -526,6 +527,8 @@ enum { #define SATA_PROTOCOL_FPDMA 0x8 #define SATA_PROTOCOL_ATAPI 0x10 +static void hisi_sas_link_timeout_disable_link(unsigned long data); + static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; @@ -978,6 +981,50 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) upper_32_bits(hisi_hba->initial_fis_dma)); } +static void hisi_sas_link_timeout_enable_link(unsigned long data) +{ + struct hisi_hba *hisi_hba = (struct hisi_hba *)data; + int i, reg_val; + + for (i = 0; i < hisi_hba->n_phy; i++) { + reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL); + if (!(reg_val & BIT(0))) { + hisi_sas_phy_write32(hisi_hba, i, + CON_CONTROL, 0x7); + break; + } + } + + hisi_hba->timer.function = hisi_sas_link_timeout_disable_link; + mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900)); +} + +static void hisi_sas_link_timeout_disable_link(unsigned long data) +{ + struct hisi_hba *hisi_hba = (struct hisi_hba *)data; + int i, reg_val; + + reg_val = hisi_sas_read32(hisi_hba, PHY_STATE); + for (i = 0; i < hisi_hba->n_phy && reg_val; 
i++) { + if (reg_val & BIT(i)) { + hisi_sas_phy_write32(hisi_hba, i, + CON_CONTROL, 0x6); + break; + } + } + + hisi_hba->timer.function = hisi_sas_link_timeout_enable_link; + mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100)); +} + +static void set_link_timer_quirk(struct hisi_hba *hisi_hba) +{ + hisi_hba->timer.data = (unsigned long)hisi_hba; + hisi_hba->timer.function = hisi_sas_link_timeout_disable_link; + hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000); + add_timer(&hisi_hba->timer); +} + static int hw_init_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = &hisi_hba->pdev->dev; @@ -2020,9 +2067,12 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != SAS_PHY_UNUSED) + else if (phy->identify.device_type != SAS_PHY_UNUSED) { phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + if (!timer_pending(&hisi_hba->timer)) + set_link_timer_quirk(hisi_hba); + } queue_work(hisi_hba->wq, &phy->phyup_ws); end: @@ -2033,10 +2083,23 @@ end: return res; } +static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba) +{ + u32 port_state; + + port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + if (port_state & 0x1ff) + return true; + + return false; +} + static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) { int res = 0; u32 phy_state, sl_ctrl, txid_auto; + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct hisi_sas_port *port = phy->port; hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); @@ -2046,6 +2109,10 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_ctrl & ~SL_CONTROL_CTA_MSK); + if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id)) + if (!check_any_wideports_v2_hw(hisi_hba) && + timer_pending(&hisi_hba->timer)) + del_timer(&hisi_hba->timer); txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, @@ -2821,6 +2888,12 @@ static int hisi_sas_v2_probe(struct platform_device *pdev) static int hisi_sas_v2_remove(struct platform_device *pdev) { + struct sas_ha_struct *sha = platform_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + + if (timer_pending(&hisi_hba->timer)) + del_timer(&hisi_hba->timer); + return hisi_sas_remove(pdev); } -- cgit v1.2.3-59-g8ed1b From c399acfb85974a3ebc0dec0e7fb7b60cb1aeac7b Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Fri, 20 Jan 2017 20:45:19 +0800 Subject: scsi: hisi_sas: modify some values of ITCT table Set SMP connection timeout and continue AWT timer; Clear ITCT table when dev gone. Signed-off-by: Xiang Chen Signed-off-by: John Garry Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 6c787ebdebee..93fcedaa583f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -334,6 +334,11 @@ #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) #define ITCT_HDR_VLN_OFF 9 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) +#define ITCT_HDR_SMP_TIMEOUT_OFF 16 +#define ITCT_HDR_SMP_TIMEOUT_8US 1 +#define ITCT_HDR_SMP_TIMEOUT (ITCT_HDR_SMP_TIMEOUT_8US * \ + 250) /* 2ms */ +#define ITCT_HDR_AWT_CONTINUE_OFF 25 #define ITCT_HDR_PORT_ID_OFF 28 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) /* qw2 */ @@ -696,6 +701,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, qw0 |= ((1 << ITCT_HDR_VALID_OFF) | (device->linkrate << ITCT_HDR_MCR_OFF) | (1 << ITCT_HDR_VLN_OFF) | + (ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) | + (1 << ITCT_HDR_AWT_CONTINUE_OFF) | (port->id << ITCT_HDR_PORT_ID_OFF)); itct->qw0 = cpu_to_le64(qw0); @@ -705,7 +712,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, /* qw2 */ if (!dev_is_sata(device)) - itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | + itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | (0x1ULL << ITCT_HDR_BITLT_OFF) | (0x32ULL << ITCT_HDR_MCTLT_OFF) | (0x1ULL << ITCT_HDR_RTOLT_OFF)); @@ -714,7 +721,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, static void free_device_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { - u64 qw0, dev_id = sas_dev->device_id; + u64 dev_id = sas_dev->device_id; struct device *dev = &hisi_hba->pdev->dev; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); @@ -738,8 +745,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba, dev_dbg(dev, "got clear ITCT done interrupt\n"); /* invalid the itct state*/ - qw0 = cpu_to_le64(itct->qw0); - qw0 &= ~(1 << ITCT_HDR_VALID_OFF); + memset(itct, 0, sizeof(struct hisi_sas_itct)); hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); -- cgit v1.2.3-59-g8ed1b From 87e287c1ebf887acdc2b1b0563b52f0c4449dccb Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 20 Jan 2017 20:45:20 +0800 Subject: scsi: hisi_sas: downgrade refclk message The message to inform that the controller has no refclk is currently at warning level, which is unnecessary, so downgrade to debug. Signed-off-by: John Garry Reviewed-by: Xiang Chen Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 22dba0143bdc..eee7ae2c771f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1453,7 +1453,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, refclk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(refclk)) - dev_info(dev, "no ref clk property\n"); + dev_dbg(dev, "no ref clk property\n"); else hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; -- cgit v1.2.3-59-g8ed1b From 0edef7e46c24b86dc51e6ba7e22c3070ced7328c Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Fri, 20 Jan 2017 20:45:21 +0800 Subject: scsi: hisi_sas: modify hard reset for direct-attached disk Correctly set registers in v2 for root PHY hardreset for a directly attached disk. Signed-off-by: Xiang Chen Signed-off-by: John Garry Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 93fcedaa583f..2e776b70be8f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -207,6 +207,8 @@ #define TXID_AUTO (PORT_BASE + 0xb8) #define TXID_AUTO_CT3_OFF 1 #define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF) +#define TX_HARDRST_OFF 2 +#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) @@ -1078,7 +1080,15 @@ static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + u32 txid_auto; + stop_phy_v2_hw(hisi_hba, phy_no); + if (phy->identify.device_type == SAS_END_DEVICE) { + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto | TX_HARDRST_MSK); + } msleep(100); start_phy_v2_hw(hisi_hba, phy_no); } -- cgit v1.2.3-59-g8ed1b From 297d73023ac559ea0cb4f1638fc8563b8c763c1e Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 20 Jan 2017 20:45:22 +0800 Subject: scsi: hisi_sas: downgrade internal abort exit print Downgrade the exit print in hisi_sas_internal_task_abort() to dbg level, as info is not required. Signed-off-by: John Garry Reviewed-by: Xiang Chen Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index eee7ae2c771f..b2782ce04542 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1121,7 +1121,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, } exit: - dev_info(dev, "internal task abort: task to dev %016llx task=%p " + dev_dbg(dev, "internal task abort: task to dev %016llx task=%p " "resp: 0x%x sts 0x%x\n", SAS_ADDR(device->sas_addr), task, -- cgit v1.2.3-59-g8ed1b From 0757f041b171d19cab9b6dac186a588ba0260055 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Fri, 20 Jan 2017 20:45:23 +0800 Subject: scsi: hisi_sas: fix probe ordering problem There is a potential probe issue in how we trigger the hw initialisation. 
Although we use a 1s timer to delay hw initialisation, there is still a possibility that sas_register_ha() may not have finished before we start the PHY init from hw->hw_init(). To avoid this issue, initialise the hw after sas_register_ha() in the same probe context. Note: it is not necessary to use the 1s timer now (modified v2 hw only). Signed-off-by: Xiang Chen Signed-off-by: John Garry Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 8 ++++---- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 8 ++------ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index b2782ce04542..8601cec296a8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1552,10 +1552,6 @@ int hisi_sas_probe(struct platform_device *pdev, hisi_sas_init_add(hisi_hba); - rc = hisi_hba->hw->hw_init(hisi_hba); - if (rc) - goto err_out_ha; - rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_ha; @@ -1564,6 +1560,10 @@ int hisi_sas_probe(struct platform_device *pdev, if (rc) goto err_out_register_ha; + rc = hisi_hba->hw->hw_init(hisi_hba); + if (rc) + goto err_out_register_ha; + scsi_scan_host(shost); return 0; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 2e776b70be8f..9e16f42944bc 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -1093,9 +1093,8 @@ static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) start_phy_v2_hw(hisi_hba, phy_no); } -static void start_phys_v2_hw(unsigned long data) +static void start_phys_v2_hw(struct hisi_hba *hisi_hba) { - struct hisi_hba *hisi_hba = (struct hisi_hba *)data; int i; for (i = 0; i < hisi_hba->n_phy; i++) @@ -1104,10 +1103,7 @@ static void start_phys_v2_hw(unsigned long data) static void phys_init_v2_hw(struct hisi_hba *hisi_hba) { - struct timer_list *timer = &hisi_hba->timer; - - setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba); - mod_timer(timer, jiffies + HZ); + start_phys_v2_hw(hisi_hba); } static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no) -- cgit v1.2.3-59-g8ed1b From 13c599069130e5a1f04a9409d65566e149b12708 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Fri, 20 Jan 2017 20:45:24 +0800 Subject: scsi: hisi_sas: decrease running_req in hisi_sas_slot_task_free() There is an issue that hisi_sas_dev.running_req is not decremented properly for internal abort and TMF. To resolve, only decrease running_req in hisi_sas_slot_task_free(). Signed-off-by: Xiang Chen Signed-off-by: John Garry Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 8 ++++---- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 2 -- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 8601cec296a8..53637a941b94 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -71,6 +71,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { struct device *dev = &hisi_hba->pdev->dev; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; if (!slot->task) return; @@ -97,6 +99,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, slot->task = NULL; slot->port = NULL; hisi_sas_slot_index_free(hisi_hba, slot->idx); + if (sas_dev) + atomic64_dec(&sas_dev->running_req); /* slot memory is fully zeroed when it is reused */ } EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); @@ -141,8 +145,6 @@ static void hisi_sas_slot_abort(struct work_struct *work) struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); struct scsi_cmnd *cmnd = task->uldd_task; struct hisi_sas_tmf_task tmf_task; - struct domain_device *device = task->dev; - struct hisi_sas_device *sas_dev = device->lldd_dev; struct scsi_lun lun; struct device *dev = &hisi_hba->pdev->dev; int tag = abort_slot->idx; @@ -165,8 +167,6 @@ out: spin_unlock_irqrestore(&hisi_hba->lock, flags); if (task->task_done) task->task_done(task); - if (sas_dev) - atomic64_dec(&sas_dev->running_req); } static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 9e16f42944bc..1b214450dcb5 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -1830,8 +1830,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, } out: - if (sas_dev) - atomic64_dec(&sas_dev->running_req); hisi_sas_slot_task_free(hisi_hba, task, slot); sts = ts->stat; -- cgit v1.2.3-59-g8ed1b From 090284610632bb32a5d2eea41420ee7c2b6975ca Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 15 Jan 2017 18:50:57 -0500 Subject: scsi: ncr5380: Shorten host info string by removing unused option macros The DIFFERENTIAL and PARITY option macros are unused: no supported hardware uses differential signalling and the core driver never implemented parity checking. These options just waste space in the host info string. While we are here, fix a typo in the NCR5380_info() kernel-doc comment. Signed-off-by: Finn Thain Tested-by: Ondrej Zary Tested-by: Michael Schmitz Signed-off-by: Martin K. Petersen --- drivers/scsi/NCR5380.c | 49 +++++++++---------------------------------------- drivers/scsi/NCR5380.h | 10 +--------- 2 files changed, 10 insertions(+), 49 deletions(-) diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 4f5ca794bb71..f29b407d91d1 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -96,17 +96,6 @@ * of chips. To use it, you write an architecture specific functions * and macros and include this file in your driver. * - * These macros control options : - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. - * - * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential - * transceivers. 
- * - * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases. - * - * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. - * * These macros MUST be defined : * * NCR5380_read(register) - read from the specified register @@ -347,7 +336,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) #endif /** - * NCR58380_info - report driver and host information + * NCR5380_info - report driver and host information * @instance: relevant scsi host instance * * For use as the host template info() handler. @@ -360,33 +349,6 @@ static const char *NCR5380_info(struct Scsi_Host *instance) return hostdata->info; } -static void prepare_info(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - snprintf(hostdata->info, sizeof(hostdata->info), - "%s, irq %d, " - "io_port 0x%lx, base 0x%lx, " - "can_queue %d, cmd_per_lun %d, " - "sg_tablesize %d, this_id %d, " - "flags { %s%s%s}, " - "options { %s} ", - instance->hostt->name, instance->irq, - hostdata->io_port, hostdata->base, - instance->can_queue, instance->cmd_per_lun, - instance->sg_tablesize, instance->this_id, - hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "", - hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", - hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "", -#ifdef DIFFERENTIAL - "DIFFERENTIAL " -#endif -#ifdef PARITY - "PARITY " -#endif - ""); -} - /** * NCR5380_init - initialise an NCR5380 * @instance: adapter to configure @@ -436,7 +398,14 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) if (!hostdata->work_q) return -ENOMEM; - prepare_info(instance); + snprintf(hostdata->info, sizeof(hostdata->info), + "%s, irq %d, io_port 0x%lx, base 0x%lx, can_queue %d, cmd_per_lun %d, sg_tablesize %d, this_id %d, flags { %s%s%s}", + instance->hostt->name, instance->irq, hostdata->io_port, + hostdata->base, instance->can_queue, instance->cmd_per_lun, + instance->sg_tablesize, instance->this_id, + hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "", + hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", + hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : ""); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(MODE_REG, MR_BASE); diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 51a3567a6fb2..e61d9f9987fa 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -81,11 +81,7 @@ #define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */ #define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */ -#ifdef DIFFERENTIAL -#define ICR_BASE ICR_DIFF_ENABLE -#else #define ICR_BASE 0 -#endif #define MODE_REG 2 /* @@ -102,11 +98,7 @@ #define MR_DMA_MODE 0x02 /* rw DMA / pseudo DMA mode */ #define MR_ARBITRATE 0x01 /* rw start arbitration */ -#ifdef PARITY -#define MR_BASE MR_ENABLE_PAR_CHECK -#else #define MR_BASE 0 -#endif #define TARGET_COMMAND_REG 3 #define TCR_LAST_BYTE_SENT 0x80 /* ro DMA done */ @@ -234,7 +226,7 @@ struct NCR5380_hostdata { unsigned char id_higher_mask; /* All bits above id_mask */ unsigned char last_message; /* Last Message Out */ unsigned long region_size; /* Size of address/port range */ - char info[256]; + char info[168]; /* Host banner message */ }; #ifdef __KERNEL__ -- cgit v1.2.3-59-g8ed1b From 9507037304e581493bcd8bf76e013e44be71470e Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 15 Jan 2017 18:50:57 -0500 Subject: scsi: ncr5380: Clean up dead code and redundant macro usage Remove dead code inside #if 0 conditionals. 
Remove the #ifdef __KERNEL__ test, since NCR5380.h has no definitions that relate to userspace code. Remove two redundant macro definitions which were overlooked in commit e9db3198e08b ("sun3_scsi: Adopt NCR5380.c core driver"). Signed-off-by: Finn Thain Tested-by: Ondrej Zary Tested-by: Michael Schmitz Signed-off-by: Martin K. Petersen --- drivers/scsi/NCR5380.h | 7 ------- drivers/scsi/atari_scsi.c | 31 ------------------------------- drivers/scsi/sun3_scsi.c | 3 --- 3 files changed, 41 deletions(-) diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index e61d9f9987fa..d78f0957d865 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -166,11 +166,7 @@ #define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer read */ #define CSR_GATED_53C80_IRQ 0x01 /* ro Last block xferred */ -#if 0 -#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR -#else #define CSR_BASE CSR_53C80_INTR -#endif /* Note : PHASE_* macros are based on the values of the STATUS register */ #define PHASE_MASK (SR_MSG | SR_CD | SR_IO) @@ -229,8 +225,6 @@ struct NCR5380_hostdata { char info[168]; /* Host banner message */ }; -#ifdef __KERNEL__ - struct NCR5380_cmd { struct list_head list; }; @@ -323,5 +317,4 @@ static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata) return 0; } -#endif /* __KERNEL__ */ #endif /* NCR5380_H */ diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 105b35393ce9..2b6eb7c24c69 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -178,37 +178,6 @@ static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) } -#if 0 -/* Dead code... wasn't called anyway :-) and causes some trouble, because at - * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has - * to clear the DMA int pending bit before it allows other level 6 interrupts. - */ -static void scsi_dma_buserr(int irq, void *dummy) -{ - unsigned char dma_stat = tt_scsi_dma.dma_ctrl; - - /* Don't do anything if a NCR interrupt is pending. Probably it's just - * masked... */ - if (atari_irq_pending(IRQ_TT_MFP_SCSI)) - return; - - printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n", - SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt)); - if (dma_stat & 0x80) { - if (!scsi_dma_is_ignored_buserr(dma_stat)) - printk("SCSI DMA bus error -- bad DMA programming!\n"); - } else { - /* Under normal circumstances we never should get to this point, - * since both interrupts are triggered simultaneously and the 5380 - * int has higher priority. When this irq is handled, that DMA - * interrupt is cleared. So a warning message is printed here. - */ - printk("SCSI DMA intr ?? -- this shouldn't happen!\n"); - } -} -#endif - - static irqreturn_t scsi_tt_intr(int irq, void *dev) { struct Scsi_Host *instance = dev; diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 88db6992420e..166f466964c4 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -56,9 +56,6 @@ #define NCR5380_dma_send_setup sun3scsi_dma_count #define NCR5380_dma_residual sun3scsi_dma_residual -#define NCR5380_acquire_dma_irq(instance) (1) -#define NCR5380_release_dma_irq(instance) - #include "NCR5380.h" -- cgit v1.2.3-59-g8ed1b From 14d739f640d894b4b5db1ab3e1df17b865825a42 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 15 Jan 2017 18:50:57 -0500 Subject: scsi: ncr5380: Reduce #include files The NCR5380 wrapper drivers don't export symbols or declarations and don't actually need separate header files. 
Most of these header files were removed already; only sun3_scsi.h and g_NCR5380.h remain. Move the remaining definitions to the corresponding .c files to improve readability and proximity. The #defines which influence the #included core driver are no longer mixed up with unrelated #defines and #includes. Signed-off-by: Finn Thain Tested-by: Ondrej Zary Tested-by: Michael Schmitz Signed-off-by: Martin K. Petersen --- drivers/scsi/g_NCR5380.c | 45 ++++++++++++++++++++- drivers/scsi/g_NCR5380.h | 56 -------------------------- drivers/scsi/sun3_scsi.c | 80 ++++++++++++++++++++++++++++++++++++- drivers/scsi/sun3_scsi.h | 102 ----------------------------------------------- 4 files changed, 122 insertions(+), 161 deletions(-) delete mode 100644 drivers/scsi/g_NCR5380.h delete mode 100644 drivers/scsi/sun3_scsi.h diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 6f9665d50d84..67c8dac321ad 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -26,14 +26,55 @@ #include #include #include -#include "g_NCR5380.h" -#include "NCR5380.h" #include #include #include #include #include +/* Definitions for the core NCR5380 driver. */ + +#define NCR5380_read(reg) \ + ioread8(hostdata->io + hostdata->offset + (reg)) +#define NCR5380_write(reg, value) \ + iowrite8(value, hostdata->io + hostdata->offset + (reg)) + +#define NCR5380_implementation_fields \ + int offset; \ + int c400_ctl_status; \ + int c400_blk_cnt; \ + int c400_host_buf; \ + int io_width + +#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len +#define NCR5380_dma_recv_setup generic_NCR5380_pread +#define NCR5380_dma_send_setup generic_NCR5380_pwrite +#define NCR5380_dma_residual NCR5380_dma_residual_none + +#define NCR5380_intr generic_NCR5380_intr +#define NCR5380_queue_command generic_NCR5380_queue_command +#define NCR5380_abort generic_NCR5380_abort +#define NCR5380_bus_reset generic_NCR5380_bus_reset +#define NCR5380_info generic_NCR5380_info + +#define NCR5380_io_delay(x) udelay(x) + +#include "NCR5380.h" + +#define DRV_MODULE_NAME "g_NCR5380" + +#define NCR53C400_mem_base 0x3880 +#define NCR53C400_host_buffer 0x3900 +#define NCR53C400_region_size 0x3a00 + +#define BOARD_NCR5380 0 +#define BOARD_NCR53C400 1 +#define BOARD_NCR53C400A 2 +#define BOARD_DTC3181E 3 +#define BOARD_HP_C2502 4 + +#define IRQ_AUTO 254 + #define MAX_CARDS 8 /* old-style parameters for compatibility */ diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h deleted file mode 100644 index 81b22d989648..000000000000 --- a/drivers/scsi/g_NCR5380.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Generic Generic NCR5380 driver defines - * - * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 440-4894 - * - * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin - * K.Lentin@cs.monash.edu.au - */ - -#ifndef GENERIC_NCR5380_H -#define GENERIC_NCR5380_H - -#define DRV_MODULE_NAME "g_NCR5380" - -#define NCR5380_read(reg) \ - ioread8(hostdata->io + hostdata->offset + (reg)) -#define NCR5380_write(reg, value) \ - iowrite8(value, hostdata->io + hostdata->offset + (reg)) - -#define NCR5380_implementation_fields \ - int offset; \ - int c400_ctl_status; \ - int c400_blk_cnt; \ - int c400_host_buf; \ - int io_width; - -#define NCR53C400_mem_base 0x3880 -#define NCR53C400_host_buffer 0x3900 -#define NCR53C400_region_size 0x3a00 - -#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len -#define NCR5380_dma_recv_setup generic_NCR5380_pread -#define 
NCR5380_dma_send_setup generic_NCR5380_pwrite -#define NCR5380_dma_residual NCR5380_dma_residual_none - -#define NCR5380_intr generic_NCR5380_intr -#define NCR5380_queue_command generic_NCR5380_queue_command -#define NCR5380_abort generic_NCR5380_abort -#define NCR5380_bus_reset generic_NCR5380_bus_reset -#define NCR5380_info generic_NCR5380_info - -#define NCR5380_io_delay(x) udelay(x) - -#define BOARD_NCR5380 0 -#define BOARD_NCR53C400 1 -#define BOARD_NCR53C400A 2 -#define BOARD_DTC3181E 3 -#define BOARD_HP_C2502 4 - -#define IRQ_AUTO 254 - -#endif /* GENERIC_NCR5380_H */ diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 166f466964c4..14cebaf10371 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -34,7 +34,6 @@ #include #include -#include "sun3_scsi.h" /* minimum number of bytes to do dma on */ #define DMA_MIN_SIZE 129 @@ -58,6 +57,85 @@ #include "NCR5380.h" +/* dma regs start at regbase + 8, directly after the NCR regs */ +struct sun3_dma_regs { + unsigned short dma_addr_hi; /* vme only */ + unsigned short dma_addr_lo; /* vme only */ + unsigned short dma_count_hi; /* vme only */ + unsigned short dma_count_lo; /* vme only */ + unsigned short udc_data; /* udc dma data reg (obio only) */ + unsigned short udc_addr; /* uda dma addr reg (obio only) */ + unsigned short fifo_data; /* fifo data reg, + * holds extra byte on odd dma reads + */ + unsigned short fifo_count; + unsigned short csr; /* control/status reg */ + unsigned short bpack_hi; /* vme only */ + unsigned short bpack_lo; /* vme only */ + unsigned short ivect; /* vme only */ + unsigned short fifo_count_hi; /* vme only */ +}; + +/* ucd chip specific regs - live in dvma space */ +struct sun3_udc_regs { + unsigned short rsel; /* select regs to load */ + unsigned short addr_hi; /* high word of addr */ + unsigned short addr_lo; /* low word */ + unsigned short count; /* words to be xfer'd */ + unsigned short mode_hi; /* high word of channel mode */ + unsigned short mode_lo; /* low word of channel mode */ +}; + +/* addresses of the udc registers */ +#define UDC_MODE 0x38 +#define UDC_CSR 0x2e /* command/status */ +#define UDC_CHN_HI 0x26 /* chain high word */ +#define UDC_CHN_LO 0x22 /* chain lo word */ +#define UDC_CURA_HI 0x1a /* cur reg A high */ +#define UDC_CURA_LO 0x0a /* cur reg A low */ +#define UDC_CURB_HI 0x12 /* cur reg B high */ +#define UDC_CURB_LO 0x02 /* cur reg B low */ +#define UDC_MODE_HI 0x56 /* mode reg high */ +#define UDC_MODE_LO 0x52 /* mode reg low */ +#define UDC_COUNT 0x32 /* words to xfer */ + +/* some udc commands */ +#define UDC_RESET 0 +#define UDC_CHN_START 0xa0 /* start chain */ +#define UDC_INT_ENABLE 0x32 /* channel 1 int on */ + +/* udc mode words */ +#define UDC_MODE_HIWORD 0x40 +#define UDC_MODE_LSEND 0xc2 +#define UDC_MODE_LRECV 0xd2 + +/* udc reg selections */ +#define UDC_RSEL_SEND 0x282 +#define UDC_RSEL_RECV 0x182 + +/* bits in csr reg */ +#define CSR_DMA_ACTIVE 0x8000 +#define CSR_DMA_CONFLICT 0x4000 +#define CSR_DMA_BUSERR 0x2000 + +#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? 
*/ +#define CSR_SDB_INT 0x200 /* sbc interrupt pending */ +#define CSR_DMA_INT 0x100 /* dma interrupt pending */ + +#define CSR_LEFT 0xc0 +#define CSR_LEFT_3 0xc0 +#define CSR_LEFT_2 0x80 +#define CSR_LEFT_1 0x40 +#define CSR_PACK_ENABLE 0x20 + +#define CSR_DMA_ENABLE 0x10 + +#define CSR_SEND 0x8 /* 1 = send 0 = recv */ +#define CSR_FIFO 0x2 /* reset fifo */ +#define CSR_INTR 0x4 /* interrupt enable */ +#define CSR_SCSI 0x1 + +#define VME_DATA24 0x3d00 extern int sun3_map_test(unsigned long, char *); diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h deleted file mode 100644 index d22745fae328..000000000000 --- a/drivers/scsi/sun3_scsi.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) - * - * Sun3 DMA additions by Sam Creasey (sammy@sammy.net) - * - * Adapted from mac_scsinew.h: - */ -/* - * Cumana Generic NCR5380 driver defines - * - * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 440-4894 - */ - -#ifndef SUN3_SCSI_H -#define SUN3_SCSI_H - -/* additional registers - mainly DMA control regs */ -/* these start at regbase + 8 -- directly after the NCR regs */ -struct sun3_dma_regs { - unsigned short dma_addr_hi; /* vme only */ - unsigned short dma_addr_lo; /* vme only */ - unsigned short dma_count_hi; /* vme only */ - unsigned short dma_count_lo; /* vme only */ - unsigned short udc_data; /* udc dma data reg (obio only) */ - unsigned short udc_addr; /* uda dma addr reg (obio only) */ - unsigned short fifo_data; /* fifo data reg, holds extra byte on - odd dma reads */ - unsigned short fifo_count; - unsigned short csr; /* control/status reg */ - unsigned short bpack_hi; /* vme only */ - unsigned short bpack_lo; /* vme only */ - unsigned short ivect; /* vme only */ - unsigned short fifo_count_hi; /* vme only */ -}; - -/* ucd chip specific regs - live in dvma space */ -struct sun3_udc_regs { - unsigned short rsel; /* select regs to load */ - unsigned short addr_hi; /* high word of addr */ - unsigned short addr_lo; /* low word */ - unsigned short count; /* words to be xfer'd */ - unsigned short mode_hi; /* high word of channel mode */ - unsigned short mode_lo; /* low word of channel mode */ -}; - -/* addresses of the udc registers */ -#define UDC_MODE 0x38 -#define UDC_CSR 0x2e /* command/status */ -#define UDC_CHN_HI 0x26 /* chain high word */ -#define UDC_CHN_LO 0x22 /* chain lo word */ -#define UDC_CURA_HI 0x1a /* cur reg A high */ -#define UDC_CURA_LO 0x0a /* cur reg A low */ -#define UDC_CURB_HI 0x12 /* cur reg B high */ -#define UDC_CURB_LO 0x02 /* cur reg B low */ -#define UDC_MODE_HI 0x56 /* mode reg high */ -#define UDC_MODE_LO 0x52 /* mode reg low */ -#define UDC_COUNT 0x32 /* words to xfer */ - -/* some udc commands */ -#define UDC_RESET 0 -#define UDC_CHN_START 0xa0 /* start chain */ -#define UDC_INT_ENABLE 0x32 /* channel 1 int on */ - -/* udc mode words */ -#define UDC_MODE_HIWORD 0x40 -#define UDC_MODE_LSEND 0xc2 -#define UDC_MODE_LRECV 0xd2 - -/* udc reg selections */ -#define UDC_RSEL_SEND 0x282 -#define UDC_RSEL_RECV 0x182 - -/* bits in csr reg */ -#define CSR_DMA_ACTIVE 0x8000 -#define CSR_DMA_CONFLICT 0x4000 -#define CSR_DMA_BUSERR 0x2000 - -#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? 
*/ -#define CSR_SDB_INT 0x200 /* sbc interrupt pending */ -#define CSR_DMA_INT 0x100 /* dma interrupt pending */ - -#define CSR_LEFT 0xc0 -#define CSR_LEFT_3 0xc0 -#define CSR_LEFT_2 0x80 -#define CSR_LEFT_1 0x40 -#define CSR_PACK_ENABLE 0x20 - -#define CSR_DMA_ENABLE 0x10 - -#define CSR_SEND 0x8 /* 1 = send 0 = recv */ -#define CSR_FIFO 0x2 /* reset fifo */ -#define CSR_INTR 0x4 /* interrupt enable */ -#define CSR_SCSI 0x1 - -#define VME_DATA24 0x3d00 - -#endif /* SUN3_SCSI_H */ - -- cgit v1.2.3-59-g8ed1b From 4ab2a7878fa56ae7581bf8a94049b355c730aef0 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 15 Jan 2017 18:50:57 -0500 Subject: scsi: ncr5380: Resolve various static checker warnings Avoid various warnings from "make C=1" by annotating a couple of unlock-then-lock sequences, replacing a zero with NULL and correcting some type casts. Also avoid a warning from "make W=1" by adding braces. Signed-off-by: Finn Thain Tested-by: Ondrej Zary Tested-by: Michael Schmitz Signed-off-by: Martin K. Petersen --- drivers/scsi/NCR5380.c | 5 ++++- drivers/scsi/atari_scsi.c | 2 +- drivers/scsi/mac_scsi.c | 8 ++++---- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index f29b407d91d1..518d10180a31 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -591,8 +591,9 @@ static inline void maybe_release_dma_irq(struct Scsi_Host *instance) list_empty(&hostdata->unissued) && list_empty(&hostdata->autosense) && !hostdata->connected && - !hostdata->selecting) + !hostdata->selecting) { NCR5380_release_dma_irq(instance); + } } /** @@ -931,6 +932,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) + __releases(&hostdata->lock) __acquires(&hostdata->lock) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char tmp[3], phase; @@ -1623,6 +1625,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, */ static void NCR5380_information_transfer(struct Scsi_Host *instance) + __releases(&hostdata->lock) __acquires(&hostdata->lock) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char msgout = NOP; diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 2b6eb7c24c69..b2ffab6577a6 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -782,7 +782,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev) return -ENOMEM; } atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); - atari_dma_orig_addr = 0; + atari_dma_orig_addr = NULL; } instance = scsi_host_alloc(&atari_scsi_template, diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index ccb68d12692c..196acc79714b 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -154,7 +154,7 @@ __asm__ __volatile__ \ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { - unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); + u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; int n = len; int transferred; @@ -257,7 +257,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *src, int len) { unsigned char *s = src; - unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); + u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); int n = len; int transferred; @@ -381,10 +381,10 @@ static int __init mac_scsi_probe(struct platform_device *pdev) 
hostdata = shost_priv(instance); hostdata->base = pio_mem->start; - hostdata->io = (void *)pio_mem->start; + hostdata->io = (u8 __iomem *)pio_mem->start; if (pdma_mem && setup_use_pdma) - hostdata->pdma_io = (void *)pdma_mem->start; + hostdata->pdma_io = (u8 __iomem *)pdma_mem->start; else host_flags |= FLAG_NO_PSEUDO_DMA; -- cgit v1.2.3-59-g8ed1b From b15e791d0c51e778353777b3dd2993ee0a83388e Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 15 Jan 2017 18:50:57 -0500 Subject: scsi: ncr5380: Improve target selection robustness Handle timeout or bus phase change errors that could occur when sending the IDENTIFY message. Signed-off-by: Finn Thain Tested-by: Ondrej Zary Tested-by: Michael Schmitz Signed-off-by: Martin K. Petersen --- drivers/scsi/NCR5380.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 518d10180a31..acc33440bca0 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -1165,8 +1165,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); + if (len) { + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + cmd->result = DID_ERROR << 16; + complete_cmd(instance, cmd); + dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n"); + cmd = NULL; + goto out; + } + dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); - /* XXX need to handle errors here */ hostdata->connected = cmd; hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; -- cgit v1.2.3-59-g8ed1b From 546a4d1812b647fddad58a4416c7a44c27bb0499 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 15 Jan 2017 18:50:57 -0500 Subject: scsi: atari_scsi: Reset DMA during bus reset only under ST-DMA lock The atari_scsi driver should not access Falcon DMA chip registers unless it has acquired exclusive access to that chip. If the driver doesn't have exclusive access then there's no need for a DMA reset as there are no scsi commands in progress. Signed-off-by: Finn Thain Tested-by: Ondrej Zary Tested-by: Michael Schmitz Signed-off-by: Martin K. Petersen --- drivers/scsi/atari_scsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index b2ffab6577a6..f792420c533e 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -682,7 +682,8 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) if (IS_A_TT()) { tt_scsi_dma.dma_ctrl = 0; } else { - st_dma.dma_mode_status = 0x90; + if (stdma_is_locked_by(scsi_falcon_intr)) + st_dma.dma_mode_status = 0x90; atari_dma_active = 0; atari_dma_orig_addr = NULL; } -- cgit v1.2.3-59-g8ed1b From 526db94e5de8923eb293ae5e2dd6b19d75e26e4c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 29 Jan 2017 10:12:18 +0100 Subject: scsi: storvsc: remove bogus code to transfer struct scatterlist Remove a piece of code in storvsc_queuecommand that tries to pass the physical address of the kernel struct scatterlist pointer to the host. Fortunately the code can't ever be reached anyway. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Reviewed-by: K. Y. Srinivasan Signed-off-by: Martin K. 
Petersen --- drivers/scsi/storvsc_drv.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 888e16ec8554..ceadf6b4ebc1 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1635,13 +1635,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) page_to_pfn(sg_page((cur_sgl))); cur_sgl = sg_next(cur_sgl); } - - } else if (scsi_sglist(scmnd)) { - payload->range.len = length; - payload->range.offset = - virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1); - payload->range.pfn_array[0] = - virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT; } cmd_request->payload = payload; -- cgit v1.2.3-59-g8ed1b From 96b6ce4e8b66b009396799843d6191fefcc36c33 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Mon, 30 Jan 2017 16:05:17 -0600 Subject: scsi: hpsa: remove coalescing settings for ioaccel2 - Setting coalescing has a significant negative impact on low queue-depth performance. - Does not help high queue-depth performance. Signed-off-by: Don Brace Reviewed-by: Scott Benesh Reviewed-by: Scott Teel Reviewed-by: Kevin Barnett Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index cbc0c5fe5a60..4323ddcf9718 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -9263,13 +9263,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) access = SA5_ioaccel_mode1_access; writel(10, &h->cfgtable->HostWrite.CoalIntDelay); writel(4, &h->cfgtable->HostWrite.CoalIntCount); - } else { - if (trans_support & CFGTBL_Trans_io_accel2) { + } else + if (trans_support & CFGTBL_Trans_io_accel2) access = SA5_ioaccel_mode2_access; - writel(10, &h->cfgtable->HostWrite.CoalIntDelay); - writel(4, &h->cfgtable->HostWrite.CoalIntCount); - } - } writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); if (hpsa_wait_for_mode_change_ack(h)) { dev_err(&h->pdev->dev, -- cgit v1.2.3-59-g8ed1b From 6c44c0fe91af7bac78dcaf4c106421862530f499 Mon Sep 17 00:00:00 2001 From: Chaitra P B Date: Mon, 23 Jan 2017 15:26:07 +0530 Subject: scsi: mpt3sas: Added print to notify cable running at a degraded speed. Driver processes the event MPI26_EVENT_ACTIVE_CABLE_DEGRADED when a cable is present and is running at a degraded speed (below the SAS3 12 Gb/s rate). Prints added to inform the user that the cable is not running at optimal speed. Signed-off-by: Chaitra P B Signed-off-by: Suganath Prabu S Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 2 ++ drivers/scsi/mpt3sas/mpt3sas_scsih.c | 25 +++++++++++++++++-------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index 8bae305bc156..af4be403582e 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -624,6 +624,8 @@ typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT { /* defines for ReasonCode field */ #define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) +#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01) +#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02) /*Hard Reset Received Event data */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index b5c966e319d3..8f524a25bbaf 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -8016,15 +8016,24 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: ActiveCableEventData = (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; - if (ActiveCableEventData->ReasonCode == - MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) { - pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", - ioc->name, ActiveCableEventData->ReceptacleID); - pr_info("cannot be powered and devices connected to this active cable"); - pr_info("will not be seen. This active cable"); - pr_info("requires %d mW of power", - ActiveCableEventData->ActiveCablePowerRequirement); + switch (ActiveCableEventData->ReasonCode) { + case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: + pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable" + " requires %d mW of power\n", ioc->name, + ActiveCableEventData->ReceptacleID, + ActiveCableEventData->ActiveCablePowerRequirement); + pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected" + " to this active cable will not be seen\n", + ioc->name, ActiveCableEventData->ReceptacleID); + break; + + case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: + pr_notice(MPT3SAS_FMT "ReceptacleID %d: This cable", + ioc->name, ActiveCableEventData->ReceptacleID); + pr_notice(" is not running at an optimal speed(12 Gb/s)\n"); + break; } + break; default: /* ignore the rest */ -- cgit v1.2.3-59-g8ed1b From 459325c466d278d3c9f51ddc9bb544c014136fd1 Mon Sep 17 00:00:00 2001 From: Chaitra P B Date: Mon, 23 Jan 2017 15:26:08 +0530 Subject: scsi: mpt3sas: Fix for Crusader to achieve product targets with SAS devices. Small glitch/degraded performance in Crusader is improved with SAS drives by removing unnecessary spinlocks while clearing scsi command in drivers internal lookup table. Signed-off-by: Chaitra P B Signed-off-by: Suganath Prabu S Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/mpt3sas/mpt3sas_base.c | 1 + drivers/scsi/mpt3sas/mpt3sas_base.h | 1 + drivers/scsi/mpt3sas/mpt3sas_ctl.c | 4 +++- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 31 ++++++++++++++++++++++++++++--- 4 files changed, 33 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index f00ef88a378a..722fab983d2b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5522,6 +5522,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) goto out_free_resources; ioc->non_operational_loop = 0; + ioc->got_task_abort_from_ioctl = 0; return 0; out_free_resources: diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 394fe1338d09..5c1387780cc7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -988,6 +988,7 @@ struct MPT3SAS_ADAPTER { u8 broadcast_aen_busy; u16 broadcast_aen_pending; u8 shost_recovery; + u8 got_task_abort_from_ioctl; struct mutex reset_in_progress_mutex; spinlock_t ioc_reset_in_progress_lock; diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 95f0f24bac05..02fe1c4aae2f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -826,16 +826,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", ioc->name, le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); - + ioc->got_task_abort_from_ioctl = 1; if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { if (_ctl_set_task_mid(ioc, &karg, tm_request)) { mpt3sas_base_free_smid(ioc, smid); + ioc->got_task_abort_from_ioctl = 0; goto out; } } + ioc->got_task_abort_from_ioctl = 0; if (test_bit(device_handle, ioc->device_remove_in_progress)) { dtmprintk(ioc, pr_info(MPT3SAS_FMT diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8f524a25bbaf..b8cd0a1050d1 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1073,6 +1073,26 @@ _scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) return ioc->scsi_lookup[smid - 1].scmd; } +/** + * __scsih_scsi_lookup_get_clear - returns scmd entry without + * holding any lock. + * @ioc: per adapter object + * @smid: system request message index + * + * Returns the smid stored scmd pointer. + * Then will dereference the stored scmd pointer. 
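+ *
+ * No scsi_lookup_lock is taken here: the lockless swap() is intended
+ * for the normal completion path, where no ioctl task abort, broadcast
+ * AEN handling or PCI error recovery can race for the same lookup
+ * entry (those paths keep using the locked variant below).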
+ */ +static inline struct scsi_cmnd * +__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, + u16 smid) +{ + struct scsi_cmnd *scmd = NULL; + + swap(scmd, ioc->scsi_lookup[smid - 1].scmd); + + return scmd; +} + /** * _scsih_scsi_lookup_get_clear - returns scmd entry * @ioc: per adapter object @@ -1088,8 +1108,7 @@ _scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid) struct scsi_cmnd *scmd; spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); - scmd = ioc->scsi_lookup[smid - 1].scmd; - ioc->scsi_lookup[smid - 1].scmd = NULL; + scmd = __scsih_scsi_lookup_get_clear(ioc, smid); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); return scmd; @@ -4646,7 +4665,13 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) unsigned long flags; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); - scmd = _scsih_scsi_lookup_get_clear(ioc, smid); + + if (ioc->broadcast_aen_busy || ioc->pci_error_recovery || + ioc->got_task_abort_from_ioctl) + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); + else + scmd = __scsih_scsi_lookup_get_clear(ioc, smid); + if (scmd == NULL) return 1; -- cgit v1.2.3-59-g8ed1b From 6b4c335a0f6cc61c69cd24f24e40b118bd9f778a Mon Sep 17 00:00:00 2001 From: Chaitra P B Date: Mon, 23 Jan 2017 15:26:09 +0530 Subject: scsi: mpt3sas: Fix Firmware fault state 0x2100 during heavy 4K RR FIO stress test. Due existence of loop in the IO path our HBA will receive heavy IOs and also as driver is not updating the Reply Post Host Index frequently, So there will be a high chance that our Firmware unable to find any free entry in the Reply Post Descriptor Queue (i.e. Queue overflow occurs) and can observe 0x2100 firmware fault. So to fix this, we have defined a thresh hold value. After continuously processing this thresh hold number of reply descriptors driver will update the Reply Descriptor Host Index so that this thresh hold number of reply descriptors entries will be freed and these entries will be available for firmware and we won't observe this Firmware fault. We have defined this threshold value as 1/3rd of the hba queue depth. Signed-off-by: Chaitra P B Signed-off-by: Suganath Prabu S Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/mpt3sas/mpt3sas_base.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 722fab983d2b..a3fe1fb55c17 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1040,6 +1040,25 @@ _base_interrupt(int irq, void *bus_id) reply_q->reply_post_free[reply_q->reply_post_host_index]. Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; completed_cmds++; + /* Update the reply post host index after continuously + * processing the threshold number of Reply Descriptors. + * So that FW can find enough entries to post the Reply + * Descriptors in the reply descriptor post queue. 
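+ * The threshold used is one third of the HBA queue depth; once it is
+ * crossed the Reply Post Host Index register is written back and
+ * completed_cmds restarts from 1 for the next batch.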
+ */ + if (completed_cmds > ioc->hba_queue_depth/3) { + if (ioc->combined_reply_queue) { + writel(reply_q->reply_post_host_index | + ((msix_index & 7) << + MPI2_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index/8]); + } else { + writel(reply_q->reply_post_host_index | + (msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + } + completed_cmds = 1; + } if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) goto out; if (!reply_q->reply_post_host_index) -- cgit v1.2.3-59-g8ed1b From 7cfa76963f1872461adff2e84edfbaa8e17d189b Mon Sep 17 00:00:00 2001 From: Chaitra P B Date: Mon, 23 Jan 2017 15:26:10 +0530 Subject: scsi: mpt3sas: Updating driver version to v15.100.00.00 Updated driver version to "15.100.00.00" Signed-off-by: Chaitra P B Signed-off-by: Suganath Prabu S Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/mpt3sas/mpt3sas_base.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 5c1387780cc7..b2f3e24f6bc0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -73,9 +73,9 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies " #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "14.101.00.00" -#define MPT3SAS_MAJOR_VERSION 14 -#define MPT3SAS_MINOR_VERSION 101 +#define MPT3SAS_DRIVER_VERSION "15.100.00.00" +#define MPT3SAS_MAJOR_VERSION 15 +#define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 -- cgit v1.2.3-59-g8ed1b From 86e6828a8aed250be7d30f678e331b0b4180a892 Mon Sep 17 00:00:00 2001 From: Lukas Herbolt Date: Thu, 26 Jan 2017 10:00:37 +0100 Subject: scsi: scsi_debug: Add OPTIMAL TRANSFER LENGTH GRANULARITY option. [mkp: whitespace fixes] Signed-off-by: Lukas Herbolt Signed-off-by: Martin K. 
Petersen --- drivers/scsi/scsi_debug.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 03051e12a072..17249c3650fe 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -125,6 +125,7 @@ static const char *sdebug_version_date = "20160430"; #define DEF_OPTS 0 #define DEF_OPT_BLKS 1024 #define DEF_PHYSBLK_EXP 0 +#define DEF_OPT_XFERLEN_EXP 0 #define DEF_PTYPE TYPE_DISK #define DEF_REMOVABLE false #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */ @@ -590,6 +591,7 @@ static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */ static int sdebug_opt_blks = DEF_OPT_BLKS; static int sdebug_opts = DEF_OPTS; static int sdebug_physblk_exp = DEF_PHYSBLK_EXP; +static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP; static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */ static int sdebug_scsi_level = DEF_SCSI_LEVEL; static int sdebug_sector_size = DEF_SECTOR_SIZE; @@ -1205,7 +1207,11 @@ static int inquiry_vpd_b0(unsigned char *arr) memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); /* Optimal transfer length granularity */ - gran = 1 << sdebug_physblk_exp; + if (sdebug_opt_xferlen_exp != 0 && + sdebug_physblk_exp < sdebug_opt_xferlen_exp) + gran = 1 << sdebug_opt_xferlen_exp; + else + gran = 1 << sdebug_physblk_exp; put_unaligned_be16(gran, arr + 2); /* Maximum Transfer Length */ @@ -4161,6 +4167,7 @@ module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR); module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO); module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR); module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO); +module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO); module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR); module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR); module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO); @@ -4212,6 +4219,7 @@ MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)"); MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)"); MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); +MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])"); -- cgit v1.2.3-59-g8ed1b From d3e19175004583c65361e96da424175b99c3e715 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:15 -0800 Subject: scsi: aacraid: Remove duplicate irq management code Removed duplicate code that for acquiring and releasing irqs Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/linit.c | 58 +++----------------------------------------- 1 file changed, 3 insertions(+), 55 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 3ecbf20ca29f..fd26a2d95239 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1327,35 +1327,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) static void aac_release_resources(struct aac_dev *aac) { - int i; - aac_adapter_disable_int(aac); - if (aac->pdev->device == PMC_DEVICE_S6 || - aac->pdev->device == PMC_DEVICE_S7 || - aac->pdev->device == PMC_DEVICE_S8 || - aac->pdev->device == PMC_DEVICE_S9) { - if (aac->max_msix > 1) { - for (i = 0; i < aac->max_msix; i++) - free_irq(pci_irq_vector(aac->pdev, i), - &(aac->aac_msix[i])); - } else { - free_irq(aac->pdev->irq, &(aac->aac_msix[0])); - } - } else { - free_irq(aac->pdev->irq, aac); - } - if (aac->msi) - pci_disable_msi(aac->pdev); - else if (aac->max_msix > 1) - pci_disable_msix(aac->pdev); - + aac_free_irq(aac); } static int aac_acquire_resources(struct aac_dev *dev) { - int i, j; - int instance = dev->id; - const char *name = dev->name; unsigned long status; /* * First clear out all interrupts. Then enable the one's that we @@ -1377,37 +1354,8 @@ static int aac_acquire_resources(struct aac_dev *dev) if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_ENABLE_MSIX); - if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) { - for (i = 0; i < dev->max_msix; i++) { - dev->aac_msix[i].vector_no = i; - dev->aac_msix[i].dev = dev; - - if (request_irq(pci_irq_vector(dev->pdev, i), - dev->a_ops.adapter_intr, - 0, "aacraid", &(dev->aac_msix[i]))) { - printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n", - name, instance, i); - for (j = 0 ; j < i ; j++) - free_irq(pci_irq_vector(dev->pdev, j), - &(dev->aac_msix[j])); - pci_disable_msix(dev->pdev); - goto error_iounmap; - } - } - } else { - dev->aac_msix[0].vector_no = 0; - dev->aac_msix[0].dev = dev; - - if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED, "aacraid", - &(dev->aac_msix[0])) < 0) { - if (dev->msi) - pci_disable_msi(dev->pdev); - printk(KERN_ERR "%s%d: Interrupt unavailable.\n", - name, instance); - goto error_iounmap; - } - } + if (aac_acquire_irq(dev)) + goto error_iounmap; aac_adapter_enable_int(dev); -- cgit v1.2.3-59-g8ed1b From 24b043cb6150c21ac02d8edc5c2ba08a0d589ea0 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:16 -0800 Subject: scsi: aacraid: Added aacraid.h include guard Added aacraid.h include guard Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index f059c14efa0c..01b457b91159 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1,3 +1,5 @@ +#ifndef _AACRAID_H_ +#define _AACRAID_H_ #ifndef dprintk # define dprintk(x) #endif @@ -2194,3 +2196,4 @@ extern int aac_commit; extern int update_interval; extern int check_interval; extern int aac_check_reset; +#endif -- cgit v1.2.3-59-g8ed1b From d1ef4da8487fa698ab619a14b8ab6394bb5156ca Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:17 -0800 Subject: scsi: aacraid: added support for init_struct_8 This patch lays the groundwork for supporting the new HBA-1000 controller family.A new INIT structure INIT_STRUCT_8 has been added which allows for a variable size for MSI-x vectors among other things, and is used for both Series-8, HBA-1000 and SmartIOC-2000. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 8 +- drivers/scsi/aacraid/aacraid.h | 100 +++++++++++------ drivers/scsi/aacraid/comminit.c | 239 +++++++++++++++++++++++++--------------- drivers/scsi/aacraid/commsup.c | 13 ++- drivers/scsi/aacraid/linit.c | 2 +- drivers/scsi/aacraid/rkt.c | 2 +- drivers/scsi/aacraid/rx.c | 4 +- drivers/scsi/aacraid/sa.c | 4 +- drivers/scsi/aacraid/src.c | 27 +++-- 9 files changed, 261 insertions(+), 138 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 1ee7c654f7b8..2ad7aeacd092 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1144,7 +1144,9 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 long ret; aac_fib_init(fib); - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) { + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && + !dev->sync_mode) { struct aac_raw_io2 *readcmd2; readcmd2 = (struct aac_raw_io2 *) fib_data(fib); memset(readcmd2, 0, sizeof(struct aac_raw_io2)); @@ -1270,7 +1272,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u long ret; aac_fib_init(fib); - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) { + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && + !dev->sync_mode) { struct aac_raw_io2 *writecmd2; writecmd2 = (struct aac_raw_io2 *) fib_data(fib); memset(writecmd2, 0, sizeof(struct aac_raw_io2)); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 01b457b91159..f712d8d6c715 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -81,6 +81,11 @@ enum { #define AAC_DEBUG_INSTRUMENT_AIF_DELETE +/* + * Interrupts + */ +#define AAC_MAX_HRRQ 64 + /* * These macros convert from physical channels to virtual channels */ @@ -491,41 +496,64 @@ enum fib_xfer_state { #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science #define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ #define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */ +#define ADAPTER_INIT_STRUCT_REVISION_8 8 // Thor -struct aac_init +union aac_init { - __le32 InitStructRevision; - __le32 Sa_MSIXVectors; - __le32 fsrev; - __le32 CommHeaderAddress; - __le32 FastIoCommAreaAddress; - __le32 AdapterFibsPhysicalAddress; - __le32 
AdapterFibsVirtualAddress; - __le32 AdapterFibsSize; - __le32 AdapterFibAlign; - __le32 printfbuf; - __le32 printfbufsiz; - __le32 HostPhysMemPages; /* number of 4k pages of host - physical memory */ - __le32 HostElapsedSeconds; /* number of seconds since 1970. */ - /* - * ADAPTER_INIT_STRUCT_REVISION_4 begins here - */ - __le32 InitFlags; /* flags for supported features */ + struct _r7 { + __le32 init_struct_revision; + __le32 no_of_msix_vectors; + __le32 fsrev; + __le32 comm_header_address; + __le32 fast_io_comm_area_address; + __le32 adapter_fibs_physical_address; + __le32 adapter_fibs_virtual_address; + __le32 adapter_fibs_size; + __le32 adapter_fib_align; + __le32 printfbuf; + __le32 printfbufsiz; + /* number of 4k pages of host phys. mem. */ + __le32 host_phys_mem_pages; + /* number of seconds since 1970. */ + __le32 host_elapsed_seconds; + /* ADAPTER_INIT_STRUCT_REVISION_4 begins here */ + __le32 init_flags; /* flags for supported features */ #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 #define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040 #define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080 #define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100 - __le32 MaxIoCommands; /* max outstanding commands */ - __le32 MaxIoSize; /* largest I/O command */ - __le32 MaxFibSize; /* largest FIB to adapter */ - /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ - __le32 MaxNumAif; /* max number of aif */ - /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ - __le32 HostRRQ_AddrLow; - __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */ +#define INITFLAGS_DRIVER_SUPPORTS_HBA_MODE 0x00000400 + __le32 max_io_commands; /* max outstanding commands */ + __le32 max_io_size; /* largest I/O command */ + __le32 max_fib_size; /* largest FIB to adapter */ + /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ + __le32 max_num_aif; /* max number of aif */ + /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ + /* Host RRQ (response queue) for SRC */ + __le32 host_rrq_addr_low; + __le32 host_rrq_addr_high; + } r7; + struct _r8 { + /* ADAPTER_INIT_STRUCT_REVISION_8 */ + __le32 init_struct_revision; + __le32 rr_queue_count; + __le32 host_elapsed_seconds; /* number of secs since 1970. 
*/ + __le32 init_flags; + __le32 max_io_size; /* largest I/O command */ + __le32 max_num_aif; /* max number of aif */ + __le32 reserved1; + __le32 reserved2; + struct _rrq { + __le32 host_addr_low; + __le32 host_addr_high; + __le16 msix_id; + __le16 element_count; + __le16 comp_thresh; + __le16 unused; + } rrq[1]; /* up to 64 RRQ addresses */ + } r8; }; enum aac_log_level { @@ -729,6 +757,7 @@ struct sa_registers { #define SA_INIT_NUM_MSIXVECTORS 1 +#define SA_MINIPORT_REVISION SA_INIT_NUM_MSIXVECTORS #define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) #define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) @@ -1106,6 +1135,12 @@ struct aac_bus_info_response { #define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30) #define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31) +#define AAC_COMM_PRODUCER 0 +#define AAC_COMM_MESSAGE 1 +#define AAC_COMM_MESSAGE_TYPE1 3 +#define AAC_COMM_MESSAGE_TYPE2 4 +#define AAC_COMM_MESSAGE_TYPE3 5 + /* MSIX context */ struct aac_msix_ctx { int vector_no; @@ -1159,8 +1194,11 @@ struct aac_dev resource_size_t base_size, dbg_size; /* Size of * mapped in region */ - - struct aac_init *init; /* Holds initialization info to communicate with adapter */ + /* + * Holds initialization info + * to communicate with adapter + */ + union aac_init *init; dma_addr_t init_pa; /* Holds physical address of the init struct */ u32 *host_rrq; /* response queue @@ -1229,10 +1267,6 @@ struct aac_dev u8 needs_dac; u8 raid_scsi_mode; u8 comm_interface; -# define AAC_COMM_PRODUCER 0 -# define AAC_COMM_MESSAGE 1 -# define AAC_COMM_MESSAGE_TYPE1 3 -# define AAC_COMM_MESSAGE_TYPE2 4 u8 raw_io_interface; u8 raw_io_64; u8 printf_enabled; diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 4f56b1003cc7..480ff01caf73 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -68,104 +68,167 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co unsigned long size, align; const unsigned long fibsize = dev->max_fib_size; const unsigned long printfbufsiz = 256; - unsigned long host_rrq_size = 0; - struct aac_init *init; + unsigned long host_rrq_size, aac_init_size; + union aac_init *init; dma_addr_t phys; unsigned long aac_max_hostphysmempages; - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || - dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) - host_rrq_size = (dev->scsi_host_ptr->can_queue - + AAC_NUM_MGT_FIB) * sizeof(u32); - size = fibsize + sizeof(struct aac_init) + commsize + - commalign + printfbufsiz + host_rrq_size; - + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) + host_rrq_size = + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + * sizeof(u32); + else + host_rrq_size = 0; + + aac_init_size = sizeof(union aac_init); + size = fibsize + aac_init_size + commsize + commalign + + printfbufsiz + host_rrq_size; + base = pci_alloc_consistent(dev->pdev, size, &phys); - if(base == NULL) - { + if (base == NULL) { printk(KERN_ERR "aacraid: unable to create mapping.\n"); return 0; } + dev->comm_addr = (void *)base; dev->comm_phys = phys; dev->comm_size = size; - - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || - dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { + + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) { dev->host_rrq = (u32 *)(base + fibsize); dev->host_rrq_pa = phys + 
fibsize; memset(dev->host_rrq, 0, host_rrq_size); } - dev->init = (struct aac_init *)(base + fibsize + host_rrq_size); + dev->init = (union aac_init *)(base + fibsize + host_rrq_size); dev->init_pa = phys + fibsize + host_rrq_size; init = dev->init; - init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); - if (dev->max_fib_size != sizeof(struct hw_fib)) - init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); - init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS); - init->fsrev = cpu_to_le32(dev->fsrev); + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { + int i; + u64 addr; + + init->r8.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8); + init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_DRIVER_USES_UTC_TIME | + INITFLAGS_DRIVER_SUPPORTS_PM); + init->r8.init_flags |= + cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE); + init->r8.rr_queue_count = cpu_to_le32(dev->max_msix); + init->r8.max_io_size = + cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); + init->r8.max_num_aif = init->r8.reserved1 = + init->r8.reserved2 = 0; + + for (i = 0; i < dev->max_msix; i++) { + addr = (u64)dev->host_rrq_pa + dev->vector_cap * i * + sizeof(u32); + init->r8.rrq[i].host_addr_high = cpu_to_le32( + upper_32_bits(addr)); + init->r8.rrq[i].host_addr_low = cpu_to_le32( + lower_32_bits(addr)); + init->r8.rrq[i].msix_id = i; + init->r8.rrq[i].element_count = cpu_to_le16( + (u16)dev->vector_cap); + init->r8.rrq[i].comp_thresh = + init->r8.rrq[i].unused = 0; + } - /* - * Adapter Fibs are the first thing allocated so that they - * start page aligned - */ - dev->aif_base_va = (struct hw_fib *)base; - - init->AdapterFibsVirtualAddress = 0; - init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys); - init->AdapterFibsSize = cpu_to_le32(fibsize); - init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib)); - /* - * number of 4k pages of host physical memory. The aacraid fw needs - * this number to be less than 4gb worth of pages. New firmware doesn't - * have any issues with the mapping system, but older Firmware did, and - * had *troubles* dealing with the math overloading past 32 bits, thus - * we must limit this field. 
- */ - aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12; - if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES) - init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages); - else - init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); - - init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | - INITFLAGS_DRIVER_SUPPORTS_PM); - init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); - init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); - init->MaxFibSize = cpu_to_le32(dev->max_fib_size); - init->MaxNumAif = cpu_to_le32(dev->max_num_aif); - - if (dev->comm_interface == AAC_COMM_MESSAGE) { - init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); - dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); - } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { - init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); - init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | - INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); - init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); - init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); - dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n")); - } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { - init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7); - init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | - INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); - init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); - init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); - /* number of MSI-X */ - init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); - dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n")); + pr_warn("aacraid: Comm Interface type3 enabled\n"); + } else { + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); + if (dev->max_fib_size != sizeof(struct hw_fib)) + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); + init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION); + init->r7.fsrev = cpu_to_le32(dev->fsrev); + + /* + * Adapter Fibs are the first thing allocated so that they + * start page aligned + */ + dev->aif_base_va = (struct hw_fib *)base; + + init->r7.adapter_fibs_virtual_address = 0; + init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys); + init->r7.adapter_fibs_size = cpu_to_le32(fibsize); + init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib)); + + /* + * number of 4k pages of host physical memory. The aacraid fw + * needs this number to be less than 4gb worth of pages. New + * firmware doesn't have any issues with the mapping system, but + * older Firmware did, and had *troubles* dealing with the math + * overloading past 32 bits, thus we must limit this field. 
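+ * dma_get_required_mask() >> 12 expresses the addressable range in
+ * 4k pages; the result is clamped to AAC_MAX_HOSTPHYSMEMPAGES below.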
+ */ + aac_max_hostphysmempages = + dma_get_required_mask(&dev->pdev->dev) >> 12; + if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES) + init->r7.host_phys_mem_pages = + cpu_to_le32(aac_max_hostphysmempages); + else + init->r7.host_phys_mem_pages = + cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); + + init->r7.init_flags = + cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | + INITFLAGS_DRIVER_SUPPORTS_PM); + init->r7.max_io_commands = + cpu_to_le32(dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB); + init->r7.max_io_size = + cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); + init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size); + init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif); + + if (dev->comm_interface == AAC_COMM_MESSAGE) { + init->r7.init_flags |= + cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); + pr_warn("aacraid: Comm Interface enabled\n"); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); + init->r7.init_flags |= + cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | + INITFLAGS_FAST_JBOD_SUPPORTED); + init->r7.host_rrq_addr_high = + cpu_to_le32(upper_32_bits(dev->host_rrq_pa)); + init->r7.host_rrq_addr_low = + cpu_to_le32(lower_32_bits(dev->host_rrq_pa)); + pr_warn("aacraid: Comm Interface type1 enabled\n"); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { + init->r7.init_struct_revision = + cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7); + init->r7.init_flags |= + cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | + INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | + INITFLAGS_FAST_JBOD_SUPPORTED); + init->r7.host_rrq_addr_high = + cpu_to_le32(upper_32_bits(dev->host_rrq_pa)); + init->r7.host_rrq_addr_low = + cpu_to_le32(lower_32_bits(dev->host_rrq_pa)); + init->r7.no_of_msix_vectors = + cpu_to_le32(dev->max_msix); + /* must be the COMM_PREFERRED_SETTINGS values */ + pr_warn("aacraid: Comm Interface type2 enabled\n"); + } } /* * Increment the base address by the amount already used */ - base = base + fibsize + host_rrq_size + sizeof(struct aac_init); + base = base + fibsize + host_rrq_size + aac_init_size; phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + - sizeof(struct aac_init)); + aac_init_size); /* * Align the beginning of Headers to commalign @@ -177,7 +240,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co * Fill in addresses of the Comm Area Headers and Queues */ *commaddr = base; - init->CommHeaderAddress = cpu_to_le32((u32)phys); + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) + init->r7.comm_header_address = cpu_to_le32((u32)phys); /* * Increment the base address by the size of the CommArea */ @@ -187,12 +251,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co * Place the Printf buffer area after the Fast I/O comm area. 
*/ dev->printfbuf = (void *)base; - init->printfbuf = cpu_to_le32(phys); - init->printfbufsiz = cpu_to_le32(printfbufsiz); + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) { + init->r7.printfbuf = cpu_to_le32(phys); + init->r7.printfbufsiz = cpu_to_le32(printfbufsiz); + } memset(base, 0, printfbufsiz); return 1; } - + static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) { atomic_set(&q->numpending, 0); @@ -436,26 +502,27 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, - status+0, status+1, status+2, status+3, NULL)) && - (status[0] == 0x00000001)) { + status+0, status+1, status+2, status+3, status+4)) && + (status[0] == 0x00000001)) { dev->doorbell_mask = status[3]; - if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) + if (status[1] & AAC_OPT_NEW_COMM_64) dev->raw_io_64 = 1; dev->sync_mode = aac_sync_mode; if (dev->a_ops.adapter_comm && - (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) { + (status[1] & AAC_OPT_NEW_COMM)) { dev->comm_interface = AAC_COMM_MESSAGE; dev->raw_io_interface = 1; - if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) { + if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) { /* driver supports TYPE1 (Tupelo) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; - } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) { - /* driver supports TYPE2 (Denali) */ + } else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) { + /* driver supports TYPE2 (Denali, Yosemite) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; - } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) || - (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) { - /* driver doesn't TYPE3 and TYPE4 */ - /* switch to sync. mode */ + } else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) { + /* driver supports TYPE3 (Yosemite, Thor) */ + dev->comm_interface = AAC_COMM_MESSAGE_TYPE3; + } else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) { + /* not supported TYPE - switch to sync. mode */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; dev->sync_mode = 1; } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 9e7551fe4b19..d18ed9ad45b3 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -129,11 +129,14 @@ int aac_fib_setup(struct aac_dev * dev) struct hw_fib *hw_fib; dma_addr_t hw_fib_pa; int i; + u32 max_cmds; while (((i = fib_map_alloc(dev)) == -ENOMEM) && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) { - dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1); - dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB; + max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1; + dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB; + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) + dev->init->r7.max_io_commands = cpu_to_le32(max_cmds); } if (i<0) return -ENOMEM; @@ -761,7 +764,8 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) unsigned long qflags; if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || - dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { + dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { kfree(hw_fib); return 0; } @@ -1817,7 +1821,8 @@ int aac_command_thread(void *data) * and pre-allocate a set of fibs outside the * lock. 
*/ - num = le32_to_cpu(dev->init->AdapterFibsSize) + num = le32_to_cpu(dev->init-> + r7.adapter_fibs_size) / sizeof(struct hw_fib); /* some extra */ spin_lock_irqsave(&dev->fib_lock, flagv); entry = dev->fib_list.next; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index fd26a2d95239..0f3de1ee0e62 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1368,7 +1368,7 @@ static int aac_acquire_resources(struct aac_dev *dev) /* After EEH recovery or suspend resume, max_msix count * may change, therfore updating in init as well. */ - dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); + dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix); aac_adapter_start(dev); } return 0; diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c index 7d8013feedde..6dff8ada4935 100644 --- a/drivers/scsi/aacraid/rkt.c +++ b/drivers/scsi/aacraid/rkt.c @@ -60,7 +60,7 @@ static int aac_rkt_select_comm(struct aac_dev *dev, int comm) * case warrants this half baked, but convenient, check here. */ if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) { - dev->init->MaxIoCommands = + dev->init->r7.max_io_commands = cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB); dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT; } diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index ac1638069335..67213b95b8b6 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -315,10 +315,10 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event) static void aac_rx_start_adapter(struct aac_dev *dev) { - struct aac_init *init; + union aac_init *init; init = dev->init; - init->HostElapsedSeconds = cpu_to_le32(get_seconds()); + init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds()); // We can only use a 32 bit address here rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 869aea23c041..b8538e017e10 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -245,12 +245,12 @@ static void aac_sa_interrupt_adapter (struct aac_dev *dev) static void aac_sa_start_adapter(struct aac_dev *dev) { - struct aac_init *init; + union aac_init *init; /* * Fill in the remaining pieces of the init. 
*/ init = dev->init; - init->HostElapsedSeconds = cpu_to_le32(get_seconds()); + init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds()); /* We can only use a 32 bit address here */ sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 0c453880f214..a5f7a6f2f3d7 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -384,7 +384,7 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) static void aac_src_start_adapter(struct aac_dev *dev) { - struct aac_init *init; + union aac_init *init; int i; /* reset host_rrq_idx first */ @@ -395,11 +395,22 @@ static void aac_src_start_adapter(struct aac_dev *dev) dev->fibs_pushed_no = 0; init = dev->init; - init->HostElapsedSeconds = cpu_to_le32(get_seconds()); + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { + init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds()); + src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, + (u32)(ulong)dev->init_pa, + (u32)((ulong)dev->init_pa>>32), + sizeof(struct _r8) + + (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), + 0, 0, 0, NULL, NULL, NULL, NULL, NULL); + } else { + init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds()); + // We can only use a 32 bit address here + src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, + (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); + } - /* We can only use a 32 bit address here */ - src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, - 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } /** @@ -467,7 +478,8 @@ static int aac_src_deliver_message(struct fib *fib) atomic_inc(&dev->rrq_outstanding[vector_no]); - if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) { /* Calculate the amount to the fibsize bits */ fibsize = (hdr_size + 127) / 128 - 1; if (fibsize > (ALIGN32 - 1)) @@ -897,7 +909,8 @@ int aac_srcv_init(struct aac_dev *dev) if (aac_init_adapter(dev) == NULL) goto error_iounmap; - if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) + if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) && + (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)) goto error_iounmap; if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_ENABLE_MSIX); -- cgit v1.2.3-59-g8ed1b From d503e2fde2b6cea168cf1151b2ab52df3f47be88 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:18 -0800 Subject: scsi: aacraid: Added sa firmware support sa_firmware adds the capability to differentiate the new SmartIOC family of adapters from the series 8 and below. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 4 ++ drivers/scsi/aacraid/comminit.c | 98 +++++++++++++++++------------------------ drivers/scsi/aacraid/linit.c | 2 +- 3 files changed, 45 insertions(+), 59 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index f712d8d6c715..1069c010f06f 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1130,6 +1130,7 @@ struct aac_bus_info_response { #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) +#define AAC_OPT_EXTENDED cpu_to_le32(1<<23) #define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) #define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29) #define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30) @@ -1141,6 +1142,8 @@ struct aac_bus_info_response { #define AAC_COMM_MESSAGE_TYPE2 4 #define AAC_COMM_MESSAGE_TYPE3 5 +#define AAC_EXTOPT_SA_FIRMWARE cpu_to_le32(1<<1) + /* MSIX context */ struct aac_msix_ctx { int vector_no; @@ -1272,6 +1275,7 @@ struct aac_dev u8 printf_enabled; u8 in_reset; u8 msi; + u8 sa_firmware; int management_fib_count; spinlock_t manage_lock; spinlock_t sync_lock; diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 480ff01caf73..5aad018bd4a1 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -75,14 +75,22 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) || (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || - (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) + (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && + !dev->sa_firmware)) { host_rrq_size = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) * sizeof(u32); - else + aac_init_size = sizeof(union aac_init); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && + dev->sa_firmware) { + host_rrq_size = (dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX; + aac_init_size = sizeof(union aac_init) + + (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq); + } else { host_rrq_size = 0; - - aac_init_size = sizeof(union aac_init); + aac_init_size = sizeof(union aac_init); + } size = fibsize + aac_init_size + commsize + commalign + printfbufsiz + host_rrq_size; @@ -466,9 +474,13 @@ void aac_define_int_mode(struct aac_dev *dev) if (dev->max_msix > msi_count) dev->max_msix = msi_count; } - dev->vector_cap = - (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) / - msi_count; + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware) + dev->vector_cap = dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB; + else + dev->vector_cap = (dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB) / msi_count; + } struct aac_dev *aac_init_adapter(struct aac_dev *dev) { @@ -527,6 +539,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->sync_mode = 1; } } + if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) && + (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE))) + dev->sa_firmware = 1; + else + dev->sa_firmware = 0; + if ((dev->comm_interface == AAC_COMM_MESSAGE) && (status[2] > dev->base_size)) { aac_adapter_ioremap(dev, 0); @@ -563,61 +581,25 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->sg_tablesize = status[2] & 0xFFFF; if (dev->pdev->device == PMC_DEVICE_S7 || dev->pdev->device == PMC_DEVICE_S8 || - dev->pdev->device == PMC_DEVICE_S9) - host->can_queue = ((status[3] >> 16) ? 
(status[3] >> 16) : - (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB; - else - host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; + dev->pdev->device == PMC_DEVICE_S9) { + if (host->can_queue > (status[3] >> 16) - + AAC_NUM_MGT_FIB) + host->can_queue = (status[3] >> 16) - + AAC_NUM_MGT_FIB; + } else if (host->can_queue > (status[3] & 0xFFFF) - + AAC_NUM_MGT_FIB) + host->can_queue = (status[3] & 0xFFFF) - + AAC_NUM_MGT_FIB; + dev->max_num_aif = status[4] & 0xFFFF; - /* - * NOTE: - * All these overrides are based on a fixed internal - * knowledge and understanding of existing adapters, - * acbsize should be set with caution. - */ - if (acbsize == 512) { - host->max_sectors = AAC_MAX_32BIT_SGBCOUNT; - dev->max_fib_size = 512; - dev->sg_tablesize = host->sg_tablesize - = (512 - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgentry)) - / sizeof(struct sgentry); - host->can_queue = AAC_NUM_IO_FIB; - } else if (acbsize == 2048) { - host->max_sectors = 512; - dev->max_fib_size = 2048; - host->sg_tablesize = 65; - dev->sg_tablesize = 81; - host->can_queue = 512 - AAC_NUM_MGT_FIB; - } else if (acbsize == 4096) { - host->max_sectors = 1024; - dev->max_fib_size = 4096; - host->sg_tablesize = 129; - dev->sg_tablesize = 166; - host->can_queue = 256 - AAC_NUM_MGT_FIB; - } else if (acbsize == 8192) { - host->max_sectors = 2048; - dev->max_fib_size = 8192; - host->sg_tablesize = 257; - dev->sg_tablesize = 337; - host->can_queue = 128 - AAC_NUM_MGT_FIB; - } else if (acbsize > 0) { - printk("Illegal acbsize=%d ignored\n", acbsize); - } } - { - - if (numacb > 0) { - if (numacb < host->can_queue) - host->can_queue = numacb; - else - printk("numacb=%d ignored\n", numacb); - } + if (numacb > 0) { + if (numacb < host->can_queue) + host->can_queue = numacb; + else + pr_warn("numacb=%d ignored\n", numacb); } - if (host->can_queue > AAC_NUM_IO_FIB) - host->can_queue = AAC_NUM_IO_FIB; - if (dev->pdev->device == PMC_DEVICE_S6 || dev->pdev->device == PMC_DEVICE_S7 || dev->pdev->device == PMC_DEVICE_S8 || diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 0f3de1ee0e62..1912e7b4a796 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1285,7 +1285,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else shost->this_id = shost->max_id; - if (aac_drivers[index].quirks & AAC_QUIRK_SRC) + if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC) aac_intr_normal(aac, 0, 2, 0, NULL); /* -- cgit v1.2.3-59-g8ed1b From c83b11e31cf9151974dd78e97af31c0fd07927cb Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:19 -0800 Subject: scsi: aacraid: Retrieve and update the device types This patch adds support to retrieve the type of each adapter connected device. Applicable to HBA1000 and SmartIOC2000 products Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aachba.c | 144 ++++++++++++++++++++++++++++++++++++++++- drivers/scsi/aacraid/aacraid.h | 60 ++++++++++++++++- 2 files changed, 202 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 2ad7aeacd092..bd5c80f1b76d 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1509,11 +1509,141 @@ static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd) return aac_scsi_32(fib, cmd); } +/** + * aac_update hba_map()- update current hba map with data from FW + * @dev: aac_dev structure + * @phys_luns: FW information from report phys luns + * + * Update our hba map with the information gathered from the FW + */ +void aac_update_hba_map(struct aac_dev *dev, + struct aac_ciss_phys_luns_resp *phys_luns) +{ + /* ok and extended reporting */ + u32 lun_count, nexus; + u32 i, bus, target; + u8 expose_flag, attribs; + u8 devtype; + + lun_count = ((phys_luns->list_length[0] << 24) + + (phys_luns->list_length[1] << 16) + + (phys_luns->list_length[2] << 8) + + (phys_luns->list_length[3])) / 24; + + for (i = 0; i < lun_count; ++i) { + + bus = phys_luns->lun[i].level2[1] & 0x3f; + target = phys_luns->lun[i].level2[0]; + expose_flag = phys_luns->lun[i].bus >> 6; + attribs = phys_luns->lun[i].node_ident[9]; + nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]); + + if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS) + continue; + + dev->hba_map[bus][target].expose = expose_flag; + + if (expose_flag != 0) { + devtype = AAC_DEVTYPE_RAID_MEMBER; + goto update_devtype; + } + + if (nexus != 0 && (attribs & 8)) { + devtype = AAC_DEVTYPE_NATIVE_RAW; + dev->hba_map[bus][target].rmw_nexus = + nexus; + } else + devtype = AAC_DEVTYPE_ARC_RAW; + + if (devtype != AAC_DEVTYPE_NATIVE_RAW) + goto update_devtype; + +update_devtype: + dev->hba_map[bus][target].devtype = devtype; + } +} + +/** + * aac_report_phys_luns() Process topology change + * @dev: aac_dev structure + * @fibptr: fib pointer + * + * Execute a CISS REPORT PHYS LUNS and process the results into + * the current hba_map. 
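+ * The response buffer is handed to aac_update_hba_map(), which walks the
+ * big-endian list_length field (one 24-byte struct _ciss_lun per entry)
+ * and records the exposure flag, device type and nexus of each reported
+ * bus/target slot in dev->hba_map.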
+ */ +int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr) +{ + int fibsize, datasize; + struct aac_ciss_phys_luns_resp *phys_luns; + struct aac_srb *srbcmd; + struct sgmap64 *sg64; + dma_addr_t addr; + u32 vbus, vid; + u32 rcode = 0; + + /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ + fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + + sizeof(struct sgentry64); + datasize = sizeof(struct aac_ciss_phys_luns_resp) + + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun); + + phys_luns = (struct aac_ciss_phys_luns_resp *) pci_alloc_consistent( + dev->pdev, datasize, &addr); + + if (phys_luns == NULL) { + rcode = -ENOMEM; + goto err_out; + } + + vbus = (u32) le16_to_cpu( + dev->supplement_adapter_info.VirtDeviceBus); + vid = (u32) le16_to_cpu( + dev->supplement_adapter_info.VirtDeviceTarget); + + aac_fib_init(fibptr); + + srbcmd = (struct aac_srb *) fib_data(fibptr); + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); + srbcmd->channel = cpu_to_le32(vbus); + srbcmd->id = cpu_to_le32(vid); + srbcmd->lun = 0; + srbcmd->flags = cpu_to_le32(SRB_DataIn); + srbcmd->timeout = cpu_to_le32(10); + srbcmd->retry_limit = 0; + srbcmd->cdb_size = cpu_to_le32(12); + srbcmd->count = cpu_to_le32(datasize); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS; + srbcmd->cdb[1] = 2; /* extended reporting */ + srbcmd->cdb[8] = (u8)(datasize >> 8); + srbcmd->cdb[9] = (u8)(datasize); + + sg64 = (struct sgmap64 *) &srbcmd->sg; + sg64->count = cpu_to_le32(1); + sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr)); + sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr)); + sg64->sg[0].count = cpu_to_le32(datasize); + + rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, + FsaNormal, 1, 1, NULL, NULL); + + /* analyse data */ + if (rcode >= 0 && phys_luns->resp_flag == 2) { + /* ok and extended reporting */ + aac_update_hba_map(dev, phys_luns); + } + + pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr); +err_out: + return rcode; +} + int aac_get_adapter_info(struct aac_dev* dev) { struct fib* fibptr; int rcode; - u32 tmp; + u32 tmp, bus, target; struct aac_adapter_info *info; struct aac_bus_info *command; struct aac_bus_info_response *bus_info; @@ -1544,6 +1674,7 @@ int aac_get_adapter_info(struct aac_dev* dev) } memcpy(&dev->adapter_info, info, sizeof(*info)); + dev->supplement_adapter_info.VirtDeviceBus = 0xffff; if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { struct aac_supplement_adapter_info * sinfo; @@ -1571,6 +1702,11 @@ int aac_get_adapter_info(struct aac_dev* dev) } + /* reset all previous mapped devices (i.e. for init. 
after IOP_RESET) */ + for (bus = 0; bus < AAC_MAX_BUSES; bus++) { + for (target = 0; target < AAC_MAX_TARGETS; target++) + dev->hba_map[bus][target].devtype = 0; + } /* * GetBusInfo @@ -1603,6 +1739,12 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount); } + if (!dev->sync_mode && dev->sa_firmware && + dev->supplement_adapter_info.VirtDeviceBus != 0xffff) { + /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ + rcode = aac_report_phys_luns(dev, fibptr); + } + if (!dev->in_reset) { char buffer[16]; tmp = le32_to_cpu(dev->adapter_info.kernelrev); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 1069c010f06f..00df610993a3 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -81,6 +81,27 @@ enum { #define AAC_DEBUG_INSTRUMENT_AIF_DELETE +#define AAC_MAX_NATIVE_TARGETS 1024 +/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */ +#define AAC_MAX_BUSES 5 +#define AAC_MAX_TARGETS 256 +#define AAC_MAX_NATIVE_SIZE 2048 + +#define CISS_REPORT_PHYSICAL_LUNS 0xc3 + +struct aac_ciss_phys_luns_resp { + u8 list_length[4]; /* LUN list length (N-7, big endian) */ + u8 resp_flag; /* extended response_flag */ + u8 reserved[3]; + struct _ciss_lun { + u8 tid[3]; /* Target ID */ + u8 bus; /* Bus, flag (bits 6,7) */ + u8 level3[2]; + u8 level2[2]; + u8 node_ident[16]; /* phys. node identifier */ + } lun[1]; /* List of phys. devices */ +}; + /* * Interrupts */ @@ -993,6 +1014,20 @@ struct fib { dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ }; +#define AAC_DEVTYPE_RAID_MEMBER 1 +#define AAC_DEVTYPE_ARC_RAW 2 +#define AAC_DEVTYPE_NATIVE_RAW 3 +#define AAC_EXPOSE_DISK 0 +#define AAC_HIDE_DISK 3 + +struct aac_hba_map_info { + __le32 rmw_nexus; /* nexus for native HBA devices */ + u8 devtype; /* device type */ + u8 reset_state; /* 0 - no reset, 1..x - */ + /* after xth TM LUN reset */ + u8 expose; /*checks if to expose or not*/ +}; + /* * Adapter Information Block * @@ -1056,7 +1091,28 @@ struct aac_supplement_adapter_info /* StructExpansion == 1 */ __le32 FeatureBits3; __le32 SupportedPerformanceModes; - __le32 ReservedForFutureGrowth[80]; + u8 HostBusType; /* uses HOST_BUS_TYPE_xxx defines */ + u8 HostBusWidth; /* actual width in bits or links */ + u16 HostBusSpeed; /* actual bus speed/link rate in MHz */ + u8 MaxRRCDrives; /* max. number of ITP-RRC drives/pool */ + u8 MaxDiskXtasks; /* max. possible num of DiskX Tasks */ + + u8 CpldVerLoaded; + u8 CpldVerInFlash; + + __le64 MaxRRCCapacity; + __le32 CompiledMaxHistLogLevel; + u8 CustomBoardName[12]; + u16 SupportedCntlrMode; /* identify supported controller mode */ + u16 ReservedForFuture16; + __le32 SupportedOptions3; /* reserved for future options */ + + __le16 VirtDeviceBus; /* virt. 
SCSI device for Thor */ + __le16 VirtDeviceTarget; + __le16 VirtDeviceLUN; + __le16 Unused; + __le32 ReservedForFutureGrowth[68]; + }; #define AAC_FEATURE_FALCON cpu_to_le32(0x00000010) #define AAC_FEATURE_JBOD cpu_to_le32(0x08000000) @@ -1287,6 +1343,7 @@ struct aac_dev u32 vector_cap; /* MSI-X vector capab.*/ int msi_enabled; /* MSI/MSI-X enabled */ struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ + struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS]; u8 adapter_shutdown; u32 handle_pci_error; }; @@ -2171,6 +2228,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) int aac_acquire_irq(struct aac_dev *dev); void aac_free_irq(struct aac_dev *dev); +int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr); const char *aac_driverinfo(struct Scsi_Host *); void aac_fib_vector_assign(struct aac_dev *dev); struct fib *aac_fib_alloc(struct aac_dev *dev); -- cgit v1.2.3-59-g8ed1b From c4e2fbca374b9797276061840dc95708adf512ed Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:20 -0800 Subject: scsi: aacraid: Reworked scsi command submission path Moved the READ and WRITE switch cases to the top. Added a default case to the switch case and replaced duplicate scsi result value with a macro. The idea is that since most of scsi commands we care about performance wise are read or write, we need to process them first. Internally the compiler (GCC) converts a switch case into either a jump table or a bunch of if else conditions, so placing the often used read, write cases at the top is an effort in optimization. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 172 +++++++++++++++++------------------------- 1 file changed, 70 insertions(+), 102 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index bd5c80f1b76d..8d8e6c2f9691 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -106,6 +106,8 @@ #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00 #define ASENCODE_OVERLAPPED_COMMAND 0x00 +#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD) + #define BYTE0(x) (unsigned char)(x) #define BYTE1(x) (unsigned char)((x) >> 8) #define BYTE2(x) (unsigned char)((x) >> 16) @@ -2476,8 +2478,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) if((cid >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)) { scsicmd->result = DID_NO_CONNECT << 16; - scsicmd->scsi_done(scsicmd); - return 0; + goto scsi_done_ret; } /* @@ -2512,8 +2513,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) return aac_send_srb_fib(scsicmd); } else { scsicmd->result = DID_NO_CONNECT << 16; - scsicmd->scsi_done(scsicmd); - return 0; + goto scsi_done_ret; } } } @@ -2531,13 +2531,34 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); - scsicmd->scsi_done(scsicmd); - return 0; + goto scsi_done_ret; } - - /* Handle commands here that don't really require going out to the adapter */ switch (scsicmd->cmnd[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + if (dev->in_reset) + return -1; + return aac_read(scsicmd); + + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + if (dev->in_reset) + return -1; + return aac_write(scsicmd); + + case SYNCHRONIZE_CACHE: + if (((aac_cache & 6) == 6) && 
dev->cache_protected) { + scsicmd->result = AAC_STAT_GOOD; + break; + } + /* Issue FIB to tell Firmware to flush it's cache */ + if ((aac_cache & 6) != 2) + return aac_synchronize(scsicmd); case INQUIRY: { struct inquiry_data inq_data; @@ -2560,8 +2581,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) arr[1] = scsicmd->cmnd[2]; scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); - scsicmd->result = DID_OK << 16 | - COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; + scsicmd->result = AAC_STAT_GOOD; } else if (scsicmd->cmnd[2] == 0x80) { /* unit serial number page */ arr[3] = setinqserial(dev, &arr[4], @@ -2572,8 +2592,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) if (aac_wwn != 2) return aac_get_container_serial( scsicmd); - scsicmd->result = DID_OK << 16 | - COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; + scsicmd->result = AAC_STAT_GOOD; } else if (scsicmd->cmnd[2] == 0x83) { /* vpd page 0x83 - Device Identification Page */ char *sno = (char *)&inq_data; @@ -2582,8 +2601,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) if (aac_wwn != 2) return aac_get_container_serial( scsicmd); - scsicmd->result = DID_OK << 16 | - COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; + scsicmd->result = AAC_STAT_GOOD; } else { /* vpd page not implemented */ scsicmd->result = DID_OK << 16 | @@ -2598,8 +2616,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); } - scsicmd->scsi_done(scsicmd); - return 0; + break; } inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ @@ -2615,9 +2632,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - return 0; + scsicmd->result = AAC_STAT_GOOD; + break; } if (dev->in_reset) return -1; @@ -2665,10 +2681,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) /* Do not cache partition table for arrays */ scsicmd->device->removable = 1; - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - - return 0; + scsicmd->result = AAC_STAT_GOOD; + break; } case READ_CAPACITY: @@ -2693,11 +2707,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); /* Do not cache partition table for arrays */ scsicmd->device->removable = 1; - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | - SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - - return 0; + scsicmd->result = AAC_STAT_GOOD; + break; } case MODE_SENSE: @@ -2775,10 +2786,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) scsi_sg_copy_from_buffer(scsicmd, (char *)&mpd, mode_buf_length); - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - - return 0; + scsicmd->result = AAC_STAT_GOOD; + break; } case MODE_SENSE_10: { @@ -2854,18 +2863,17 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) (char *)&mpd10, mode_buf_length); - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - - return 0; + scsicmd->result = AAC_STAT_GOOD; + break; } case REQUEST_SENSE: dprintk((KERN_DEBUG "REQUEST SENSE command.\n")); - memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data)); - memset(&dev->fsa_dev[cid].sense_data, 0, sizeof 
(struct sense_data)); - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - return 0; + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + sizeof(struct sense_data)); + memset(&dev->fsa_dev[cid].sense_data, 0, + sizeof(struct sense_data)); + scsicmd->result = AAC_STAT_GOOD; + break; case ALLOW_MEDIUM_REMOVAL: dprintk((KERN_DEBUG "LOCK command.\n")); @@ -2874,9 +2882,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) else fsa_dev_ptr[cid].locked = 0; - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - return 0; + scsicmd->result = AAC_STAT_GOOD; + break; /* * These commands are all No-Ops */ @@ -2892,80 +2899,41 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); - scsicmd->scsi_done(scsicmd); - return 0; + break; } - /* FALLTHRU */ case RESERVE: case RELEASE: case REZERO_UNIT: case REASSIGN_BLOCKS: case SEEK_10: - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - return 0; + scsicmd->result = AAC_STAT_GOOD; + break; case START_STOP: return aac_start_stop(scsicmd); - } - switch (scsicmd->cmnd[0]) - { - case READ_6: - case READ_10: - case READ_12: - case READ_16: - if (dev->in_reset) - return -1; - /* - * Hack to keep track of ordinal number of the device that - * corresponds to a container. Needed to convert - * containers to /dev/sd device names - */ - - if (scsicmd->request->rq_disk) - strlcpy(fsa_dev_ptr[cid].devname, - scsicmd->request->rq_disk->disk_name, - min(sizeof(fsa_dev_ptr[cid].devname), - sizeof(scsicmd->request->rq_disk->disk_name) + 1)); - - return aac_read(scsicmd); - - case WRITE_6: - case WRITE_10: - case WRITE_12: - case WRITE_16: - if (dev->in_reset) - return -1; - return aac_write(scsicmd); - - case SYNCHRONIZE_CACHE: - if (((aac_cache & 6) == 6) && dev->cache_protected) { - scsicmd->result = DID_OK << 16 | - COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; - scsicmd->scsi_done(scsicmd); - return 0; - } - /* Issue FIB to tell Firmware to flush it's cache */ - if ((aac_cache & 6) != 2) - return aac_synchronize(scsicmd); - /* FALLTHRU */ - default: - /* - * Unhandled commands - */ - dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0])); - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; - set_sense(&dev->fsa_dev[cid].sense_data, + /* FALLTHRU */ + default: + /* + * Unhandled commands + */ + dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", + scsicmd->cmnd[0])); + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | + SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, ASENCODE_INVALID_COMMAND, 0, 0); - memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); - scsicmd->scsi_done(scsicmd); - return 0; } + +scsi_done_ret: + + scsicmd->scsi_done(scsicmd); + return 0; } static int query_disk(struct aac_dev *dev, void __user *arg) -- cgit v1.2.3-59-g8ed1b From 4ec57fb4edaec523f0f78a0449a3b063749ac58b Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:21 -0800 Subject: scsi: aacraid: Process Error for response I/O Make sure that the driver processes error conditions even in the fast response path for response from the adapter. 
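The decode order the patch establishes, fib status first, then the SRB status field, then the SCSI status, now runs for fast responses as well. As a rough illustration, the stand-alone userspace sketch below models the SRB-status-to-host-byte mapping performed in aac_srb_callback(); the enum names and values are placeholders and do not reproduce the definitions from aacraid.h.

#include <stdio.h>

/*
 * Illustrative stand-ins for the driver's SRB completion categories;
 * the real codes live in aacraid.h and are not reused here.
 */
enum srb_model_status {
	SRB_MODEL_SUCCESS,
	SRB_MODEL_ABORTED,
	SRB_MODEL_PARITY_ERROR,
	SRB_MODEL_SELECTION_TIMEOUT,
	SRB_MODEL_TIMEOUT,
	SRB_MODEL_BUSY,
	SRB_MODEL_BUS_RESET,
	SRB_MODEL_OTHER,
};

/*
 * Mirror of the switch in aac_srb_callback(): map an SRB completion
 * category to the host byte the SCSI midlayer should see.
 */
static const char *srb_model_host_byte(enum srb_model_status status)
{
	switch (status) {
	case SRB_MODEL_SUCCESS:
		return "DID_OK";
	case SRB_MODEL_ABORTED:
		return "DID_ABORT";
	case SRB_MODEL_PARITY_ERROR:
		return "DID_PARITY";
	case SRB_MODEL_SELECTION_TIMEOUT:
		return "DID_NO_CONNECT";
	case SRB_MODEL_TIMEOUT:
		return "DID_TIME_OUT";
	case SRB_MODEL_BUSY:
		return "DID_BUS_BUSY";
	case SRB_MODEL_BUS_RESET:
		return "DID_RESET";
	default:
		return "DID_ERROR";
	}
}

int main(void)
{
	printf("selection timeout maps to %s\n",
	       srb_model_host_byte(SRB_MODEL_SELECTION_TIMEOUT));
	return 0;
}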
Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 289 ++++++++++++++++++++++-------------------- 1 file changed, 151 insertions(+), 138 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 8d8e6c2f9691..f719b952dd4d 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -3068,16 +3068,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr) return; BUG_ON(fibptr == NULL); - dev = fibptr->dev; - - scsi_dma_unmap(scsicmd); - /* expose physical device if expose_physicald flag is on */ - if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) - && expose_physicals > 0) - aac_expose_phy_device(scsicmd); + dev = fibptr->dev; srbreply = (struct aac_srb_reply *) fib_data(fibptr); + scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { @@ -3090,158 +3085,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr) */ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) - le32_to_cpu(srbreply->data_xfer_length)); - /* - * First check the fib status - */ + } - if (le32_to_cpu(srbreply->status) != ST_OK) { - int len; - printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status)); - len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), - SCSI_SENSE_BUFFERSIZE); - scsicmd->result = DID_ERROR << 16 - | COMMAND_COMPLETE << 8 - | SAM_STAT_CHECK_CONDITION; - memcpy(scsicmd->sense_buffer, - srbreply->sense_data, len); - } + scsi_dma_unmap(scsicmd); - /* - * Next check the srb status - */ - switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { - case SRB_STATUS_ERROR_RECOVERY: - case SRB_STATUS_PENDING: - case SRB_STATUS_SUCCESS: - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; - break; - case SRB_STATUS_DATA_OVERRUN: - switch (scsicmd->cmnd[0]) { - case READ_6: - case WRITE_6: - case READ_10: - case WRITE_10: - case READ_12: - case WRITE_12: - case READ_16: - case WRITE_16: - if (le32_to_cpu(srbreply->data_xfer_length) - < scsicmd->underflow) - printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); - else - printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n"); - scsicmd->result = DID_ERROR << 16 - | COMMAND_COMPLETE << 8; - break; - case INQUIRY: { - scsicmd->result = DID_OK << 16 - | COMMAND_COMPLETE << 8; - break; - } - default: - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; - break; - } - break; - case SRB_STATUS_ABORTED: - scsicmd->result = DID_ABORT << 16 | ABORT << 8; - break; - case SRB_STATUS_ABORT_FAILED: - /* - * Not sure about this one - but assuming the - * hba was trying to abort for some reason - */ - scsicmd->result = DID_ERROR << 16 | ABORT << 8; - break; - case SRB_STATUS_PARITY_ERROR: - scsicmd->result = DID_PARITY << 16 - | MSG_PARITY_ERROR << 8; - break; - case SRB_STATUS_NO_DEVICE: - case SRB_STATUS_INVALID_PATH_ID: - case SRB_STATUS_INVALID_TARGET_ID: - case SRB_STATUS_INVALID_LUN: - case SRB_STATUS_SELECTION_TIMEOUT: - scsicmd->result = DID_NO_CONNECT << 16 - | COMMAND_COMPLETE << 8; - break; + /* expose physical device if expose_physicald flag is on */ + if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) + && expose_physicals > 0) + aac_expose_phy_device(scsicmd); - case SRB_STATUS_COMMAND_TIMEOUT: - case SRB_STATUS_TIMEOUT: - scsicmd->result = DID_TIME_OUT << 16 - | COMMAND_COMPLETE << 8; - break; + /* + * First check the fib status + */ 
- case SRB_STATUS_BUSY: - scsicmd->result = DID_BUS_BUSY << 16 - | COMMAND_COMPLETE << 8; - break; + if (le32_to_cpu(srbreply->status) != ST_OK) { + int len; - case SRB_STATUS_BUS_RESET: - scsicmd->result = DID_RESET << 16 - | COMMAND_COMPLETE << 8; - break; + pr_warn("aac_srb_callback: srb failed, status = %d\n", + le32_to_cpu(srbreply->status)); + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), + SCSI_SENSE_BUFFERSIZE); + scsicmd->result = DID_ERROR << 16 + | COMMAND_COMPLETE << 8 + | SAM_STAT_CHECK_CONDITION; + memcpy(scsicmd->sense_buffer, + srbreply->sense_data, len); + } - case SRB_STATUS_MESSAGE_REJECTED: + /* + * Next check the srb status + */ + switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { + case SRB_STATUS_ERROR_RECOVERY: + case SRB_STATUS_PENDING: + case SRB_STATUS_SUCCESS: + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + case SRB_STATUS_DATA_OVERRUN: + switch (scsicmd->cmnd[0]) { + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + case READ_16: + case WRITE_16: + if (le32_to_cpu(srbreply->data_xfer_length) + < scsicmd->underflow) + pr_warn("aacraid: SCSI CMD underflow\n"); + else + pr_warn("aacraid: SCSI CMD Data Overrun\n"); scsicmd->result = DID_ERROR << 16 - | MESSAGE_REJECT << 8; + | COMMAND_COMPLETE << 8; + break; + case INQUIRY: + scsicmd->result = DID_OK << 16 + | COMMAND_COMPLETE << 8; break; - case SRB_STATUS_REQUEST_FLUSHED: - case SRB_STATUS_ERROR: - case SRB_STATUS_INVALID_REQUEST: - case SRB_STATUS_REQUEST_SENSE_FAILED: - case SRB_STATUS_NO_HBA: - case SRB_STATUS_UNEXPECTED_BUS_FREE: - case SRB_STATUS_PHASE_SEQUENCE_FAILURE: - case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: - case SRB_STATUS_DELAYED_RETRY: - case SRB_STATUS_BAD_FUNCTION: - case SRB_STATUS_NOT_STARTED: - case SRB_STATUS_NOT_IN_USE: - case SRB_STATUS_FORCE_ABORT: - case SRB_STATUS_DOMAIN_VALIDATION_FAIL: default: + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + } + break; + case SRB_STATUS_ABORTED: + scsicmd->result = DID_ABORT << 16 | ABORT << 8; + break; + case SRB_STATUS_ABORT_FAILED: + /* + * Not sure about this one - but assuming the + * hba was trying to abort for some reason + */ + scsicmd->result = DID_ERROR << 16 | ABORT << 8; + break; + case SRB_STATUS_PARITY_ERROR: + scsicmd->result = DID_PARITY << 16 + | MSG_PARITY_ERROR << 8; + break; + case SRB_STATUS_NO_DEVICE: + case SRB_STATUS_INVALID_PATH_ID: + case SRB_STATUS_INVALID_TARGET_ID: + case SRB_STATUS_INVALID_LUN: + case SRB_STATUS_SELECTION_TIMEOUT: + scsicmd->result = DID_NO_CONNECT << 16 + | COMMAND_COMPLETE << 8; + break; + + case SRB_STATUS_COMMAND_TIMEOUT: + case SRB_STATUS_TIMEOUT: + scsicmd->result = DID_TIME_OUT << 16 + | COMMAND_COMPLETE << 8; + break; + + case SRB_STATUS_BUSY: + scsicmd->result = DID_BUS_BUSY << 16 + | COMMAND_COMPLETE << 8; + break; + + case SRB_STATUS_BUS_RESET: + scsicmd->result = DID_RESET << 16 + | COMMAND_COMPLETE << 8; + break; + + case SRB_STATUS_MESSAGE_REJECTED: + scsicmd->result = DID_ERROR << 16 + | MESSAGE_REJECT << 8; + break; + case SRB_STATUS_REQUEST_FLUSHED: + case SRB_STATUS_ERROR: + case SRB_STATUS_INVALID_REQUEST: + case SRB_STATUS_REQUEST_SENSE_FAILED: + case SRB_STATUS_NO_HBA: + case SRB_STATUS_UNEXPECTED_BUS_FREE: + case SRB_STATUS_PHASE_SEQUENCE_FAILURE: + case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: + case SRB_STATUS_DELAYED_RETRY: + case SRB_STATUS_BAD_FUNCTION: + case SRB_STATUS_NOT_STARTED: + case SRB_STATUS_NOT_IN_USE: + case SRB_STATUS_FORCE_ABORT: + case SRB_STATUS_DOMAIN_VALIDATION_FAIL: + 
default: #ifdef AAC_DETAILED_STATUS_INFO - printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n", - le32_to_cpu(srbreply->srb_status) & 0x3F, - aac_get_status_string( - le32_to_cpu(srbreply->srb_status) & 0x3F), - scsicmd->cmnd[0], - le32_to_cpu(srbreply->scsi_status)); + pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n", + le32_to_cpu(srbreply->srb_status) & 0x3F, + aac_get_status_string( + le32_to_cpu(srbreply->srb_status) & 0x3F), + scsicmd->cmnd[0], + le32_to_cpu(srbreply->scsi_status)); #endif - if ((scsicmd->cmnd[0] == ATA_12) - || (scsicmd->cmnd[0] == ATA_16)) { - if (scsicmd->cmnd[2] & (0x01 << 5)) { - scsicmd->result = DID_OK << 16 - | COMMAND_COMPLETE << 8; - break; - } else { - scsicmd->result = DID_ERROR << 16 - | COMMAND_COMPLETE << 8; - break; - } + /* + * When the CC bit is SET by the host in ATA pass thru CDB, + * driver is supposed to return DID_OK + * + * When the CC bit is RESET by the host, driver should + * return DID_ERROR + */ + if ((scsicmd->cmnd[0] == ATA_12) + || (scsicmd->cmnd[0] == ATA_16)) { + + if (scsicmd->cmnd[2] & (0x01 << 5)) { + scsicmd->result = DID_OK << 16 + | COMMAND_COMPLETE << 8; + break; } else { scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; - break; + break; } + } else { + scsicmd->result = DID_ERROR << 16 + | COMMAND_COMPLETE << 8; + break; } - if (le32_to_cpu(srbreply->scsi_status) - == SAM_STAT_CHECK_CONDITION) { - int len; + } + if (le32_to_cpu(srbreply->scsi_status) + == SAM_STAT_CHECK_CONDITION) { + int len; - scsicmd->result |= SAM_STAT_CHECK_CONDITION; - len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), - SCSI_SENSE_BUFFERSIZE); + scsicmd->result |= SAM_STAT_CHECK_CONDITION; + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), + SCSI_SENSE_BUFFERSIZE); #ifdef AAC_DETAILED_STATUS_INFO - printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", - le32_to_cpu(srbreply->status), len); + pr_warn("aac_srb_callback: check condition, status = %d len=%d\n", + le32_to_cpu(srbreply->status), len); #endif - memcpy(scsicmd->sense_buffer, - srbreply->sense_data, len); - } + memcpy(scsicmd->sense_buffer, + srbreply->sense_data, len); } + /* * OR in the scsi status (already shifted up a bit) */ -- cgit v1.2.3-59-g8ed1b From 3ffd6c5a74d916a10afada8b679df8c964c1479b Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:22 -0800 Subject: scsi: aacraid: Added support for response path This patch enables the driver to actually process the I/O, or srb replies from adapter. In addition to any HBA1000 or SmartIOC2000 adapter events. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aachba.c | 34 +++++++++++++++------------ drivers/scsi/aacraid/aacraid.h | 52 +++++++++++++++++++++++------------------- drivers/scsi/aacraid/commsup.c | 17 +++++++++----- drivers/scsi/aacraid/dpcsup.c | 20 ++++++++-------- drivers/scsi/aacraid/src.c | 38 ++++++++++++++++++++++-------- 5 files changed, 99 insertions(+), 62 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index f719b952dd4d..8fc06e3da3a7 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -329,7 +329,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd, } scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL; device = scsicmd->device; - if (unlikely(!device || !scsi_device_online(device))) { + if (unlikely(!device)) { dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n")); aac_fib_complete(fibptr); return 0; @@ -475,16 +475,26 @@ int aac_get_containers(struct aac_dev *dev) if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) maximum_num_containers = MAXIMUM_NUM_CONTAINERS; - fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers, - GFP_KERNEL); - if (!fsa_dev_ptr) - return -ENOMEM; + if (dev->fsa_dev == NULL || + dev->maximum_num_containers != maximum_num_containers) { + + fsa_dev_ptr = dev->fsa_dev; - dev->fsa_dev = fsa_dev_ptr; - dev->maximum_num_containers = maximum_num_containers; + dev->fsa_dev = kcalloc(maximum_num_containers, + sizeof(*fsa_dev_ptr), GFP_KERNEL); - for (index = 0; index < dev->maximum_num_containers; ) { - fsa_dev_ptr[index].devname[0] = '\0'; + kfree(fsa_dev_ptr); + fsa_dev_ptr = NULL; + + + if (!dev->fsa_dev) + return -ENOMEM; + + dev->maximum_num_containers = maximum_num_containers; + } + for (index = 0; index < dev->maximum_num_containers; index++) { + dev->fsa_dev[index].devname[0] = '\0'; + dev->fsa_dev[index].valid = 0; status = aac_probe_container(dev, index); @@ -492,12 +502,6 @@ int aac_get_containers(struct aac_dev *dev) printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n"); break; } - - /* - * If there are no more containers, then stop asking. - */ - if (++index >= status) - break; } return status; } diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 00df610993a3..d1f5a66770aa 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -407,7 +407,7 @@ struct aac_fibhdr { __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */ __le32 TimeStamp; /* otherwise timestamp for FW internal use */ } u; - u32 Handle; /* FIB handle used for MSGU commnunication */ + __le32 Handle; /* FIB handle used for MSGU commnunication */ u32 Previous; /* FW internal use */ u32 Next; /* FW internal use */ }; @@ -872,18 +872,20 @@ struct rkt_registers { #define src_inbound rx_inbound struct src_mu_registers { - /* PCI*| Name */ - __le32 reserved0[6]; /* 00h | Reserved */ - __le32 IOAR[2]; /* 18h | IOA->host interrupt register */ - __le32 IDR; /* 20h | Inbound Doorbell Register */ - __le32 IISR; /* 24h | Inbound Int. Status Register */ - __le32 reserved1[3]; /* 28h | Reserved */ - __le32 OIMR; /* 34h | Outbound Int. 
Mask Register */ - __le32 reserved2[25]; /* 38h | Reserved */ - __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ - __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ - __le32 reserved3[6]; /* a4h | Reserved */ - __le32 OMR; /* bch | Outbound Message Register */ + /* PCI*| Name */ + __le32 reserved0[6]; /* 00h | Reserved */ + __le32 IOAR[2]; /* 18h | IOA->host interrupt register */ + __le32 IDR; /* 20h | Inbound Doorbell Register */ + __le32 IISR; /* 24h | Inbound Int. Status Register */ + __le32 reserved1[3]; /* 28h | Reserved */ + __le32 OIMR; /* 34h | Outbound Int. Mask Register */ + __le32 reserved2[25]; /* 38h | Reserved */ + __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ + __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ + __le32 reserved3[3]; /* a4h | Reserved */ + __le32 SCR0; /* b0h | Scratchpad 0 */ + __le32 reserved4[2]; /* b4h | Reserved */ + __le32 OMR; /* bch | Outbound Message Register */ __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ __le32 IQ_H; /* c4h | Inbound Queue (High address) */ __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */ @@ -982,6 +984,7 @@ struct fsa_dev_info { char devname[8]; struct sense_data sense_data; u32 block_size; + u8 identifier[16]; }; struct fib { @@ -1012,6 +1015,7 @@ struct fib { u32 vector_no; struct hw_fib *hw_fib_va; /* Actual shared object */ dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ + u32 hbacmd_size; /* cmd size for native */ }; #define AAC_DEVTYPE_RAID_MEMBER 1 @@ -1215,9 +1219,11 @@ struct aac_dev /* * negotiated FIB settings */ - unsigned max_fib_size; - unsigned sg_tablesize; - unsigned max_num_aif; + unsigned int max_fib_size; + unsigned int sg_tablesize; + unsigned int max_num_aif; + + unsigned int max_cmd_size; /* max_fib_size or MAX_NATIVE */ /* * Map for 128 fib objects (64k) @@ -1259,18 +1265,17 @@ struct aac_dev */ union aac_init *init; dma_addr_t init_pa; /* Holds physical address of the init struct */ - - u32 *host_rrq; /* response queue - * if AAC_COMM_MESSAGE_TYPE1 */ - + /* response queue (if AAC_COMM_MESSAGE_TYPE1) */ + __le32 *host_rrq; dma_addr_t host_rrq_pa; /* phys. address */ /* index into rrq buffer */ u32 host_rrq_idx[AAC_MAX_MSIX]; atomic_t rrq_outstanding[AAC_MAX_MSIX]; u32 fibs_pushed_no; struct pci_dev *pdev; /* Our PCI interface */ - void * printfbuf; /* pointer to buffer used for printf's from the adapter */ - void * comm_addr; /* Base address of Comm area */ + /* pointer to buffer used for printf's from the adapter */ + void *printfbuf; + void *comm_addr; /* Base address of Comm area */ dma_addr_t comm_phys; /* Physical Address of Comm area */ size_t comm_size; @@ -1342,6 +1347,8 @@ struct aac_dev u32 max_msix; /* max. 
MSI-X vectors */ u32 vector_cap; /* MSI-X vector capab.*/ int msi_enabled; /* MSI/MSI-X enabled */ + atomic_t msix_counter; + struct msix_entry msixentry[AAC_MAX_MSIX]; struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS]; u8 adapter_shutdown; @@ -2281,7 +2288,6 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm); int aac_rx_deliver_producer(struct fib * fib); char * get_container_type(unsigned type); extern int numacb; -extern int acbsize; extern char aac_driver_version[]; extern int startup_timeout; extern int aif_timeout; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index d18ed9ad45b3..4b1177aa5e01 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -60,12 +60,17 @@ static int fib_map_alloc(struct aac_dev *dev) { + if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE) + dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; + else + dev->max_cmd_size = dev->max_fib_size; + dprintk((KERN_INFO "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", - dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, + dev->pdev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue, AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); dev->hw_fib_va = pci_alloc_consistent(dev->pdev, - (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) + (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), &dev->hw_fib_pa); if (dev->hw_fib_va == NULL) @@ -83,9 +88,9 @@ static int fib_map_alloc(struct aac_dev *dev) void aac_fib_map_free(struct aac_dev *dev) { - if (dev->hw_fib_va && dev->max_fib_size) { + if (dev->hw_fib_va && dev->max_cmd_size) { pci_free_consistent(dev->pdev, - (dev->max_fib_size * + (dev->max_cmd_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)), dev->hw_fib_va, dev->hw_fib_pa); } @@ -176,9 +181,9 @@ int aac_fib_setup(struct aac_dev * dev) hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); fibptr->hw_fib_pa = hw_fib_pa; hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + - dev->max_fib_size + sizeof(struct aac_fib_xporthdr)); + dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)); hw_fib_pa = hw_fib_pa + - dev->max_fib_size + sizeof(struct aac_fib_xporthdr); + dev->max_cmd_size + sizeof(struct aac_fib_xporthdr); } /* diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index 7e836205aef1..8077dbad984e 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -122,7 +122,6 @@ unsigned int aac_response_normal(struct aac_queue * q) * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. */ - fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; fib->callback(fib->callback_data, fib); } else { unsigned long flagv; @@ -251,8 +250,9 @@ static void aac_aif_callback(void *context, struct fib * fibptr) BUG_ON(fibptr == NULL); dev = fibptr->dev; - if (fibptr->hw_fib_va->header.XferState & - cpu_to_le32(NoMoreAifDataAvailable)) { + if ((fibptr->hw_fib_va->header.XferState & + cpu_to_le32(NoMoreAifDataAvailable)) || + dev->sa_firmware) { aac_fib_complete(fibptr); aac_fib_free(fibptr); return; @@ -282,8 +282,8 @@ static void aac_aif_callback(void *context, struct fib * fibptr) * know there is a response on our normal priority queue. We will pull off * all QE there are and wake up all the waiters before exiting. 
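 * With sa_firmware, AIF interrupts pass the event value read from the
 * SCR0 scratchpad in index; it is stored in fib->hbacmd_size for the
 * AIF handler instead of copying a hw_fib from the adapter.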
*/ -unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, - int isAif, int isFastResponse, struct hw_fib *aif_fib) +unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif, + int isFastResponse, struct hw_fib *aif_fib) { unsigned long mflags; dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); @@ -305,12 +305,14 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, kfree (fib); return 1; } - if (aif_fib != NULL) { + if (dev->sa_firmware) { + fib->hbacmd_size = index; /* store event type */ + } else if (aif_fib != NULL) { memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); } else { - memcpy(hw_fib, - (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + - index), sizeof(struct hw_fib)); + memcpy(hw_fib, (struct hw_fib *) + (((uintptr_t)(dev->regs.sa)) + index), + sizeof(struct hw_fib)); } INIT_LIST_HEAD(&fib->fiblink); fib->type = FSAFS_NTC_FIB_CONTEXT; diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index a5f7a6f2f3d7..5508893ca8b7 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -135,8 +135,16 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) if (mode & AAC_INT_MODE_AIF) { /* handle AIF */ - if (dev->aif_thread && dev->fsa_dev) - aac_intr_normal(dev, 0, 2, 0, NULL); + if (dev->sa_firmware) { + u32 events = src_readl(dev, MUnit.SCR0); + + aac_intr_normal(dev, events, 1, 0, NULL); + writel(events, &dev->IndexRegs->Mailbox[0]); + src_writel(dev, MUnit.IDR, 1 << 23); + } else { + if (dev->aif_thread && dev->fsa_dev) + aac_intr_normal(dev, 0, 2, 0, NULL); + } if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); mode = 0; @@ -148,17 +156,19 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) for (;;) { isFastResponse = 0; /* remove toggle bit (31) */ - handle = (dev->host_rrq[index] & 0x7fffffff); - /* check fast response bit (30) */ + handle = le32_to_cpu((dev->host_rrq[index]) + & 0x7fffffff); + /* check fast response bits (30, 1) */ if (handle & 0x40000000) isFastResponse = 1; handle &= 0x0000ffff; if (handle == 0) break; + handle >>= 2; if (dev->msi_enabled && dev->max_msix > 1) atomic_dec(&dev->rrq_outstanding[vector_no]); + aac_intr_normal(dev, handle, 0, isFastResponse, NULL); dev->host_rrq[index++] = 0; - aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); if (index == (vector_no + 1) * dev->vector_cap) index = vector_no * dev->vector_cap; dev->host_rrq_idx[vector_no] = index; @@ -392,6 +402,7 @@ static void aac_src_start_adapter(struct aac_dev *dev) dev->host_rrq_idx[i] = i * dev->vector_cap; atomic_set(&dev->rrq_outstanding[i], 0); } + atomic_set(&dev->msix_counter, 0); dev->fibs_pushed_no = 0; init = dev->init; @@ -565,9 +576,18 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) dev->base = dev->regs.src.bar0 = NULL; return 0; } + + dev->regs.src.bar1 = + ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE); + dev->base = NULL; + if (dev->regs.src.bar1 == NULL) + return -1; dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); - if (dev->base == NULL) + if (dev->base == NULL) { + iounmap(dev->regs.src.bar1); + dev->regs.src.bar1 = NULL; return -1; + } dev->IndexRegs = &((struct src_registers __iomem *) dev->base)->u.denali.IndexRegs; return 0; @@ -918,9 +938,9 @@ int aac_srcv_init(struct aac_dev *dev) if (aac_acquire_irq(dev)) goto error_iounmap; - dev->dbg_base = dev->base_start; - dev->dbg_base_mapped = dev->base; - dev->dbg_size = dev->base_size; + dev->dbg_base = pci_resource_start(dev->pdev, 2); + 
dev->dbg_base_mapped = dev->regs.src.bar1; + dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE; dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; aac_adapter_enable_int(dev); -- cgit v1.2.3-59-g8ed1b From f956a669bf1c2a7b273f753023717d630222d2cc Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:23 -0800 Subject: scsi: aacraid: Added support for read medium error This patch processes Raw IO read medium errors. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 10 ++++++++++ drivers/scsi/aacraid/aacraid.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 8fc06e3da3a7..b854c4928f6f 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -62,6 +62,7 @@ #define SENCODE_END_OF_DATA 0x00 #define SENCODE_BECOMING_READY 0x04 #define SENCODE_INIT_CMD_REQUIRED 0x04 +#define SENCODE_UNRECOVERED_READ_ERROR 0x11 #define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A #define SENCODE_INVALID_COMMAND 0x20 #define SENCODE_LBA_OUT_OF_RANGE 0x21 @@ -1997,6 +1998,15 @@ static void io_callback(void *context, struct fib * fibptr) min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); break; + case ST_MEDERR: + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | + SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR, + SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + break; default: #ifdef AAC_DETAILED_STATUS_INFO printk(KERN_WARNING "io_callback: io failed, status = %d\n", diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d1f5a66770aa..b54c1bfd81a7 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1462,6 +1462,7 @@ struct aac_dev #define ST_IO 5 #define ST_NXIO 6 #define ST_E2BIG 7 +#define ST_MEDERR 8 #define ST_ACCES 13 #define ST_EXIST 17 #define ST_XDEV 18 -- cgit v1.2.3-59-g8ed1b From 113156bcea9ef1e6b3bafc53e64f7812c356b21b Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:24 -0800 Subject: scsi: aacraid: Reworked aac_command_thread Reworked aac_command_thread into aac_process_events Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/commsup.c | 468 ++++++++++++++++++++++++----------------- 1 file changed, 274 insertions(+), 194 deletions(-) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 4b1177aa5e01..3dfb9da918a7 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1729,6 +1729,279 @@ out: return BlinkLED; } +static int get_fib_count(struct aac_dev *dev) +{ + unsigned int num = 0; + struct list_head *entry; + unsigned long flagv; + + /* + * Warning: no sleep allowed while + * holding spinlock. We take the estimate + * and pre-allocate a set of fibs outside the + * lock. 
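+ * The count is only an estimate: fillup_pools() may allocate fewer
+ * entries than requested, and wakeup_fibctx_threads() stops handing
+ * out copies once the pool runs dry.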
+ */ + num = le32_to_cpu(dev->init->r7.adapter_fibs_size) + / sizeof(struct hw_fib); /* some extra */ + spin_lock_irqsave(&dev->fib_lock, flagv); + entry = dev->fib_list.next; + while (entry != &dev->fib_list) { + entry = entry->next; + ++num; + } + spin_unlock_irqrestore(&dev->fib_lock, flagv); + + return num; +} + +static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool, + struct fib **fib_pool, + unsigned int num) +{ + struct hw_fib **hw_fib_p; + struct fib **fib_p; + int rcode = 1; + + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (hw_fib_p < &hw_fib_pool[num]) { + *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL); + if (!(*(hw_fib_p++))) { + --hw_fib_p; + break; + } + + *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL); + if (!(*(fib_p++))) { + kfree(*(--hw_fib_p)); + break; + } + } + + num = hw_fib_p - hw_fib_pool; + if (!num) + rcode = 0; + + return rcode; +} + +static void wakeup_fibctx_threads(struct aac_dev *dev, + struct hw_fib **hw_fib_pool, + struct fib **fib_pool, + struct fib *fib, + struct hw_fib *hw_fib, + unsigned int num) +{ + unsigned long flagv; + struct list_head *entry; + struct hw_fib **hw_fib_p; + struct fib **fib_p; + u32 time_now, time_last; + struct hw_fib *hw_newfib; + struct fib *newfib; + struct aac_fib_context *fibctx; + + time_now = jiffies/HZ; + spin_lock_irqsave(&dev->fib_lock, flagv); + entry = dev->fib_list.next; + /* + * For each Context that is on the + * fibctxList, make a copy of the + * fib, and then set the event to wake up the + * thread that is waiting for it. + */ + + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (entry != &dev->fib_list) { + /* + * Extract the fibctx + */ + fibctx = list_entry(entry, struct aac_fib_context, + next); + /* + * Check if the queue is getting + * backlogged + */ + if (fibctx->count > 20) { + /* + * It's *not* jiffies folks, + * but jiffies / HZ so do not + * panic ... + */ + time_last = fibctx->jiffies; + /* + * Has it been > 2 minutes + * since the last read off + * the queue? + */ + if ((time_now - time_last) > aif_timeout) { + entry = entry->next; + aac_close_fib_context(dev, fibctx); + continue; + } + } + /* + * Warning: no sleep allowed while + * holding spinlock + */ + if (hw_fib_p >= &hw_fib_pool[num]) { + pr_warn("aifd: didn't allocate NewFib\n"); + entry = entry->next; + continue; + } + + hw_newfib = *hw_fib_p; + *(hw_fib_p++) = NULL; + newfib = *fib_p; + *(fib_p++) = NULL; + /* + * Make the copy of the FIB + */ + memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); + memcpy(newfib, fib, sizeof(struct fib)); + newfib->hw_fib_va = hw_newfib; + /* + * Put the FIB onto the + * fibctx's fibs + */ + list_add_tail(&newfib->fiblink, &fibctx->fib_list); + fibctx->count++; + /* + * Set the event to wake up the + * thread that is waiting. 
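+ * The reader blocked in the ioctl path sleeps on fibctx->wait_sem and
+ * picks the copy up from fibctx->fib_list.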
+ */ + up(&fibctx->wait_sem); + + entry = entry->next; + } + /* + * Set the status of this FIB + */ + *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); + aac_fib_adapter_complete(fib, sizeof(u32)); + spin_unlock_irqrestore(&dev->fib_lock, flagv); + +} + +static void aac_process_events(struct aac_dev *dev) +{ + struct hw_fib *hw_fib; + struct fib *fib; + unsigned long flags; + spinlock_t *t_lock; + unsigned int rcode; + + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_lock_irqsave(t_lock, flags); + + while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { + struct list_head *entry; + struct aac_aifcmd *aifcmd; + unsigned int num; + struct hw_fib **hw_fib_pool, **hw_fib_p; + struct fib **fib_pool, **fib_p; + + set_current_state(TASK_RUNNING); + + entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; + list_del(entry); + + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_unlock_irqrestore(t_lock, flags); + + fib = list_entry(entry, struct fib, fiblink); + hw_fib = fib->hw_fib_va; + /* + * We will process the FIB here or pass it to a + * worker thread that is TBD. We Really can't + * do anything at this point since we don't have + * anything defined for this thread to do. + */ + memset(fib, 0, sizeof(struct fib)); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof(struct fib); + fib->hw_fib_va = hw_fib; + fib->data = hw_fib->data; + fib->dev = dev; + /* + * We only handle AifRequest fibs from the adapter. + */ + + aifcmd = (struct aac_aifcmd *) hw_fib->data; + if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { + /* Handle Driver Notify Events */ + aac_handle_aif(dev, fib); + *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); + aac_fib_adapter_complete(fib, (u16)sizeof(u32)); + goto free_fib; + } + /* + * The u32 here is important and intended. 
We are using + * 32bit wrapping time to fit the adapter field + */ + + /* Sniff events */ + if (aifcmd->command == cpu_to_le32(AifCmdEventNotify) + || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) { + aac_handle_aif(dev, fib); + } + + /* + * get number of fibs to process + */ + num = get_fib_count(dev); + if (!num) + goto free_fib; + + hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *), + GFP_KERNEL); + if (!hw_fib_pool) + goto free_fib; + + fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL); + if (!fib_pool) + goto free_hw_fib_pool; + + /* + * Fill up fib pointer pools with actual fibs + * and hw_fibs + */ + rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num); + if (!rcode) + goto free_mem; + + /* + * wakeup the thread that is waiting for + * the response from fw (ioctl) + */ + wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool, + fib, hw_fib, num); + +free_mem: + /* Free up the remaining resources */ + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (hw_fib_p < &hw_fib_pool[num]) { + kfree(*hw_fib_p); + kfree(*fib_p); + ++fib_p; + ++hw_fib_p; + } + kfree(fib_pool); +free_hw_fib_pool: + kfree(hw_fib_pool); +free_fib: + kfree(fib); + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_lock_irqsave(t_lock, flags); + } + /* + * There are no more AIF's + */ + t_lock = dev->queues->queue[HostNormCmdQueue].lock; + spin_unlock_irqrestore(t_lock, flags); +} /** * aac_command_thread - command processing thread @@ -1743,10 +2016,6 @@ out: int aac_command_thread(void *data) { struct aac_dev *dev = data; - struct hw_fib *hw_fib, *hw_newfib; - struct fib *fib, *newfib; - struct aac_fib_context *fibctx; - unsigned long flags; DECLARE_WAITQUEUE(wait, current); unsigned long next_jiffies = jiffies + HZ; unsigned long next_check_jiffies = next_jiffies; @@ -1766,197 +2035,8 @@ int aac_command_thread(void *data) set_current_state(TASK_INTERRUPTIBLE); dprintk ((KERN_INFO "aac_command_thread start\n")); while (1) { - spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags); - while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { - struct list_head *entry; - struct aac_aifcmd * aifcmd; - - set_current_state(TASK_RUNNING); - - entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; - list_del(entry); - - spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags); - fib = list_entry(entry, struct fib, fiblink); - /* - * We will process the FIB here or pass it to a - * worker thread that is TBD. We Really can't - * do anything at this point since we don't have - * anything defined for this thread to do. - */ - hw_fib = fib->hw_fib_va; - memset(fib, 0, sizeof(struct fib)); - fib->type = FSAFS_NTC_FIB_CONTEXT; - fib->size = sizeof(struct fib); - fib->hw_fib_va = hw_fib; - fib->data = hw_fib->data; - fib->dev = dev; - /* - * We only handle AifRequest fibs from the adapter. - */ - aifcmd = (struct aac_aifcmd *) hw_fib->data; - if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { - /* Handle Driver Notify Events */ - aac_handle_aif(dev, fib); - *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); - aac_fib_adapter_complete(fib, (u16)sizeof(u32)); - } else { - /* The u32 here is important and intended. 
We are using - 32bit wrapping time to fit the adapter field */ - - u32 time_now, time_last; - unsigned long flagv; - unsigned num; - struct hw_fib ** hw_fib_pool, ** hw_fib_p; - struct fib ** fib_pool, ** fib_p; - - /* Sniff events */ - if ((aifcmd->command == - cpu_to_le32(AifCmdEventNotify)) || - (aifcmd->command == - cpu_to_le32(AifCmdJobProgress))) { - aac_handle_aif(dev, fib); - } - - time_now = jiffies/HZ; - /* - * Warning: no sleep allowed while - * holding spinlock. We take the estimate - * and pre-allocate a set of fibs outside the - * lock. - */ - num = le32_to_cpu(dev->init-> - r7.adapter_fibs_size) - / sizeof(struct hw_fib); /* some extra */ - spin_lock_irqsave(&dev->fib_lock, flagv); - entry = dev->fib_list.next; - while (entry != &dev->fib_list) { - entry = entry->next; - ++num; - } - spin_unlock_irqrestore(&dev->fib_lock, flagv); - hw_fib_pool = NULL; - fib_pool = NULL; - if (num - && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL))) - && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) { - hw_fib_p = hw_fib_pool; - fib_p = fib_pool; - while (hw_fib_p < &hw_fib_pool[num]) { - if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) { - --hw_fib_p; - break; - } - if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) { - kfree(*(--hw_fib_p)); - break; - } - } - if ((num = hw_fib_p - hw_fib_pool) == 0) { - kfree(fib_pool); - fib_pool = NULL; - kfree(hw_fib_pool); - hw_fib_pool = NULL; - } - } else { - kfree(hw_fib_pool); - hw_fib_pool = NULL; - } - spin_lock_irqsave(&dev->fib_lock, flagv); - entry = dev->fib_list.next; - /* - * For each Context that is on the - * fibctxList, make a copy of the - * fib, and then set the event to wake up the - * thread that is waiting for it. - */ - hw_fib_p = hw_fib_pool; - fib_p = fib_pool; - while (entry != &dev->fib_list) { - /* - * Extract the fibctx - */ - fibctx = list_entry(entry, struct aac_fib_context, next); - /* - * Check if the queue is getting - * backlogged - */ - if (fibctx->count > 20) - { - /* - * It's *not* jiffies folks, - * but jiffies / HZ so do not - * panic ... - */ - time_last = fibctx->jiffies; - /* - * Has it been > 2 minutes - * since the last read off - * the queue? - */ - if ((time_now - time_last) > aif_timeout) { - entry = entry->next; - aac_close_fib_context(dev, fibctx); - continue; - } - } - /* - * Warning: no sleep allowed while - * holding spinlock - */ - if (hw_fib_p < &hw_fib_pool[num]) { - hw_newfib = *hw_fib_p; - *(hw_fib_p++) = NULL; - newfib = *fib_p; - *(fib_p++) = NULL; - /* - * Make the copy of the FIB - */ - memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); - memcpy(newfib, fib, sizeof(struct fib)); - newfib->hw_fib_va = hw_newfib; - /* - * Put the FIB onto the - * fibctx's fibs - */ - list_add_tail(&newfib->fiblink, &fibctx->fib_list); - fibctx->count++; - /* - * Set the event to wake up the - * thread that is waiting. 
- */ - up(&fibctx->wait_sem); - } else { - printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); - } - entry = entry->next; - } - /* - * Set the status of this FIB - */ - *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); - aac_fib_adapter_complete(fib, sizeof(u32)); - spin_unlock_irqrestore(&dev->fib_lock, flagv); - /* Free up the remaining resources */ - hw_fib_p = hw_fib_pool; - fib_p = fib_pool; - while (hw_fib_p < &hw_fib_pool[num]) { - kfree(*hw_fib_p); - kfree(*fib_p); - ++fib_p; - ++hw_fib_p; - } - kfree(hw_fib_pool); - kfree(fib_pool); - } - kfree(fib); - spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags); - } - /* - * There are no more AIF's - */ - spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags); + aac_process_events(dev); /* * Background activity -- cgit v1.2.3-59-g8ed1b From 3d77d84044783533581eec7b6229df1154c0b55f Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:25 -0800 Subject: scsi: aacraid: Added support for periodic wellness sync This patch adds a new functions that periodically sync the time of host to the adapter. In addition also informs the adapter that the driver is alive and kicking. Only applicable to the HBA1000 and SMARTIOC2000. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aacraid.h | 3 + drivers/scsi/aacraid/commsup.c | 179 ++++++++++++++++++++++++++++++++++------- 2 files changed, 151 insertions(+), 31 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index b54c1bfd81a7..05884e62672b 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -88,6 +88,9 @@ enum { #define AAC_MAX_NATIVE_SIZE 2048 #define CISS_REPORT_PHYSICAL_LUNS 0xc3 +#define WRITE_HOST_WELLNESS 0xa5 +#define BMIC_IN 0x26 +#define BMIC_OUT 0x27 struct aac_ciss_phys_luns_resp { u8 list_length[4]; /* LUN list length (N-7, big endian) */ diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 3dfb9da918a7..a56aa2e6f7ee 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -2003,6 +2004,142 @@ free_fib: spin_unlock_irqrestore(t_lock, flags); } +static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, + u32 datasize) +{ + struct aac_srb *srbcmd; + struct sgmap64 *sg64; + dma_addr_t addr; + char *dma_buf; + struct fib *fibptr; + int ret = -ENOMEM; + u32 vbus, vid; + + fibptr = aac_fib_alloc(dev); + if (!fibptr) + goto out; + + dma_buf = pci_alloc_consistent(dev->pdev, datasize, &addr); + if (!dma_buf) + goto fib_free_out; + + aac_fib_init(fibptr); + + vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus); + vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget); + + srbcmd = (struct aac_srb *)fib_data(fibptr); + + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); + srbcmd->channel = cpu_to_le32(vbus); + srbcmd->id = cpu_to_le32(vid); + srbcmd->lun = 0; + srbcmd->flags = cpu_to_le32(SRB_DataOut); + srbcmd->timeout = cpu_to_le32(10); + srbcmd->retry_limit = 0; + srbcmd->cdb_size = cpu_to_le32(12); + srbcmd->count = cpu_to_le32(datasize); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + srbcmd->cdb[0] = BMIC_OUT; + srbcmd->cdb[6] = WRITE_HOST_WELLNESS; + memcpy(dma_buf, (char *)wellness_str, datasize); + + sg64 = (struct sgmap64 *)&srbcmd->sg; + sg64->count = 
cpu_to_le32(1); + sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16)); + sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); + sg64->sg[0].count = cpu_to_le32(datasize); + + ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb), + FsaNormal, 1, 1, NULL, NULL); + + pci_free_consistent(dev->pdev, datasize, (void *)dma_buf, addr); + + /* + * Do not set XferState to zero unless + * receives a response from F/W + */ + if (ret >= 0) + aac_fib_complete(fibptr); + + /* + * FIB should be freed only after + * getting the response from the F/W + */ + if (ret != -ERESTARTSYS) + goto fib_free_out; + +out: + return ret; +fib_free_out: + aac_fib_free(fibptr); + goto out; +} + +int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) +{ + struct tm cur_tm; + char wellness_str[] = "TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ"; + u32 datasize = sizeof(wellness_str); + unsigned long local_time; + int ret = -ENODEV; + + if (!dev->sa_firmware) + goto out; + + local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60)); + time_to_tm(local_time, 0, &cur_tm); + cur_tm.tm_mon += 1; + cur_tm.tm_year += 1900; + wellness_str[8] = bin2bcd(cur_tm.tm_hour); + wellness_str[9] = bin2bcd(cur_tm.tm_min); + wellness_str[10] = bin2bcd(cur_tm.tm_sec); + wellness_str[12] = bin2bcd(cur_tm.tm_mon); + wellness_str[13] = bin2bcd(cur_tm.tm_mday); + wellness_str[14] = bin2bcd(cur_tm.tm_year / 100); + wellness_str[15] = bin2bcd(cur_tm.tm_year % 100); + + ret = aac_send_wellness_command(dev, wellness_str, datasize); + +out: + return ret; +} + +int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) +{ + int ret = -ENOMEM; + struct fib *fibptr; + __le32 *info; + + fibptr = aac_fib_alloc(dev); + if (!fibptr) + goto out; + + aac_fib_init(fibptr); + info = (__le32 *)fib_data(fibptr); + *info = cpu_to_le32(now->tv_sec); + ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, + 1, 1, NULL, NULL); + + /* + * Do not set XferState to zero unless + * receives a response from F/W + */ + if (ret >= 0) + aac_fib_complete(fibptr); + + /* + * FIB should be freed only after + * getting the response from the F/W + */ + if (ret != -ERESTARTSYS) + aac_fib_free(fibptr); + +out: + return ret; +} + /** * aac_command_thread - command processing thread * @dev: Adapter to monitor @@ -2058,7 +2195,7 @@ int aac_command_thread(void *data) /* Don't even try to talk to adapter if its sick */ ret = aac_check_health(dev); - if (!ret && !dev->queues) + if (!dev->queues) break; next_check_jiffies = jiffies + ((long)(unsigned)check_interval) @@ -2071,36 +2208,16 @@ int aac_command_thread(void *data) difference = (((1000000 - now.tv_usec) * HZ) + 500000) / 1000000; else if (ret == 0) { - struct fib *fibptr; - - if ((fibptr = aac_fib_alloc(dev))) { - int status; - __le32 *info; - - aac_fib_init(fibptr); - - info = (__le32 *) fib_data(fibptr); - if (now.tv_usec > 500000) - ++now.tv_sec; - - *info = cpu_to_le32(now.tv_sec); - - status = aac_fib_send(SendHostTime, - fibptr, - sizeof(*info), - FsaNormal, - 1, 1, - NULL, - NULL); - /* Do not set XferState to zero unless - * receives a response from F/W */ - if (status >= 0) - aac_fib_complete(fibptr); - /* FIB should be freed only after - * getting the response from the F/W */ - if (status != -ERESTARTSYS) - aac_fib_free(fibptr); - } + + if (now.tv_usec > 500000) + ++now.tv_sec; + + if (dev->sa_firmware) + ret = + aac_send_safw_hostttime(dev, &now); + else + ret = aac_send_hosttime(dev, &now); + difference = (long)(unsigned)update_interval*HZ; } else { /* retry 
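For reference, the BCD timestamp bytes filled into the wellness string above pack one decimal digit per nibble. A minimal standalone sketch, re-implementing bin2bcd() purely for illustration (the date is only an example):

        #include <stdio.h>

        /* same packing as the kernel's bin2bcd(): tens digit in the high nibble */
        static unsigned int bin2bcd(unsigned int val)
        {
                return ((val / 10) << 4) | (val % 10);
        }

        int main(void)
        {
                /* e.g. 2017-02-02 15:53:25 local time */
                printf("%02x %02x %02x / %02x %02x %02x %02x\n",
                       bin2bcd(15), bin2bcd(53), bin2bcd(25),
                       bin2bcd(2), bin2bcd(2), bin2bcd(20), bin2bcd(17));
                /* prints: 15 53 25 / 02 02 20 17 */
                return 0;
        }
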
shortly */ -- cgit v1.2.3-59-g8ed1b From 71a91ca4f9838433a92f5d4e3d80955a753bda88 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:26 -0800 Subject: scsi: aacraid: Retrieve Queue Depth from Adapter FW Retrieved queue depth from fw and saved it for future use. Only applicable for HBA1000 drives. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 78 +++++++++++++++++++++++++++++++++++++++- drivers/scsi/aacraid/aacraid.h | 81 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 157 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index b854c4928f6f..20908a19ff90 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1516,6 +1516,77 @@ static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd) return aac_scsi_32(fib, cmd); } +int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target) +{ + struct fib *fibptr; + struct aac_srb *srbcmd; + struct sgmap64 *sg64; + struct aac_ciss_identify_pd *identify_resp; + dma_addr_t addr; + u32 vbus, vid; + u16 fibsize, datasize; + int rcode = -ENOMEM; + + fibptr = aac_fib_alloc(dev); + if (!fibptr) + goto out; + + fibsize = sizeof(struct aac_srb) - + sizeof(struct sgentry) + sizeof(struct sgentry64); + datasize = sizeof(struct aac_ciss_identify_pd); + + identify_resp = pci_alloc_consistent(dev->pdev, datasize, &addr); + + if (!identify_resp) + goto fib_free_ptr; + + vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus); + vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget); + + aac_fib_init(fibptr); + + srbcmd = (struct aac_srb *) fib_data(fibptr); + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); + srbcmd->channel = cpu_to_le32(vbus); + srbcmd->id = cpu_to_le32(vid); + srbcmd->lun = 0; + srbcmd->flags = cpu_to_le32(SRB_DataIn); + srbcmd->timeout = cpu_to_le32(10); + srbcmd->retry_limit = 0; + srbcmd->cdb_size = cpu_to_le32(12); + srbcmd->count = cpu_to_le32(datasize); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + srbcmd->cdb[0] = 0x26; + srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF); + srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE; + + sg64 = (struct sgmap64 *)&srbcmd->sg; + sg64->count = cpu_to_le32(1); + sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16)); + sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); + sg64->sg[0].count = cpu_to_le32(datasize); + + rcode = aac_fib_send(ScsiPortCommand64, + fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL); + + if (identify_resp->current_queue_depth_limit <= 0 || + identify_resp->current_queue_depth_limit > 32) + dev->hba_map[bus][target].qd_limit = 32; + else + dev->hba_map[bus][target].qd_limit = + identify_resp->current_queue_depth_limit; + + pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr); + + aac_fib_complete(fibptr); + +fib_free_ptr: + aac_fib_free(fibptr); +out: + return rcode; +} + /** * aac_update hba_map()- update current hba map with data from FW * @dev: aac_dev structure @@ -1565,6 +1636,9 @@ void aac_update_hba_map(struct aac_dev *dev, if (devtype != AAC_DEVTYPE_NATIVE_RAW) goto update_devtype; + if (aac_issue_bmic_identify(dev, bus, target) < 0) + dev->hba_map[bus][target].qd_limit = 32; + update_devtype: dev->hba_map[bus][target].devtype = devtype; } @@ -1711,8 +1785,10 @@ int aac_get_adapter_info(struct aac_dev* dev) /* reset all previous mapped devices 
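The sgmap64 entries above split a 64-bit bus address into two 32-bit halves; the double 16-bit shift is the portable way to take the upper half, since a plain ">> 32" is undefined when dma_addr_t is only 32 bits wide on a given configuration. A minimal sketch of the idiom, with hypothetical helper names:

        static inline u32 sg64_addr_hi(dma_addr_t addr)
        {
                return (u32)((addr >> 16) >> 16);       /* 0 on 32-bit dma_addr_t */
        }

        static inline u32 sg64_addr_lo(dma_addr_t addr)
        {
                return (u32)(addr & 0xffffffff);
        }
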
(i.e. for init. after IOP_RESET) */ for (bus = 0; bus < AAC_MAX_BUSES; bus++) { - for (target = 0; target < AAC_MAX_TARGETS; target++) + for (target = 0; target < AAC_MAX_TARGETS; target++) { dev->hba_map[bus][target].devtype = 0; + dev->hba_map[bus][target].qd_limit = 0; + } } /* diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 05884e62672b..02b0279bc91b 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -74,7 +74,7 @@ enum { #define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB) #define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB) -#define AAC_MAX_LUN (8) +#define AAC_MAX_LUN (256) #define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff) #define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256) @@ -89,6 +89,7 @@ enum { #define CISS_REPORT_PHYSICAL_LUNS 0xc3 #define WRITE_HOST_WELLNESS 0xa5 +#define CISS_IDENTIFY_PHYSICAL_DEVICE 0x15 #define BMIC_IN 0x26 #define BMIC_OUT 0x27 @@ -110,6 +111,82 @@ struct aac_ciss_phys_luns_resp { */ #define AAC_MAX_HRRQ 64 +struct aac_ciss_identify_pd { + u8 scsi_bus; /* SCSI Bus number on controller */ + u8 scsi_id; /* SCSI ID on this bus */ + u16 block_size; /* sector size in bytes */ + u32 total_blocks; /* number for sectors on drive */ + u32 reserved_blocks; /* controller reserved (RIS) */ + u8 model[40]; /* Physical Drive Model */ + u8 serial_number[40]; /* Drive Serial Number */ + u8 firmware_revision[8]; /* drive firmware revision */ + u8 scsi_inquiry_bits; /* inquiry byte 7 bits */ + u8 compaq_drive_stamp; /* 0 means drive not stamped */ + u8 last_failure_reason; + + u8 flags; + u8 more_flags; + u8 scsi_lun; /* SCSI LUN for phys drive */ + u8 yet_more_flags; + u8 even_more_flags; + u32 spi_speed_rules; /* SPI Speed :Ultra disable diagnose */ + u8 phys_connector[2]; /* connector number on controller */ + u8 phys_box_on_bus; /* phys enclosure this drive resides */ + u8 phys_bay_in_box; /* phys drv bay this drive resides */ + u32 rpm; /* Drive rotational speed in rpm */ + u8 device_type; /* type of drive */ + u8 sata_version; /* only valid when drive_type is SATA */ + u64 big_total_block_count; + u64 ris_starting_lba; + u32 ris_size; + u8 wwid[20]; + u8 controller_phy_map[32]; + u16 phy_count; + u8 phy_connected_dev_type[256]; + u8 phy_to_drive_bay_num[256]; + u16 phy_to_attached_dev_index[256]; + u8 box_index; + u8 spitfire_support; + u16 extra_physical_drive_flags; + u8 negotiated_link_rate[256]; + u8 phy_to_phy_map[256]; + u8 redundant_path_present_map; + u8 redundant_path_failure_map; + u8 active_path_number; + u16 alternate_paths_phys_connector[8]; + u8 alternate_paths_phys_box_on_port[8]; + u8 multi_lun_device_lun_count; + u8 minimum_good_fw_revision[8]; + u8 unique_inquiry_bytes[20]; + u8 current_temperature_degreesC; + u8 temperature_threshold_degreesC; + u8 max_temperature_degreesC; + u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512 * 2^exp */ + u16 current_queue_depth_limit; + u8 switch_name[10]; + u16 switch_port; + u8 alternate_paths_switch_name[40]; + u8 alternate_paths_switch_port[8]; + u16 power_on_hours; /* valid only if gas gauge supported */ + u16 percent_endurance_used; /* valid only if gas gauge supported. 
*/ + u8 drive_authentication; + u8 smart_carrier_authentication; + u8 smart_carrier_app_fw_version; + u8 smart_carrier_bootloader_fw_version; + u8 SanitizeSecureEraseSupport; + u8 DriveKeyFlags; + u8 encryption_key_name[64]; + u32 misc_drive_flags; + u16 dek_index; + u16 drive_encryption_flags; + u8 sanitize_maximum_time[6]; + u8 connector_info_mode; + u8 connector_info_number[4]; + u8 long_connector_name[64]; + u8 device_unique_identifier[16]; + u8 padto_2K[17]; +} __packed; + /* * These macros convert from physical channels to virtual channels */ @@ -1032,6 +1109,7 @@ struct aac_hba_map_info { u8 devtype; /* device type */ u8 reset_state; /* 0 - no reset, 1..x - */ /* after xth TM LUN reset */ + u16 qd_limit; u8 expose; /*checks if to expose or not*/ }; @@ -2240,6 +2318,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) int aac_acquire_irq(struct aac_dev *dev); void aac_free_irq(struct aac_dev *dev); int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr); +int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target); const char *aac_driverinfo(struct Scsi_Host *); void aac_fib_vector_assign(struct aac_dev *dev); struct fib *aac_fib_alloc(struct aac_dev *dev); -- cgit v1.2.3-59-g8ed1b From a052865fe2871a3888dbb4ecf8c5dcab77a19ae8 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:27 -0800 Subject: scsi: aacraid: Added support to set QD of attached drives Added support to set qd of drives in slave_configure.This only works for HBA1000 attached drives. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/linit.c | 102 ++++++++++++++++++++++++++++++------------- 1 file changed, 71 insertions(+), 31 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 1912e7b4a796..03d935c8de93 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -401,61 +401,89 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, static int aac_slave_configure(struct scsi_device *sdev) { struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; + int chn, tid; + unsigned int depth = 0; + unsigned int set_timeout = 0; + + chn = aac_logical_to_phys(sdev_channel(sdev)); + tid = sdev_id(sdev); + if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && + aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) { + depth = aac->hba_map[chn][tid].qd_limit; + set_timeout = 1; + goto common_config; + } + + if (aac->jbod && (sdev->type == TYPE_DISK)) sdev->removable = 1; - if ((sdev->type == TYPE_DISK) && - (sdev_channel(sdev) != CONTAINER_CHANNEL) && - (!aac->jbod || sdev->inq_periph_qual) && - (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) { + + if (sdev->type == TYPE_DISK + && sdev_channel(sdev) != CONTAINER_CHANNEL + && (!aac->jbod || sdev->inq_periph_qual) + && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) { + if (expose_physicals == 0) return -ENXIO; + if (expose_physicals < 0) sdev->no_uld_attach = 1; } - if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && - (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) && - !sdev->no_uld_attach) { + + if (sdev->tagged_supported + && sdev->type == TYPE_DISK + && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) + && !sdev->no_uld_attach) { + struct scsi_device * dev; struct Scsi_Host *host = sdev->host; unsigned num_lsu = 0; unsigned num_one = 0; - unsigned depth; unsigned cid; - /* - * 
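The queue-depth policy in this hunk reduces to: native (HBA1000) devices take the firmware-reported qd_limit, other tagged disks share the host's can_queue budget across logical units, and the result is clamped to [1, 256]. A condensed sketch of that policy, as a hypothetical helper rather than the driver's actual control flow:

        static unsigned int aac_pick_depth(unsigned int can_queue,
                                           unsigned int num_one,
                                           unsigned int num_lsu,
                                           u16 qd_limit, bool is_native)
        {
                unsigned int depth;

                if (is_native)
                        depth = qd_limit;
                else
                        depth = (can_queue - num_one) / max(num_lsu, 1U);

                return clamp_t(unsigned int, depth, 1, 256);
        }
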
Firmware has an individual device recovery time typically - * of 35 seconds, give us a margin. - */ - if (sdev->request_queue->rq_timeout < (45 * HZ)) - blk_queue_rq_timeout(sdev->request_queue, 45*HZ); + set_timeout = 1; + for (cid = 0; cid < aac->maximum_num_containers; ++cid) if (aac->fsa_dev[cid].valid) ++num_lsu; + __shost_for_each_device(dev, host) { - if (dev->tagged_supported && (dev->type == TYPE_DISK) && - (!aac->raid_scsi_mode || - (sdev_channel(sdev) != 2)) && - !dev->no_uld_attach) { + if (dev->tagged_supported + && dev->type == TYPE_DISK + && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) + && !dev->no_uld_attach) { if ((sdev_channel(dev) != CONTAINER_CHANNEL) - || !aac->fsa_dev[sdev_id(dev)].valid) + || !aac->fsa_dev[sdev_id(dev)].valid) { ++num_lsu; - } else + } + } else { ++num_one; + } } + if (num_lsu == 0) ++num_lsu; - depth = (host->can_queue - num_one) / num_lsu; - if (depth > 256) - depth = 256; - else if (depth < 2) - depth = 2; - scsi_change_queue_depth(sdev, depth); - } else { - scsi_change_queue_depth(sdev, 1); - sdev->tagged_supported = 1; + depth = (host->can_queue - num_one) / num_lsu; } +common_config: + /* + * Firmware has an individual device recovery time typically + * of 35 seconds, give us a margin. + */ + if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ)) + blk_queue_rq_timeout(sdev->request_queue, 45*HZ); + + if (depth > 256) + depth = 256; + else if (depth < 1) + depth = 1; + + scsi_change_queue_depth(sdev, depth); + + sdev->tagged_supported = 1; + return 0; } @@ -470,6 +498,15 @@ static int aac_slave_configure(struct scsi_device *sdev) static int aac_change_queue_depth(struct scsi_device *sdev, int depth) { + struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); + int chn, tid, is_native_device = 0; + + chn = aac_logical_to_phys(sdev_channel(sdev)); + tid = sdev_id(sdev); + if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && + aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) + is_native_device = 1; + if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && (sdev_channel(sdev) == CONTAINER_CHANNEL)) { struct scsi_device * dev; @@ -491,9 +528,12 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth) else if (depth < 2) depth = 2; return scsi_change_queue_depth(sdev, depth); + } else if (is_native_device) { + scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit); + } else { + scsi_change_queue_depth(sdev, 1); } - - return scsi_change_queue_depth(sdev, 1); + return sdev->queue_depth; } static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) -- cgit v1.2.3-59-g8ed1b From 6223a39fe6fbbeef0877a56dc427a6351f22ef6c Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:28 -0800 Subject: scsi: aacraid: Added support for hotplug Added support for drive hotplug add and removal Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aachba.c | 13 ++-- drivers/scsi/aacraid/aacraid.h | 17 ++++- drivers/scsi/aacraid/commsup.c | 137 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 160 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 20908a19ff90..5719ac39c45a 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1595,7 +1595,7 @@ out: * Update our hba map with the information gathered from the FW */ void aac_update_hba_map(struct aac_dev *dev, - struct aac_ciss_phys_luns_resp *phys_luns) + struct aac_ciss_phys_luns_resp *phys_luns, int rescan) { /* ok and extended reporting */ u32 lun_count, nexus; @@ -1640,7 +1640,10 @@ void aac_update_hba_map(struct aac_dev *dev, dev->hba_map[bus][target].qd_limit = 32; update_devtype: - dev->hba_map[bus][target].devtype = devtype; + if (rescan == AAC_INIT) + dev->hba_map[bus][target].devtype = devtype; + else + dev->hba_map[bus][target].new_devtype = devtype; } } @@ -1652,7 +1655,7 @@ update_devtype: * Execute a CISS REPORT PHYS LUNS and process the results into * the current hba_map. */ -int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr) +int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan) { int fibsize, datasize; struct aac_ciss_phys_luns_resp *phys_luns; @@ -1712,7 +1715,7 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr) /* analyse data */ if (rcode >= 0 && phys_luns->resp_flag == 2) { /* ok and extended reporting */ - aac_update_hba_map(dev, phys_luns); + aac_update_hba_map(dev, phys_luns, rescan); } pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr); @@ -1825,7 +1828,7 @@ int aac_get_adapter_info(struct aac_dev* dev) if (!dev->sync_mode && dev->sa_firmware && dev->supplement_adapter_info.VirtDeviceBus != 0xffff) { /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ - rcode = aac_report_phys_luns(dev, fibptr); + rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT); } if (!dev->in_reset) { diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 02b0279bc91b..a8b418e1438d 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -74,7 +74,7 @@ enum { #define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB) #define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB) -#define AAC_MAX_LUN (256) +#define AAC_MAX_LUN 256 #define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff) #define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256) @@ -87,6 +87,14 @@ enum { #define AAC_MAX_TARGETS 256 #define AAC_MAX_NATIVE_SIZE 2048 +/* Thor AIF events */ +#define SA_AIF_HOTPLUG (1<<1) +#define SA_AIF_HARDWARE (1<<2) +#define SA_AIF_PDEV_CHANGE (1<<4) +#define SA_AIF_LDEV_CHANGE (1<<5) +#define SA_AIF_BPSTAT_CHANGE (1<<30) +#define SA_AIF_BPCFG_CHANGE (1<<31) + #define CISS_REPORT_PHYSICAL_LUNS 0xc3 #define WRITE_HOST_WELLNESS 0xa5 #define CISS_IDENTIFY_PHYSICAL_DEVICE 0x15 @@ -194,6 +202,7 @@ struct aac_ciss_identify_pd { #define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL) #define CONTAINER_TO_ID(cont) (cont) #define CONTAINER_TO_LUN(cont) (0) +#define ENCLOSURE_CHANNEL (3) #define PMC_DEVICE_S6 0x28b #define PMC_DEVICE_S7 0x28c @@ -1098,6 +1107,9 @@ struct fib { u32 hbacmd_size; /* cmd size for native */ }; +#define AAC_INIT 0 +#define AAC_RESCAN 1 + #define AAC_DEVTYPE_RAID_MEMBER 1 #define AAC_DEVTYPE_ARC_RAW 2 #define AAC_DEVTYPE_NATIVE_RAW 3 @@ -1107,6 +1119,7 @@ struct fib { struct aac_hba_map_info { __le32 rmw_nexus; /* nexus for native HBA devices */ u8 devtype; 
/* device type */ + u8 new_devtype; u8 reset_state; /* 0 - no reset, 1..x - */ /* after xth TM LUN reset */ u16 qd_limit; @@ -2317,7 +2330,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) int aac_acquire_irq(struct aac_dev *dev); void aac_free_irq(struct aac_dev *dev); -int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr); +int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan); int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target); const char *aac_driverinfo(struct Scsi_Host *); void aac_fib_vector_assign(struct aac_dev *dev); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index a56aa2e6f7ee..e906ae1fcb23 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1730,6 +1730,137 @@ out: return BlinkLED; } + +static void aac_resolve_luns(struct aac_dev *dev) +{ + int bus, target, channel; + struct scsi_device *sdev; + u8 devtype; + u8 new_devtype; + + for (bus = 0; bus < AAC_MAX_BUSES; bus++) { + for (target = 0; target < AAC_MAX_TARGETS; target++) { + + if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL) + continue; + + if (bus == CONTAINER_CHANNEL) + channel = CONTAINER_CHANNEL; + else + channel = aac_phys_to_logical(bus); + + devtype = dev->hba_map[bus][target].devtype; + new_devtype = dev->hba_map[bus][target].new_devtype; + + sdev = scsi_device_lookup(dev->scsi_host_ptr, channel, + target, 0); + + if (!sdev && devtype) + scsi_add_device(dev->scsi_host_ptr, channel, + target, 0); + else if (sdev && new_devtype != devtype) + scsi_remove_device(sdev); + else if (sdev && new_devtype == devtype) + scsi_rescan_device(&sdev->sdev_gendev); + + if (sdev) + scsi_device_put(sdev); + + dev->hba_map[bus][target].devtype = new_devtype; + } + } +} + +/** + * aac_handle_sa_aif Handle a message from the firmware + * @dev: Which adapter this fib is from + * @fibptr: Pointer to fibptr from adapter + * + * This routine handles a driver notify fib from the adapter and + * dispatches it to the appropriate routine for handling. 
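The handler below treats fibptr->hbacmd_size as a bit mask of Thor AIF events and acts on the first recognized bit in a fixed priority order. An equivalent table-driven sketch, illustrative only:

        static u32 sa_aif_first_event(u32 mask)
        {
                static const u32 prio[] = {
                        SA_AIF_HOTPLUG, SA_AIF_HARDWARE, SA_AIF_PDEV_CHANGE,
                        SA_AIF_LDEV_CHANGE, SA_AIF_BPSTAT_CHANGE,
                        SA_AIF_BPCFG_CHANGE,
                };
                int i;

                for (i = 0; i < ARRAY_SIZE(prio); i++)
                        if (mask & prio[i])
                                return prio[i];
                return 0;
        }
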
+ */ +static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr) +{ + int i, bus, target, container, rcode = 0; + u32 events = 0; + struct fib *fib; + struct scsi_device *sdev; + + if (fibptr->hbacmd_size & SA_AIF_HOTPLUG) + events = SA_AIF_HOTPLUG; + else if (fibptr->hbacmd_size & SA_AIF_HARDWARE) + events = SA_AIF_HARDWARE; + else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE) + events = SA_AIF_PDEV_CHANGE; + else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE) + events = SA_AIF_LDEV_CHANGE; + else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE) + events = SA_AIF_BPSTAT_CHANGE; + else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE) + events = SA_AIF_BPCFG_CHANGE; + + switch (events) { + case SA_AIF_HOTPLUG: + case SA_AIF_HARDWARE: + case SA_AIF_PDEV_CHANGE: + case SA_AIF_LDEV_CHANGE: + case SA_AIF_BPCFG_CHANGE: + + fib = aac_fib_alloc(dev); + if (!fib) { + pr_err("aac_handle_sa_aif: out of memory\n"); + return; + } + for (bus = 0; bus < AAC_MAX_BUSES; bus++) + for (target = 0; target < AAC_MAX_TARGETS; target++) + dev->hba_map[bus][target].new_devtype = 0; + + rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN); + + if (rcode != -ERESTARTSYS) + aac_fib_free(fib); + + aac_resolve_luns(dev); + + if (events == SA_AIF_LDEV_CHANGE || + events == SA_AIF_BPCFG_CHANGE) { + aac_get_containers(dev); + for (container = 0; container < + dev->maximum_num_containers; ++container) { + sdev = scsi_device_lookup(dev->scsi_host_ptr, + CONTAINER_CHANNEL, + container, 0); + if (dev->fsa_dev[container].valid && !sdev) { + scsi_add_device(dev->scsi_host_ptr, + CONTAINER_CHANNEL, + container, 0); + } else if (!dev->fsa_dev[container].valid && + sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else if (sdev) { + scsi_rescan_device(&sdev->sdev_gendev); + scsi_device_put(sdev); + } + } + } + break; + + case SA_AIF_BPSTAT_CHANGE: + /* currently do nothing */ + break; + } + + for (i = 1; i <= 10; ++i) { + events = src_readl(dev, MUnit.IDR); + if (events & (1<<23)) { + pr_warn(" AIF not cleared by firmware - %d/%d)\n", + i, 10); + ssleep(1); + } + } +} + static int get_fib_count(struct aac_dev *dev) { unsigned int num = 0; @@ -1913,6 +2044,12 @@ static void aac_process_events(struct aac_dev *dev) fib = list_entry(entry, struct fib, fiblink); hw_fib = fib->hw_fib_va; + if (dev->sa_firmware) { + /* Thor AIF */ + aac_handle_sa_aif(dev, fib); + aac_fib_adapter_complete(fib, (u16)sizeof(u32)); + continue; + } /* * We will process the FIB here or pass it to a * worker thread that is TBD. We Really can't -- cgit v1.2.3-59-g8ed1b From 423400e64d377c0d8a2459795420681177e51e74 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:29 -0800 Subject: scsi: aacraid: Include HBA direct interface Added support to send direct pasthru srb commands from management utilty to the controller. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 175 ++++++++++++++++++++++-- drivers/scsi/aacraid/commctrl.c | 294 ++++++++++++++++++++++++++++++---------- drivers/scsi/aacraid/commsup.c | 134 +++++++++++++++--- drivers/scsi/aacraid/dpcsup.c | 136 ++++++++++++------- drivers/scsi/aacraid/linit.c | 1 + drivers/scsi/aacraid/src.c | 118 ++++++++++------ 6 files changed, 669 insertions(+), 189 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index a8b418e1438d..2f584b402c6b 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -86,6 +86,7 @@ enum { #define AAC_MAX_BUSES 5 #define AAC_MAX_TARGETS 256 #define AAC_MAX_NATIVE_SIZE 2048 +#define FW_ERROR_BUFFER_SIZE 512 /* Thor AIF events */ #define SA_AIF_HOTPLUG (1<<1) @@ -95,6 +96,141 @@ enum { #define SA_AIF_BPSTAT_CHANGE (1<<30) #define SA_AIF_BPCFG_CHANGE (1<<31) +#define HBA_MAX_SG_EMBEDDED 28 +#define HBA_MAX_SG_SEPARATE 90 +#define HBA_SENSE_DATA_LEN_MAX 32 +#define HBA_REQUEST_TAG_ERROR_FLAG 0x00000002 +#define HBA_SGL_FLAGS_EXT 0x80000000UL + +struct aac_hba_sgl { + u32 addr_lo; /* Lower 32-bits of SGL element address */ + u32 addr_hi; /* Upper 32-bits of SGL element address */ + u32 len; /* Length of SGL element in bytes */ + u32 flags; /* SGL element flags */ +}; + +enum { + HBA_IU_TYPE_SCSI_CMD_REQ = 0x40, + HBA_IU_TYPE_SCSI_TM_REQ = 0x41, + HBA_IU_TYPE_SATA_REQ = 0x42, + HBA_IU_TYPE_RESP = 0x60, + HBA_IU_TYPE_COALESCED_RESP = 0x61, + HBA_IU_TYPE_INT_COALESCING_CFG_REQ = 0x70 +}; + +enum { + HBA_CMD_BYTE1_DATA_DIR_IN = 0x1, + HBA_CMD_BYTE1_DATA_DIR_OUT = 0x2, + HBA_CMD_BYTE1_DATA_TYPE_DDR = 0x4, + HBA_CMD_BYTE1_CRYPTO_ENABLE = 0x8 +}; + +enum { + HBA_CMD_BYTE1_BITOFF_DATA_DIR_IN = 0x0, + HBA_CMD_BYTE1_BITOFF_DATA_DIR_OUT, + HBA_CMD_BYTE1_BITOFF_DATA_TYPE_DDR, + HBA_CMD_BYTE1_BITOFF_CRYPTO_ENABLE +}; + +enum { + HBA_RESP_DATAPRES_NO_DATA = 0x0, + HBA_RESP_DATAPRES_RESPONSE_DATA, + HBA_RESP_DATAPRES_SENSE_DATA +}; + +enum { + HBA_RESP_SVCRES_TASK_COMPLETE = 0x0, + HBA_RESP_SVCRES_FAILURE, + HBA_RESP_SVCRES_TMF_COMPLETE, + HBA_RESP_SVCRES_TMF_SUCCEEDED, + HBA_RESP_SVCRES_TMF_REJECTED, + HBA_RESP_SVCRES_TMF_LUN_INVALID +}; + +enum { + HBA_RESP_STAT_IO_ERROR = 0x1, + HBA_RESP_STAT_IO_ABORTED, + HBA_RESP_STAT_NO_PATH_TO_DEVICE, + HBA_RESP_STAT_INVALID_DEVICE, + HBA_RESP_STAT_HBAMODE_DISABLED = 0xE, + HBA_RESP_STAT_UNDERRUN = 0x51, + HBA_RESP_STAT_OVERRUN = 0x75 +}; + +struct aac_hba_cmd_req { + u8 iu_type; /* HBA information unit type */ + /* + * byte1: + * [1:0] DIR - 0=No data, 0x1 = IN, 0x2 = OUT + * [2] TYPE - 0=PCI, 1=DDR + * [3] CRYPTO_ENABLE - 0=Crypto disabled, 1=Crypto enabled + */ + u8 byte1; + u8 reply_qid; /* Host reply queue to post response to */ + u8 reserved1; + __le32 it_nexus; /* Device handle for the request */ + __le32 request_id; /* Sender context */ + /* Lower 32-bits of tweak value for crypto enabled IOs */ + __le32 tweak_value_lo; + u8 cdb[16]; /* SCSI CDB of the command */ + u8 lun[8]; /* SCSI LUN of the command */ + + /* Total data length in bytes to be read/written (if any) */ + __le32 data_length; + + /* [2:0] Task Attribute, [6:3] Command Priority */ + u8 attr_prio; + + /* Number of SGL elements embedded in the HBA req */ + u8 emb_data_desc_count; + + __le16 dek_index; /* DEK index for crypto enabled IOs */ + + /* Lower 32-bits of reserved error data target location on the host */ + __le32 error_ptr_lo; + + /* Upper 32-bits of reserved error data target location on the host */ + __le32 error_ptr_hi; + + /* Length of reserved error data area on 
the host in bytes */ + __le32 error_length; + + /* Upper 32-bits of tweak value for crypto enabled IOs */ + __le32 tweak_value_hi; + + struct aac_hba_sgl sge[HBA_MAX_SG_SEPARATE+2]; /* SG list space */ + + /* + * structure must not exceed + * AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE + */ +}; + +struct aac_hba_resp { + u8 iu_type; /* HBA information unit type */ + u8 reserved1[3]; + __le32 request_identifier; /* sender context */ + __le32 reserved2; + u8 service_response; /* SCSI service response */ + u8 status; /* SCSI status */ + u8 datapres; /* [1:0] - data present, [7:2] - reserved */ + u8 sense_response_data_len; /* Sense/response data length */ + __le32 residual_count; /* Residual data length in bytes */ + /* Sense/response data */ + u8 sense_response_buf[HBA_SENSE_DATA_LEN_MAX]; +}; + +struct aac_native_hba { + union { + struct aac_hba_cmd_req cmd; + u8 cmd_bytes[AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE]; + } cmd; + union { + struct aac_hba_resp err; + u8 resp_bytes[FW_ERROR_BUFFER_SIZE]; + } resp; +}; + #define CISS_REPORT_PHYSICAL_LUNS 0xc3 #define WRITE_HOST_WELLNESS 0xa5 #define CISS_IDENTIFY_PHYSICAL_DEVICE 0x15 @@ -468,10 +604,10 @@ enum aac_queue_types { /* transport FIB header (PMC) */ struct aac_fib_xporthdr { - u64 HostAddress; /* FIB host address w/o xport header */ - u32 Size; /* FIB size excluding xport header */ - u32 Handle; /* driver handle to reference the FIB */ - u64 Reserved[2]; + __le64 HostAddress; /* FIB host address w/o xport header */ + __le32 Size; /* FIB size excluding xport header */ + __le32 Handle; /* driver handle to reference the FIB */ + __le64 Reserved[2]; }; #define ALIGN32 32 @@ -978,17 +1114,20 @@ struct src_mu_registers { __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ __le32 IQ_H; /* c4h | Inbound Queue (High address) */ __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */ + __le32 reserved5; /* cch | Reserved */ + __le32 IQN_L; /* d0h | Inbound (native cmd) low */ + __le32 IQN_H; /* d4h | Inbound (native cmd) high */ }; struct src_registers { struct src_mu_registers MUnit; /* 00h - cbh */ union { struct { - __le32 reserved1[130789]; /* cch - 7fc5fh */ + __le32 reserved1[130786]; /* d8h - 7fc5fh */ struct src_inbound IndexRegs; /* 7fc60h */ } tupelo; struct { - __le32 reserved1[973]; /* cch - fffh */ + __le32 reserved1[970]; /* d8h - fffh */ struct src_inbound IndexRegs; /* 1000h */ } denali; } u; @@ -1102,8 +1241,10 @@ struct fib { struct list_head fiblink; void *data; u32 vector_no; - struct hw_fib *hw_fib_va; /* Actual shared object */ - dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ + struct hw_fib *hw_fib_va; /* also used for native */ + dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ + dma_addr_t hw_sgl_pa; /* extra sgl for native */ + dma_addr_t hw_error_pa; /* error buffer for native */ u32 hbacmd_size; /* cmd size for native */ }; @@ -1285,6 +1426,7 @@ struct aac_bus_info_response { #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) #define AAC_OPT_EXTENDED cpu_to_le32(1<<23) +#define AAC_OPT_NATIVE_HBA cpu_to_le32(1<<25) #define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) #define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29) #define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30) @@ -1322,8 +1464,8 @@ struct aac_dev /* * Map for 128 fib objects (64k) */ - dma_addr_t hw_fib_pa; - struct hw_fib *hw_fib_va; + dma_addr_t hw_fib_pa; /* also used for native cmd */ + struct hw_fib *hw_fib_va; /* also used for native cmd */ struct hw_fib *aif_base_va; /* * Fib Headers @@ -1498,6 +1640,8 @@ struct 
aac_dev #define FIB_CONTEXT_FLAG (0x00000002) #define FIB_CONTEXT_FLAG_WAIT (0x00000004) #define FIB_CONTEXT_FLAG_FASTRESP (0x00000008) +#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010) +#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020) /* * Define the command values @@ -2157,6 +2301,8 @@ struct aac_common #ifdef DBG u32 FibsSent; u32 FibRecved; + u32 NativeSent; + u32 NativeRecved; u32 NoResponseSent; u32 NoResponseRecved; u32 AsyncSent; @@ -2168,7 +2314,6 @@ struct aac_common extern struct aac_common aac_config; - /* * The following macro is used when sending and receiving FIBs. It is * only used for debugging. @@ -2295,9 +2440,10 @@ extern struct aac_common aac_config; /* PMC NEW COMM: Request the event data */ #define AifReqEvent 200 +#define AifRawDeviceRemove 203 /* RAW device deleted */ +#define AifNativeDeviceAdd 204 /* native HBA device added */ +#define AifNativeDeviceRemove 205 /* native HBA device removed */ -/* RAW device deleted */ -#define AifRawDeviceRemove 203 /* * Adapter Initiated FIB command structures. Start with the adapter @@ -2342,9 +2488,12 @@ void aac_fib_free(struct fib * context); void aac_fib_init(struct fib * context); void aac_printf(struct aac_dev *dev, u32 val); int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); +int aac_hba_send(u8 command, struct fib *context, + fib_callback callback, void *ctxt); int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); int aac_fib_complete(struct fib * context); +void aac_hba_callback(void *context, struct fib *fibptr); #define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data) struct aac_dev *aac_init_adapter(struct aac_dev *dev); void aac_src_access_devreg(struct aac_dev *dev, int mode); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index e1daff230c7d..f98618c94bf0 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -477,20 +477,24 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) struct fib* srbfib; int status; struct aac_srb *srbcmd = NULL; + struct aac_hba_cmd_req *hbacmd = NULL; struct user_aac_srb *user_srbcmd = NULL; struct user_aac_srb __user *user_srb = arg; struct aac_srb_reply __user *user_reply; - struct aac_srb_reply* reply; + u32 chn; u32 fibsize = 0; u32 flags = 0; s32 rcode = 0; u32 data_dir; - void __user *sg_user[32]; - void *sg_list[32]; + void __user *sg_user[HBA_MAX_SG_EMBEDDED]; + void *sg_list[HBA_MAX_SG_EMBEDDED]; + u32 sg_count[HBA_MAX_SG_EMBEDDED]; u32 sg_indx = 0; u32 byte_count = 0; u32 actual_fibsize64, actual_fibsize = 0; int i; + int is_native_device; + u64 address; if (dev->in_reset) { @@ -507,11 +511,6 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) if (!(srbfib = aac_fib_alloc(dev))) { return -ENOMEM; } - aac_fib_init(srbfib); - /* raw_srb FIB is not FastResponseCapable */ - srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); - - srbcmd = (struct aac_srb*) fib_data(srbfib); memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){ @@ -538,21 +537,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - user_reply = arg+fibsize; - flags = user_srbcmd->flags; /* from user in cpu order */ - // Fix up srb for endian and force some values - - 
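The pass-through path below hands a user SRB to the native HBA interface only when the addressed bus/target is currently mapped as AAC_DEVTYPE_NATIVE_RAW; anything else falls back to the legacy ScsiPortCommand path. The routing test reduces to this sketch (hypothetical helper, not taken from the driver):

        static bool srb_targets_native_device(struct aac_dev *dev,
                                              u32 channel, u32 id)
        {
                u32 bus = aac_logical_to_phys(channel);

                return bus < AAC_MAX_BUSES && id < AAC_MAX_TARGETS &&
                       dev->hba_map[bus][id].devtype == AAC_DEVTYPE_NATIVE_RAW;
        }
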
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this - srbcmd->channel = cpu_to_le32(user_srbcmd->channel); - srbcmd->id = cpu_to_le32(user_srbcmd->id); - srbcmd->lun = cpu_to_le32(user_srbcmd->lun); - srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); - srbcmd->flags = cpu_to_le32(flags); - srbcmd->retry_limit = 0; // Obsolete parameter - srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); - memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); - switch (flags & (SRB_DataIn | SRB_DataOut)) { case SRB_DataOut: data_dir = DMA_TO_DEVICE; @@ -568,7 +553,12 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) } if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", - le32_to_cpu(srbcmd->sg.count))); + user_srbcmd->sg.count)); + rcode = -EINVAL; + goto cleanup; + } + if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { + dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n")); rcode = -EINVAL; goto cleanup; } @@ -588,13 +578,136 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { - dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n")); - rcode = -EINVAL; - goto cleanup; + + chn = aac_logical_to_phys(user_srbcmd->channel); + if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS && + dev->hba_map[chn][user_srbcmd->id].devtype == + AAC_DEVTYPE_NATIVE_RAW) { + is_native_device = 1; + hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va; + memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */ + + /* iu_type is a parameter of aac_hba_send */ + switch (data_dir) { + case DMA_TO_DEVICE: + hbacmd->byte1 = 2; + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + hbacmd->byte1 = 1; + break; + case DMA_NONE: + default: + break; + } + hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun); + hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus; + + /* + * we fill in reply_qid later in aac_src_deliver_message + * we fill in iu_type, request_id later in aac_hba_send + * we fill in emb_data_desc_count, data_length later + * in sg list build + */ + + memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb)); + + address = (u64)srbfib->hw_error_pa; + hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); + hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + hbacmd->emb_data_desc_count = + cpu_to_le32(user_srbcmd->sg.count); + srbfib->hbacmd_size = 64 + + user_srbcmd->sg.count * sizeof(struct aac_hba_sgl); + + } else { + is_native_device = 0; + aac_fib_init(srbfib); + + /* raw_srb FIB is not FastResponseCapable */ + srbfib->hw_fib_va->header.XferState &= + ~cpu_to_le32(FastResponseCapable); + + srbcmd = (struct aac_srb *) fib_data(srbfib); + + // Fix up srb for endian and force some values + + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this + srbcmd->channel = cpu_to_le32(user_srbcmd->channel); + srbcmd->id = cpu_to_le32(user_srbcmd->id); + srbcmd->lun = cpu_to_le32(user_srbcmd->lun); + srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); + srbcmd->flags = cpu_to_le32(flags); + srbcmd->retry_limit = 0; // Obsolete parameter + srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); + memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); } + byte_count = 0; - if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { + if (is_native_device) { + struct 
user_sgmap *usg32 = &user_srbcmd->sg; + struct user_sgmap64 *usg64 = + (struct user_sgmap64 *)&user_srbcmd->sg; + + for (i = 0; i < usg32->count; i++) { + void *p; + u64 addr; + + sg_count[i] = (actual_fibsize64 == fibsize) ? + usg64->sg[i].count : usg32->sg[i].count; + if (sg_count[i] > + (dev->scsi_host_ptr->max_sectors << 9)) { + pr_err("aacraid: upsg->sg[%d].count=%u>%u\n", + i, sg_count[i], + dev->scsi_host_ptr->max_sectors << 9); + rcode = -EINVAL; + goto cleanup; + } + + p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); + if (!p) { + rcode = -ENOMEM; + goto cleanup; + } + + if (actual_fibsize64 == fibsize) { + addr = (u64)usg64->sg[i].addr[0]; + addr += ((u64)usg64->sg[i].addr[1]) << 32; + } else { + addr = (u64)usg32->sg[i].addr; + } + + sg_user[i] = (void __user *)(uintptr_t)addr; + sg_list[i] = p; // save so we can clean up later + sg_indx = i; + + if (flags & SRB_DataOut) { + if (copy_from_user(p, sg_user[i], + sg_count[i])) { + rcode = -EFAULT; + goto cleanup; + } + } + addr = pci_map_single(dev->pdev, p, sg_count[i], + data_dir); + hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32)); + hbacmd->sge[i].addr_lo = cpu_to_le32( + (u32)(addr & 0xffffffff)); + hbacmd->sge[i].len = cpu_to_le32(sg_count[i]); + hbacmd->sge[i].flags = 0; + byte_count += sg_count[i]; + } + + if (usg32->count > 0) /* embedded sglist */ + hbacmd->sge[usg32->count-1].flags = + cpu_to_le32(0x40000000); + hbacmd->data_length = cpu_to_le32(byte_count); + + status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib, + NULL, NULL); + + } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; @@ -606,7 +719,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) for (i = 0; i < upsg->count; i++) { u64 addr; void* p; - if (upsg->sg[i].count > + + sg_count[i] = upsg->sg[i].count; + if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : @@ -615,10 +730,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } /* Does this really need to be GFP_DMA? */ - p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); + p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", - upsg->sg[i].count,i,upsg->count)); + sg_count[i], i, upsg->count)); rcode = -ENOMEM; goto cleanup; } @@ -629,18 +744,20 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) sg_indx = i; if (flags & SRB_DataOut) { - if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ + if (copy_from_user(p, sg_user[i], + sg_count[i])){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } - addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); + addr = pci_map_single(dev->pdev, p, + sg_count[i], data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); - byte_count += upsg->sg[i].count; - psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); + byte_count += sg_count[i]; + psg->sg[i].count = cpu_to_le32(sg_count[i]); } } else { struct user_sgmap* usg; @@ -657,7 +774,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) for (i = 0; i < usg->count; i++) { u64 addr; void* p; - if (usg->sg[i].count > + + sg_count[i] = usg->sg[i].count; + if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? 
(dev->scsi_host_ptr->max_sectors << 9) : @@ -667,10 +786,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } /* Does this really need to be GFP_DMA? */ - p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); + p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", - usg->sg[i].count,i,usg->count)); + sg_count[i], i, usg->count)); kfree(usg); rcode = -ENOMEM; goto cleanup; @@ -680,19 +799,21 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) sg_indx = i; if (flags & SRB_DataOut) { - if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ + if (copy_from_user(p, sg_user[i], + sg_count[i])) { kfree (usg); dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } - addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); + addr = pci_map_single(dev->pdev, p, + sg_count[i], data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); - byte_count += usg->sg[i].count; - psg->sg[i].count = cpu_to_le32(usg->sg[i].count); + byte_count += sg_count[i]; + psg->sg[i].count = cpu_to_le32(sg_count[i]); } kfree (usg); } @@ -711,7 +832,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) for (i = 0; i < upsg->count; i++) { uintptr_t addr; void* p; - if (usg->sg[i].count > + + sg_count[i] = usg->sg[i].count; + if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : @@ -720,10 +843,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } /* Does this really need to be GFP_DMA? */ - p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); - if(!p) { + p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA); + if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", - usg->sg[i].count,i,usg->count)); + sg_count[i], i, usg->count)); rcode = -ENOMEM; goto cleanup; } @@ -734,7 +857,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) sg_indx = i; if (flags & SRB_DataOut) { - if(copy_from_user(p,sg_user[i],usg->sg[i].count)){ + if (copy_from_user(p, sg_user[i], + sg_count[i])){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; @@ -744,13 +868,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); byte_count += usg->sg[i].count; - psg->sg[i].count = cpu_to_le32(usg->sg[i].count); + psg->sg[i].count = cpu_to_le32(sg_count[i]); } } else { for (i = 0; i < upsg->count; i++) { dma_addr_t addr; void* p; - if (upsg->sg[i].count > + + sg_count[i] = upsg->sg[i].count; + if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? 
(dev->scsi_host_ptr->max_sectors << 9) : @@ -758,10 +884,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - p = kmalloc(upsg->sg[i].count, GFP_KERNEL); + p = kmalloc(sg_count[i], GFP_KERNEL); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", - upsg->sg[i].count, i, upsg->count)); + sg_count[i], i, upsg->count)); rcode = -ENOMEM; goto cleanup; } @@ -770,19 +896,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) sg_indx = i; if (flags & SRB_DataOut) { - if(copy_from_user(p, sg_user[i], - upsg->sg[i].count)) { + if (copy_from_user(p, sg_user[i], + sg_count[i])) { dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, - upsg->sg[i].count, data_dir); + sg_count[i], data_dir); psg->sg[i].addr = cpu_to_le32(addr); - byte_count += upsg->sg[i].count; - psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); + byte_count += sg_count[i]; + psg->sg[i].count = cpu_to_le32(sg_count[i]); } } srbcmd->count = cpu_to_le32(byte_count); @@ -792,12 +918,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) psg->count = 0; status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } + if (status == -ERESTARTSYS) { rcode = -ERESTARTSYS; goto cleanup; } - if (status != 0){ + if (status != 0) { dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); rcode = -ENXIO; goto cleanup; @@ -805,11 +932,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) if (flags & SRB_DataIn) { for(i = 0 ; i <= sg_indx; i++){ - byte_count = le32_to_cpu( - (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) - ? 
((struct sgmap64*)&srbcmd->sg)->sg[i].count - : srbcmd->sg.sg[i].count); - if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ + if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) { dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); rcode = -EFAULT; goto cleanup; @@ -818,19 +941,50 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) } } - reply = (struct aac_srb_reply *) fib_data(srbfib); - if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){ - dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n")); - rcode = -EFAULT; - goto cleanup; + user_reply = arg + fibsize; + if (is_native_device) { + struct aac_hba_resp *err = + &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err; + struct aac_srb_reply reply; + + reply.status = ST_OK; + if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) { + /* fast response */ + reply.srb_status = SRB_STATUS_SUCCESS; + reply.scsi_status = 0; + reply.data_xfer_length = byte_count; + } else { + reply.srb_status = err->service_response; + reply.scsi_status = err->status; + reply.data_xfer_length = byte_count - + le32_to_cpu(err->residual_count); + reply.sense_data_size = err->sense_response_data_len; + memcpy(reply.sense_data, err->sense_response_buf, + AAC_SENSE_BUFFERSIZE); + } + if (copy_to_user(user_reply, &reply, + sizeof(struct aac_srb_reply))) { + dprintk((KERN_DEBUG"aacraid: Copy to user failed\n")); + rcode = -EFAULT; + goto cleanup; + } + } else { + struct aac_srb_reply *reply; + + reply = (struct aac_srb_reply *) fib_data(srbfib); + if (copy_to_user(user_reply, reply, + sizeof(struct aac_srb_reply))) { + dprintk((KERN_DEBUG"aacraid: Copy to user failed\n")); + rcode = -EFAULT; + goto cleanup; + } } cleanup: kfree(user_srbcmd); - for(i=0; i <= sg_indx; i++){ - kfree(sg_list[i]); - } if (rcode != -ERESTARTSYS) { + for (i = 0; i <= sg_indx; i++) + kfree(sg_list[i]); aac_fib_complete(srbfib); aac_fib_free(srbfib); } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e906ae1fcb23..09da354e9956 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -65,6 +65,11 @@ static int fib_map_alloc(struct aac_dev *dev) dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; else dev->max_cmd_size = dev->max_fib_size; + if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) { + dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; + } else { + dev->max_cmd_size = dev->max_fib_size; + } dprintk((KERN_INFO "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", @@ -153,7 +158,7 @@ int aac_fib_setup(struct aac_dev * dev) (hw_fib_pa - dev->hw_fib_pa)); dev->hw_fib_pa = hw_fib_pa; memset(dev->hw_fib_va, 0, - (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) * + (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); /* add Xport header */ @@ -179,8 +184,18 @@ int aac_fib_setup(struct aac_dev * dev) sema_init(&fibptr->event_wait, 0); spin_lock_init(&fibptr->event_lock); hw_fib->header.XferState = cpu_to_le32(0xffffffff); - hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); + hw_fib->header.SenderSize = + cpu_to_le16(dev->max_fib_size); /* ?? 
max_cmd_size */ fibptr->hw_fib_pa = hw_fib_pa; + fibptr->hw_sgl_pa = hw_fib_pa + + offsetof(struct aac_hba_cmd_req, sge[2]); + /* + * one element is for the ptr to the separate sg list, + * second element for 32 byte alignment + */ + fibptr->hw_error_pa = hw_fib_pa + + offsetof(struct aac_native_hba, resp.resp_bytes[0]); + hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)); hw_fib_pa = hw_fib_pa + @@ -282,7 +297,8 @@ void aac_fib_free(struct fib *fibptr) spin_lock_irqsave(&fibptr->dev->fib_lock, flags); if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) aac_config.fib_timeouts++; - if (fibptr->hw_fib_va->header.XferState != 0) { + if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && + fibptr->hw_fib_va->header.XferState != 0) { printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", (void*)fibptr, le32_to_cpu(fibptr->hw_fib_va->header.XferState)); @@ -510,8 +526,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, * Map the fib into 32bits by using the fib number */ - hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); - hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1; + hw_fib->header.SenderFibAddress = + cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); + + /* use the same shifted value for handle to be compatible + * with the new native hba command handle + */ + hw_fib->header.Handle = + cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); + /* * Set FIB state to indicate where it came from and if we want a * response from the adapter. Also load the command from the @@ -679,6 +702,82 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, return 0; } +int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, + void *callback_data) +{ + struct aac_dev *dev = fibptr->dev; + int wait; + unsigned long flags = 0; + unsigned long mflags = 0; + + fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); + if (callback) { + wait = 0; + fibptr->callback = callback; + fibptr->callback_data = callback_data; + } else + wait = 1; + + + if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { + struct aac_hba_cmd_req *hbacmd = + (struct aac_hba_cmd_req *)fibptr->hw_fib_va; + + hbacmd->iu_type = command; + /* bit1 of request_id must be 0 */ + hbacmd->request_id = + cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); + } else + return -EINVAL; + + + if (wait) { + spin_lock_irqsave(&dev->manage_lock, mflags); + if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { + spin_unlock_irqrestore(&dev->manage_lock, mflags); + return -EBUSY; + } + dev->management_fib_count++; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + spin_lock_irqsave(&fibptr->event_lock, flags); + } + + if (aac_adapter_deliver(fibptr) != 0) { + if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, mflags); + } + return -EBUSY; + } + FIB_COUNTER_INCREMENT(aac_config.NativeSent); + + if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + /* Only set for first known interruptable command */ + if (down_interruptible(&fibptr->event_wait)) { + fibptr->done = 2; + up(&fibptr->event_wait); + } + spin_lock_irqsave(&fibptr->event_lock, flags); + if ((fibptr->done == 0) || (fibptr->done == 2)) { + fibptr->done = 2; /* Tell interrupt we aborted */ + spin_unlock_irqrestore(&fibptr->event_lock, flags); + return -ERESTARTSYS; + } + 
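The legacy FIB Handle and the native request_id now share one encoding: the fib's index within dev->fibs is shifted left by two and offset by one, so the value is never zero and bit 1 stays clear (the comment above notes bit 1 of request_id must be 0). A sketch of the encode/decode pair, as hypothetical helpers:

        static u32 aac_fib_index_to_handle(u32 index)
        {
                return (index << 2) + 1;
        }

        static u32 aac_handle_to_fib_index(u32 handle)
        {
                return (handle - 1) >> 2;
        }
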
spin_unlock_irqrestore(&fibptr->event_lock, flags); + WARN_ON(fibptr->done == 0); + + if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) + return -ETIMEDOUT; + + return 0; + } + + return -EINPROGRESS; +} + /** * aac_consumer_get - get the top of the queue * @dev: Adapter @@ -837,11 +936,17 @@ int aac_fib_complete(struct fib *fibptr) { struct hw_fib * hw_fib = fibptr->hw_fib_va; + if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { + fib_dealloc(fibptr); + return 0; + } + /* - * Check for a fib which has already been completed + * Check for a fib which has already been completed or with a + * status wait timeout */ - if (hw_fib->header.XferState == 0) + if (hw_fib->header.XferState == 0 || fibptr->done == 2) return 0; /* * If we plan to do anything check the structure type first. @@ -994,20 +1099,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) lun = (container >> 16) & 0xFF; container = (u32)-1; channel = aac_phys_to_logical(channel); - device_config_needed = - (((__le32 *)aifcmd->data)[0] == - cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD; - - if (device_config_needed == ADD) { - device = scsi_device_lookup( - dev->scsi_host_ptr, - channel, id, lun); - if (device) { - scsi_remove_device(device); - scsi_device_put(device); - } - } + device_config_needed = DELETE; break; + /* * Morph or Expand complete */ diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index 8077dbad984e..c426ea2864ae 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -346,7 +346,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif, (fib_callback)aac_aif_callback, fibctx); } else { struct fib *fib = &dev->fibs[index]; - struct hw_fib * hwfib = fib->hw_fib_va; + int start_callback = 0; /* * Remove this fib from the Outstanding I/O queue. @@ -364,60 +364,104 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif, return 0; } - if (isFastResponse) { - /* - * Doctor the fib - */ - *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); - hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); - fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; - } - FIB_COUNTER_INCREMENT(aac_config.FibRecved); - if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) - { - __le32 *pstatus = (__le32 *)hwfib->data; - if (*pstatus & cpu_to_le32(0xffff0000)) - *pstatus = cpu_to_le32(ST_OK); - } - if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) - { - if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) - FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved); - else - FIB_COUNTER_INCREMENT(aac_config.AsyncRecved); - /* - * NOTE: we cannot touch the fib after this - * call, because it may have been deallocated. 
- */ - if (likely(fib->callback && fib->callback_data)) { - fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; - fib->callback(fib->callback_data, fib); - } else - dev_info(&dev->pdev->dev, - "Invalid callback_fib[%d] (*%p)(%p)\n", - index, fib->callback, fib->callback_data); + if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { + + if (isFastResponse) + fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; + + if (fib->callback) { + start_callback = 1; + } else { + unsigned long flagv; + int complete = 0; + + dprintk((KERN_INFO "event_wait up\n")); + spin_lock_irqsave(&fib->event_lock, flagv); + if (fib->done == 2) { + fib->done = 1; + complete = 1; + } else { + fib->done = 1; + up(&fib->event_wait); + } + spin_unlock_irqrestore(&fib->event_lock, flagv); + + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, + mflags); + + FIB_COUNTER_INCREMENT(aac_config.NativeRecved); + if (complete) + aac_fib_complete(fib); + } } else { - unsigned long flagv; - dprintk((KERN_INFO "event_wait up\n")); - spin_lock_irqsave(&fib->event_lock, flagv); - if (!fib->done) { - fib->done = 1; - up(&fib->event_wait); + struct hw_fib *hwfib = fib->hw_fib_va; + + if (isFastResponse) { + /* Doctor the fib */ + *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); + hwfib->header.XferState |= + cpu_to_le32(AdapterProcessed); + fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; } - spin_unlock_irqrestore(&fib->event_lock, flagv); - spin_lock_irqsave(&dev->manage_lock, mflags); - dev->management_fib_count--; - spin_unlock_irqrestore(&dev->manage_lock, mflags); + if (hwfib->header.Command == + cpu_to_le16(NuFileSystem)) { + __le32 *pstatus = (__le32 *)hwfib->data; - FIB_COUNTER_INCREMENT(aac_config.NormalRecved); - if (fib->done == 2) { + if (*pstatus & cpu_to_le32(0xffff0000)) + *pstatus = cpu_to_le32(ST_OK); + } + if (hwfib->header.XferState & + cpu_to_le32(NoResponseExpected | Async)) { + if (hwfib->header.XferState & cpu_to_le32( + NoResponseExpected)) + FIB_COUNTER_INCREMENT( + aac_config.NoResponseRecved); + else + FIB_COUNTER_INCREMENT( + aac_config.AsyncRecved); + start_callback = 1; + } else { + unsigned long flagv; + int complete = 0; + + dprintk((KERN_INFO "event_wait up\n")); spin_lock_irqsave(&fib->event_lock, flagv); - fib->done = 0; + if (fib->done == 2) { + fib->done = 1; + complete = 1; + } else { + fib->done = 1; + up(&fib->event_wait); + } spin_unlock_irqrestore(&fib->event_lock, flagv); + + spin_lock_irqsave(&dev->manage_lock, mflags); + dev->management_fib_count--; + spin_unlock_irqrestore(&dev->manage_lock, + mflags); + + FIB_COUNTER_INCREMENT(aac_config.NormalRecved); + if (complete) + aac_fib_complete(fib); + } + } + + + if (start_callback) { + /* + * NOTE: we cannot touch the fib after this + * call, because it may have been deallocated. 
+ */ + if (likely(fib->callback && fib->callback_data)) { + fib->callback(fib->callback_data, fib); + } else { aac_fib_complete(fib); + aac_fib_free(fib); } } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 03d935c8de93..68a5d2d1e655 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1110,6 +1110,7 @@ static void __aac_shutdown(struct aac_dev * aac) { int i; + aac->adapter_shutdown = 1; aac_send_shutdown(aac); if (aac->aif_thread) { diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 5508893ca8b7..946a010e46df 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -457,6 +457,11 @@ static int aac_src_check_health(struct aac_dev *dev) return 0; } +static inline u32 aac_get_vector(struct aac_dev *dev) +{ + return atomic_inc_return(&dev->msix_counter)%dev->max_msix; +} + /** * aac_src_deliver_message * @fib: fib to issue @@ -470,67 +475,100 @@ static int aac_src_deliver_message(struct fib *fib) u32 fibsize; dma_addr_t address; struct aac_fib_xporthdr *pFibX; + int native_hba; #if !defined(writeq) unsigned long flags; #endif - u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size); u16 vector_no; atomic_inc(&q->numpending); - if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest && - dev->max_msix > 1) { - vector_no = fib->vector_no; - fib->hw_fib_va->header.Handle += (vector_no << 16); + native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0; + + + if (dev->msi_enabled && dev->max_msix > 1 && + (native_hba || fib->hw_fib_va->header.Command != AifRequest)) { + + if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) + && dev->sa_firmware) + vector_no = aac_get_vector(dev); + else + vector_no = fib->vector_no; + + if (native_hba) { + ((struct aac_hba_cmd_req *)fib->hw_fib_va)->reply_qid + = vector_no; + ((struct aac_hba_cmd_req *)fib->hw_fib_va)->request_id + += (vector_no << 16); + } else { + fib->hw_fib_va->header.Handle += (vector_no << 16); + } } else { vector_no = 0; } atomic_inc(&dev->rrq_outstanding[vector_no]); - if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || - (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) { - /* Calculate the amount to the fibsize bits */ - fibsize = (hdr_size + 127) / 128 - 1; - if (fibsize > (ALIGN32 - 1)) - return -EMSGSIZE; - /* New FIB header, 32-bit */ + if (native_hba) { address = fib->hw_fib_pa; - fib->hw_fib_va->header.StructType = FIB_MAGIC2; - fib->hw_fib_va->header.SenderFibAddress = (u32)address; - fib->hw_fib_va->header.u.TimeStamp = 0; - BUG_ON(upper_32_bits(address) != 0L); + fibsize = (fib->hbacmd_size + 127) / 128 - 1; + if (fibsize > 31) + fibsize = 31; address |= fibsize; +#if defined(writeq) + src_writeq(dev, MUnit.IQN_L, (u64)address); +#else + spin_lock_irqsave(&fib->dev->iq_lock, flags); + src_writel(dev, MUnit.IQN_H, + upper_32_bits(address) & 0xffffffff); + src_writel(dev, MUnit.IQN_L, address & 0xffffffff); + spin_unlock_irqrestore(&fib->dev->iq_lock, flags); +#endif } else { - /* Calculate the amount to the fibsize bits */ - fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1; - if (fibsize > (ALIGN32 - 1)) - return -EMSGSIZE; - - /* Fill XPORT header */ - pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr); - pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle); - pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa); - pFibX->Size = cpu_to_le32(hdr_size); - - /* - * The xport header has been 32-byte aligned for us so that fibsize - * can be masked out of this address by 
hardware. -- BenC - */ - address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr); - if (address & (ALIGN32 - 1)) - return -EINVAL; + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || + dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { + /* Calculate the amount to the fibsize bits */ + fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size) + + 127) / 128 - 1; + /* New FIB header, 32-bit */ + address = fib->hw_fib_pa; + fib->hw_fib_va->header.StructType = FIB_MAGIC2; + fib->hw_fib_va->header.SenderFibAddress = + cpu_to_le32((u32)address); + fib->hw_fib_va->header.u.TimeStamp = 0; + WARN_ON(((u32)(((address) >> 16) >> 16)) != 0L); + } else { + /* Calculate the amount to the fibsize bits */ + fibsize = (sizeof(struct aac_fib_xporthdr) + + le16_to_cpu(fib->hw_fib_va->header.Size) + + 127) / 128 - 1; + /* Fill XPORT header */ + pFibX = (struct aac_fib_xporthdr *) + ((unsigned char *)fib->hw_fib_va - + sizeof(struct aac_fib_xporthdr)); + pFibX->Handle = fib->hw_fib_va->header.Handle; + pFibX->HostAddress = + cpu_to_le64((u64)fib->hw_fib_pa); + pFibX->Size = cpu_to_le32( + le16_to_cpu(fib->hw_fib_va->header.Size)); + address = fib->hw_fib_pa - + (u64)sizeof(struct aac_fib_xporthdr); + } + if (fibsize > 31) + fibsize = 31; address |= fibsize; - } + #if defined(writeq) - src_writeq(dev, MUnit.IQ_L, (u64)address); + src_writeq(dev, MUnit.IQ_L, (u64)address); #else - spin_lock_irqsave(&fib->dev->iq_lock, flags); - src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff); - src_writel(dev, MUnit.IQ_L, address & 0xffffffff); - spin_unlock_irqrestore(&fib->dev->iq_lock, flags); + spin_lock_irqsave(&fib->dev->iq_lock, flags); + src_writel(dev, MUnit.IQ_H, + upper_32_bits(address) & 0xffffffff); + src_writel(dev, MUnit.IQ_L, address & 0xffffffff); + spin_unlock_irqrestore(&fib->dev->iq_lock, flags); #endif + } return 0; } -- cgit v1.2.3-59-g8ed1b From ab5d129f93c91fff74100bcd898d605da461f4a6 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:30 -0800 Subject: scsi: aacraid: Add task management functionality Added support to send out task management commands. [mkp: removed // fibsize... ] Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aachba.c | 366 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 362 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 5719ac39c45a..9e9144860fa5 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -217,9 +217,13 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg); static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg); static long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max); +static long aac_build_sghba(struct scsi_cmnd *scsicmd, + struct aac_hba_cmd_req *hbacmd, + int sg_max, u64 sg_address); static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new); static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); +static int aac_send_hba_fib(struct scsi_cmnd *scsicmd); #ifdef AAC_DETAILED_STATUS_INFO static char *aac_get_status_string(u32 status); #endif @@ -1446,6 +1450,52 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd return srbcmd; } +static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib, + struct scsi_cmnd *cmd) +{ + struct aac_hba_cmd_req *hbacmd; + struct aac_dev *dev; + int bus, target; + u64 address; + + dev = (struct aac_dev *)cmd->device->host->hostdata; + + hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va; + memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */ + /* iu_type is a parameter of aac_hba_send */ + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + hbacmd->byte1 = 2; + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + hbacmd->byte1 = 1; + break; + case DMA_NONE: + default: + break; + } + hbacmd->lun[1] = cpu_to_le32(cmd->device->lun); + + bus = aac_logical_to_phys(scmd_channel(cmd)); + target = scmd_id(cmd); + hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus; + + /* we fill in reply_qid later in aac_src_deliver_message */ + /* we fill in iu_type, request_id later in aac_hba_send */ + /* we fill in emb_data_desc_count later in aac_build_sghba */ + + memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len); + hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd)); + + address = (u64)fib->hw_error_pa; + hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); + hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + + return hbacmd; +} + static void aac_srb_callback(void *context, struct fib * fibptr); static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd) @@ -1516,6 +1566,30 @@ static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd) return aac_scsi_32(fib, cmd); } +static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd) +{ + struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd); + struct aac_dev *dev; + long ret; + + dev = (struct aac_dev *)cmd->device->host->hostdata; + + ret = aac_build_sghba(cmd, hbacmd, + dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa); + if (ret < 0) + return ret; + + /* + * Now send the HBA command to the adapter + */ + fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) * + sizeof(struct aac_hba_sgl); + + return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib, + (fib_callback) aac_hba_callback, + (void *) cmd); +} + int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target) { struct fib *fibptr; @@ -1527,6 +1601,7 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target) u16 fibsize, 
datasize; int rcode = -ENOMEM; + fibptr = aac_fib_alloc(dev); if (!fibptr) goto out; @@ -1997,6 +2072,11 @@ int aac_get_adapter_info(struct aac_dev* dev) (dev->scsi_host_ptr->sg_tablesize * 8) + 112; } } + if (!dev->sync_mode && dev->sa_firmware && + dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE) + dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize = + HBA_MAX_SG_SEPARATE; + /* FIB should be freed only after getting the response from the F/W */ if (rcode != -ERESTARTSYS) { aac_fib_complete(fibptr); @@ -2553,7 +2633,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) int aac_scsi_cmd(struct scsi_cmnd * scsicmd) { - u32 cid; + u32 cid, bus; struct Scsi_Host *host = scsicmd->device->host; struct aac_dev *dev = (struct aac_dev *)host->hostdata; struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; @@ -2599,8 +2679,24 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) } } } else { /* check for physical non-dasd devices */ - if (dev->nondasd_support || expose_physicals || - dev->jbod) { + bus = aac_logical_to_phys(scmd_channel(scsicmd)); + if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && + (dev->hba_map[bus][cid].expose + == AAC_HIDE_DISK)){ + if (scsicmd->cmnd[0] == INQUIRY) { + scsicmd->result = DID_NO_CONNECT << 16; + goto scsi_done_ret; + } + } + + if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && + dev->hba_map[bus][cid].devtype + == AAC_DEVTYPE_NATIVE_RAW) { + if (dev->in_reset) + return -1; + return aac_send_hba_fib(scsicmd); + } else if (dev->nondasd_support || expose_physicals || + dev->jbod) { if (dev->in_reset) return -1; return aac_send_srb_fib(scsicmd); @@ -3357,9 +3453,152 @@ static void aac_srb_callback(void *context, struct fib * fibptr) scsicmd->scsi_done(scsicmd); } +static void hba_resp_task_complete(struct aac_dev *dev, + struct scsi_cmnd *scsicmd, + struct aac_hba_resp *err) { + + scsicmd->result = err->status; + /* set residual count */ + scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count)); + + switch (err->status) { + case SAM_STAT_GOOD: + scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + case SAM_STAT_CHECK_CONDITION: + { + int len; + + len = min_t(u8, err->sense_response_data_len, + SCSI_SENSE_BUFFERSIZE); + if (len) + memcpy(scsicmd->sense_buffer, + err->sense_response_buf, len); + scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + } + case SAM_STAT_BUSY: + scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; + break; + case SAM_STAT_TASK_ABORTED: + scsicmd->result |= DID_ABORT << 16 | ABORT << 8; + break; + case SAM_STAT_RESERVATION_CONFLICT: + case SAM_STAT_TASK_SET_FULL: + default: + scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8; + break; + } +} + +static void hba_resp_task_failure(struct aac_dev *dev, + struct scsi_cmnd *scsicmd, + struct aac_hba_resp *err) +{ + switch (err->status) { + case HBA_RESP_STAT_HBAMODE_DISABLED: + { + u32 bus, cid; + + bus = aac_logical_to_phys(scmd_channel(scsicmd)); + cid = scmd_id(scsicmd); + if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { + dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW; + dev->hba_map[bus][cid].rmw_nexus = 0xffffffff; + } + scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; + break; + } + case HBA_RESP_STAT_IO_ERROR: + case HBA_RESP_STAT_NO_PATH_TO_DEVICE: + scsicmd->result = DID_OK << 16 | + COMMAND_COMPLETE << 8 | SAM_STAT_BUSY; + break; + case HBA_RESP_STAT_IO_ABORTED: + scsicmd->result = DID_ABORT << 16 | ABORT << 8; + break; + case HBA_RESP_STAT_INVALID_DEVICE: + scsicmd->result = DID_NO_CONNECT << 16 
| COMMAND_COMPLETE << 8; + break; + case HBA_RESP_STAT_UNDERRUN: + /* UNDERRUN is OK */ + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + case HBA_RESP_STAT_OVERRUN: + default: + scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; + break; + } +} + +/** + * + * aac_hba_callback + * @context: the context set in the fib - here it is scsi cmd + * @fibptr: pointer to the fib + * + * Handles the completion of a native HBA scsi command + * + */ +void aac_hba_callback(void *context, struct fib *fibptr) +{ + struct aac_dev *dev; + struct scsi_cmnd *scsicmd; + + struct aac_hba_resp *err = + &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err; + + scsicmd = (struct scsi_cmnd *) context; + + if (!aac_valid_context(scsicmd, fibptr)) + return; + + WARN_ON(fibptr == NULL); + dev = fibptr->dev; + + if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)) + scsi_dma_unmap(scsicmd); + + if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { + /* fast response */ + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + goto out; + } + + switch (err->service_response) { + case HBA_RESP_SVCRES_TASK_COMPLETE: + hba_resp_task_complete(dev, scsicmd, err); + break; + case HBA_RESP_SVCRES_FAILURE: + hba_resp_task_failure(dev, scsicmd, err); + break; + case HBA_RESP_SVCRES_TMF_REJECTED: + scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8; + break; + case HBA_RESP_SVCRES_TMF_LUN_INVALID: + scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; + break; + case HBA_RESP_SVCRES_TMF_COMPLETE: + case HBA_RESP_SVCRES_TMF_SUCCEEDED: + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + default: + scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; + break; + } + +out: + aac_fib_complete(fibptr); + + if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) + scsicmd->SCp.sent_command = 1; + else + scsicmd->scsi_done(scsicmd); +} + /** * - * aac_send_scb_fib + * aac_send_srb_fib * @scsicmd: the scsi command block * * This routine will form a FIB and fill in the aac_srb from the @@ -3402,6 +3641,54 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) return -1; } +/** + * + * aac_send_hba_fib + * @scsicmd: the scsi command block + * + * This routine will form a FIB and fill in the aac_hba_cmd_req from the + * scsicmd passed in. 
+ */ +static int aac_send_hba_fib(struct scsi_cmnd *scsicmd) +{ + struct fib *cmd_fibcontext; + struct aac_dev *dev; + int status; + + dev = shost_priv(scsicmd->device->host); + if (scmd_id(scsicmd) >= dev->maximum_num_physicals || + scsicmd->device->lun > AAC_MAX_LUN - 1) { + scsicmd->result = DID_NO_CONNECT << 16; + scsicmd->scsi_done(scsicmd); + return 0; + } + + /* + * Allocate and initialize a Fib then setup a BlockWrite command + */ + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + if (!cmd_fibcontext) + return -1; + + status = aac_adapter_hba(cmd_fibcontext, scsicmd); + + /* + * Check that the command queued to the controller + */ + if (status == -EINPROGRESS) { + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; + return 0; + } + + pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n", + status); + aac_fib_complete(cmd_fibcontext); + aac_fib_free(cmd_fibcontext); + + return -1; +} + + static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg) { struct aac_dev *dev; @@ -3654,6 +3941,77 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int return 0; } +static long aac_build_sghba(struct scsi_cmnd *scsicmd, + struct aac_hba_cmd_req *hbacmd, + int sg_max, + u64 sg_address) +{ + unsigned long byte_count = 0; + int nseg; + struct scatterlist *sg; + int i; + u32 cur_size; + struct aac_hba_sgl *sge; + + + + nseg = scsi_dma_map(scsicmd); + if (nseg <= 0) { + byte_count = nseg; + goto out; + } + + if (nseg > HBA_MAX_SG_EMBEDDED) + sge = &hbacmd->sge[2]; + else + sge = &hbacmd->sge[0]; + + scsi_for_each_sg(scsicmd, sg, nseg, i) { + int count = sg_dma_len(sg); + u64 addr = sg_dma_address(sg); + + WARN_ON(i >= sg_max); + sge->addr_hi = cpu_to_le32((u32)(addr>>32)); + sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff)); + cur_size = cpu_to_le32(count); + sge->len = cur_size; + sge->flags = 0; + byte_count += count; + sge++; + } + + sge--; + /* hba wants the size to be exact */ + if (byte_count > scsi_bufflen(scsicmd)) { + u32 temp; + + temp = le32_to_cpu(sge->len) - byte_count + - scsi_bufflen(scsicmd); + sge->len = cpu_to_le32(temp); + byte_count = scsi_bufflen(scsicmd); + } + + if (nseg <= HBA_MAX_SG_EMBEDDED) { + hbacmd->emb_data_desc_count = cpu_to_le32(nseg); + sge->flags = cpu_to_le32(0x40000000); + } else { + /* not embedded */ + hbacmd->sge[0].flags = cpu_to_le32(0x80000000); + hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1); + hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32); + hbacmd->sge[0].addr_lo = + cpu_to_le32((u32)(sg_address & 0xffffffff)); + } + + /* Check for command underflow */ + if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { + pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n", + byte_count, scsicmd->underflow); + } +out: + return byte_count; +} + #ifdef AAC_DETAILED_STATUS_INFO struct aac_srb_status_info { -- cgit v1.2.3-59-g8ed1b From 954b2b5ac76d6bde80974c0779d36f054e036aa5 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:31 -0800 Subject: scsi: aacraid: Added support to abort cmd and reset lun Added task management command support to abort any timed out commands in case of a eh_abort call and to reset lun's in case of eh_reset call. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 48 ++++++ drivers/scsi/aacraid/linit.c | 353 +++++++++++++++++++++++++++++++---------- drivers/scsi/aacraid/src.c | 33 +++- 3 files changed, 346 insertions(+), 88 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 2f584b402c6b..055ab19ab9fc 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -206,6 +206,53 @@ struct aac_hba_cmd_req { */ }; +/* Task Management Functions (TMF) */ +#define HBA_TMF_ABORT_TASK 0x01 +#define HBA_TMF_LUN_RESET 0x08 + +struct aac_hba_tm_req { + u8 iu_type; /* HBA information unit type */ + u8 reply_qid; /* Host reply queue to post response to */ + u8 tmf; /* Task management function */ + u8 reserved1; + + __le32 it_nexus; /* Device handle for the command */ + + u8 lun[8]; /* SCSI LUN */ + + /* Used to hold sender context. */ + __le32 request_id; /* Sender context */ + __le32 reserved2; + + /* Request identifier of managed task */ + __le32 managed_request_id; /* Sender context being managed */ + __le32 reserved3; + + /* Lower 32-bits of reserved error data target location on the host */ + __le32 error_ptr_lo; + /* Upper 32-bits of reserved error data target location on the host */ + __le32 error_ptr_hi; + /* Length of reserved error data area on the host in bytes */ + __le32 error_length; +}; + +struct aac_hba_reset_req { + u8 iu_type; /* HBA information unit type */ + /* 0 - reset specified device, 1 - reset all devices */ + u8 reset_type; + u8 reply_qid; /* Host reply queue to post response to */ + u8 reserved1; + + __le32 it_nexus; /* Device handle for the command */ + __le32 request_id; /* Sender context */ + /* Lower 32-bits of reserved error data target location on the host */ + __le32 error_ptr_lo; + /* Upper 32-bits of reserved error data target location on the host */ + __le32 error_ptr_hi; + /* Length of reserved error data area on the host in bytes */ + __le32 error_length; +}; + struct aac_hba_resp { u8 iu_type; /* HBA information unit type */ u8 reserved1[3]; @@ -223,6 +270,7 @@ struct aac_hba_resp { struct aac_native_hba { union { struct aac_hba_cmd_req cmd; + struct aac_hba_tm_req tmr; u8 cmd_bytes[AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE]; } cmd; union { diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 68a5d2d1e655..c09b71c7dd8e 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -574,46 +574,136 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; struct aac_dev * aac = (struct aac_dev *)host->hostdata; - int count; + int count, found; + u32 bus, cid; int ret = FAILED; - printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n", - AAC_DRIVERNAME, - host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun); - switch (cmd->cmnd[0]) { - case SERVICE_ACTION_IN_16: - if (!(aac->raw_io_interface) || - !(aac->raw_io_64) || - ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) - break; - case INQUIRY: - case READ_CAPACITY: - /* Mark associated FIB to not complete, eh handler does this */ + bus = aac_logical_to_phys(scmd_channel(cmd)); + cid = scmd_id(cmd); + if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { + struct fib *fib; + struct aac_hba_tm_req *tmf; + int status; + u64 address; + __le32 managed_request_id; + + pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n", + AAC_DRIVERNAME, + host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun); + + found = 0; for (count = 0; 
count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { - struct fib * fib = &aac->fibs[count]; - if (fib->hw_fib_va->header.XferState && - (fib->flags & FIB_CONTEXT_FLAG) && - (fib->callback_data == cmd)) { - fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; - cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; + fib = &aac->fibs[count]; + if (*(u8 *)fib->hw_fib_va != 0 && + (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && + (fib->callback_data == cmd)) { + found = 1; + managed_request_id = ((struct aac_hba_cmd_req *) + fib->hw_fib_va)->request_id; + break; + } + } + if (!found) + return ret; + + /* start a HBA_TMF_ABORT_TASK TMF request */ + fib = aac_fib_alloc(aac); + if (!fib) + return ret; + + tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; + memset(tmf, 0, sizeof(*tmf)); + tmf->tmf = HBA_TMF_ABORT_TASK; + tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus; + tmf->lun[1] = cmd->device->lun; + + address = (u64)fib->hw_error_pa; + tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); + tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + + fib->hbacmd_size = sizeof(*tmf); + cmd->SCp.sent_command = 0; + + status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, + (fib_callback) aac_hba_callback, + (void *) cmd); + + /* Wait up to 2 minutes for completion */ + for (count = 0; count < 120; ++count) { + if (cmd->SCp.sent_command) { ret = SUCCESS; + break; } + msleep(1000); } - break; - case TEST_UNIT_READY: - /* Mark associated FIB to not complete, eh handler does this */ - for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { - struct scsi_cmnd * command; - struct fib * fib = &aac->fibs[count]; - if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) && - (fib->flags & FIB_CONTEXT_FLAG) && - ((command = fib->callback_data)) && - (command->device == cmd->device)) { - fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; - command->SCp.phase = AAC_OWNER_ERROR_HANDLER; - if (command == cmd) + + if (ret != SUCCESS) + pr_err("%s: Host adapter abort request timed out\n", + AAC_DRIVERNAME); + } else { + pr_err( + "%s: Host adapter abort request.\n" + "%s: Outstanding commands on (%d,%d,%d,%d):\n", + AAC_DRIVERNAME, AAC_DRIVERNAME, + host->host_no, sdev_channel(dev), sdev_id(dev), + (int)dev->lun); + switch (cmd->cmnd[0]) { + case SERVICE_ACTION_IN_16: + if (!(aac->raw_io_interface) || + !(aac->raw_io_64) || + ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) + break; + case INQUIRY: + case READ_CAPACITY: + /* + * Mark associated FIB to not complete, + * eh handler does this + */ + for (count = 0; + count < (host->can_queue + AAC_NUM_MGT_FIB); + ++count) { + struct fib *fib = &aac->fibs[count]; + + if (fib->hw_fib_va->header.XferState && + (fib->flags & FIB_CONTEXT_FLAG) && + (fib->callback_data == cmd)) { + fib->flags |= + FIB_CONTEXT_FLAG_TIMED_OUT; + cmd->SCp.phase = + AAC_OWNER_ERROR_HANDLER; ret = SUCCESS; + } } + break; + case TEST_UNIT_READY: + /* + * Mark associated FIB to not complete, + * eh handler does this + */ + for (count = 0; + count < (host->can_queue + AAC_NUM_MGT_FIB); + ++count) { + struct scsi_cmnd *command; + struct fib *fib = &aac->fibs[count]; + + command = fib->callback_data; + + if ((fib->hw_fib_va->header.XferState & + cpu_to_le32 + (Async | NoResponseExpected)) && + (fib->flags & FIB_CONTEXT_FLAG) && + ((command)) && + (command->device == cmd->device)) { + fib->flags |= + FIB_CONTEXT_FLAG_TIMED_OUT; + command->SCp.phase = + AAC_OWNER_ERROR_HANDLER; + if (command == cmd) + ret = SUCCESS; + } + } + 
break; } } return ret; @@ -628,70 +718,165 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) { struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; - struct scsi_cmnd * command; - int count; struct aac_dev * aac = (struct aac_dev *)host->hostdata; - unsigned long flags; - - /* Mark the associated FIB to not complete, eh handler does this */ - for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { - struct fib * fib = &aac->fibs[count]; - if (fib->hw_fib_va->header.XferState && - (fib->flags & FIB_CONTEXT_FLAG) && - (fib->callback_data == cmd)) { - fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; - cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; + int count; + u32 bus, cid; + int ret = FAILED; + + bus = aac_logical_to_phys(scmd_channel(cmd)); + cid = scmd_id(cmd); + if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && + aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { + struct fib *fib; + int status; + u64 address; + u8 command; + + pr_err("%s: Host adapter reset request. SCSI hang ?\n", + AAC_DRIVERNAME); + + fib = aac_fib_alloc(aac); + if (!fib) + return ret; + + + if (aac->hba_map[bus][cid].reset_state == 0) { + struct aac_hba_tm_req *tmf; + + /* start a HBA_TMF_LUN_RESET TMF request */ + tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; + memset(tmf, 0, sizeof(*tmf)); + tmf->tmf = HBA_TMF_LUN_RESET; + tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus; + tmf->lun[1] = cmd->device->lun; + + address = (u64)fib->hw_error_pa; + tmf->error_ptr_hi = cpu_to_le32 + ((u32)(address >> 32)); + tmf->error_ptr_lo = cpu_to_le32 + ((u32)(address & 0xffffffff)); + tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + fib->hbacmd_size = sizeof(*tmf); + + command = HBA_IU_TYPE_SCSI_TM_REQ; + aac->hba_map[bus][cid].reset_state++; + } else if (aac->hba_map[bus][cid].reset_state >= 1) { + struct aac_hba_reset_req *rst; + + /* already tried, start a hard reset now */ + rst = (struct aac_hba_reset_req *)fib->hw_fib_va; + memset(rst, 0, sizeof(*rst)); + /* reset_type is already zero... */ + rst->it_nexus = aac->hba_map[bus][cid].rmw_nexus; + + address = (u64)fib->hw_error_pa; + rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); + rst->error_ptr_lo = cpu_to_le32 + ((u32)(address & 0xffffffff)); + rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); + fib->hbacmd_size = sizeof(*rst); + + command = HBA_IU_TYPE_SATA_REQ; + aac->hba_map[bus][cid].reset_state = 0; } - } - printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", - AAC_DRIVERNAME); + cmd->SCp.sent_command = 0; - if ((count = aac_check_health(aac))) - return count; - /* - * Wait for all commands to complete to this specific - * target (block maximum 60 seconds). - */ - for (count = 60; count; --count) { - int active = aac->in_reset; + status = aac_hba_send(command, fib, + (fib_callback) aac_hba_callback, + (void *) cmd); - if (active == 0) - __shost_for_each_device(dev, host) { - spin_lock_irqsave(&dev->list_lock, flags); - list_for_each_entry(command, &dev->cmd_list, list) { - if ((command != cmd) && - (command->SCp.phase == AAC_OWNER_FIRMWARE)) { - active++; - break; - } - } - spin_unlock_irqrestore(&dev->list_lock, flags); - if (active) + /* Wait up to 2 minutes for completion */ + for (count = 0; count < 120; ++count) { + if (cmd->SCp.sent_command) { + ret = SUCCESS; break; + } + msleep(1000); + } + + if (ret != SUCCESS) + pr_err("%s: Host adapter reset request timed out\n", + AAC_DRIVERNAME); + } else { + struct scsi_cmnd *command; + unsigned long flags; + + /* Mark the assoc. 
FIB to not complete, eh handler does this */ + for (count = 0; + count < (host->can_queue + AAC_NUM_MGT_FIB); + ++count) { + struct fib *fib = &aac->fibs[count]; + + if (fib->hw_fib_va->header.XferState && + (fib->flags & FIB_CONTEXT_FLAG) && + (fib->callback_data == cmd)) { + fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; + cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; + } + } + + pr_err("%s: Host adapter reset request. SCSI hang ?\n", + AAC_DRIVERNAME); + + count = aac_check_health(aac); + if (count) + return count; + /* + * Wait for all commands to complete to this specific + * target (block maximum 60 seconds). + */ + for (count = 60; count; --count) { + int active = aac->in_reset; + + if (active == 0) + __shost_for_each_device(dev, host) { + spin_lock_irqsave(&dev->list_lock, flags); + list_for_each_entry(command, &dev->cmd_list, + list) { + if ((command != cmd) && + (command->SCp.phase == + AAC_OWNER_FIRMWARE)) { + active++; + break; + } + } + spin_unlock_irqrestore(&dev->list_lock, flags); + if (active) + break; + } + /* + * We can exit If all the commands are complete + */ + if (active == 0) + return SUCCESS; + ssleep(1); } + pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME); + /* - * We can exit If all the commands are complete + * This adapter needs a blind reset, only do so for + * Adapters that support a register, instead of a commanded, + * reset. */ - if (active == 0) - return SUCCESS; - ssleep(1); + if (((aac->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_MU_RESET) || + (aac->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_DOORBELL_RESET)) && + aac_check_reset && + ((aac_check_reset != 1) || + !(aac->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_IGNORE_RESET))) { + /* Bypass wait for command quiesce */ + aac_reset_adapter(aac, 2); + } + ret = SUCCESS; } - printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME); /* - * This adapter needs a blind reset, only do so for Adapters that - * support a register, instead of a commanded, reset. 
+ * Cause an immediate retry of the command with a ten second delay + * after successful tur */ - if (((aac->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_MU_RESET) || - (aac->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_DOORBELL_RESET)) && - aac_check_reset && - ((aac_check_reset != 1) || - !(aac->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_IGNORE_RESET))) - aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ - return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ + return ret; } /** diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 946a010e46df..1dd62a4aad2c 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -497,10 +497,35 @@ static int aac_src_deliver_message(struct fib *fib) vector_no = fib->vector_no; if (native_hba) { - ((struct aac_hba_cmd_req *)fib->hw_fib_va)->reply_qid - = vector_no; - ((struct aac_hba_cmd_req *)fib->hw_fib_va)->request_id - += (vector_no << 16); + if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) { + struct aac_hba_tm_req *tm_req; + + tm_req = (struct aac_hba_tm_req *) + fib->hw_fib_va; + if (tm_req->iu_type == + HBA_IU_TYPE_SCSI_TM_REQ) { + ((struct aac_hba_tm_req *) + fib->hw_fib_va)->reply_qid + = vector_no; + ((struct aac_hba_tm_req *) + fib->hw_fib_va)->request_id + += (vector_no << 16); + } else { + ((struct aac_hba_reset_req *) + fib->hw_fib_va)->reply_qid + = vector_no; + ((struct aac_hba_reset_req *) + fib->hw_fib_va)->request_id + += (vector_no << 16); + } + } else { + ((struct aac_hba_cmd_req *) + fib->hw_fib_va)->reply_qid + = vector_no; + ((struct aac_hba_cmd_req *) + fib->hw_fib_va)->request_id + += (vector_no << 16); + } } else { fib->hw_fib_va->header.Handle += (vector_no << 16); } -- cgit v1.2.3-59-g8ed1b From 999b3ffc0f3b12bb9eeafabaa88176bb7acb84a1 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:32 -0800 Subject: scsi: aacraid: VPD 83 type3 support This patch adds support to retrieve the unique identifier data (VPD page 83 type3) for Logical drives created on SmartIOC 2000 products. In addition added a sysfs device structure to expose the id information. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aachba.c | 180 ++++++++++++++++++++++++++--------------- drivers/scsi/aacraid/aacraid.h | 2 + drivers/scsi/aacraid/linit.c | 31 +++++++ 3 files changed, 150 insertions(+), 63 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 9e9144860fa5..4c04fd84dd81 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -167,46 +167,56 @@ struct inquiry_data { }; /* Added for VPD 0x83 */ -typedef struct { - u8 CodeSet:4; /* VPD_CODE_SET */ - u8 Reserved:4; - u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */ - u8 Reserved2:4; - u8 Reserved3; - u8 IdentifierLength; - u8 VendId[8]; - u8 ProductId[16]; - u8 SerialNumber[8]; /* SN in ASCII */ - -} TVPD_ID_Descriptor_Type_1; +struct tvpd_id_descriptor_type_1 { + u8 codeset:4; /* VPD_CODE_SET */ + u8 reserved:4; + u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */ + u8 reserved2:4; + u8 reserved3; + u8 identifierlength; + u8 venid[8]; + u8 productid[16]; + u8 serialnumber[8]; /* SN in ASCII */ -typedef struct { - u8 CodeSet:4; /* VPD_CODE_SET */ - u8 Reserved:4; - u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */ - u8 Reserved2:4; - u8 Reserved3; - u8 IdentifierLength; - struct TEU64Id { +}; + +struct tvpd_id_descriptor_type_2 { + u8 codeset:4; /* VPD_CODE_SET */ + u8 reserved:4; + u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */ + u8 reserved2:4; + u8 reserved3; + u8 identifierlength; + struct teu64id { u32 Serial; /* The serial number supposed to be 40 bits, * bit we only support 32, so make the last byte zero. */ - u8 Reserved; - u8 VendId[3]; - } EU64Id; + u8 reserved; + u8 venid[3]; + } eu64id; -} TVPD_ID_Descriptor_Type_2; +}; -typedef struct { +struct tvpd_id_descriptor_type_3 { + u8 codeset : 4; /* VPD_CODE_SET */ + u8 reserved : 4; + u8 identifiertype : 4; /* VPD_IDENTIFIER_TYPE */ + u8 reserved2 : 4; + u8 reserved3; + u8 identifierlength; + u8 Identifier[16]; +}; + +struct tvpd_page83 { u8 DeviceType:5; u8 DeviceTypeQualifier:3; u8 PageCode; - u8 Reserved; + u8 reserved; u8 PageLength; - TVPD_ID_Descriptor_Type_1 IdDescriptorType1; - TVPD_ID_Descriptor_Type_2 IdDescriptorType2; - -} TVPD_Page83; + struct tvpd_id_descriptor_type_1 type1; + struct tvpd_id_descriptor_type_2 type2; + struct tvpd_id_descriptor_type_3 type3; +}; /* * M O D U L E G L O B A L S @@ -613,6 +623,7 @@ static void _aac_probe_container2(void * context, struct fib * fibptr) struct fsa_dev_info *fsa_dev_ptr; int (*callback)(struct scsi_cmnd *); struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context; + int i; if (!aac_valid_context(scsicmd, fibptr)) @@ -635,6 +646,10 @@ static void _aac_probe_container2(void * context, struct fib * fibptr) fsa_dev_ptr->block_size = le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size); } + for (i = 0; i < 16; i++) + fsa_dev_ptr->identifier[i] = + dresp->mnt[0].fileinfo.bdevinfo + .identifier[i]; fsa_dev_ptr->valid = 1; /* sense_key holds the current state of the spin-up */ if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY)) @@ -929,6 +944,28 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) inqstrcpy ("V1.0", str->prl); } +static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data, + struct aac_dev *dev, struct scsi_cmnd *scsicmd) +{ + int container; + + vpdpage83data->type3.codeset = 1; + vpdpage83data->type3.identifiertype = 3; + vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3) + - 4; + + for (container = 0; container < dev->maximum_num_containers; + container++) { + + if (scmd_id(scsicmd) == container) { + 
memcpy(vpdpage83data->type3.Identifier, + dev->fsa_dev[container].identifier, + 16); + break; + } + } +} + static void get_container_serial_callback(void *context, struct fib * fibptr) { struct aac_get_serial_resp * get_serial_reply; @@ -946,39 +983,47 @@ static void get_container_serial_callback(void *context, struct fib * fibptr) /*Check to see if it's for VPD 0x83 or 0x80 */ if (scsicmd->cmnd[2] == 0x83) { /* vpd page 0x83 - Device Identification Page */ + struct aac_dev *dev; int i; - TVPD_Page83 VPDPage83Data; + struct tvpd_page83 vpdpage83data; - memset(((u8 *)&VPDPage83Data), 0, - sizeof(VPDPage83Data)); + dev = (struct aac_dev *)scsicmd->device->host->hostdata; + + memset(((u8 *)&vpdpage83data), 0, + sizeof(vpdpage83data)); /* DIRECT_ACCESS_DEVIC */ - VPDPage83Data.DeviceType = 0; + vpdpage83data.DeviceType = 0; /* DEVICE_CONNECTED */ - VPDPage83Data.DeviceTypeQualifier = 0; + vpdpage83data.DeviceTypeQualifier = 0; /* VPD_DEVICE_IDENTIFIERS */ - VPDPage83Data.PageCode = 0x83; - VPDPage83Data.Reserved = 0; - VPDPage83Data.PageLength = - sizeof(VPDPage83Data.IdDescriptorType1) + - sizeof(VPDPage83Data.IdDescriptorType2); + vpdpage83data.PageCode = 0x83; + vpdpage83data.reserved = 0; + vpdpage83data.PageLength = + sizeof(vpdpage83data.type1) + + sizeof(vpdpage83data.type2); + + /* VPD 83 Type 3 is not supported for ARC */ + if (dev->sa_firmware) + vpdpage83data.PageLength += + sizeof(vpdpage83data.type3); /* T10 Vendor Identifier Field Format */ - /* VpdCodeSetAscii */ - VPDPage83Data.IdDescriptorType1.CodeSet = 2; + /* VpdcodesetAscii */ + vpdpage83data.type1.codeset = 2; /* VpdIdentifierTypeVendorId */ - VPDPage83Data.IdDescriptorType1.IdentifierType = 1; - VPDPage83Data.IdDescriptorType1.IdentifierLength = - sizeof(VPDPage83Data.IdDescriptorType1) - 4; + vpdpage83data.type1.identifiertype = 1; + vpdpage83data.type1.identifierlength = + sizeof(vpdpage83data.type1) - 4; /* "ADAPTEC " for adaptec */ - memcpy(VPDPage83Data.IdDescriptorType1.VendId, + memcpy(vpdpage83data.type1.venid, "ADAPTEC ", - sizeof(VPDPage83Data.IdDescriptorType1.VendId)); - memcpy(VPDPage83Data.IdDescriptorType1.ProductId, + sizeof(vpdpage83data.type1.venid)); + memcpy(vpdpage83data.type1.productid, "ARRAY ", sizeof( - VPDPage83Data.IdDescriptorType1.ProductId)); + vpdpage83data.type1.productid)); /* Convert to ascii based serial number. * The LSB is the the end. 
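For reference, the designation descriptors assembled in this callback follow the SPC VPD page 0x83 layout: a four-byte page header, then one descriptor per identifier, each starting with a four-byte header (code set, designator type, reserved, length) ahead of the identifier bytes; the sa_firmware-only type 3 descriptor added here carries the 16-byte container identifier copied out of fsa_dev. A minimal host-side sketch of walking such a page (illustrative only, not driver code; it assumes the single-byte PAGE LENGTH used by these structures):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void walk_vpd83(const uint8_t *page, size_t buf_len)
{
	size_t page_len = 4 + page[3];	/* header plus PAGE LENGTH (descriptors only) */
	size_t off = 4;			/* first descriptor follows the page header */

	if (page_len > buf_len)
		page_len = buf_len;

	while (off + 4 <= page_len) {
		uint8_t code_set = page[off] & 0x0f;	 /* 1 = binary, 2 = ASCII */
		uint8_t type     = page[off + 1] & 0x0f; /* 1 = T10 vendor ID, 2 = EUI-64, 3 = the type added above */
		uint8_t dlen     = page[off + 3];	 /* identifier length in bytes */

		printf("designator type %u, code set %u, %u bytes\n",
		       type, code_set, dlen);
		off += 4 + dlen;			 /* advance to the next descriptor */
	}
}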
@@ -987,32 +1032,41 @@ static void get_container_serial_callback(void *context, struct fib * fibptr) u8 temp = (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF); if (temp > 0x9) { - VPDPage83Data.IdDescriptorType1.SerialNumber[i] = + vpdpage83data.type1.serialnumber[i] = 'A' + (temp - 0xA); } else { - VPDPage83Data.IdDescriptorType1.SerialNumber[i] = + vpdpage83data.type1.serialnumber[i] = '0' + temp; } } /* VpdCodeSetBinary */ - VPDPage83Data.IdDescriptorType2.CodeSet = 1; - /* VpdIdentifierTypeEUI64 */ - VPDPage83Data.IdDescriptorType2.IdentifierType = 2; - VPDPage83Data.IdDescriptorType2.IdentifierLength = - sizeof(VPDPage83Data.IdDescriptorType2) - 4; + vpdpage83data.type2.codeset = 1; + /* VpdidentifiertypeEUI64 */ + vpdpage83data.type2.identifiertype = 2; + vpdpage83data.type2.identifierlength = + sizeof(vpdpage83data.type2) - 4; - VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0; - VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0; - VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0; + vpdpage83data.type2.eu64id.venid[0] = 0xD0; + vpdpage83data.type2.eu64id.venid[1] = 0; + vpdpage83data.type2.eu64id.venid[2] = 0; - VPDPage83Data.IdDescriptorType2.EU64Id.Serial = + vpdpage83data.type2.eu64id.Serial = get_serial_reply->uid; - VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0; + vpdpage83data.type2.eu64id.reserved = 0; + + /* + * VpdIdentifierTypeFCPHName + * VPD 0x83 Type 3 not supported for ARC + */ + if (dev->sa_firmware) { + build_vpd83_type3(&vpdpage83data, + dev, scsicmd); + } /* Move the inquiry data to the response buffer. */ - scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data, - sizeof(VPDPage83Data)); + scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data, + sizeof(vpdpage83data)); } else { /* It must be for VPD 0x80 */ char sp[13]; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 055ab19ab9fc..10984a4abee6 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2106,6 +2106,8 @@ struct aac_fsinfo { struct aac_blockdevinfo { __le32 block_size; + __le32 logical_phys_map; + u8 identifier[16]; }; union aac_contentinfo { diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index c09b71c7dd8e..58bed5df0860 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -556,8 +556,39 @@ static struct device_attribute aac_raid_level_attr = { .show = aac_show_raid_level }; +static ssize_t aac_show_unique_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); + unsigned char sn[16]; + + memset(sn, 0, sizeof(sn)); + + if (sdev_channel(sdev) == CONTAINER_CHANNEL) + memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn)); + + return snprintf(buf, 16 * 2 + 2, + "%02X%02X%02X%02X%02X%02X%02X%02X %02X%02X%02X%02X%02X%02X%02X%02X\n", + sn[0], sn[1], sn[2], sn[3], + sn[4], sn[5], sn[6], sn[7], + sn[8], sn[9], sn[10], sn[11], + sn[12], sn[13], sn[14], sn[15]); +} + +static struct device_attribute aac_unique_id_attr = { + .attr = { + .name = "unique_id", + .mode = 0444, + }, + .show = aac_show_unique_id +}; + + + static struct device_attribute *aac_dev_attrs[] = { &aac_raid_level_attr, + &aac_unique_id_attr, NULL, }; -- cgit v1.2.3-59-g8ed1b From 3136432956501f071891c3d4b6194feb2df924a5 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:33 -0800 Subject: scsi: aacraid: Added new IWBR reset Added a new IWBR soft 
reset type, reworked the IOP reset interface for a bit. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 2 - drivers/scsi/aacraid/aacraid.h | 15 +++-- drivers/scsi/aacraid/commsup.c | 17 ++++-- drivers/scsi/aacraid/linit.c | 12 +++- drivers/scsi/aacraid/rx.c | 10 ++-- drivers/scsi/aacraid/sa.c | 2 +- drivers/scsi/aacraid/src.c | 129 ++++++++++++++++++++++++++++++----------- 7 files changed, 133 insertions(+), 54 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 4c04fd84dd81..1a09618b859d 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -4007,8 +4007,6 @@ static long aac_build_sghba(struct scsi_cmnd *scsicmd, u32 cur_size; struct aac_hba_sgl *sge; - - nseg = scsi_dma_map(scsicmd); if (nseg <= 0) { byte_count = nseg; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 10984a4abee6..6f465c0bb4d1 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -878,7 +878,7 @@ struct adapter_ops void (*adapter_enable_int)(struct aac_dev *dev); int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); int (*adapter_check_health)(struct aac_dev *dev); - int (*adapter_restart)(struct aac_dev *dev, int bled); + int (*adapter_restart)(struct aac_dev *dev, int bled, u8 reset_type); void (*adapter_start)(struct aac_dev *dev); /* Transport operations */ int (*adapter_ioremap)(struct aac_dev * dev, u32 size); @@ -1657,8 +1657,8 @@ struct aac_dev #define aac_adapter_check_health(dev) \ (dev)->a_ops.adapter_check_health(dev) -#define aac_adapter_restart(dev,bled) \ - (dev)->a_ops.adapter_restart(dev,bled) +#define aac_adapter_restart(dev, bled, reset_type) \ + ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) #define aac_adapter_start(dev) \ ((dev)->a_ops.adapter_start(dev)) @@ -2333,6 +2333,13 @@ struct revision #define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER) #define FSACTL_GET_CONTAINERS 2131 #define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED) +/* flags defined for IOP & HW SOFT RESET */ +#define HW_IOP_RESET 0x01 +#define HW_SOFT_RESET 0x02 +#define IOP_HWSOFT_RESET (HW_IOP_RESET | HW_SOFT_RESET) +/* HW Soft Reset register offset */ +#define IBW_SWR_OFFSET 0x4000 +#define SOFT_RESET_TIME 60 struct aac_common @@ -2569,7 +2576,7 @@ unsigned int aac_command_normal(struct aac_queue * q); unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, int isAif, int isFastResponse, struct hw_fib *aif_fib); -int aac_reset_adapter(struct aac_dev * dev, int forced); +int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type); int aac_check_health(struct aac_dev * dev); int aac_command_thread(void *data); int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 09da354e9956..2393bd9aff0e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1455,7 +1455,7 @@ retry_next: } } -static int _aac_reset_adapter(struct aac_dev *aac, int forced) +static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) { int index, quirks; int retval; @@ -1464,6 +1464,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) struct scsi_cmnd *command; struct scsi_cmnd *command_list; int jafo = 
0; + int bled; /* * Assumptions: @@ -1488,7 +1489,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) * If a positive health, means in a known DEAD PANIC * state and the adapter could be reset to `try again'. */ - retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac)); + bled = forced ? 0 : aac_adapter_check_health(aac); + retval = aac_adapter_restart(aac, bled, reset_type); if (retval) goto out; @@ -1598,11 +1600,12 @@ out: return retval; } -int aac_reset_adapter(struct aac_dev * aac, int forced) +int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) { unsigned long flagv = 0; int retval; struct Scsi_Host * host; + int bled; if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) return -EBUSY; @@ -1651,7 +1654,9 @@ int aac_reset_adapter(struct aac_dev * aac, int forced) if (forced < 2) aac_send_shutdown(aac); spin_lock_irqsave(host->host_lock, flagv); - retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1))); + bled = forced ? forced : + (aac_check_reset != 0 && aac_check_reset != 1); + retval = _aac_reset_adapter(aac, bled, reset_type); spin_unlock_irqrestore(host->host_lock, flagv); if ((forced < 2) && (retval == -ENODEV)) { @@ -1697,6 +1702,7 @@ int aac_check_health(struct aac_dev * aac) unsigned long time_now, flagv = 0; struct list_head * entry; struct Scsi_Host * host; + int bled; /* Extending the scope of fib_lock slightly to protect aac->in_reset */ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) @@ -1814,7 +1820,8 @@ int aac_check_health(struct aac_dev * aac) host = aac->scsi_host_ptr; if (aac->thread->pid != current->pid) spin_lock_irqsave(host->host_lock, flagv); - BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1); + bled = aac_check_reset != 1 ? 1 : 0; + _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET); if (aac->thread->pid != current->pid) spin_unlock_irqrestore(host->host_lock, flagv); return BlinkLED; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 58bed5df0860..46d9596f882d 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -569,7 +569,7 @@ static ssize_t aac_show_unique_id(struct device *dev, memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn)); return snprintf(buf, 16 * 2 + 2, - "%02X%02X%02X%02X%02X%02X%02X%02X %02X%02X%02X%02X%02X%02X%02X%02X\n", + "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", sn[0], sn[1], sn[2], sn[3], sn[4], sn[5], sn[6], sn[7], sn[8], sn[9], sn[10], sn[11], @@ -899,7 +899,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) !(aac->supplement_adapter_info.SupportedOptions2 & AAC_OPTION_IGNORE_RESET))) { /* Bypass wait for command quiesce */ - aac_reset_adapter(aac, 2); + aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET); } ret = SUCCESS; } @@ -1167,10 +1167,16 @@ static ssize_t aac_store_reset_adapter(struct device *device, const char *buf, size_t count) { int retval = -EACCES; + int bled = 0; + struct aac_dev *aac; + if (!capable(CAP_SYS_ADMIN)) return retval; - retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!'); + + aac = (struct aac_dev *)class_to_shost(device)->hostdata; + bled = buf[0] == '!' ? 
1:0; + retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET); if (retval >= 0) retval = count; return retval; diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 67213b95b8b6..8287e09f5ecf 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -470,7 +470,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size) return 0; } -static int aac_rx_restart_adapter(struct aac_dev *dev, int bled) +static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { u32 var = 0; @@ -559,7 +559,7 @@ int _aac_rx_init(struct aac_dev *dev) dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt; dev->OIMR = status = rx_readb (dev, MUnit.OIMR); if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) && - !aac_rx_restart_adapter(dev, 0)) + !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) /* Make sure the Hardware FIFO is empty */ while ((++restart < 512) && (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL)); @@ -568,7 +568,8 @@ int _aac_rx_init(struct aac_dev *dev) */ status = rx_readl(dev, MUnit.OMRx[0]); if (status & KERNEL_PANIC) { - if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev))) + if (aac_rx_restart_adapter(dev, + aac_rx_check_health(dev), IOP_HWSOFT_RESET)) goto error_iounmap; ++restart; } @@ -606,7 +607,8 @@ int _aac_rx_init(struct aac_dev *dev) ((startup_timeout > 60) ? (startup_timeout - 60) : (startup_timeout / 2))))) { - if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))) + if (likely(!aac_rx_restart_adapter(dev, + aac_rx_check_health(dev), IOP_HWSOFT_RESET))) start = jiffies; ++restart; } diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index b8538e017e10..6124f1b13bf3 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -257,7 +257,7 @@ static void aac_sa_start_adapter(struct aac_dev *dev) NULL, NULL, NULL, NULL, NULL); } -static int aac_sa_restart_adapter(struct aac_dev *dev, int bled) +static int aac_sa_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { return -EINVAL; } diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 1dd62a4aad2c..874096e54a21 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -656,44 +656,100 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) return 0; } -static int aac_src_restart_adapter(struct aac_dev *dev, int bled) +static void aac_set_intx_mode(struct aac_dev *dev) +{ + if (dev->msi_enabled) { + aac_src_access_devreg(dev, AAC_ENABLE_INTX); + dev->msi_enabled = 0; + msleep(5000); /* Delay 5 seconds */ + } +} + +static void aac_send_iop_reset(struct aac_dev *dev, int bled) { u32 var, reset_mask; - if (bled >= 0) { - if (bled) - printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", + bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, + 0, 0, 0, 0, 0, 0, &var, + &reset_mask, NULL, NULL, NULL); + + if ((bled || var != 0x00000001) && !dev->doorbell_mask) + bled = -EINVAL; + else if (dev->doorbell_mask) { + reset_mask = dev->doorbell_mask; + bled = 0; + var = 0x00000001; + } + + aac_set_intx_mode(dev); + + if (!bled && (dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_DOORBELL_RESET)) { + src_writel(dev, MUnit.IDR, reset_mask); + } else { + src_writel(dev, MUnit.IDR, 0x100); + } + msleep(30000); +} + +static void aac_send_hardware_soft_reset(struct aac_dev *dev) +{ + u_int32_t val; + + val = readl(((char *)(dev->base) + IBW_SWR_OFFSET)); + val |= 0x01; + writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET)); + msleep_interruptible(20000); +} + 
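The restart path that follows strings these helpers together for IOP_HWSOFT_RESET: issue the IOP reset, then poll the outbound message register and fall back to the hardware soft reset if the firmware has not reported KERNEL_UP_AND_RUNNING within SOFT_RESET_TIME seconds. A condensed sketch of that wait-and-escalate loop (a fragment that assumes the driver's helpers and the constants introduced in this patch; the function name itself is hypothetical):

static void example_wait_for_fw(struct aac_dev *dev)
{
	unsigned long start = jiffies;
	u32 status = src_readl(dev, MUnit.OMR);

	while (!(status & KERNEL_UP_AND_RUNNING)) {
		if (time_after(jiffies, start + SOFT_RESET_TIME * HZ)) {
			/* firmware still down: escalate to the register-based reset */
			aac_send_hardware_soft_reset(dev);
			start = jiffies;	/* restart the 60 second window */
		}
		status = src_readl(dev, MUnit.OMR);
	}
}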
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) +{ + unsigned long status, start; + + if (bled < 0) + goto invalid_out; + + if (bled) + pr_err("%s%d: adapter kernel panic'd %x.\n", dev->name, dev->id, bled); - dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; - bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, - 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); - if ((bled || (var != 0x00000001)) && - !dev->doorbell_mask) - return -EINVAL; - else if (dev->doorbell_mask) { - reset_mask = dev->doorbell_mask; - bled = 0; - var = 0x00000001; - } - if ((dev->pdev->device == PMC_DEVICE_S7 || - dev->pdev->device == PMC_DEVICE_S8 || - dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) { - aac_src_access_devreg(dev, AAC_ENABLE_INTX); - dev->msi_enabled = 0; - msleep(5000); /* Delay 5 seconds */ - } + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; - if (!bled && (dev->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_DOORBELL_RESET)) { - src_writel(dev, MUnit.IDR, reset_mask); - ssleep(45); - } else { - src_writel(dev, MUnit.IDR, 0x100); - ssleep(45); + switch (reset_type) { + case IOP_HWSOFT_RESET: + aac_send_iop_reset(dev, bled); + /* + * Check to see if KERNEL_UP_AND_RUNNING + * Wait for the adapter to be up and running. + * If !KERNEL_UP_AND_RUNNING issue HW Soft Reset + */ + status = src_readl(dev, MUnit.OMR); + if (dev->sa_firmware + && !(status & KERNEL_UP_AND_RUNNING)) { + start = jiffies; + do { + status = src_readl(dev, MUnit.OMR); + if (time_after(jiffies, + start+HZ*SOFT_RESET_TIME)) { + aac_send_hardware_soft_reset(dev); + start = jiffies; + } + } while (!(status & KERNEL_UP_AND_RUNNING)); } + break; + case HW_SOFT_RESET: + if (dev->sa_firmware) { + aac_send_hardware_soft_reset(dev); + aac_set_intx_mode(dev); + } + break; + default: + aac_send_iop_reset(dev, bled); + break; } +invalid_out: + if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) return -ENODEV; @@ -748,14 +804,15 @@ int aac_src_init(struct aac_dev *dev) dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; if ((aac_reset_devices || reset_devices) && - !aac_src_restart_adapter(dev, 0)) + !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) ++restart; /* * Check to see if the board panic'd while booting. */ status = src_readl(dev, MUnit.OMR); if (status & KERNEL_PANIC) { - if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) + if (aac_src_restart_adapter(dev, + aac_src_check_health(dev), IOP_HWSOFT_RESET)) goto error_iounmap; ++restart; } @@ -796,7 +853,7 @@ int aac_src_init(struct aac_dev *dev) ? (startup_timeout - 60) : (startup_timeout / 2))))) { if (likely(!aac_src_restart_adapter(dev, - aac_src_check_health(dev)))) + aac_src_check_health(dev), IOP_HWSOFT_RESET))) start = jiffies; ++restart; } @@ -893,7 +950,7 @@ int aac_srcv_init(struct aac_dev *dev) dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; if ((aac_reset_devices || reset_devices) && - !aac_src_restart_adapter(dev, 0)) + !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) ++restart; /* * Check to see if flash update is running. 
@@ -922,7 +979,8 @@ int aac_srcv_init(struct aac_dev *dev) */ status = src_readl(dev, MUnit.OMR); if (status & KERNEL_PANIC) { - if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) + if (aac_src_restart_adapter(dev, + aac_src_check_health(dev), IOP_HWSOFT_RESET)) goto error_iounmap; ++restart; } @@ -961,7 +1019,8 @@ int aac_srcv_init(struct aac_dev *dev) ((startup_timeout > 60) ? (startup_timeout - 60) : (startup_timeout / 2))))) { - if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev)))) + if (likely(!aac_src_restart_adapter(dev, + aac_src_check_health(dev), IOP_HWSOFT_RESET))) start = jiffies; ++restart; } -- cgit v1.2.3-59-g8ed1b From 09867a0e34d20864c3b4b1e49f688470c3f8bdc2 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:34 -0800 Subject: scsi: aacraid: Added ioctl to trigger IOP/IWBR reset Added a new ioctl interface to trigger an IOP or IWBR reset from ioctl. Primary used by management utility to trigger resets. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aacraid.h | 1 + drivers/scsi/aacraid/commctrl.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 6f465c0bb4d1..4814fba41193 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2333,6 +2333,7 @@ struct revision #define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER) #define FSACTL_GET_CONTAINERS 2131 #define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED) +#define FSACTL_RESET_IOP CTL_CODE(2140, METHOD_BUFFERED) /* flags defined for IOP & HW SOFT RESET */ #define HW_IOP_RESET 0x01 #define HW_SOFT_RESET 0x02 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index f98618c94bf0..d8fc6b8980ba 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -1011,7 +1011,22 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg) } return 0; } +struct aac_reset_iop { + u8 reset_type; +}; + +static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg) +{ + struct aac_reset_iop reset; + int retval; + if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop))) + return -EFAULT; + + retval = aac_reset_adapter(dev, 0, reset.reset_type); + return retval; + +} int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) { @@ -1055,6 +1070,10 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) case FSACTL_GET_PCI_INFO: status = aac_get_pci_info(dev,arg); break; + case FSACTL_RESET_IOP: + status = aac_send_reset_adapter(dev, arg); + break; + default: status = -ENOTTY; break; -- cgit v1.2.3-59-g8ed1b From c799d519bf088c0c5deb481b0190990417ace1bc Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:35 -0800 Subject: scsi: aacraid: Retrieve HBA host information ioctl Added a new ioctl interface to retrieve the host device information. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 52 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/aacraid/commctrl.c | 26 +++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 4814fba41193..40733967464b 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2334,6 +2334,7 @@ struct revision #define FSACTL_GET_CONTAINERS 2131 #define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED) #define FSACTL_RESET_IOP CTL_CODE(2140, METHOD_BUFFERED) +#define FSACTL_GET_HBA_INFO CTL_CODE(2150, METHOD_BUFFERED) /* flags defined for IOP & HW SOFT RESET */ #define HW_IOP_RESET 0x01 #define HW_SOFT_RESET 0x02 @@ -2372,6 +2373,57 @@ struct aac_common extern struct aac_common aac_config; +/* + * This is for management ioctl purpose only. + */ +struct aac_hba_info { + + u8 driver_name[50]; + u8 adapter_number; + u8 system_io_bus_number; + u8 device_number; + u32 function_number; + u32 vendor_id; + u32 device_id; + u32 sub_vendor_id; + u32 sub_system_id; + u32 mapped_base_address_size; + u32 base_physical_address_high_part; + u32 base_physical_address_low_part; + + u32 max_command_size; + u32 max_fib_size; + u32 max_scatter_gather_from_os; + u32 max_scatter_gather_to_fw; + u32 max_outstanding_fibs; + + u32 queue_start_threshold; + u32 queue_dump_threshold; + u32 max_io_size_queued; + u32 outstanding_io; + + u32 firmware_build_number; + u32 bios_build_number; + u32 driver_build_number; + u32 serial_number_high_part; + u32 serial_number_low_part; + u32 supported_options; + u32 feature_bits; + u32 currentnumber_ports; + + u8 new_comm_interface:1; + u8 new_commands_supported:1; + u8 disable_passthrough:1; + u8 expose_non_dasd:1; + u8 queue_allowed:1; + u8 bled_check_enabled:1; + u8 reserved1:1; + u8 reserted2:1; + + u32 reserved3[10]; + +}; + /* * The following macro is used when sending and receiving FIBs. It is * only used for debugging. 
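[Illustrative aside, not part of the patches: the sketch below shows roughly how a management utility might drive the reset ioctl added in the previous patch from user space, ahead of the commctrl.c dispatch hook-up that follows. FSACTL_GET_HBA_INFO is exercised the same way, passing the larger struct aac_hba_info shown above. The /dev/aac0 node name and the CTL_CODE()/METHOD_BUFFERED definitions are assumptions mirrored from aacraid.h only so the example is self-contained.]

/* Illustrative user-space sketch; not part of the kernel patches above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Assumed to mirror aacraid.h; the 2140/METHOD_BUFFERED pairing is taken
 * from the FSACTL_RESET_IOP definition added in the previous patch. */
#define METHOD_BUFFERED			0
#define CTL_CODE(function, method)	((4 << 16) | ((function) << 2) | (method))
#define FSACTL_RESET_IOP		CTL_CODE(2140, METHOD_BUFFERED)

/* Mirrors struct aac_reset_iop from the previous patch; reset_type takes the
 * HW_IOP_RESET (0x01) or HW_SOFT_RESET (0x02) flags defined in aacraid.h. */
struct aac_reset_iop {
	uint8_t reset_type;
};

int main(void)
{
	struct aac_reset_iop reset;
	int fd;

	/* "/dev/aac0" is an assumed node name for the first adapter's aacraid
	 * character device; pick the node for the controller to be reset. */
	fd = open("/dev/aac0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&reset, 0, sizeof(reset));
	reset.reset_type = 0x01;	/* HW_IOP_RESET */

	if (ioctl(fd, FSACTL_RESET_IOP, &reset) < 0)
		perror("FSACTL_RESET_IOP");

	close(fd);
	return 0;
}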
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index d8fc6b8980ba..033a054d6c6d 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -1011,6 +1011,29 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg) } return 0; } + +static int aac_get_hba_info(struct aac_dev *dev, void __user *arg) +{ + struct aac_hba_info hbainfo; + + hbainfo.adapter_number = (u8) dev->id; + hbainfo.system_io_bus_number = dev->pdev->bus->number; + hbainfo.device_number = (dev->pdev->devfn >> 3); + hbainfo.function_number = (dev->pdev->devfn & 0x0007); + + hbainfo.vendor_id = dev->pdev->vendor; + hbainfo.device_id = dev->pdev->device; + hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor; + hbainfo.sub_system_id = dev->pdev->subsystem_device; + + if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) { + dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n")); + return -EFAULT; + } + + return 0; +} + struct aac_reset_iop { u8 reset_type; }; @@ -1070,6 +1093,9 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) case FSACTL_GET_PCI_INFO: status = aac_get_pci_info(dev,arg); break; + case FSACTL_GET_HBA_INFO: + status = aac_get_hba_info(dev, arg); + break; case FSACTL_RESET_IOP: status = aac_send_reset_adapter(dev, arg); break; -- cgit v1.2.3-59-g8ed1b From f4babba0af362481e470fb493ea269c2bcd9e9fb Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:36 -0800 Subject: scsi: aacraid: Update copyrights Added new copyright messages Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 8 +++++++- drivers/scsi/aacraid/aacraid.h | 32 ++++++++++++++++++++++++++++++++ drivers/scsi/aacraid/commctrl.c | 3 ++- drivers/scsi/aacraid/comminit.c | 3 ++- drivers/scsi/aacraid/commsup.c | 3 ++- drivers/scsi/aacraid/dpcsup.c | 3 ++- drivers/scsi/aacraid/linit.c | 3 ++- drivers/scsi/aacraid/nark.c | 3 ++- drivers/scsi/aacraid/rkt.c | 3 ++- drivers/scsi/aacraid/rx.c | 3 ++- drivers/scsi/aacraid/sa.c | 3 ++- drivers/scsi/aacraid/src.c | 3 ++- 12 files changed, 59 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 1a09618b859d..3b5ddf430723 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,6 +23,11 @@ * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * + * Module Name: + * aachba.c + * + * Abstract: Contains Interfaces to manage IOs. + * */ #include diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 40733967464b..e8a3f0718699 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1,3 +1,35 @@ +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010-2015 PMC-Sierra, Inc. 
(aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Module Name: + * aacraid.h + * + * Abstract: Contains all routines for control of the aacraid driver + * + */ + #ifndef _AACRAID_H_ #define _AACRAID_H_ #ifndef dprintk diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 033a054d6c6d..614842a9eb07 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 5aad018bd4a1..d0c77246d13d 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 2393bd9aff0e..969727b67cdd 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index c426ea2864ae..417ba349e10e 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. 
(aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 46d9596f882d..445e7d7973cc 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c index 6c53b1d8b2ba..c59074e782d6 100644 --- a/drivers/scsi/aacraid/nark.c +++ b/drivers/scsi/aacraid/nark.c @@ -5,7 +5,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c index 6dff8ada4935..a1bc5bbf7a34 100644 --- a/drivers/scsi/aacraid/rkt.c +++ b/drivers/scsi/aacraid/rkt.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 8287e09f5ecf..0e69a80c3275 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 6124f1b13bf3..553922fed524 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 874096e54a21..46976a3b6952 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -6,7 +6,8 @@ * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. - * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * 2016-2017 Microsemi Corp. 
(aacraid@microsemi.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by -- cgit v1.2.3-59-g8ed1b From 0ba8fdae311fa2a862304aef2483a1b590104be0 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:37 -0800 Subject: scsi: aacraid: Change Driver Version Prefix Change the aacraid driver prefix from 1.2-1 to 1.2.1 Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/linit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 445e7d7973cc..137d22d3a005 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -58,7 +58,7 @@ #include "aacraid.h" -#define AAC_DRIVER_VERSION "1.2-1" +#define AAC_DRIVER_VERSION "1.2.1" #ifndef AAC_DRIVER_BRANCH #define AAC_DRIVER_BRANCH "" #endif -- cgit v1.2.3-59-g8ed1b From 96f6a6134766de0d42a98c7758736dde16e0add5 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Thu, 2 Feb 2017 15:53:38 -0800 Subject: scsi: aacraid: update version Update the driver version to 50740 Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aacraid.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index e8a3f0718699..f2344971e3cb 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -97,8 +97,8 @@ enum { #define PMC_GLOBAL_INT_BIT0 0x00000001 #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 41066 -# define AAC_DRIVER_BRANCH "-ms" +# define AAC_DRIVER_BUILD 50740 +# define AAC_DRIVER_BRANCH "-custom" #endif #define MAXIMUM_NUM_CONTAINERS 32 -- cgit v1.2.3-59-g8ed1b From 103eb3b5d0f2cc2771b3c49181dd22f73735aaf2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jan 2017 13:18:56 +0100 Subject: scsi: mvumi: remove fake transport template These days we can specify an eh_timed_out handler in the host_template, so don't have a transport_template definition just for it. [mkp: fixed typo] Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/mvumi.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 39285070f3b5..247df5e79b71 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2225,15 +2225,12 @@ static struct scsi_host_template mvumi_template = { .name = "Marvell Storage Controller", .slave_configure = mvumi_slave_configure, .queuecommand = mvumi_queue_command, + .eh_timed_out = mvumi_timed_out, .eh_host_reset_handler = mvumi_host_reset, .bios_param = mvumi_bios_param, .this_id = -1, }; -static struct scsi_transport_template mvumi_transport_template = { - .eh_timed_out = mvumi_timed_out, -}; - static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) { void *base = NULL; @@ -2451,7 +2448,6 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) host->cmd_per_lun = (mhba->max_io - 1) ? 
(mhba->max_io - 1) : 1; host->max_id = mhba->max_target_id; host->max_cmd_len = MAX_COMMAND_SIZE; - host->transportt = &mvumi_transport_template; ret = scsi_add_host(host, &mhba->pdev->dev); if (ret) { -- cgit v1.2.3-59-g8ed1b From 28917d40e63a65c3923d45ed190c748e45b90ac7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jan 2017 13:18:57 +0100 Subject: scsi: libsas: remove sas_scsi_timed_out EH_NOT_HANDLED is the default case if no eh_timed_out method is provided, so there is no need to supply it. Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/libsas/sas_init.c | 1 - drivers/scsi/libsas/sas_internal.h | 2 -- drivers/scsi/libsas/sas_scsi_host.c | 7 ------- 3 files changed, 10 deletions(-) diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 362da44f2948..15ef8e2e685c 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -560,7 +560,6 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft) i = to_sas_internal(stt); i->dft = dft; stt->create_work_queue = 1; - stt->eh_timed_out = sas_scsi_timed_out; stt->eh_strategy_handler = sas_scsi_recover_host; return stt; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 9cf0bc260b0e..b306b7843d99 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -64,8 +64,6 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha); int sas_register_ports(struct sas_ha_struct *sas_ha); void sas_unregister_ports(struct sas_ha_struct *sas_ha); -enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); - int sas_init_events(struct sas_ha_struct *sas_ha); void sas_disable_revalidation(struct sas_ha_struct *ha); void sas_enable_revalidation(struct sas_ha_struct *ha); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 519dac4e341e..9bd55bce83af 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -803,13 +803,6 @@ out: shost->host_failed, tries); } -enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) -{ - scmd_dbg(cmd, "command %p timed out\n", cmd); - - return BLK_EH_NOT_HANDLED; -} - int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { struct domain_device *dev = sdev_to_domain_dev(sdev); -- cgit v1.2.3-59-g8ed1b From b6a05c823fc573a65efc4466f174abf05f922e0f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jan 2017 13:18:58 +0100 Subject: scsi: remove eh_timed_out methods in the transport template Instead define the timeout behavior purely based on the host_template eh_timed_out method and wire up the existing transport implementations in the host templates. This also clears up the confusion that the transport template method overrides the host template one, so some drivers have to re-override the transport template one. Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Reviewed-by: Tyrel Datwyler Signed-off-by: Martin K. 
Petersen --- drivers/ata/libata-eh.c | 1 + drivers/ata/libata-transport.c | 1 - drivers/ata/libata.h | 1 - drivers/infiniband/ulp/iser/iscsi_iser.c | 1 + drivers/infiniband/ulp/srp/ib_srp.c | 1 + drivers/message/fusion/mptfc.c | 1 + drivers/message/fusion/mptsas.c | 2 +- drivers/s390/scsi/zfcp_scsi.c | 1 + drivers/scsi/be2iscsi/be_main.c | 1 + drivers/scsi/bfa/bfad_im.c | 2 ++ drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1 + drivers/scsi/bnx2i/bnx2i_iscsi.c | 1 + drivers/scsi/csiostor/csio_scsi.c | 2 ++ drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 1 + drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 1 + drivers/scsi/fcoe/fcoe.c | 1 + drivers/scsi/fnic/fnic_main.c | 1 + drivers/scsi/ibmvscsi/ibmvfc.c | 1 + drivers/scsi/ibmvscsi/ibmvscsi.c | 1 + drivers/scsi/iscsi_tcp.c | 1 + drivers/scsi/libiscsi.c | 5 ++--- drivers/scsi/lpfc/lpfc_scsi.c | 2 ++ drivers/scsi/qedi/qedi_iscsi.c | 1 + drivers/scsi/qla2xxx/qla_os.c | 1 + drivers/scsi/scsi_error.c | 4 +--- drivers/scsi/scsi_transport_fc.c | 9 ++++----- drivers/scsi/scsi_transport_srp.c | 5 ++--- drivers/scsi/storvsc_drv.c | 5 ----- include/linux/libata.h | 2 ++ include/scsi/libiscsi.h | 1 + include/scsi/scsi_transport.h | 11 ----------- include/scsi/scsi_transport_fc.h | 1 + include/scsi/scsi_transport_srp.h | 1 + 33 files changed, 38 insertions(+), 33 deletions(-) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 0e1ec37070d1..50ee10db160f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -549,6 +549,7 @@ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) DPRINTK("EXIT, ret=%d\n", ret); return ret; } +EXPORT_SYMBOL(ata_scsi_timed_out); static void ata_eh_unload(struct ata_port *ap) { diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index 7ef16c085058..46698232e6bf 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -716,7 +716,6 @@ struct scsi_transport_template *ata_attach_transport(void) return NULL; i->t.eh_strategy_handler = ata_scsi_error; - i->t.eh_timed_out = ata_scsi_timed_out; i->t.user_scan = ata_scsi_user_scan; i->t.host_attrs.ac.attrs = &i->port_attrs[0]; diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 8f3a5596dd67..06d479d1f302 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -159,7 +159,6 @@ extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); extern void ata_eh_acquire(struct ata_port *ap); extern void ata_eh_release(struct ata_port *ap); -extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); extern void ata_scsi_error(struct Scsi_Host *host); extern void ata_eh_fastdrain_timerfn(unsigned long arg); extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9104e6b8cac9..86fed1956d7d 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -997,6 +997,7 @@ static struct scsi_host_template iscsi_iser_sht = { .change_queue_depth = scsi_change_queue_depth, .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, .cmd_per_lun = ISER_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 8ddc07123193..7866cb0a8f00 100644 --- 
a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2864,6 +2864,7 @@ static struct scsi_host_template srp_template = { .info = srp_target_info, .queuecommand = srp_queuecommand, .change_queue_depth = srp_change_queue_depth, + .eh_timed_out = srp_timed_out, .eh_abort_handler = srp_abort, .eh_device_reset_handler = srp_reset_device, .eh_host_reset_handler = srp_reset_host, diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index add6a3a6ef0d..98eafae78576 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -119,6 +119,7 @@ static struct scsi_host_template mptfc_driver_template = { .target_destroy = mptfc_target_destroy, .slave_destroy = mptscsih_slave_destroy, .change_queue_depth = mptscsih_change_queue_depth, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = mptfc_abort, .eh_device_reset_handler = mptfc_dev_reset, .eh_bus_reset_handler = mptfc_bus_reset, diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 7ee1667acde4..4ce333643943 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1983,6 +1983,7 @@ static struct scsi_host_template mptsas_driver_template = { .target_destroy = mptsas_target_destroy, .slave_destroy = mptscsih_slave_destroy, .change_queue_depth = mptscsih_change_queue_depth, + .eh_timed_out = mptsas_eh_timed_out, .eh_abort_handler = mptscsih_abort, .eh_device_reset_handler = mptscsih_dev_reset, .eh_host_reset_handler = mptscsih_host_reset, @@ -5398,7 +5399,6 @@ mptsas_init(void) sas_attach_transport(&mptsas_transport_functions); if (!mptsas_transport_template) return -ENODEV; - mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out; mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER, "mptscsih_io_done"); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 07ffdbb5107f..0678cf714c0e 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -330,6 +330,7 @@ static struct scsi_host_template zfcp_scsi_host_template = { .module = THIS_MODULE, .name = "zfcp", .queuecommand = zfcp_scsi_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = zfcp_scsi_eh_abort_handler, .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index c9b9daa91091..32b2713cec93 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -389,6 +389,7 @@ static struct scsi_host_template beiscsi_sht = { .change_queue_depth = scsi_change_queue_depth, .slave_configure = beiscsi_slave_configure, .target_alloc = iscsi_target_alloc, + .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = beiscsi_eh_abort, .eh_device_reset_handler = beiscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_session_reset, diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 02d806012fa1..7eb0eef18fdd 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -813,6 +813,7 @@ struct scsi_host_template bfad_im_scsi_host_template = { .name = BFAD_DRIVER_NAME, .info = bfad_im_info, .queuecommand = bfad_im_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = bfad_im_abort_handler, .eh_device_reset_handler = bfad_im_reset_lun_handler, .eh_bus_reset_handler = bfad_im_reset_bus_handler, @@ -835,6 +836,7 @@ struct scsi_host_template bfad_im_vport_template = { .name 
= BFAD_DRIVER_NAME, .info = bfad_im_info, .queuecommand = bfad_im_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = bfad_im_abort_handler, .eh_device_reset_handler = bfad_im_reset_lun_handler, .eh_bus_reset_handler = bfad_im_reset_bus_handler, diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index c639d5a02656..b1e39f985ec9 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -2947,6 +2947,7 @@ static struct scsi_host_template bnx2fc_shost_template = { .module = THIS_MODULE, .name = "QLogic Offload FCoE Initiator", .queuecommand = bnx2fc_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = bnx2fc_eh_abort, /* abts */ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 133901fd3e35..f32a66f89d25 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -2259,6 +2259,7 @@ static struct scsi_host_template bnx2i_host_template = { .name = "QLogic Offload iSCSI Initiator", .proc_name = "bnx2i", .queuecommand = iscsi_queuecommand, + .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 89a52b941ea8..a1ff75f1384f 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -2270,6 +2270,7 @@ struct scsi_host_template csio_fcoe_shost_template = { .name = CSIO_DRV_DESC, .proc_name = KBUILD_MODNAME, .queuecommand = csio_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = csio_eh_abort_handler, .eh_device_reset_handler = csio_eh_lun_reset_handler, .slave_alloc = csio_slave_alloc, @@ -2289,6 +2290,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = { .name = CSIO_DRV_DESC, .proc_name = KBUILD_MODNAME, .queuecommand = csio_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = csio_eh_abort_handler, .eh_device_reset_handler = csio_eh_lun_reset_handler, .slave_alloc = csio_slave_alloc, diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 33e83464e091..1880eb6c68f7 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -90,6 +90,7 @@ static struct scsi_host_template cxgb3i_host_template = { .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 9a2fdc305cf2..3fb3f5708ff7 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -103,6 +103,7 @@ static struct scsi_host_template cxgb4i_host_template = { .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 59150cad0353..86af57f7c11a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c 
@@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = { .name = "FCoE Driver", .proc_name = FCOE_NAME, .queuecommand = fc_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = fc_eh_abort, .eh_device_reset_handler = fc_eh_device_reset, .eh_host_reset_handler = fc_eh_host_reset, diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 58ce9020d69c..ba58b7953263 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -106,6 +106,7 @@ static struct scsi_host_template fnic_host_template = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = fnic_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = fnic_abort_cmd, .eh_device_reset_handler = fnic_device_reset, .eh_host_reset_handler = fnic_host_reset, diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 78b72c28a55d..2c92dabb55f6 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -3090,6 +3090,7 @@ static struct scsi_host_template driver_template = { .name = "IBM POWER Virtual FC Adapter", .proc_name = IBMVFC_NAME, .queuecommand = ibmvfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = ibmvfc_eh_abort_handler, .eh_device_reset_handler = ibmvfc_eh_device_reset_handler, .eh_target_reset_handler = ibmvfc_eh_target_reset_handler, diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 50cd01165e35..1deb0a9f14a6 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -2072,6 +2072,7 @@ static struct scsi_host_template driver_template = { .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION, .proc_name = "ibmvscsi", .queuecommand = ibmvscsi_queuecommand, + .eh_timed_out = srp_timed_out, .eh_abort_handler = ibmvscsi_eh_abort_handler, .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index ace4f1f41b8e..4228aba1f654 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -967,6 +967,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = { .sg_tablesize = 4096, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index f9b6fba689ff..834d1212b6d5 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1930,7 +1930,7 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) return 0; } -static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) +enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) { enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; struct iscsi_task *task = NULL, *running_task; @@ -2063,6 +2063,7 @@ done: "timer reset" : "nh"); return rc; } +EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); static void iscsi_check_transport_timeouts(unsigned long data) { @@ -2585,8 +2586,6 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) if (!shost->cmd_per_lun) shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN; - if (!shost->transportt->eh_timed_out) - shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; return scsi_add_host(shost, pdev); } EXPORT_SYMBOL_GPL(iscsi_host_add); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c 
b/drivers/scsi/lpfc/lpfc_scsi.c index 19d349fc889f..1180a22beb43 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -5929,6 +5929,7 @@ struct scsi_host_template lpfc_template = { .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, @@ -5955,6 +5956,7 @@ struct scsi_host_template lpfc_vport_template = { .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index d6a205433b66..8b25f0a1eb8c 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -48,6 +48,7 @@ struct scsi_host_template qedi_host_template = { .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver", .proc_name = QEDI_MODULE_NAME, .queuecommand = iscsi_queuecommand, + .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8521cfe302e9..c5a9d6c295a0 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -262,6 +262,7 @@ struct scsi_host_template qla2xxx_driver_template = { .name = QLA2XXX_DRIVER_NAME, .queuecommand = qla2xxx_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = qla2xxx_eh_abort, .eh_device_reset_handler = qla2xxx_eh_device_reset, .eh_target_reset_handler = qla2xxx_eh_target_reset, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 996e134d79fa..9d7bfbb02389 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -279,9 +279,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) if (host->eh_deadline != -1 && !host->last_reset) host->last_reset = jiffies; - if (host->transportt->eh_timed_out) - rtn = host->transportt->eh_timed_out(scmd); - else if (host->hostt->eh_timed_out) + if (host->hostt->eh_timed_out) rtn = host->hostt->eh_timed_out(scmd); if (rtn == BLK_EH_NOT_HANDLED) { diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 03577bde6ac5..9a6ea6fccb06 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2055,7 +2055,7 @@ static int fc_vport_match(struct attribute_container *cont, /** - * fc_timed_out - FC Transport I/O timeout intercept handler + * fc_eh_timed_out - FC Transport I/O timeout intercept handler * @scmd: The SCSI command which timed out * * This routine protects against error handlers getting invoked while a @@ -2076,8 +2076,8 @@ static int fc_vport_match(struct attribute_container *cont, * Notes: * This routine assumes no locks are held on entry. 
*/ -static enum blk_eh_timer_return -fc_timed_out(struct scsi_cmnd *scmd) +enum blk_eh_timer_return +fc_eh_timed_out(struct scsi_cmnd *scmd) { struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); @@ -2086,6 +2086,7 @@ fc_timed_out(struct scsi_cmnd *scmd) return BLK_EH_NOT_HANDLED; } +EXPORT_SYMBOL(fc_eh_timed_out); /* * Called by fc_user_scan to locate an rport on the shost that @@ -2211,8 +2212,6 @@ fc_attach_transport(struct fc_function_template *ft) /* Transport uses the shost workq for scsi scanning */ i->t.create_work_queue = 1; - i->t.eh_timed_out = fc_timed_out; - i->t.user_scan = fc_user_scan; /* target-mode drivers' functions */ diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index b87a78673f65..75b57a1855b0 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -591,7 +591,7 @@ EXPORT_SYMBOL(srp_reconnect_rport); * Note: This function is called from soft-IRQ context and with the request * queue lock held. */ -static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; @@ -603,6 +603,7 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; } +EXPORT_SYMBOL(srp_timed_out); static void srp_rport_release(struct device *dev) { @@ -820,8 +821,6 @@ srp_attach_transport(struct srp_function_template *ft) if (!i) return NULL; - i->t.eh_timed_out = srp_timed_out; - i->t.tsk_mgmt_response = srp_tsk_mgmt_response; i->t.it_nexus_response = srp_it_nexus_response; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ceadf6b4ebc1..585e54f6512c 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1899,11 +1899,6 @@ static int __init storvsc_drv_init(void) fc_transport_template = fc_attach_transport(&fc_transport_functions); if (!fc_transport_template) return -ENODEV; - - /* - * Install Hyper-V specific timeout handler. 
- */ - fc_transport_template->eh_timed_out = storvsc_eh_timed_out; #endif ret = vmbus_driver_register(&storvsc_drv); diff --git a/include/linux/libata.h b/include/linux/libata.h index c170be548b7f..46e18c0619c6 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1130,6 +1130,7 @@ extern int ata_sas_port_start(struct ata_port *ap); extern void ata_sas_port_stop(struct ata_port *ap); extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); +extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); extern int sata_scr_valid(struct ata_link *link); extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); extern int sata_scr_write(struct ata_link *link, int reg, u32 val); @@ -1355,6 +1356,7 @@ extern struct device_attribute *ata_common_sdev_attrs[]; .proc_name = drv_name, \ .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ + .eh_timed_out = ata_scsi_timed_out, \ .bios_param = ata_std_bios_param, \ .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ .sdev_attrs = ata_common_sdev_attrs diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 4d1c46aac331..b0e275de6dec 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -383,6 +383,7 @@ extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc); +extern enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc); /* * iSCSI host helpers. diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index 81292392adbc..6c3bb9f3dc0f 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h @@ -57,17 +57,6 @@ struct scsi_transport_template { */ void (* eh_strategy_handler)(struct Scsi_Host *); - /* - * This is an optional routine that allows the transport to become - * involved when a scsi io timer fires. The return value tells the - * timer routine how to finish the io timeout handling: - * EH_HANDLED: I fixed the error, please complete the command - * EH_RESET_TIMER: I need more time, reset the timer and - * begin counting again - * EH_NOT_HANDLED Begin normal error recovery - */ - enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); - /* * Used as callback for the completion of i_t_nexus request * for target drivers. 
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 924c8e614b45..b21b8aa58c4d 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -808,6 +808,7 @@ struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel, struct fc_vport_identifiers *); int fc_vport_terminate(struct fc_vport *vport); int fc_block_scsi_eh(struct scsi_cmnd *cmnd); +enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd); static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job) { diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h index d40d3ef25707..20d96797edc5 100644 --- a/include/scsi/scsi_transport_srp.h +++ b/include/scsi/scsi_transport_srp.h @@ -124,6 +124,7 @@ extern int srp_reconnect_rport(struct srp_rport *rport); extern void srp_start_tl_fail_timers(struct srp_rport *rport); extern void srp_remove_host(struct Scsi_Host *); extern void srp_stop_rport_timers(struct srp_rport *rport); +enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd); /** * srp_chkready() - evaluate the transport layer state before I/O -- cgit v1.2.3-59-g8ed1b From 556e26a70b64a21507e231f9b54773adf74a7384 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jan 2017 13:18:59 +0100 Subject: scsi: remove tsk_mgmt_response and it_nexus_response transport methods They are never called and just dispatch to methods of the same names in the FC and SRP transport classes that are never implemented. Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_transport_fc.c | 17 ----------------- drivers/scsi/scsi_transport_srp.c | 16 ---------------- include/scsi/scsi_transport.h | 12 ------------ include/scsi/scsi_transport_srp.h | 7 ------- 4 files changed, 52 deletions(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 9a6ea6fccb06..1be339420b02 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2160,19 +2160,6 @@ fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) return 0; } -static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id, - int result) -{ - struct fc_internal *i = to_fc_internal(shost->transportt); - return i->f->tsk_mgmt_response(shost, nexus, tm_id, result); -} - -static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result) -{ - struct fc_internal *i = to_fc_internal(shost->transportt); - return i->f->it_nexus_response(shost, nexus, result); -} - struct scsi_transport_template * fc_attach_transport(struct fc_function_template *ft) { @@ -2214,10 +2201,6 @@ fc_attach_transport(struct fc_function_template *ft) i->t.user_scan = fc_user_scan; - /* target-mode drivers' functions */ - i->t.tsk_mgmt_response = fc_tsk_mgmt_response; - i->t.it_nexus_response = fc_it_nexus_response; - /* * Setup SCSI Target Attributes. 
*/ diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 75b57a1855b0..3c5d89852e9f 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -794,19 +794,6 @@ void srp_stop_rport_timers(struct srp_rport *rport) } EXPORT_SYMBOL_GPL(srp_stop_rport_timers); -static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id, - int result) -{ - struct srp_internal *i = to_srp_internal(shost->transportt); - return i->f->tsk_mgmt_response(shost, nexus, tm_id, result); -} - -static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result) -{ - struct srp_internal *i = to_srp_internal(shost->transportt); - return i->f->it_nexus_response(shost, nexus, result); -} - /** * srp_attach_transport - instantiate SRP transport template * @ft: SRP transport class function template @@ -821,9 +808,6 @@ srp_attach_transport(struct srp_function_template *ft) if (!i) return NULL; - i->t.tsk_mgmt_response = srp_tsk_mgmt_response; - i->t.it_nexus_response = srp_it_nexus_response; - i->t.host_size = sizeof(struct srp_host_attrs); i->t.host_attrs.ac.attrs = &i->host_attrs[0]; i->t.host_attrs.ac.class = &srp_host_class.class; diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index 6c3bb9f3dc0f..cbb6bb08bf11 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h @@ -56,18 +56,6 @@ struct scsi_transport_template { * Allows a transport to override the default error handler. */ void (* eh_strategy_handler)(struct Scsi_Host *); - - /* - * Used as callback for the completion of i_t_nexus request - * for target drivers. - */ - int (* it_nexus_response)(struct Scsi_Host *, u64, int); - - /* - * Used as callback for the completion of task management - * request for target drivers. - */ - int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int); }; #define transport_class_to_shost(tc) \ diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h index 20d96797edc5..dd096330734e 100644 --- a/include/scsi/scsi_transport_srp.h +++ b/include/scsi/scsi_transport_srp.h @@ -88,10 +88,6 @@ struct srp_rport { * @terminate_rport_io: Callback function for terminating all outstanding I/O * requests for an rport. * @rport_delete: Callback function that deletes an rport. - * - * Fields that are only relevant for SRP target drivers: - * @tsk_mgmt_response: Callback function for sending a task management response. - * @it_nexus_response: Callback function for processing an IT nexus response. */ struct srp_function_template { /* for initiator drivers */ @@ -103,9 +99,6 @@ struct srp_function_template { int (*reconnect)(struct srp_rport *rport); void (*terminate_rport_io)(struct srp_rport *rport); void (*rport_delete)(struct srp_rport *rport); - /* for target drivers */ - int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int); - int (* it_nexus_response)(struct Scsi_Host *, u64, int); }; extern struct scsi_transport_template * -- cgit v1.2.3-59-g8ed1b From a76037ff3479ad333a2505061915f7a21e7f3fb6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Feb 2017 15:11:07 +0100 Subject: scsi: pm8001: switch to pci_irq_alloc_vectors Signed-off-by: Christoph Hellwig Acked-by: Jack Wang Signed-off-by: Martin K. 
Petersen --- drivers/scsi/pm8001/pm8001_init.c | 35 ++++++++++++++--------------------- drivers/scsi/pm8001/pm8001_sas.h | 2 -- 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 9fc675f57e33..417368ccb686 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -888,7 +888,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) u32 i = 0, j = 0; u32 number_of_intr; int flag = 0; - u32 max_entry; int rc; static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; @@ -900,18 +899,14 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) flag &= ~IRQF_SHARED; } - max_entry = sizeof(pm8001_ha->msix_entries) / - sizeof(pm8001_ha->msix_entries[0]); - for (i = 0; i < max_entry ; i++) - pm8001_ha->msix_entries[i].entry = i; - rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries, - number_of_intr); - pm8001_ha->number_of_intr = number_of_intr; - if (rc) + rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr, + number_of_intr, PCI_IRQ_MSIX); + if (rc < 0) return rc; + pm8001_ha->number_of_intr = number_of_intr; PM8001_INIT_DBG(pm8001_ha, pm8001_printk( - "pci_enable_msix_exact request ret:%d no of intr %d\n", + "pci_alloc_irq_vectors request ret:%d no of intr %d\n", rc, pm8001_ha->number_of_intr)); for (i = 0; i < number_of_intr; i++) { @@ -920,15 +915,15 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) pm8001_ha->irq_vector[i].irq_id = i; pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; - rc = request_irq(pm8001_ha->msix_entries[i].vector, + rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i), pm8001_interrupt_handler_msix, flag, intr_drvname[i], &(pm8001_ha->irq_vector[i])); if (rc) { for (j = 0; j < i; j++) { - free_irq(pm8001_ha->msix_entries[j].vector, + free_irq(pci_irq_vector(pm8001_ha->pdev, i), &(pm8001_ha->irq_vector[i])); } - pci_disable_msix(pm8001_ha->pdev); + pci_free_irq_vectors(pm8001_ha->pdev); break; } } @@ -1102,11 +1097,10 @@ static void pm8001_pci_remove(struct pci_dev *pdev) #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) - synchronize_irq(pm8001_ha->msix_entries[i].vector); + synchronize_irq(pci_irq_vector(pdev, i)); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, - &(pm8001_ha->irq_vector[i])); - pci_disable_msix(pdev); + free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]); + pci_free_irq_vectors(pdev); #else free_irq(pm8001_ha->irq, sha); #endif @@ -1152,11 +1146,10 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) - synchronize_irq(pm8001_ha->msix_entries[i].vector); + synchronize_irq(pci_irq_vector(pdev, i)); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, - &(pm8001_ha->irq_vector[i])); - pci_disable_msix(pdev); + free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]); + pci_free_irq_vectors(pdev); #else free_irq(pm8001_ha->irq, sha); #endif diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 6628cc38316c..e81a8fa7ef1a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -521,8 +521,6 @@ struct pm8001_hba_info { struct pm8001_device *devices; struct pm8001_ccb_info *ccb_info; #ifdef PM8001_USE_MSIX - struct msix_entry 
msix_entries[PM8001_MAX_MSIX_VEC]; - /*for msi-x interrupt*/ int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET -- cgit v1.2.3-59-g8ed1b From 76291469772fb932523c2e0003848934cd29e7cb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 7 Feb 2017 11:51:29 +0000 Subject: scsi: aacraid: rcode is unsigned and should be signed int aac_fib_send can return -ve error returns and hence rcode should be signed. Currently the rcode >= 0 check is always true and -ve errors are not being checked. Thanks to Dan Carpenter for spotting my original broken fix to this issue. Signed-off-by: Colin Ian King Reviewed-by: Raghava Aditya Renukunta Signed-off-by: Martin K. Petersen --- drivers/scsi/aacraid/aachba.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 3b5ddf430723..907f1e80665b 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1798,7 +1798,7 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan) struct sgmap64 *sg64; dma_addr_t addr; u32 vbus, vid; - u32 rcode = 0; + int rcode = 0; /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) -- cgit v1.2.3-59-g8ed1b From 80a94bb357813901e61f2dc80deae2015c50fdcd Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 7 Feb 2017 13:59:30 +0100 Subject: scsi: aacraid: avoid open-coded upper_32_bits Shifting a dma_addr_t right by 32 bits causes a compile-time warning when that type is only 32 bit wide: drivers/scsi/aacraid/src.c: In function 'aac_src_start_adapter': drivers/scsi/aacraid/src.c:414:29: error: right shift count >= width of type [-Werror=shift-count-overflow] This changes the driver to use the predefined macros consistently, including one correct but open-coded upper_32_bits() instance. Fixes: d1ef4da8487f ("scsi: aacraid: added support for init_struct_8") Fixes: 423400e64d37 ("scsi: aacraid: Include HBA direct interface") Signed-off-by: Arnd Bergmann Reviewed-by: Johannes Thumshirn Reviewed-by: Raghava Aditya Renukunta Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/src.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 46976a3b6952..8e4e2ddbafd7 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -410,8 +410,8 @@ static void aac_src_start_adapter(struct aac_dev *dev) if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds()); src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, - (u32)(ulong)dev->init_pa, - (u32)((ulong)dev->init_pa>>32), + lower_32_bits(dev->init_pa), + upper_32_bits(dev->init_pa), sizeof(struct _r8) + (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), 0, 0, 0, NULL, NULL, NULL, NULL, NULL); @@ -563,7 +563,7 @@ static int aac_src_deliver_message(struct fib *fib) fib->hw_fib_va->header.SenderFibAddress = cpu_to_le32((u32)address); fib->hw_fib_va->header.u.TimeStamp = 0; - WARN_ON(((u32)(((address) >> 16) >> 16)) != 0L); + WARN_ON(upper_32_bits(address) != 0L); } else { /* Calculate the amount to the fibsize bits */ fibsize = (sizeof(struct aac_fib_xporthdr) + -- cgit v1.2.3-59-g8ed1b From e0b299e36004f58f844b776df58f6882877bb18a Mon Sep 17 00:00:00 2001 From: Gilad Broner Date: Fri, 3 Feb 2017 16:56:40 -0800 Subject: scsi: ufs: skip request abort task when previous aborts failed On certain error conditions request abort task itself might fail when aborting a request. In such case, subsequent request aborts should skip issuing the abort task as it is expected to fail as well, and device reset handler will be called next. Signed-off-by: Gilad Broner Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 20 ++++++++++++++++++++ drivers/scsi/ufs/ufshcd.h | 3 +++ 2 files changed, 23 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index a70bf066f277..61fea173df16 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1877,6 +1877,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? 
true : false; + lrbp->req_abort_skip = false; ufshcd_comp_scsi_upiu(hba, lrbp); @@ -4979,6 +4980,17 @@ out: return err; } +static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) +{ + struct ufshcd_lrb *lrbp; + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutrs) { + lrbp = &hba->lrb[tag]; + lrbp->req_abort_skip = true; + } +} + /** * ufshcd_abort - abort a specific command * @cmd: SCSI command pointer @@ -5047,6 +5059,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) ufshcd_print_pwr_info(hba); ufshcd_print_trs(hba, 1 << tag, true); + + /* Skip task abort in case previous aborts failed and report failure */ + if (lrbp->req_abort_skip) { + err = -EIO; + goto out; + } + for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_QUERY_TASK, &resp); @@ -5120,6 +5139,7 @@ out: err = SUCCESS; } else { dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); err = FAILED; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 7ffcde21975b..292fc14522c7 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -165,6 +165,7 @@ struct ufs_pm_lvl_states { * @lun: LUN of the command * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) * @issue_time_stamp: time stamp for debug purposes + * @req_abort_skip: skip request abort task flag */ struct ufshcd_lrb { struct utp_transfer_req_desc *utr_descriptor_ptr; @@ -187,6 +188,8 @@ struct ufshcd_lrb { u8 lun; /* UPIU LUN id field is only 8-bit wide */ bool intr_cmd; ktime_t issue_time_stamp; + + bool req_abort_skip; }; /** -- cgit v1.2.3-59-g8ed1b From 7fabb77b3aa01651f9ebc7c139a253bcabb06880 Mon Sep 17 00:00:00 2001 From: Gilad Broner Date: Fri, 3 Feb 2017 16:56:50 -0800 Subject: scsi: ufs: reduce printout for aborted requests Details printed for each request that is aborted can overload the target as there can be several requests that are aborted at once. This change will print full request details only for the first aborted request since the last link reset, and minimal details for other subsequent requests. Signed-off-by: Gilad Broner Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 46 ++++++++++++++++++++++++++++++++-------------- drivers/scsi/ufs/ufshcd.h | 3 +++ 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 61fea173df16..64d619a24e6e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -396,6 +396,7 @@ static void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) { struct ufshcd_lrb *lrbp; + int prdt_length; int tag; for_each_set_bit(tag, &bitmap, hba->nutrs) { @@ -417,18 +418,17 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) (u64)lrbp->ucd_rsp_dma_addr); ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, sizeof(struct utp_upiu_rsp)); - if (pr_prdt) { - int prdt_length = le16_to_cpu( - lrbp->utr_descriptor_ptr->prd_table_length); - dev_err(hba->dev, - "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", - tag, prdt_length, - (u64)lrbp->ucd_prdt_dma_addr); + prdt_length = le16_to_cpu( + lrbp->utr_descriptor_ptr->prd_table_length); + dev_err(hba->dev, + "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", + tag, prdt_length, + (u64)lrbp->ucd_prdt_dma_addr); + + if (pr_prdt) ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, - sizeof(struct ufshcd_sg_entry) * - prdt_length); - } + sizeof(struct ufshcd_sg_entry) * prdt_length); } } @@ -1848,6 +1848,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) } spin_unlock_irqrestore(hba->host->host_lock, flags); + hba->req_abort_count = 0; + /* acquire the tag to make sure device cmds don't use it */ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { /* @@ -4970,7 +4972,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) spin_lock_irqsave(host->host_lock, flags); ufshcd_transfer_req_compl(hba); spin_unlock_irqrestore(host->host_lock, flags); + out: + hba->req_abort_count = 0; if (!err) { err = SUCCESS; } else { @@ -5054,11 +5058,23 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) /* Print Transfer Request of aborted task */ dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); - scsi_print_command(hba->lrb[tag].cmd); - ufshcd_print_host_regs(hba); - ufshcd_print_pwr_info(hba); - ufshcd_print_trs(hba, 1 << tag, true); + /* + * Print detailed info about aborted request. + * As more than one request might get aborted at the same time, + * print full information only for the first aborted request in order + * to reduce repeated printouts. For other aborted requests only print + * basic details. 
+ */ + scsi_print_command(hba->lrb[tag].cmd); + if (!hba->req_abort_count) { + ufshcd_print_host_regs(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_trs(hba, 1 << tag, true); + } else { + ufshcd_print_trs(hba, 1 << tag, false); + } + hba->req_abort_count++; /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { @@ -5707,6 +5723,8 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size); memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size); memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size); + + hba->req_abort_count = 0; } /** diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 292fc14522c7..b7ce12996a19 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -594,6 +594,9 @@ struct ufs_hba { bool wlun_dev_clr_ua; + /* Number of requests aborts */ + int req_abort_count; + /* Number of lanes available (1 or 2) for Rx/Tx */ u32 lanes_per_direction; struct ufs_pa_layer_attr pwr_info; -- cgit v1.2.3-59-g8ed1b From a3cd5ec55f6c72834f812f9150deb38ddc019782 Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Fri, 3 Feb 2017 16:57:02 -0800 Subject: scsi: ufs: add load based scaling of UFS gear UFS driver's load based clock scaling feature scales down the ufs related clocks in order to allow low power modes of chipsets. UniPro 1.6 supports maximum gear up to HS-G3 (High Speed Gear3) and some of the chipsets low power modes may not be allowed in HS-G3 hence this change adds support to scale gear between HS-G3 and HS-G1 based on same existing load based clock scaling logic. Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 594 ++++++++++++++++++++++++++++++++-------------- drivers/scsi/ufs/ufshcd.h | 9 + 2 files changed, 428 insertions(+), 175 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 64d619a24e6e..e75e50d02e95 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -867,6 +867,396 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) return false; } +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + ktime_t start = ktime_get(); + bool clk_state_changed = false; + + if (!head || list_empty(head)) + goto out; + + ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); + if (ret) + return ret; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + if (scale_up && clki->max_freq) { + if (clki->curr_freq == clki->max_freq) + continue; + + clk_state_changed = true; + ret = clk_set_rate(clki->clk, clki->max_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->max_freq, ret); + break; + } + trace_ufshcd_clk_scaling(dev_name(hba->dev), + "scaled up", clki->name, + clki->curr_freq, + clki->max_freq); + + clki->curr_freq = clki->max_freq; + + } else if (!scale_up && clki->min_freq) { + if (clki->curr_freq == clki->min_freq) + continue; + + clk_state_changed = true; + ret = clk_set_rate(clki->clk, clki->min_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->min_freq, ret); + break; + } + trace_ufshcd_clk_scaling(dev_name(hba->dev), + "scaled down", clki->name, + clki->curr_freq, + clki->min_freq); + clki->curr_freq = clki->min_freq; + } + } + dev_dbg(hba->dev, 
"%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } + + ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); + +out: + if (clk_state_changed) + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), + (scale_up ? "up" : "down"), + ktime_to_us(ktime_sub(ktime_get(), start)), ret); + return ret; +} + +/** + * ufshcd_is_devfreq_scaling_required - check if scaling is required or not + * @hba: per adapter instance + * @scale_up: True if scaling up and false if scaling down + * + * Returns true if scaling is required, false otherwise. + */ +static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, + bool scale_up) +{ + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + + if (!head || list_empty(head)) + return false; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + if (scale_up && clki->max_freq) { + if (clki->curr_freq == clki->max_freq) + continue; + return true; + } else if (!scale_up && clki->min_freq) { + if (clki->curr_freq == clki->min_freq) + continue; + return true; + } + } + } + + return false; +} + +static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, + u64 wait_timeout_us) +{ + unsigned long flags; + int ret = 0; + u32 tm_doorbell; + u32 tr_doorbell; + bool timeout = false, do_last_check = false; + ktime_t start; + + ufshcd_hold(hba, false); + spin_lock_irqsave(hba->host->host_lock, flags); + /* + * Wait for all the outstanding tasks/transfer requests. + * Verify by checking the doorbell registers are clear. + */ + start = ktime_get(); + do { + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { + ret = -EBUSY; + goto out; + } + + tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + if (!tm_doorbell && !tr_doorbell) { + timeout = false; + break; + } else if (do_last_check) { + break; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + schedule(); + if (ktime_to_us(ktime_sub(ktime_get(), start)) > + wait_timeout_us) { + timeout = true; + /* + * We might have scheduled out for long time so make + * sure to check if doorbells are cleared by this time + * or not. 
+ */ + do_last_check = true; + } + spin_lock_irqsave(hba->host->host_lock, flags); + } while (tm_doorbell || tr_doorbell); + + if (timeout) { + dev_err(hba->dev, + "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n", + __func__, tm_doorbell, tr_doorbell); + ret = -EBUSY; + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_release(hba); + return ret; +} + +/** + * ufshcd_scale_gear - scale up/down UFS gear + * @hba: per adapter instance + * @scale_up: True for scaling up gear and false for scaling down + * + * Returns 0 for success, + * Returns -EBUSY if scaling can't happen at this time + * Returns non-zero for any other errors + */ +static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) +{ + #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1 + int ret = 0; + struct ufs_pa_layer_attr new_pwr_info; + + if (scale_up) { + memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info, + sizeof(struct ufs_pa_layer_attr)); + } else { + memcpy(&new_pwr_info, &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + + if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN + || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) { + /* save the current power mode */ + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + + /* scale down gear */ + new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN; + new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN; + } + } + + /* check if the power mode needs to be changed or not? */ + ret = ufshcd_change_power_mode(hba, &new_pwr_info); + + if (ret) + dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", + __func__, ret, + hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, + new_pwr_info.gear_tx, new_pwr_info.gear_rx); + + return ret; +} + +static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) +{ + #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */ + int ret = 0; + /* + * make sure that there are no outstanding requests when + * clock scaling is in progress + */ + scsi_block_requests(hba->host); + down_write(&hba->clk_scaling_lock); + if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { + ret = -EBUSY; + up_write(&hba->clk_scaling_lock); + scsi_unblock_requests(hba->host); + } + + return ret; +} + +static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba) +{ + up_write(&hba->clk_scaling_lock); + scsi_unblock_requests(hba->host); +} + +/** + * ufshcd_devfreq_scale - scale up/down UFS clocks and gear + * @hba: per adapter instance + * @scale_up: True for scaling up and false for scalin down + * + * Returns 0 for success, + * Returns -EBUSY if scaling can't happen at this time + * Returns non-zero for any other errors + */ +static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) +{ + int ret = 0; + + ret = ufshcd_clock_scaling_prepare(hba); + if (ret) + return ret; + + /* scale down the gear before scaling down clocks */ + if (!scale_up) { + ret = ufshcd_scale_gear(hba, false); + if (ret) + goto out; + } + + ret = ufshcd_scale_clks(hba, scale_up); + if (ret) { + if (!scale_up) + ufshcd_scale_gear(hba, true); + goto out; + } + + /* scale up the gear after scaling up clocks */ + if (scale_up) { + ret = ufshcd_scale_gear(hba, true); + if (ret) { + ufshcd_scale_clks(hba, false); + goto out; + } + } + + ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); + +out: + ufshcd_clock_scaling_unprepare(hba); + return ret; +} + +static int ufshcd_devfreq_target(struct device *dev, + unsigned long *freq, u32 flags) +{ + 
int ret = 0; + struct ufs_hba *hba = dev_get_drvdata(dev); + ktime_t start; + bool scale_up, release_clk_hold = false; + unsigned long irq_flags; + + if (!ufshcd_is_clkscaling_supported(hba)) + return -EINVAL; + + if ((*freq > 0) && (*freq < UINT_MAX)) { + dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq); + return -EINVAL; + } + + scale_up = (*freq == UINT_MAX) ? true : false; + if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) + return 0; /* no state change required */ + + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (ufshcd_eh_in_progress(hba)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return 0; + } + + if (ufshcd_is_clkgating_allowed(hba)) { + if (cancel_delayed_work(&hba->clk_gating.gate_work) || + (hba->clk_gating.state == CLKS_ON)) { + /* hold the vote until the scaling work is completed */ + hba->clk_gating.active_reqs++; + release_clk_hold = true; + if (hba->clk_gating.state != CLKS_ON) { + hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); + } + } else { + /* + * Clock gating work seems to be running in parallel + * hence skip scaling work to avoid deadlock between + * current scaling work and gating work. + */ + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return 0; + } + } + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + + start = ktime_get(); + + ret = ufshcd_devfreq_scale(hba, scale_up); + + if (release_clk_hold) + ufshcd_release(hba); + + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), + (scale_up ? "up" : "down"), + ktime_to_us(ktime_sub(ktime_get(), start)), ret); + + return ret; +} + + +static int ufshcd_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; + + if (!ufshcd_is_clkscaling_supported(hba)) + return -EINVAL; + + memset(stat, 0, sizeof(*stat)); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!scaling->window_start_t) + goto start_window; + + if (scaling->is_busy_started) + scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), + scaling->busy_start_t)); + + stat->total_time = jiffies_to_usecs((long)jiffies - + (long)scaling->window_start_t); + stat->busy_time = scaling->tot_busy_t; +start_window: + scaling->window_start_t = jiffies; + scaling->tot_busy_t = 0; + + if (hba->outstanding_reqs) { + scaling->busy_start_t = ktime_get(); + scaling->is_busy_started = true; + } else { + scaling->busy_start_t = 0; + scaling->is_busy_started = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; +} + +static struct devfreq_dev_profile ufs_devfreq_profile = { + .polling_ms = 100, + .target = ufshcd_devfreq_target, + .get_dev_status = ufshcd_devfreq_get_dev_status, +}; + + static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) { if (!ufshcd_is_clkscaling_supported(hba)) @@ -910,7 +1300,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev, ufshcd_resume_clkscaling(hba); } else { ufshcd_suspend_clkscaling(hba); - err = ufshcd_scale_clks(hba, true); + err = ufshcd_devfreq_scale(hba, true); if (err) dev_err(hba->dev, "%s: failed to scale clocks up %d\n", __func__, err); @@ -923,6 +1313,17 @@ out: return count; } +static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba) +{ + hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; + hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; + 
sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); + hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; + hba->clk_scaling.enable_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); +} + static void ufshcd_ungate_work(struct work_struct *work) { int ret; @@ -1820,6 +2221,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) BUG(); } + if (!down_read_trylock(&hba->clk_scaling_lock)) + return SCSI_MLQUEUE_HOST_BUSY; + spin_lock_irqsave(hba->host->host_lock, flags); switch (hba->ufshcd_state) { case UFSHCD_STATE_OPERATIONAL: @@ -1899,6 +2303,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) out_unlock: spin_unlock_irqrestore(hba->host->host_lock, flags); out: + up_read(&hba->clk_scaling_lock); return err; } @@ -2088,6 +2493,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, struct completion wait; unsigned long flags; + down_read(&hba->clk_scaling_lock); + /* * Get free slot, sleep if slots are unavailable. * Even though we use wait_event() which sleeps indefinitely, @@ -2116,6 +2523,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, out_put_tag: ufshcd_put_dev_cmd_tag(hba, tag); wake_up(&hba->dev_cmd.tag_wq); + up_read(&hba->clk_scaling_lock); return err; } @@ -3330,8 +3738,6 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, sizeof(struct ufs_pa_layer_attr)); } - ufshcd_print_pwr_info(hba); - return ret; } @@ -3353,6 +3759,8 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba, memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); ret = ufshcd_change_power_mode(hba, &final_params); + if (!ret) + ufshcd_print_pwr_info(hba); return ret; } @@ -5187,6 +5595,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) ufshcd_hba_stop(hba, false); spin_unlock_irqrestore(hba->host->host_lock, flags); + /* scale up clocks to max frequency before full reinitialization */ + ufshcd_scale_clks(hba, true); + err = ufshcd_hba_enable(hba); if (err) goto out; @@ -5822,6 +6233,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* Resume devfreq after UFS device is detected */ if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + hba->clk_scaling.saved_pwr_info.is_valid = true; ufshcd_resume_clkscaling(hba); hba->clk_scaling.is_allowed = true; } @@ -7200,178 +7614,6 @@ out_error: } EXPORT_SYMBOL(ufshcd_alloc_host); -static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) -{ - int ret = 0; - struct ufs_clk_info *clki; - struct list_head *head = &hba->clk_list_head; - ktime_t start = ktime_get(); - bool clk_state_changed = false; - - if (!head || list_empty(head)) - goto out; - - ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); - if (ret) - return ret; - - list_for_each_entry(clki, head, list) { - if (!IS_ERR_OR_NULL(clki->clk)) { - if (scale_up && clki->max_freq) { - if (clki->curr_freq == clki->max_freq) - continue; - - clk_state_changed = true; - ret = clk_set_rate(clki->clk, clki->max_freq); - if (ret) { - dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", - __func__, clki->name, - clki->max_freq, ret); - break; - } - trace_ufshcd_clk_scaling(dev_name(hba->dev), - "scaled up", clki->name, - clki->curr_freq, - clki->max_freq); - - clki->curr_freq = clki->max_freq; - - } else if (!scale_up && clki->min_freq) { - if (clki->curr_freq == clki->min_freq) - continue; - - 
clk_state_changed = true; - ret = clk_set_rate(clki->clk, clki->min_freq); - if (ret) { - dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", - __func__, clki->name, - clki->min_freq, ret); - break; - } - trace_ufshcd_clk_scaling(dev_name(hba->dev), - "scaled down", clki->name, - clki->curr_freq, - clki->min_freq); - clki->curr_freq = clki->min_freq; - } - } - dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, - clki->name, clk_get_rate(clki->clk)); - } - - ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); - -out: - if (clk_state_changed) - trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), - (scale_up ? "up" : "down"), - ktime_to_us(ktime_sub(ktime_get(), start)), ret); - return ret; -} - -static int ufshcd_devfreq_target(struct device *dev, - unsigned long *freq, u32 flags) -{ - int err = 0; - struct ufs_hba *hba = dev_get_drvdata(dev); - bool release_clk_hold = false; - unsigned long irq_flags; - - if (!ufshcd_is_clkscaling_supported(hba)) - return -EINVAL; - - spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (ufshcd_eh_in_progress(hba)) { - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return 0; - } - - if (ufshcd_is_clkgating_allowed(hba) && - (hba->clk_gating.state != CLKS_ON)) { - if (cancel_delayed_work(&hba->clk_gating.gate_work)) { - /* hold the vote until the scaling work is completed */ - hba->clk_gating.active_reqs++; - release_clk_hold = true; - hba->clk_gating.state = CLKS_ON; - } else { - /* - * Clock gating work seems to be running in parallel - * hence skip scaling work to avoid deadlock between - * current scaling work and gating work. - */ - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return 0; - } - } - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - - if (*freq == UINT_MAX) - err = ufshcd_scale_clks(hba, true); - else if (*freq == 0) - err = ufshcd_scale_clks(hba, false); - - spin_lock_irqsave(hba->host->host_lock, irq_flags); - if (release_clk_hold) - __ufshcd_release(hba); - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - - return err; -} - -static int ufshcd_devfreq_get_dev_status(struct device *dev, - struct devfreq_dev_status *stat) -{ - struct ufs_hba *hba = dev_get_drvdata(dev); - struct ufs_clk_scaling *scaling = &hba->clk_scaling; - unsigned long flags; - - if (!ufshcd_is_clkscaling_supported(hba)) - return -EINVAL; - - memset(stat, 0, sizeof(*stat)); - - spin_lock_irqsave(hba->host->host_lock, flags); - if (!scaling->window_start_t) - goto start_window; - - if (scaling->is_busy_started) - scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), - scaling->busy_start_t)); - - stat->total_time = jiffies_to_usecs((long)jiffies - - (long)scaling->window_start_t); - stat->busy_time = scaling->tot_busy_t; -start_window: - scaling->window_start_t = jiffies; - scaling->tot_busy_t = 0; - - if (hba->outstanding_reqs) { - scaling->busy_start_t = ktime_get(); - scaling->is_busy_started = true; - } else { - scaling->busy_start_t = 0; - scaling->is_busy_started = false; - } - spin_unlock_irqrestore(hba->host->host_lock, flags); - return 0; -} - -static struct devfreq_dev_profile ufs_devfreq_profile = { - .polling_ms = 100, - .target = ufshcd_devfreq_target, - .get_dev_status = ufshcd_devfreq_get_dev_status, -}; -static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba) -{ - hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; - hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; - sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); - 
hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; - hba->clk_scaling.enable_attr.attr.mode = 0644; - if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) - dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); -} - /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -7455,6 +7697,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Initialize mutex for device management commands */ mutex_init(&hba->dev_cmd.lock); + init_rwsem(&hba->clk_scaling_lock); + /* Initialize device management tag acquire wait queue */ init_waitqueue_head(&hba->dev_cmd.tag_wq); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index b7ce12996a19..4da2bbd617e8 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -351,6 +352,11 @@ struct ufs_clk_gating { int active_reqs; }; +struct ufs_saved_pwr_info { + struct ufs_pa_layer_attr info; + bool is_valid; +}; + struct ufs_clk_scaling { ktime_t busy_start_t; bool is_busy_started; @@ -358,6 +364,7 @@ struct ufs_clk_scaling { unsigned long window_start_t; struct device_attribute enable_attr; bool is_allowed; + struct ufs_saved_pwr_info saved_pwr_info; }; /** @@ -634,6 +641,8 @@ struct ufs_hba { enum bkops_status urgent_bkops_lvl; bool is_urgent_bkops_lvl_checked; + + struct rw_semaphore clk_scaling_lock; }; /* Returns true if clocks can be gated. Otherwise false */ -- cgit v1.2.3-59-g8ed1b From 6ba65588381d59ee412b7cb83752d582df731d5c Mon Sep 17 00:00:00 2001 From: Gilad Broner Date: Fri, 3 Feb 2017 16:57:28 -0800 Subject: scsi: ufs: add host state prints in failure cases Whenever some UFS failure occurs the driver prints the UFS registers in order to help with analysis of the failure. However this may not be sufficient in some cases, so having the host controller state as it is represented and managed in the driver will contribute to analysis efforts. Added prints of various fields in the hba struct which may be of interest. Signed-off-by: Gilad Broner Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index e75e50d02e95..056e9123b2ad 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -453,6 +453,28 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) } } +static void ufshcd_print_host_state(struct ufs_hba *hba) +{ + dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); + dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", + hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs); + dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", + hba->saved_err, hba->saved_uic_err); + dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", + hba->curr_dev_pwr_mode, hba->uic_link_state); + dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", + hba->pm_op_in_progress, hba->is_sys_suspended); + dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", + hba->auto_bkops_enabled, hba->host->host_self_blocked); + dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); + dev_err(hba->dev, "error handling flags=0x%x, req. 
abort count=%d\n", + hba->eh_flags, hba->req_abort_count); + dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n", + hba->capabilities, hba->caps); + dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, + hba->dev_quirks); +} + /** * ufshcd_print_pwr_info - print power params as saved in hba * power info @@ -4426,6 +4448,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) "OCS error from controller = %x for tag %d\n", ocs, lrbp->task_tag); ufshcd_print_host_regs(hba); + ufshcd_print_host_state(hba); break; } /* end of switch */ @@ -5477,6 +5500,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) scsi_print_command(hba->lrb[tag].cmd); if (!hba->req_abort_count) { ufshcd_print_host_regs(hba); + ufshcd_print_host_state(hba); ufshcd_print_pwr_info(hba); ufshcd_print_trs(hba, 1 << tag, true); } else { @@ -7738,6 +7762,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) if (err) { dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_host_regs(hba); + ufshcd_print_host_state(hba); goto out_remove_scsi_host; } -- cgit v1.2.3-59-g8ed1b From 401f1e4490ee9d6e74d7514903dd01dae61488bf Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Fri, 3 Feb 2017 16:57:39 -0800 Subject: scsi: ufs: don't suspend clock scaling during clock gating Currently we are suspending clock scaling during clock gating which doesn't allow us to have clock gating timeout lower than clock scaling polling window. If clock gating timeout is smaller than the clock scaling polling window then we will mostly suspend the clock scaling before clock scaling polling window expires and we might get stuck in same state (scaled down or scaled up) for quite a long time. And for this reason, we have clock gating timeout (150ms) greater than clock scaling polling window (100ms). We would like to have aggressive clock gating timeout even lower than the clock scaling polling window hence this change is decoupling the clock scaling suspend/resume from clock gate/ungate. We will not suspend the clock scaling as part of clock gating instead clock scaling context will schedule scaling suspend work if there are no more pending transfer requests. Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 185 ++++++++++++++++++++++++++++++++++++---------- drivers/scsi/ufs/ufshcd.h | 31 +++++++- 2 files changed, 171 insertions(+), 45 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 056e9123b2ad..4f6ba244fc6f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -248,6 +248,7 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); static void ufshcd_resume_clkscaling(struct ufs_hba *hba); static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_config_pwr_mode(struct ufs_hba *hba, @@ -1135,6 +1136,9 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) { int ret = 0; + /* let's not get into low power until clock scaling is completed */ + ufshcd_hold(hba, false); + ret = ufshcd_clock_scaling_prepare(hba); if (ret) return ret; @@ -1166,16 +1170,51 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) out: ufshcd_clock_scaling_unprepare(hba); + ufshcd_release(hba); return ret; } +static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_scaling.suspend_work); + unsigned long irq_flags; + + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return; + } + hba->clk_scaling.is_suspended = true; + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + + __ufshcd_suspend_clkscaling(hba); +} + +static void ufshcd_clk_scaling_resume_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_scaling.resume_work); + unsigned long irq_flags; + + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (!hba->clk_scaling.is_suspended) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return; + } + hba->clk_scaling.is_suspended = false; + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + + devfreq_resume_device(hba->devfreq); +} + static int ufshcd_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { int ret = 0; struct ufs_hba *hba = dev_get_drvdata(dev); ktime_t start; - bool scale_up, release_clk_hold = false; + bool scale_up, sched_clk_scaling_suspend_work = false; unsigned long irq_flags; if (!ufshcd_is_clkscaling_supported(hba)) @@ -1186,50 +1225,35 @@ static int ufshcd_devfreq_target(struct device *dev, return -EINVAL; } - scale_up = (*freq == UINT_MAX) ? 
true : false; - if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) - return 0; /* no state change required */ - spin_lock_irqsave(hba->host->host_lock, irq_flags); if (ufshcd_eh_in_progress(hba)) { spin_unlock_irqrestore(hba->host->host_lock, irq_flags); return 0; } - if (ufshcd_is_clkgating_allowed(hba)) { - if (cancel_delayed_work(&hba->clk_gating.gate_work) || - (hba->clk_gating.state == CLKS_ON)) { - /* hold the vote until the scaling work is completed */ - hba->clk_gating.active_reqs++; - release_clk_hold = true; - if (hba->clk_gating.state != CLKS_ON) { - hba->clk_gating.state = CLKS_ON; - trace_ufshcd_clk_gating(dev_name(hba->dev), - hba->clk_gating.state); - } - } else { - /* - * Clock gating work seems to be running in parallel - * hence skip scaling work to avoid deadlock between - * current scaling work and gating work. - */ - spin_unlock_irqrestore(hba->host->host_lock, irq_flags); - return 0; - } + if (!hba->clk_scaling.active_reqs) + sched_clk_scaling_suspend_work = true; + + scale_up = (*freq == UINT_MAX) ? true : false; + if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + ret = 0; + goto out; /* no state change required */ } spin_unlock_irqrestore(hba->host->host_lock, irq_flags); start = ktime_get(); - ret = ufshcd_devfreq_scale(hba, scale_up); - if (release_clk_hold) - ufshcd_release(hba); - trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), (scale_up ? "up" : "down"), ktime_to_us(ktime_sub(ktime_get(), start)), ret); +out: + if (sched_clk_scaling_suspend_work) + queue_work(hba->clk_scaling.workq, + &hba->clk_scaling.suspend_work); + return ret; } @@ -1278,19 +1302,52 @@ static struct devfreq_dev_profile ufs_devfreq_profile = { .get_dev_status = ufshcd_devfreq_get_dev_status, }; +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) +{ + unsigned long flags; + + devfreq_suspend_device(hba->devfreq); + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_scaling.window_start_t = 0; + spin_unlock_irqrestore(hba->host->host_lock, flags); +} static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) { + unsigned long flags; + bool suspend = false; + if (!ufshcd_is_clkscaling_supported(hba)) return; - devfreq_suspend_device(hba->devfreq); - hba->clk_scaling.window_start_t = 0; + spin_lock_irqsave(hba->host->host_lock, flags); + if (!hba->clk_scaling.is_suspended) { + suspend = true; + hba->clk_scaling.is_suspended = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (suspend) + __ufshcd_suspend_clkscaling(hba); } static void ufshcd_resume_clkscaling(struct ufs_hba *hba) { - devfreq_resume_device(hba->devfreq); + unsigned long flags; + bool resume = false; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_scaling.is_suspended) { + resume = true; + hba->clk_scaling.is_suspended = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (resume) + devfreq_resume_device(hba->devfreq); } static ssize_t ufshcd_clkscale_enable_show(struct device *dev, @@ -1318,6 +1375,11 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev, pm_runtime_get_sync(hba->dev); ufshcd_hold(hba, false); + cancel_work_sync(&hba->clk_scaling.suspend_work); + cancel_work_sync(&hba->clk_scaling.resume_work); + + hba->clk_scaling.is_allowed = value; + if (value) { ufshcd_resume_clkscaling(hba); } else { @@ -1327,7 +1389,6 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev, dev_err(hba->dev, 
"%s: failed to scale clocks up %d\n", __func__, err); } - hba->clk_scaling.is_allowed = value; ufshcd_release(hba); pm_runtime_put_sync(hba->dev); @@ -1379,8 +1440,6 @@ static void ufshcd_ungate_work(struct work_struct *work) hba->clk_gating.is_suspended = false; } unblock_reqs: - if (hba->clk_scaling.is_allowed) - ufshcd_resume_clkscaling(hba); scsi_unblock_requests(hba->host); } @@ -1509,8 +1568,6 @@ static void ufshcd_gate_work(struct work_struct *work) ufshcd_set_link_hibern8(hba); } - ufshcd_suspend_clkscaling(hba); - if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); else @@ -1668,9 +1725,27 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) /* Must be called with host lock acquired */ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { + bool queue_resume_work = false; + if (!ufshcd_is_clkscaling_supported(hba)) return; + if (!hba->clk_scaling.active_reqs++) + queue_resume_work = true; + + if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress) + return; + + if (queue_resume_work) + queue_work(hba->clk_scaling.workq, + &hba->clk_scaling.resume_work); + + if (!hba->clk_scaling.window_start_t) { + hba->clk_scaling.window_start_t = jiffies; + hba->clk_scaling.tot_busy_t = 0; + hba->clk_scaling.is_busy_started = false; + } + if (!hba->clk_scaling.is_busy_started) { hba->clk_scaling.busy_start_t = ktime_get(); hba->clk_scaling.is_busy_started = true; @@ -4511,6 +4586,10 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, complete(hba->dev_cmd.complete); } } + if (ufshcd_is_clkscaling_supported(hba)) + hba->clk_scaling.active_reqs--; + if (ufshcd_is_clkscaling_supported(hba)) + hba->clk_scaling.active_reqs--; } /* clear corresponding bits of completed commands */ @@ -6785,6 +6864,10 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) ufshcd_variant_hba_exit(hba); ufshcd_setup_vreg(hba, false); ufshcd_suspend_clkscaling(hba); + if (ufshcd_is_clkscaling_supported(hba)) { + ufshcd_suspend_clkscaling(hba); + destroy_workqueue(hba->clk_scaling.workq); + } ufshcd_setup_clocks(hba, false); ufshcd_setup_hba_vreg(hba, false); hba->is_powered = false; @@ -7060,7 +7143,11 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_hold(hba, false); hba->clk_gating.is_suspended = true; - ufshcd_suspend_clkscaling(hba); + if (hba->clk_scaling.is_allowed) { + cancel_work_sync(&hba->clk_scaling.suspend_work); + cancel_work_sync(&hba->clk_scaling.resume_work); + ufshcd_suspend_clkscaling(hba); + } if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && req_link_state == UIC_LINK_ACTIVE_STATE) { @@ -7137,7 +7224,8 @@ disable_clks: goto out; set_link_active: - ufshcd_resume_clkscaling(hba); + if (hba->clk_scaling.is_allowed) + ufshcd_resume_clkscaling(hba); ufshcd_vreg_set_hpm(hba); if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) ufshcd_set_link_active(hba); @@ -7147,7 +7235,8 @@ set_dev_active: if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) ufshcd_disable_auto_bkops(hba); enable_gating: - ufshcd_resume_clkscaling(hba); + if (hba->clk_scaling.is_allowed) + ufshcd_resume_clkscaling(hba); hba->clk_gating.is_suspended = false; ufshcd_release(hba); out: @@ -7245,7 +7334,8 @@ disable_vreg: ufshcd_vreg_set_lpm(hba); disable_irq_and_vops_clks: ufshcd_disable_irq(hba); - ufshcd_suspend_clkscaling(hba); + if (hba->clk_scaling.is_allowed) + ufshcd_suspend_clkscaling(hba); ufshcd_setup_clocks(hba, false); out: hba->pm_op_in_progress = 0; @@ -7767,6 +7857,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned 
int irq) } if (ufshcd_is_clkscaling_supported(hba)) { + char wq_name[sizeof("ufs_clkscaling_00")]; + hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile, "simple_ondemand", NULL); if (IS_ERR(hba->devfreq)) { @@ -7775,6 +7867,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) err = PTR_ERR(hba->devfreq); goto out_remove_scsi_host; } + hba->clk_scaling.is_suspended = false; + + INIT_WORK(&hba->clk_scaling.suspend_work, + ufshcd_clk_scaling_suspend_work); + INIT_WORK(&hba->clk_scaling.resume_work, + ufshcd_clk_scaling_resume_work); + + snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d", + host->host_no); + hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); + /* Suspend devfreq until the UFS device is detected */ ufshcd_suspend_clkscaling(hba); ufshcd_clkscaling_init_sysfs(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 4da2bbd617e8..7630600217a2 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -357,14 +357,37 @@ struct ufs_saved_pwr_info { bool is_valid; }; +/** + * struct ufs_clk_scaling - UFS clock scaling related data + * @active_reqs: number of requests that are pending. If this is zero when + * devfreq ->target() function is called then schedule "suspend_work" to + * suspend devfreq. + * @tot_busy_t: Total busy time in current polling window + * @window_start_t: Start time (in jiffies) of the current polling window + * @busy_start_t: Start time of current busy period + * @enable_attr: sysfs attribute to enable/disable clock scaling + * @saved_pwr_info: UFS power mode may also be changed during scaling and this + * one keeps track of previous power mode. + * @workq: workqueue to schedule devfreq suspend/resume work + * @suspend_work: worker to suspend devfreq + * @resume_work: worker to resume devfreq + * @is_allowed: tracks if scaling is currently allowed or not + * @is_busy_started: tracks if busy period has started or not + * @is_suspended: tracks if devfreq is suspended or not + */ struct ufs_clk_scaling { - ktime_t busy_start_t; - bool is_busy_started; - unsigned long tot_busy_t; + int active_reqs; + unsigned long tot_busy_t; unsigned long window_start_t; + ktime_t busy_start_t; struct device_attribute enable_attr; - bool is_allowed; struct ufs_saved_pwr_info saved_pwr_info; + struct workqueue_struct *workq; + struct work_struct suspend_work; + struct work_struct resume_work; + bool is_allowed; + bool is_busy_started; + bool is_suspended; }; /** -- cgit v1.2.3-59-g8ed1b From 0701e49da9852e455d63efb7bcd225d3c8af11df Mon Sep 17 00:00:00 2001 From: "subhashj@codeaurora.org" Date: Fri, 3 Feb 2017 16:58:01 -0800 Subject: scsi: ufs: kick start clock scaling only after device detection UFS clock scaling might start kicking in even before the device is running at the fastest interface speed which is undesirable. This change moves the clock scaling kick start only after the device is detected and running at the fastest interface speed. Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 4f6ba244fc6f..a99a673db71e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6330,19 +6330,31 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ufshcd_scsi_add_wlus(hba)) goto out; + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + hba->clk_scaling.saved_pwr_info.is_valid = true; + if (!hba->devfreq) { + hba->devfreq = devm_devfreq_add_device(hba->dev, + &ufs_devfreq_profile, + "simple_ondemand", + NULL); + if (IS_ERR(hba->devfreq)) { + ret = PTR_ERR(hba->devfreq); + dev_err(hba->dev, "Unable to register with devfreq %d\n", + ret); + goto out; + } + } + hba->clk_scaling.is_allowed = true; + } + scsi_scan_host(hba->host); pm_runtime_put_sync(hba->dev); } - /* Resume devfreq after UFS device is detected */ - if (ufshcd_is_clkscaling_supported(hba)) { - memcpy(&hba->clk_scaling.saved_pwr_info.info, &hba->pwr_info, - sizeof(struct ufs_pa_layer_attr)); - hba->clk_scaling.saved_pwr_info.is_valid = true; - ufshcd_resume_clkscaling(hba); - hba->clk_scaling.is_allowed = true; - } - if (!hba->is_init_prefetch) hba->is_init_prefetch = true; @@ -6865,7 +6877,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) ufshcd_setup_vreg(hba, false); ufshcd_suspend_clkscaling(hba); if (ufshcd_is_clkscaling_supported(hba)) { - ufshcd_suspend_clkscaling(hba); + if (hba->devfreq) + ufshcd_suspend_clkscaling(hba); destroy_workqueue(hba->clk_scaling.workq); } ufshcd_setup_clocks(hba, false); @@ -7859,16 +7872,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) if (ufshcd_is_clkscaling_supported(hba)) { char wq_name[sizeof("ufs_clkscaling_00")]; - hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile, - "simple_ondemand", NULL); - if (IS_ERR(hba->devfreq)) { - dev_err(hba->dev, "Unable to register with devfreq %ld\n", - PTR_ERR(hba->devfreq)); - err = PTR_ERR(hba->devfreq); - goto out_remove_scsi_host; - } - hba->clk_scaling.is_suspended = false; - INIT_WORK(&hba->clk_scaling.suspend_work, ufshcd_clk_scaling_suspend_work); INIT_WORK(&hba->clk_scaling.resume_work, @@ -7878,8 +7881,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) host->host_no); hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); - /* Suspend devfreq until the UFS device is detected */ - ufshcd_suspend_clkscaling(hba); ufshcd_clkscaling_init_sysfs(hba); } -- cgit v1.2.3-59-g8ed1b From 9c46b8676271857e7580f120cb6f8eefa7239ff4 Mon Sep 17 00:00:00 2001 From: Venkat Gopalakrishnan Date: Fri, 3 Feb 2017 16:58:12 -0800 Subject: scsi: ufs-qcom: dump additional testbus registers Change testbus default config, dump additional testbus registers along with other debug vendor specific registers. These additional info are useful in debugging link related failures. Signed-off-by: Venkat Gopalakrishnan Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufs-qcom.c | 48 +++++++++++++++++++++++++++++++++++++++------ drivers/scsi/ufs/ufs-qcom.h | 1 + 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 5ff8a6bf6fd3..ce5d023c1c91 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1497,17 +1497,21 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) { - if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, + UFS_REG_TEST_BUS_EN, REG_UFS_CFG1); ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); - else + } else { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); + } } static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) { /* provide a legal default configuration */ - host->testbus.select_major = TSTBUS_UAWM; - host->testbus.select_minor = 1; + host->testbus.select_major = TSTBUS_UNIPRO; + host->testbus.select_minor = 37; } static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) @@ -1524,7 +1528,7 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) * mappings of select_minor, since there is no harm in * configuring a non-existent select_minor */ - if (host->testbus.select_minor > 0x1F) { + if (host->testbus.select_minor > 0xFF) { dev_err(host->hba->dev, "%s: 0x%05X is not a legal testbus option\n", __func__, host->testbus.select_minor); @@ -1593,7 +1597,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) break; case TSTBUS_UNIPRO: reg = UFS_UNIPRO_CFG; - offset = 1; + offset = 20; + mask = 0xFFF; break; /* * No need for a default case, since @@ -1612,6 +1617,11 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) (u32)host->testbus.select_minor << offset, reg); ufs_qcom_enable_test_bus(host); + /* + * Make sure the test bus configuration is + * committed before returning. 
+ */ + mb(); ufshcd_release(host->hba); pm_runtime_put_sync(host->hba->dev); @@ -1623,13 +1633,39 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba) ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS "); } +static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + u32 *testbus = NULL; + int i, nminor = 256, testbus_len = nminor * sizeof(u32); + + testbus = kmalloc(testbus_len, GFP_KERNEL); + if (!testbus) + return; + + host->testbus.select_major = TSTBUS_UNIPRO; + for (i = 0; i < nminor; i++) { + host->testbus.select_minor = i; + ufs_qcom_testbus_config(host); + testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); + } + print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET, + 16, 4, testbus, testbus_len, false); + kfree(testbus); +} + static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) { ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, "HCI Vendor Specific Registers "); + /* sleep a bit intermittently as we are dumping too much data */ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); + usleep_range(1000, 1100); ufs_qcom_testbus_read(hba); + usleep_range(1000, 1100); + ufs_qcom_print_unipro_testbus(hba); + usleep_range(1000, 1100); } /** diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index fe517cd7dac3..076f52813a4c 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -95,6 +95,7 @@ enum { #define QUNIPRO_SEL UFS_BIT(0) #define TEST_BUS_EN BIT(18) #define TEST_BUS_SEL GENMASK(22, 19) +#define UFS_REG_TEST_BUS_EN BIT(30) /* bit definitions for REG_UFS_CFG2 register */ #define UAWM_HW_CGC_EN (1 << 0) -- cgit v1.2.3-59-g8ed1b From 7942f7b568d7cda28c3aea9c81d48490769c853c Mon Sep 17 00:00:00 2001 From: Venkat Gopalakrishnan Date: Fri, 3 Feb 2017 16:58:24 -0800 Subject: scsi: ufs: dump hw regs on link failures Dump host state, power info and host/vendor specific registers on link failures. This provides useful info to debug the failures. Signed-off-by: Venkat Gopalakrishnan Signed-off-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index a99a673db71e..8b721f431dd0 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3554,6 +3554,12 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) ret = (status != PWR_OK) ? status : -1; } out: + if (ret) { + ufshcd_print_host_state(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_host_regs(hba); + } + spin_lock_irqsave(hba->host->host_lock, flags); hba->active_uic_cmd = NULL; hba->uic_async_done = NULL; @@ -4146,8 +4152,12 @@ link_startup: ret = ufshcd_make_hba_operational(hba); out: - if (ret) + if (ret) { dev_err(hba->dev, "link startup failed %d\n", ret); + ufshcd_print_host_state(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_host_regs(hba); + } return ret; } -- cgit v1.2.3-59-g8ed1b From 8b9b22ba75907dcced88c815a5427ec0b4298aec Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 7 Feb 2017 14:52:58 +0000 Subject: scsi: qedi: Fix possible memory leak in qedi_iscsi_update_conn() 'conn_info' is malloced in qedi_iscsi_update_conn() and should be freed before leaving from the error handling cases, otherwise it will cause memory leak. 
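For readers skimming the archive, the shape of both the leak and the fix can be illustrated with a small stand-alone sketch. This is not the qedi driver code: the names and the plain userspace C below are hypothetical, chosen only to show the pattern of freeing an allocation on every exit path.

	#include <stdlib.h>
	#include <errno.h>

	struct conn_params { int speed; };

	/* Stand-in for the firmware call that may fail (hypothetical). */
	static int do_update(const struct conn_params *p)
	{
		return p->speed > 0 ? 0 : -1;
	}

	static int update_conn_sketch(int speed)
	{
		struct conn_params *p = calloc(1, sizeof(*p));
		int rval;

		if (!p)
			return -ENOMEM;

		p->speed = speed;
		rval = do_update(p);
		if (rval) {
			rval = -ENXIO;
			/* Before the fix, an early return/goto here skipped
			 * the free() below and leaked the allocation. */
		}

		free(p);	/* reached on success and on failure alike */
		return rval;
	}

The patch below achieves the same effect by removing the early goto, so that kfree(conn_info) is reached on both the success and the error path.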
Fixes: ace7f46ba5fd ("scsi: qedi: Add QLogic FastLinQ offload iSCSI driver framework.") Signed-off-by: Wei Yongjun Acked-by: Manish Rangankar Signed-off-by: Martin K. Petersen --- drivers/scsi/qedi/qedi_iscsi.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 8b25f0a1eb8c..b9f79d36142d 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -454,13 +454,9 @@ static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, if (rval) { rval = -ENXIO; QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n"); - goto update_conn_err; } kfree(conn_info); - rval = 0; - -update_conn_err: return rval; } -- cgit v1.2.3-59-g8ed1b From 18bbcabdc6cc6be8c7f6d80c85d314535d76188d Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:01 -0800 Subject: Revert "scsi: megaraid_sas: Enable or Disable Fast path based on the PCI Threshold Bandwidth" This reverts commit "3e5eadb1a881" ("scsi: megaraid_sas: Enable or Disable Fast path based on the PCI Threshold Bandwidth") This patch aimed to increase the performance of R1 write operations for large I/O sizes. Because the method used a timer-based approach, toggling the fast path on and off did not work as expected. Patch 0013 describes the new algorithm and its performance numbers. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 8 ----- drivers/scsi/megaraid/megaraid_sas_base.c | 48 ----------------------------- drivers/scsi/megaraid/megaraid_sas_fp.c | 7 ----- drivers/scsi/megaraid/megaraid_sas_fusion.c | 16 ++++------ drivers/scsi/megaraid/megaraid_sas_fusion.h | 2 +- 5 files changed, 7 insertions(+), 74 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index ba9fbb71eb35..f5c47424f4f2 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1477,8 +1477,6 @@ enum FW_BOOT_CONTEXT { #define MFI_1068_FW_HANDSHAKE_OFFSET 0x64 #define MFI_1068_FW_READY 0xDDDD0000 -#define MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL HZ - #define MR_MAX_REPLY_QUEUES_OFFSET 0X0000001F #define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 @@ -2154,10 +2152,6 @@ struct megasas_instance { atomic_t ldio_outstanding; atomic_t fw_reset_no_pci_access; - atomic64_t bytes_wrote; /* used for raid1 fast path enable or disable */ - atomic_t r1_write_fp_capable; - - struct megasas_instance_template *instancet; struct tasklet_struct isr_tasklet; struct work_struct work_init; @@ -2199,7 +2193,6 @@ struct megasas_instance { long reset_flags; struct mutex reset_mutex; struct timer_list sriov_heartbeat_timer; - struct timer_list r1_fp_hold_timer; char skip_heartbeat_timer_del; u8 requestorId; char PlasmaFW111; @@ -2216,7 +2209,6 @@ struct megasas_instance { bool is_ventura; bool msix_combined; u16 max_raid_mapsize; - u64 pci_threshold_bandwidth; /* used to control the fp writes */ }; struct MR_LD_VF_MAP { u32 size; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 70891a76ca86..3ed876a8e8fb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1940,9 +1940,6 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance) } /* Complete outstanding ioctls when adapter is killed */ megasas_complete_outstanding_ioctls(instance); - if
(instance->is_ventura) - del_timer_sync(&instance->r1_fp_hold_timer); - } /** @@ -2441,24 +2438,6 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr) } } -/*Handler for disabling/enabling raid 1 fast paths*/ -void megasas_change_r1_fp_status(unsigned long instance_addr) -{ - struct megasas_instance *instance = - (struct megasas_instance *)instance_addr; - if (atomic64_read(&instance->bytes_wrote) >= - instance->pci_threshold_bandwidth) { - - atomic64_set(&instance->bytes_wrote, 0); - atomic_set(&instance->r1_write_fp_capable, 0); - } else { - atomic64_set(&instance->bytes_wrote, 0); - atomic_set(&instance->r1_write_fp_capable, 1); - } - mod_timer(&instance->r1_fp_hold_timer, - jiffies + MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); -} - /** * megasas_wait_for_outstanding - Wait for all outstanding cmds * @instance: Adapter soft state @@ -5386,17 +5365,6 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->skip_heartbeat_timer_del = 1; } - if (instance->is_ventura) { - atomic64_set(&instance->bytes_wrote, 0); - atomic_set(&instance->r1_write_fp_capable, 1); - megasas_start_timer(instance, - &instance->r1_fp_hold_timer, - megasas_change_r1_fp_status, - MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); - dev_info(&instance->pdev->dev, "starting the raid 1 fp timer with interval %d\n", - MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); - } - return 0; fail_get_ld_pd_list: @@ -6187,9 +6155,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); - if (instance->is_ventura) - del_timer_sync(&instance->r1_fp_hold_timer); - megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); @@ -6316,16 +6281,6 @@ megasas_resume(struct pci_dev *pdev) megasas_setup_jbod_map(instance); instance->unload = 0; - if (instance->is_ventura) { - atomic64_set(&instance->bytes_wrote, 0); - atomic_set(&instance->r1_write_fp_capable, 1); - megasas_start_timer(instance, - &instance->r1_fp_hold_timer, - megasas_change_r1_fp_status, - MEGASAS_RAID1_FAST_PATH_STATUS_CHECK_INTERVAL); - } - - /* * Initiate AEN (Asynchronous Event Notification) */ @@ -6414,9 +6369,6 @@ static void megasas_detach_one(struct pci_dev *pdev) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); - if (instance->is_ventura) - del_timer_sync(&instance->r1_fp_hold_timer); - if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 322a72b593e3..f1384b01b3d3 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -197,9 +197,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) if (instance->max_raid_mapsize) { fw_map_dyn = fusion->ld_map[(instance->map_id & 1)]; - if (fw_map_dyn->pci_threshold_bandwidth) - instance->pci_threshold_bandwidth = - le64_to_cpu(fw_map_dyn->pci_threshold_bandwidth); #if VD_EXT_DEBUG dev_dbg(&instance->pdev->dev, "raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x\n", le32_to_cpu(fw_map_dyn->raid_map_size), @@ -207,8 +204,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) dev_dbg(&instance->pdev->dev, "descTableSize 0x%x descTableNumElements 0x%x\n", le32_to_cpu(fw_map_dyn->desc_table_size), 
le32_to_cpu(fw_map_dyn->desc_table_num_elements)); - dev_dbg(&instance->pdev->dev, "PCIThreasholdBandwidth %llu\n", - instance->pci_threshold_bandwidth); dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n", drv_map, fw_map_dyn->ld_count); #endif @@ -439,8 +434,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) sizeof(struct MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); } - if (instance->is_ventura && !instance->pci_threshold_bandwidth) - instance->pci_threshold_bandwidth = ULLONG_MAX; } /* diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 9a9c84fb91b1..54728b3deabf 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -95,7 +95,6 @@ extern unsigned int resetwaittime; extern unsigned int dual_qdepth_disable; static void megasas_free_rdpq_fusion(struct megasas_instance *instance); static void megasas_free_reply_fusion(struct megasas_instance *instance); -void megasas_change_r1_fp_status(unsigned long instance_addr); @@ -2634,9 +2633,8 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, * to get new command */ if (cmd->is_raid_1_fp_write && - (atomic_inc_return(&instance->fw_outstanding) > - (instance->host->can_queue) || - (!atomic_read(&instance->r1_write_fp_capable)))) { + atomic_inc_return(&instance->fw_outstanding) > + (instance->host->can_queue)) { megasas_fpio_to_ldio(instance, cmd, cmd->scmd); atomic_dec(&instance->fw_outstanding); } else if (cmd->is_raid_1_fp_write) { @@ -2645,19 +2643,17 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); } + /* * Issue the command to the FW */ - if (scmd->sc_data_direction == PCI_DMA_TODEVICE && instance->is_ventura) - atomic64_add(scsi_bufflen(scmd), &instance->bytes_wrote); megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); - if (r1_cmd) { - atomic64_add(scsi_bufflen(scmd), &instance->bytes_wrote); + if (r1_cmd) megasas_fire_cmd_fusion(instance, r1_cmd->request_desc, - instance->is_ventura); - } + instance->is_ventura); + return 0; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 3addd0cb2e99..82a4ff720eab 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -977,7 +977,7 @@ struct MR_FW_RAID_MAP_DYNAMIC { u32 desc_table_size; /* Total Size of desc table */ /* Total Number of elements in the desc table */ u32 desc_table_num_elements; - u64 pci_threshold_bandwidth; + u64 reserved1; u32 reserved2[3]; /*future use */ /* timeout value used by driver in FP IOs */ u8 fp_pd_io_timeout_sec; -- cgit v1.2.3-59-g8ed1b From f6c0d55c5b91c0d626d65aebee1a0d6b0a61851d Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:02 -0800 Subject: scsi: megaraid_sas: cpu select rework. No functional change. Code refactor. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 175 ++++++++++++++++------------ 1 file changed, 100 insertions(+), 75 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 54728b3deabf..514c306ccca0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1818,6 +1818,73 @@ static void megasas_stream_detect(struct megasas_instance *instance, } +/** + * megasas_set_raidflag_cpu_affinity - This function sets the cpu + * affinity (cpu of the controller) and raid_flags in the raid context + * based on IO type. + * + * @praid_context: IO RAID context + * @raid: LD raid map + * @fp_possible: Is fast path possible? + * @is_read: Is read IO? + * + */ +static void +megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, + struct MR_LD_RAID *raid, bool fp_possible, + u8 is_read) +{ + u8 cpu_sel = MR_RAID_CTX_CPUSEL_0; + struct RAID_CONTEXT_G35 *rctx_g35; + + rctx_g35 = &praid_context->raid_context_g35; + if (fp_possible) { + if (is_read) { + if ((raid->cpuAffinity.pdRead.cpu0) && + (raid->cpuAffinity.pdRead.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.pdRead.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + } else { + if ((raid->cpuAffinity.pdWrite.cpu0) && + (raid->cpuAffinity.pdWrite.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.pdWrite.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + /* Fast path cache by pass capable R0/R1 VD */ + if ((raid->level <= 1) && + (raid->capability.fp_cache_bypass_capable)) { + rctx_g35->routing_flags.bits.sld = 1; + rctx_g35->raid_flags = + (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + } + } + } else { + if (is_read) { + if ((raid->cpuAffinity.ldRead.cpu0) && + (raid->cpuAffinity.ldRead.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldRead.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + } else { + if ((raid->cpuAffinity.ldWrite.cpu0) && + (raid->cpuAffinity.ldWrite.cpu1)) + cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; + else if (raid->cpuAffinity.ldWrite.cpu1) + cpu_sel = MR_RAID_CTX_CPUSEL_1; + + if (rctx_g35->stream_detected && + (raid->level == 5) && + (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && + (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) + cpu_sel = MR_RAID_CTX_CPUSEL_0; + } + } + + rctx_g35->routing_flags.bits.cpu_sel = cpu_sel; +} + /** * megasas_build_ldio_fusion - Prepares IOs to devices * @instance: Adapter soft state @@ -1832,8 +1899,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { - u8 fp_possible; - u32 start_lba_lo, start_lba_hi, device_id, datalength = 0, ld; + bool fp_possible; + u16 ld; + u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; + u32 scsi_buff_len; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct IO_REQUEST_INFO io_info; @@ -1842,7 +1911,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, u8 *raidLUN; unsigned long spinlock_flags; union RAID_CONTEXT_UNION *praid_context; - struct MR_LD_RAID *raid; + struct MR_LD_RAID *raid = NULL; + struct MR_PRIV_DEVICE *mrdev_priv; device_id = MEGASAS_DEV_INDEX(scp); @@ -1858,7 +1928,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, start_lba_lo = 0; start_lba_hi = 0; - fp_possible = 0; + fp_possible = false; /* * 6-byte READ(0x08) or WRITE(0x0A) cdb @@ -1915,7 +1985,8 @@ 
megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.numBlocks = datalength; io_info.ldTgtId = device_id; io_info.r1_alt_dev_handle = MR_PD_INVALID; - io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); + scsi_buff_len = scsi_bufflen(scp); + io_request->DataLength = cpu_to_le32(scsi_buff_len); if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) io_info.isRead = 1; @@ -1927,12 +1998,12 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { io_request->RaidContext.raid_context.reg_lock_flags = 0; - fp_possible = 0; + fp_possible = false; } else { if (MR_BuildRaidContext(instance, &io_info, &io_request->RaidContext.raid_context, local_map_ptr, &raidLUN)) - fp_possible = io_info.fpOkForIo; + fp_possible = (io_info.fpOkForIo > 0) ? true : false; } /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU @@ -1941,6 +2012,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? raw_smp_processor_id() % instance->msix_vectors : 0; + praid_context = &io_request->RaidContext; + if (instance->is_ventura) { spin_lock_irqsave(&instance->stream_lock, spinlock_flags); megasas_stream_detect(instance, cmd, &io_info); @@ -1948,12 +2021,28 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, /* In ventura if stream detected for a read and it is read ahead * capable make this IO as LDIO */ - if (io_request->RaidContext.raid_context_g35.stream_detected && - io_info.isRead && io_info.ra_capable) + if (praid_context->raid_context_g35.stream_detected && + io_info.isRead && io_info.ra_capable) fp_possible = false; - } - praid_context = &io_request->RaidContext; + if (io_info.r1_alt_dev_handle != MR_PD_INVALID) { + mrdev_priv = scp->device->hostdata; + + if (atomic_inc_return(&instance->fw_outstanding) > + (instance->host->can_queue)) { + fp_possible = false; + atomic_dec(&instance->fw_outstanding); + } + } + + /* If raid is NULL, set CPU affinity to default CPU0 */ + if (raid) + megasas_set_raidflag_cpu_affinity(praid_context, + raid, fp_possible, io_info.isRead); + else + praid_context->raid_context_g35.routing_flags.bits.cpu_sel = + MR_RAID_CTX_CPUSEL_0; + } if (fp_possible) { megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, @@ -2016,36 +2105,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->DevHandle = io_info.devHandle; /* populate the LUN field */ memcpy(io_request->LUN, raidLUN, 8); - if (instance->is_ventura) { - if (io_info.isRead) { - if ((raid->cpuAffinity.pdRead.cpu0) && - (raid->cpuAffinity.pdRead.cpu1)) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_FCFS; - else if (raid->cpuAffinity.pdRead.cpu1) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_1; - else - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_0; - } else { - if ((raid->cpuAffinity.pdWrite.cpu0) - && (raid->cpuAffinity.pdWrite.cpu1)) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_FCFS; - else if (raid->cpuAffinity.pdWrite.cpu1) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_1; - else - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_0; - if (praid_context->raid_context_g35.routing_flags.bits.sld) { - praid_context->raid_context_g35.raid_flags - = 
(MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS - << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); - } - } - } } else { io_request->RaidContext.raid_context.timeout_value = cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); @@ -2074,40 +2133,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); - if (instance->is_ventura) { - if (io_info.isRead) { - if ((raid->cpuAffinity.ldRead.cpu0) - && (raid->cpuAffinity.ldRead.cpu1)) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_FCFS; - else if (raid->cpuAffinity.ldRead.cpu1) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_1; - else - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_0; - } else { - if ((raid->cpuAffinity.ldWrite.cpu0) && - (raid->cpuAffinity.ldWrite.cpu1)) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_FCFS; - else if (raid->cpuAffinity.ldWrite.cpu1) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_1; - else - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_0; - - if (io_request->RaidContext.raid_context_g35.stream_detected - && (raid->level == 5) && - (raid->writeMode == MR_RL_WRITE_THROUGH_MODE)) { - if (praid_context->raid_context_g35.routing_flags.bits.cpu_sel - == MR_RAID_CTX_CPUSEL_FCFS) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_0; - } - } - } } /* Not FP */ } -- cgit v1.2.3-59-g8ed1b From 8bf7c65d379a6d923dfebb50eb04c2407e4762ed Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:03 -0800 Subject: scsi: megaraid_sas: raid 1 fast path code optimize No functional change. Code refactor. Remove function megasas_fpio_to_ldio as we never require to convert fpio to ldio because of frame unavailability. Grab extra frame of raid 1 write fast path before it creates first frame as Fast Path. Removed is_raid_1_fp_write flag as raid 1 write fast path command is decided using r1_alt_dev_handle only. Move resetting megasas_cmd_fusion fields at common function megasas_return_cmd_fusion. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_fp.c | 14 +- drivers/scsi/megaraid/megaraid_sas_fusion.c | 349 +++++++++------------------- drivers/scsi/megaraid/megaraid_sas_fusion.h | 3 +- 3 files changed, 118 insertions(+), 248 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index f1384b01b3d3..24258afb207f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -1338,20 +1338,8 @@ MR_BuildRaidContext(struct megasas_instance *instance, ref_in_start_stripe, io_info, pRAID_Context, map); /* If IO on an invalid Pd, then FP is not possible.*/ - if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID)) + if (io_info->devHandle == MR_DEVHANDLE_INVALID) io_info->fpOkForIo = FALSE; - /* if FP possible, set the SLUD bit in - * regLockFlags for ventura - */ - else if ((instance->is_ventura) && (!isRead) && - (raid->writeMode == MR_RL_WRITE_BACK_MODE) && - (raid->capability.fp_cache_bypass_capable)) - ((struct RAID_CONTEXT_G35 *) pRAID_Context)->routing_flags.bits.sld = 1; - /* set raid 1/10 fast path write capable bit in io_info */ - if (io_info->fpOkForIo && - (io_info->r1_alt_dev_handle != MR_PD_INVALID) && - (raid->level == 1) && !isRead) - io_info->is_raid_1_fp_write = 1; return retval; } else if (isRead) { uint stripIdx; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 514c306ccca0..751658996e06 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -181,7 +181,9 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd) { cmd->scmd = NULL; - memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); + memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + cmd->cmd_completed = false; } /** @@ -701,7 +703,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; - cmd->is_raid_1_fp_write = 0; + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; } if (megasas_create_sg_sense_fusion(instance)) @@ -1984,7 +1986,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; - io_info.r1_alt_dev_handle = MR_PD_INVALID; + io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; scsi_buff_len = scsi_bufflen(scp); io_request->DataLength = cpu_to_le32(scsi_buff_len); @@ -2025,7 +2027,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.isRead && io_info.ra_capable) fp_possible = false; - if (io_info.r1_alt_dev_handle != MR_PD_INVALID) { + if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { mrdev_priv = scp->device->hostdata; if (atomic_inc_return(&instance->fw_outstanding) > @@ -2090,9 +2092,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, } else scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; - cmd->is_raid_1_fp_write = io_info.is_raid_1_fp_write; - if (io_info.is_raid_1_fp_write) + if (instance->is_ventura) cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; + else + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; if ((raidLUN[0] == 1) && (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { @@ -2451,72 +2454,6 @@ megasas_get_request_descriptor(struct 
megasas_instance *instance, u16 index) return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; } -/* - * megasas_fpio_to_ldio- - * This function converts an fp io to ldio - */ - -void megasas_fpio_to_ldio(struct megasas_instance *instance, - struct megasas_cmd_fusion *cmd, struct scsi_cmnd *scmd) -{ - struct fusion_context *fusion; - union RAID_CONTEXT_UNION *praid_context; - struct MR_LD_RAID *raid; - struct MR_DRV_RAID_MAP_ALL *local_map_ptr; - u32 device_id, ld; - - fusion = instance->ctrl_context; - cmd->request_desc->SCSIIO.RequestFlags = - (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO - << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - cmd->io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - cmd->io_request->DevHandle = cpu_to_le16(MEGASAS_DEV_INDEX(scmd)); - - /*remove FAST PATH ENABLE bit in IoFlags */ - cmd->io_request->IoFlags &= - cpu_to_le16(~MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); - - /* if the numSGE > max_sge_in_main_sge set the chain offset*/ - if (cmd->io_request->RaidContext.raid_context_g35.num_sge > - fusion->max_sge_in_main_msg) - cmd->io_request->ChainOffset = fusion->chain_offset_io_request; - memcpy(cmd->io_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); - cmd->io_request->CDB.EEDP32.PrimaryReferenceTag = 0; - cmd->io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; - cmd->io_request->EEDPFlags = 0; - cmd->io_request->Control = 0; - cmd->io_request->EEDPBlockSize = 0; - cmd->is_raid_1_fp_write = 0; - - device_id = MEGASAS_DEV_INDEX(cmd->scmd); - local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; - ld = MR_TargetIdToLdGet(device_id, local_map_ptr); - raid = MR_LdRaidGet(ld, local_map_ptr); - praid_context = &cmd->io_request->RaidContext; - if (cmd->scmd->sc_data_direction == PCI_DMA_FROMDEVICE) { - if ((raid->cpuAffinity.ldRead.cpu0) - && (raid->cpuAffinity.ldRead.cpu1)) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_FCFS; - else if (raid->cpuAffinity.ldRead.cpu1) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_1; - else - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_0; - } else { - if ((raid->cpuAffinity.ldWrite.cpu0) - && (raid->cpuAffinity.ldWrite.cpu1)) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_FCFS; - else if (raid->cpuAffinity.ldWrite.cpu1) - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_1; - else - praid_context->raid_context_g35.routing_flags.bits.cpu_sel - = MR_RAID_CTX_CPUSEL_0; - } -} /* megasas_prepate_secondRaid1_IO * It prepares the raid 1 second IO @@ -2527,60 +2464,36 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; struct fusion_context *fusion; - fusion = instance->ctrl_context; req_desc = cmd->request_desc; - if (r1_cmd) { - /* copy the io request frame as well - * as 8 SGEs data for r1 command - */ - memcpy(r1_cmd->io_request, cmd->io_request, - sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); - memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL, - (fusion->max_sge_in_main_msg * - sizeof(union MPI2_SGE_IO_UNION))); - /*sense buffer is different for r1 command*/ - r1_cmd->io_request->SenseBufferLowAddress = - cpu_to_le32(r1_cmd->sense_phys_addr); - r1_cmd->scmd = cmd->scmd; - req_desc2 = - megasas_get_request_descriptor(instance, r1_cmd->index-1); - if (req_desc2) { - req_desc2->Words = 0; - r1_cmd->request_desc = req_desc2; - req_desc2->SCSIIO.SMID = - 
cpu_to_le16(r1_cmd->index); - req_desc2->SCSIIO.RequestFlags = - req_desc->SCSIIO.RequestFlags; - r1_cmd->is_raid_1_fp_write = 1; - r1_cmd->request_desc->SCSIIO.DevHandle = - cmd->r1_alt_dev_handle; - r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle; - cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = - cpu_to_le16(r1_cmd->index); - r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = - cpu_to_le16(cmd->index); - /* MSIxIndex of both commands request - * descriptors should be same - */ - r1_cmd->request_desc->SCSIIO.MSIxIndex = - cmd->request_desc->SCSIIO.MSIxIndex; - /*span arm is different for r1 cmd*/ - r1_cmd->io_request->RaidContext.raid_context_g35.span_arm = + /* copy the io request frame as well as 8 SGEs data for r1 command*/ + memcpy(r1_cmd->io_request, cmd->io_request, + (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST))); + memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL, + (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION))); + /*sense buffer is different for r1 command*/ + r1_cmd->io_request->SenseBufferLowAddress = + cpu_to_le32(r1_cmd->sense_phys_addr); + r1_cmd->scmd = cmd->scmd; + req_desc2 = megasas_get_request_descriptor(instance, + (r1_cmd->index - 1)); + req_desc2->Words = 0; + r1_cmd->request_desc = req_desc2; + req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index); + req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags; + r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle; + r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle; + r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle; + cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = + cpu_to_le16(r1_cmd->index); + r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = + cpu_to_le16(cmd->index); + /*MSIxIndex of both commands request descriptors should be same*/ + r1_cmd->request_desc->SCSIIO.MSIxIndex = + cmd->request_desc->SCSIIO.MSIxIndex; + /*span arm is different for r1 cmd*/ + r1_cmd->io_request->RaidContext.raid_context_g35.span_arm = cmd->io_request->RaidContext.raid_context_g35.span_arm + 1; - } else { - megasas_return_cmd_fusion(instance, r1_cmd); - dev_info(&instance->pdev->dev, - "unable to get request descriptor, firing as normal IO\n"); - atomic_dec(&instance->fw_outstanding); - megasas_fpio_to_ldio(instance, cmd, cmd->scmd); - } - } else { - dev_info(&instance->pdev->dev, - "unable to get command, firing as normal IO\n"); - atomic_dec(&instance->fw_outstanding); - megasas_fpio_to_ldio(instance, cmd, cmd->scmd); - } } /** @@ -2624,10 +2537,6 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, index = cmd->index; req_desc = megasas_get_request_descriptor(instance, index-1); - if (!req_desc) { - atomic_dec(&instance->fw_outstanding); - return SCSI_MLQUEUE_HOST_BUSY; - } req_desc->Words = 0; cmd->request_desc = req_desc; @@ -2657,12 +2566,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, /* driver side count always should be less than max_fw_cmds * to get new command */ - if (cmd->is_raid_1_fp_write && - atomic_inc_return(&instance->fw_outstanding) > - (instance->host->can_queue)) { - megasas_fpio_to_ldio(instance, cmd, cmd->scmd); - atomic_dec(&instance->fw_outstanding); - } else if (cmd->is_raid_1_fp_write) { + if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { r1_cmd = megasas_get_cmd_fusion(instance, (scmd->request->tag + instance->max_fw_cmds)); megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); @@ -2683,6 +2587,61 @@ 
megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, return 0; } +/** + * megasas_complete_r1_command - + * completes R1 FP write commands which has valid peer smid + * @instance: Adapter soft state + * @cmd_fusion: MPT command frame + * + */ +static inline void +megasas_complete_r1_command(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd) +{ + u8 *sense, status, ex_status; + u32 data_length; + u16 peer_smid; + struct fusion_context *fusion; + struct megasas_cmd_fusion *r1_cmd = NULL; + struct scsi_cmnd *scmd_local = NULL; + struct RAID_CONTEXT_G35 *rctx_g35; + + rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35; + fusion = instance->ctrl_context; + peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid); + + r1_cmd = fusion->cmd_list[peer_smid - 1]; + scmd_local = cmd->scmd; + status = rctx_g35->status; + ex_status = rctx_g35->ex_status; + data_length = cmd->io_request->DataLength; + sense = cmd->sense; + + cmd->cmd_completed = true; + + /* Check if peer command is completed or not*/ + if (r1_cmd->cmd_completed) { + rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35; + if (rctx_g35->status != MFI_STAT_OK) { + status = rctx_g35->status; + ex_status = rctx_g35->ex_status; + data_length = r1_cmd->io_request->DataLength; + sense = r1_cmd->sense; + } + + megasas_return_cmd_fusion(instance, r1_cmd); + map_cmd_status(fusion, scmd_local, status, ex_status, + le32_to_cpu(data_length), sense); + if (instance->ldio_threshold && + megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + atomic_dec(&instance->ldio_outstanding); + scmd_local->SCp.ptr = NULL; + megasas_return_cmd_fusion(instance, cmd); + scsi_dma_unmap(scmd_local); + scmd_local->scsi_done(scmd_local); + } +} + /** * complete_cmd_fusion - Completes command * @instance: Adapter soft state @@ -2696,10 +2655,10 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; struct fusion_context *fusion; struct megasas_cmd *cmd_mfi; - struct megasas_cmd_fusion *cmd_fusion, *r1_cmd = NULL; + struct megasas_cmd_fusion *cmd_fusion; u16 smid, num_completed; - u8 reply_descript_type, *sense; - u32 status, extStatus, device_id, data_length; + u8 reply_descript_type, *sense, status, extStatus; + u32 device_id, data_length; union desc_value d_val; struct LD_LOAD_BALANCE_INFO *lbinfo; int threshold_reply_count = 0; @@ -2729,25 +2688,11 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) while (d_val.u.low != cpu_to_le32(UINT_MAX) && d_val.u.high != cpu_to_le32(UINT_MAX)) { - /* - * Ensure that the peer command is NULL here in case a - * command has completed but the R1 FP Write peer has - * not completed yet.If not null, it's possible that - * another thread will complete the peer - * command and should not. 
- */ - r1_cmd = NULL; smid = le16_to_cpu(reply_desc->SMID); - cmd_fusion = fusion->cmd_list[smid - 1]; - - scsi_io_req = - (struct MPI2_RAID_SCSI_IO_REQUEST *) - cmd_fusion->io_request; - - if (cmd_fusion->scmd) - cmd_fusion->scmd->SCp.ptr = NULL; + scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) + cmd_fusion->io_request; scmd_local = cmd_fusion->scmd; status = scsi_io_req->RaidContext.raid_context.status; @@ -2768,88 +2713,33 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) break; case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ /* Update load balancing info */ - device_id = MEGASAS_DEV_INDEX(scmd_local); - lbinfo = &fusion->load_balance_info[device_id]; - /* - * check for the raid 1/10 fast path writes - */ - if (!cmd_fusion->is_raid_1_fp_write && - (cmd_fusion->scmd->SCp.Status & - MEGASAS_LOAD_BALANCE_FLAG)) { + if (fusion->load_balance_info && + (cmd_fusion->scmd->SCp.Status & + MEGASAS_LOAD_BALANCE_FLAG)) { + device_id = MEGASAS_DEV_INDEX(scmd_local); + lbinfo = &fusion->load_balance_info[device_id]; atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); - cmd_fusion->scmd->SCp.Status &= - ~MEGASAS_LOAD_BALANCE_FLAG; - } else if (cmd_fusion->is_raid_1_fp_write) { - /* get peer command */ - if (cmd_fusion->index < instance->max_fw_cmds) - r1_cmd = fusion->cmd_list[(cmd_fusion->index + - instance->max_fw_cmds)-1]; - else { - r1_cmd = - fusion->cmd_list[(cmd_fusion->index - - instance->max_fw_cmds)-1]; - } - cmd_fusion->cmd_completed = true; + cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; } - - if (reply_descript_type == - MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { - if (megasas_dbg_lvl == 5) - dev_err(&instance->pdev->dev, "\nFAST Path " - "IO Success\n"); - } - /* Fall thru and complete IO */ + //Fall thru and complete IO case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ - /* Map the FW Cmd Status */ - /* - * check for the raid 1/10 fast path writes - */ - if (r1_cmd && r1_cmd->is_raid_1_fp_write - && r1_cmd->cmd_completed) { - /* - * if the peer Raid 1/10 fast path failed, - * mark IO as failed to the scsi layer. 
- * over write the current status by the failed - * status makes sure that if any one of - * command fails,return fail status to - * scsi layer - */ - if (r1_cmd->io_request->RaidContext.raid_context.status != - MFI_STAT_OK) { - status = - r1_cmd->io_request->RaidContext.raid_context.status; - extStatus = - r1_cmd->io_request->RaidContext.raid_context.ex_status; - data_length = - r1_cmd->io_request->DataLength; - sense = r1_cmd->sense; - } - r1_cmd->io_request->RaidContext.raid_context.status = 0; - r1_cmd->io_request->RaidContext.raid_context.ex_status = 0; - cmd_fusion->is_raid_1_fp_write = 0; - r1_cmd->is_raid_1_fp_write = 0; - r1_cmd->cmd_completed = false; - cmd_fusion->cmd_completed = false; - megasas_return_cmd_fusion(instance, r1_cmd); - } - if (!cmd_fusion->is_raid_1_fp_write) { + atomic_dec(&instance->fw_outstanding); + if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { map_cmd_status(fusion, scmd_local, status, - extStatus, data_length, sense); - scsi_io_req->RaidContext.raid_context.status = 0; - scsi_io_req->RaidContext.raid_context.ex_status = 0; - if (instance->ldio_threshold - && megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + extStatus, le32_to_cpu(data_length), + sense); + if (instance->ldio_threshold && + (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)) atomic_dec(&instance->ldio_outstanding); + scmd_local->SCp.ptr = NULL; megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); scmd_local->scsi_done(scmd_local); - } - atomic_dec(&instance->fw_outstanding); - + } else /* Optimal VD - R1 FP command completion. */ + megasas_complete_r1_command(instance, cmd_fusion); break; case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; - /* Poll mode. Dummy free. * In case of Interrupt mode, caller has reverse check. 
*/ @@ -3896,7 +3786,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) { int retval = SUCCESS, i, j, convert = 0; struct megasas_instance *instance; - struct megasas_cmd_fusion *cmd_fusion, *mpt_cmd_fusion; + struct megasas_cmd_fusion *cmd_fusion, *r1_cmd; struct fusion_context *fusion; u32 abs_state, status_reg, reset_adapter; u32 io_timeout_in_crash_mode = 0; @@ -3973,15 +3863,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) cmd_fusion = fusion->cmd_list[i]; /*check for extra commands issued by driver*/ if (instance->is_ventura) { - cmd_fusion->is_raid_1_fp_write = 0; - cmd_fusion->cmd_completed = false; - mpt_cmd_fusion = - fusion->cmd_list[i + instance->max_fw_cmds]; - mpt_cmd_fusion->is_raid_1_fp_write = 0; - mpt_cmd_fusion->cmd_completed = false; - if (mpt_cmd_fusion->scmd) - megasas_return_cmd_fusion(instance, - mpt_cmd_fusion); + r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; + megasas_return_cmd_fusion(instance, r1_cmd); } scmd_local = cmd_fusion->scmd; if (cmd_fusion->scmd) { diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 82a4ff720eab..9d9658ea4e6e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -673,6 +673,7 @@ struct MPI2_IOC_INIT_REQUEST { /* mrpriv defines */ #define MR_PD_INVALID 0xFFFF +#define MR_DEVHANDLE_INVALID 0xFFFF #define MAX_SPAN_DEPTH 8 #define MAX_QUAD_DEPTH MAX_SPAN_DEPTH #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) @@ -921,7 +922,6 @@ struct IO_REQUEST_INFO { u8 span_arm; /* span[7:5], arm[4:0] */ u8 pd_after_lb; u16 r1_alt_dev_handle; /* raid 1/10 only */ - bool is_raid_1_fp_write; bool ra_capable; }; @@ -1060,7 +1060,6 @@ struct megasas_cmd_fusion { u32 index; u8 pd_r1_lb; struct completion done; - bool is_raid_1_fp_write; u16 r1_alt_dev_handle; /* raid 1/10 only*/ bool cmd_completed; /* raid 1/10 fp writes status holder */ -- cgit v1.2.3-59-g8ed1b From 45b8a35eed7b1d7e51a4dc04b5b694301a383afa Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:04 -0800 Subject: scsi: megaraid_sas: 32 bit descriptor fire cmd optimization No functional change. Code refactor. megasas_fire_cmd_fusion can always use 32 bit descriptor write for ventura. No need to pass extra flag. Only IOC INIT required 64 bit Descriptor write. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 65 +++++++++++++++-------------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 751658996e06..24b72c58e075 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -188,40 +188,35 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, /** * megasas_fire_cmd_fusion - Sends command to the FW + * @instance: Adapter soft state + * @req_desc: 32bit or 64bit Request descriptor + * + * Perform PCI Write. Ventura supports 32 bit Descriptor. + * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. 
*/ + static void megasas_fire_cmd_fusion(struct megasas_instance *instance, - union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, bool is_32bit) + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { - struct megasas_register_set __iomem *regs = instance->reg_set; - unsigned long flags; - - if (is_32bit) + if (instance->is_ventura) writel(le32_to_cpu(req_desc->u.low), - &(regs)->inbound_single_queue_port); - else if (instance->is_ventura) { + &instance->reg_set->inbound_single_queue_port); + else { +#if defined(writeq) && defined(CONFIG_64BIT) + u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | + le32_to_cpu(req_desc->u.low)); + + writeq(req_data, &instance->reg_set->inbound_low_queue_port); +#else + unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel(le32_to_cpu(req_desc->u.low), - &(regs)->inbound_low_queue_port); + &instance->reg_set->inbound_low_queue_port); writel(le32_to_cpu(req_desc->u.high), - &(regs)->inbound_high_queue_port); + &instance->reg_set->inbound_high_queue_port); mmiowb(); spin_unlock_irqrestore(&instance->hba_lock, flags); - } else { -#if defined(writeq) && defined(CONFIG_64BIT) - u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | - le32_to_cpu(req_desc->u.low)); - - writeq(req_data, &instance->reg_set->inbound_low_queue_port); -#else - - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc->u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); #endif } } @@ -771,6 +766,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) const char *sys_info; MFI_CAPABILITIES *drv_ops; u32 scratch_pad_2; + unsigned long flags; fusion = instance->ctrl_context; @@ -897,7 +893,14 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - megasas_fire_cmd_fusion(instance, &req_desc, false); + /* For Ventura also IOC INIT required 64 bit Descriptor write. 
*/ + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc.u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc.u.high), + &instance->reg_set->inbound_high_queue_port); + mmiowb(); + spin_unlock_irqrestore(&instance->hba_lock, flags); wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); @@ -2577,11 +2580,10 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, * Issue the command to the FW */ - megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); + megasas_fire_cmd_fusion(instance, req_desc); if (r1_cmd) - megasas_fire_cmd_fusion(instance, r1_cmd->request_desc, - instance->is_ventura); + megasas_fire_cmd_fusion(instance, r1_cmd->request_desc); return 0; @@ -3001,7 +3003,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance, return DCMD_NOT_FIRED; } - megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); + megasas_fire_cmd_fusion(instance, req_desc); return DCMD_SUCCESS; } @@ -3294,8 +3296,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance) cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))) && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); if (refire_cmd) - megasas_fire_cmd_fusion(instance, req_desc, - instance->is_ventura); + megasas_fire_cmd_fusion(instance, req_desc); else megasas_return_cmd(instance, cmd_mfi); } @@ -3474,7 +3475,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, mr_request->tmReqFlags.isTMForLD = 1; init_completion(&cmd_fusion->done); - megasas_fire_cmd_fusion(instance, req_desc, instance->is_ventura); + megasas_fire_cmd_fusion(instance, req_desc); timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ); -- cgit v1.2.3-59-g8ed1b From 3cabd16256584581af2cc3d2cedabcfcf15021ad Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:05 -0800 Subject: scsi: megaraid_sas: Refactor MEGASAS_IS_LOGICAL macro using sdev Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 4 ++-- drivers/scsi/megaraid/megaraid_sas_base.c | 20 ++++++++++---------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 12 ++++++------ 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index f5c47424f4f2..dff877fd0822 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2301,8 +2301,8 @@ struct megasas_instance_template { struct megasas_cmd *cmd); }; -#define MEGASAS_IS_LOGICAL(scp) \ - ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) +#define MEGASAS_IS_LOGICAL(sdev) \ + ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 
0 : 1) #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 3ed876a8e8fb..6ca49efdbf09 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1279,7 +1279,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, u16 flags = 0; struct megasas_pthru_frame *pthru; - is_logical = MEGASAS_IS_LOGICAL(scp); + is_logical = MEGASAS_IS_LOGICAL(scp->device); device_id = MEGASAS_DEV_INDEX(scp); pthru = (struct megasas_pthru_frame *)cmd->frame; @@ -1519,11 +1519,11 @@ inline int megasas_cmd_type(struct scsi_cmnd *cmd) case WRITE_6: case READ_16: case WRITE_16: - ret = (MEGASAS_IS_LOGICAL(cmd)) ? + ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? READ_WRITE_LDIO : READ_WRITE_SYSPDIO; break; default: - ret = (MEGASAS_IS_LOGICAL(cmd)) ? + ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; } return ret; @@ -1699,15 +1699,16 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) scmd->result = 0; - if (MEGASAS_IS_LOGICAL(scmd) && + if (MEGASAS_IS_LOGICAL(scmd->device) && (scmd->device->id >= instance->fw_supported_vd_count || scmd->device->lun)) { scmd->result = DID_BAD_TARGET << 16; goto out_done; } - if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) && - (!instance->fw_sync_cache_support)) { + if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && + MEGASAS_IS_LOGICAL(scmd->device) && + (!instance->fw_sync_cache_support)) { scmd->result = DID_OK << 16; goto out_done; } @@ -1758,7 +1759,7 @@ void megasas_update_sdev_properties(struct scsi_device *sdev) if (!fusion) return; - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && + if (!MEGASAS_IS_LOGICAL(sdev) && instance->use_seqnum_jbod_fp) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; @@ -1826,8 +1827,7 @@ static int megasas_slave_configure(struct scsi_device *sdev) instance = megasas_lookup_instance(sdev->host->host_no); if (instance->pd_list_not_supported) { - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && - sdev->type == TYPE_DISK) { + if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; if (instance->pd_list[pd_index].driveState != @@ -1854,7 +1854,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev) struct MR_PRIV_DEVICE *mr_device_priv_data; instance = megasas_lookup_instance(sdev->host->host_no); - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { + if (!MEGASAS_IS_LOGICAL(sdev)) { /* * Open the OS scan to the SYSTEM PD */ diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 24b72c58e075..675afc9578fb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3581,13 +3581,13 @@ static u16 megasas_get_tm_devhandle(struct scsi_device *sdev) instance = (struct megasas_instance *)sdev->host->hostdata; fusion = instance->ctrl_context; - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { + if (!MEGASAS_IS_LOGICAL(sdev)) { if (instance->use_seqnum_jbod_fp) { - pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + - sdev->id; - pd_sync = (void *)fusion->pd_seq_sync - [(instance->pd_seq_map_id - 1) & 1]; - devhandle = pd_sync->seq[pd_index].devHandle; + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + pd_sync = (void *)fusion->pd_seq_sync + 
[(instance->pd_seq_map_id - 1) & 1]; + devhandle = pd_sync->seq[pd_index].devHandle; } else sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable" " without JBOD MAP support from %s %d\n", __func__, __LINE__); -- cgit v1.2.3-59-g8ed1b From ed981b81fa6cb7ce191756822e3de24e51112cd3 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:06 -0800 Subject: scsi: megaraid_sas: RAID map is accessed for SYS PDs when use_seqnum_jbod_fp is not set Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 6ca49efdbf09..67a205a9f059 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1756,28 +1756,31 @@ void megasas_update_sdev_properties(struct scsi_device *sdev) fusion = instance->ctrl_context; mr_device_priv_data = sdev->hostdata; - if (!fusion) + if (!fusion || !mr_device_priv_data) return; - if (!MEGASAS_IS_LOGICAL(sdev) && - instance->use_seqnum_jbod_fp) { - pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + - sdev->id; - pd_sync = (void *)fusion->pd_seq_sync - [(instance->pd_seq_map_id - 1) & 1]; - mr_device_priv_data->is_tm_capable = - pd_sync->seq[pd_index].capability.tmCapable; - } else { + if (MEGASAS_IS_LOGICAL(sdev)) { device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + if (ld >= instance->fw_supported_vd_count) + return; raid = MR_LdRaidGet(ld, local_map_ptr); if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + blk_queue_update_dma_alignment(sdev->request_queue, + 0x7); + mr_device_priv_data->is_tm_capable = raid->capability.tmCapable; + } else if (instance->use_seqnum_jbod_fp) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + pd_sync = (void *)fusion->pd_seq_sync + [(instance->pd_seq_map_id - 1) & 1]; + mr_device_priv_data->is_tm_capable = + pd_sync->seq[pd_index].capability.tmCapable; } } -- cgit v1.2.3-59-g8ed1b From f55cf47d925e48cddabafd3bc829f1ebc05c334d Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:07 -0800 Subject: scsi: megaraid_sas: Use DID_REQUEUE Moving to use DID_REQUEUE return type for reliable unconditional retries. Driver wants unconditional re-queue, so replace DID_RESET with DID_REQUEUE Discussed below - https://www.spinics.net/lists/linux-scsi/msg102848.html Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 4 ++-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 67a205a9f059..80fcdf52eea6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1668,7 +1668,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) /* Check for an mpio path and adjust behavior */ if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { if (megasas_check_mpio_paths(instance, scmd) == - (DID_RESET << 16)) { + (DID_REQUEUE << 16)) { return SCSI_MLQUEUE_HOST_BUSY; } else { scmd->result = DID_NO_CONNECT << 16; @@ -2492,7 +2492,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) struct megasas_cmd, list); list_del_init(&reset_cmd->list); if (reset_cmd->scmd) { - reset_cmd->scmd->result = DID_RESET << 16; + reset_cmd->scmd->result = DID_REQUEUE << 16; dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", reset_index, reset_cmd, reset_cmd->scmd->cmnd[0]); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 675afc9578fb..6ec7a18f6dbf 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3770,7 +3770,7 @@ int megasas_check_mpio_paths(struct megasas_instance *instance, struct scsi_cmnd *scmd) { struct megasas_instance *peer_instance = NULL; - int retval = (DID_RESET << 16); + int retval = (DID_REQUEUE << 16); if (instance->peerIsPresent) { peer_instance = megasas_get_peer_instance(instance); -- cgit v1.2.3-59-g8ed1b From 52205ac8940b43cca1711abfb43a05e7df08c09e Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:08 -0800 Subject: scsi: megaraid_sas: megasas_get_request_descriptor always return valid desc No functional change. Code clean up. Removing error code which is not valid scenario. In megasas_get_request_descriptor we can remove the error handling which is not required. With fusion controllers, if there is a valid message frame available, we are guaranteed to get a corresponding request descriptor. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 24 ++---------------------- 1 file changed, 2 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 6ec7a18f6dbf..b6c5dc5be5c7 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2438,18 +2438,12 @@ megasas_build_io_fusion(struct megasas_instance *instance, return 0; } -union MEGASAS_REQUEST_DESCRIPTOR_UNION * +static union MEGASAS_REQUEST_DESCRIPTOR_UNION * megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) { u8 *p; struct fusion_context *fusion; - if (index >= instance->max_mpt_cmds) { - dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for " - "descriptor for scsi%d\n", index, - instance->host->host_no); - return NULL; - } fusion = instance->ctrl_context; p = fusion->req_frames_desc + sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index; @@ -2960,7 +2954,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION * build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { - union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; u16 index; if (build_mpt_mfi_pass_thru(instance, cmd)) { @@ -2972,9 +2966,6 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) req_desc = megasas_get_request_descriptor(instance, index - 1); - if (!req_desc) - return NULL; - req_desc->Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -2997,11 +2988,6 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; req_desc = build_mpt_cmd(instance, cmd); - if (!req_desc) { - dev_info(&instance->pdev->dev, "Failed from %s %d\n", - __func__, __LINE__); - return DCMD_NOT_FIRED; - } megasas_fire_cmd_fusion(instance, req_desc); return DCMD_SUCCESS; @@ -3438,12 +3424,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, req_desc = megasas_get_request_descriptor(instance, (cmd_fusion->index - 1)); - if (!req_desc) { - dev_err(&instance->pdev->dev, "Failed from %s %d\n", - __func__, __LINE__); - megasas_return_cmd(instance, cmd_mfi); - return -ENOMEM; - } cmd_fusion->request_desc = req_desc; req_desc->Words = 0; -- cgit v1.2.3-59-g8ed1b From f4fc209326c79b03fecd38a6709cf08da47f15f7 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:09 -0800 Subject: scsi: megaraid_sas: change issue_dcmd to return void from int With the changes to remove checks for a valid request descriptor, issue_dcmd will now always return DCMD_SUCCESS. This patch changes return type of issue_dcmd to void and change all callers appropriately. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 2 +- drivers/scsi/megaraid/megaraid_sas_base.c | 19 +++++++++++-------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 8 ++++---- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index dff877fd0822..d9049d5054cc 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2297,7 +2297,7 @@ struct megasas_instance_template { u32 (*init_adapter)(struct megasas_instance *); u32 (*build_and_issue_cmd) (struct megasas_instance *, struct scsi_cmnd *); - int (*issue_dcmd)(struct megasas_instance *instance, + void (*issue_dcmd)(struct megasas_instance *instance, struct megasas_cmd *cmd); }; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 80fcdf52eea6..23fb78a631f2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -202,12 +202,12 @@ void megasas_fusion_ocr_wq(struct work_struct *work); static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, int initial); -int +void megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); - return 0; + return; } /** @@ -995,13 +995,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); - if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || - (instance->instancet->issue_dcmd(instance, cmd))) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_NOT_FIRED; } + instance->instancet->issue_dcmd(instance, cmd); + return wait_and_poll(instance, cmd, instance->requestorId ? 
MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); } @@ -1023,13 +1024,14 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, int ret = 0; cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; - if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || - (instance->instancet->issue_dcmd(instance, cmd))) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_NOT_FIRED; } + instance->instancet->issue_dcmd(instance, cmd); + if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); @@ -1087,13 +1089,14 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cmd->sync_cmd = 1; cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; - if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || - (instance->instancet->issue_dcmd(instance, cmd))) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_NOT_FIRED; } + instance->instancet->issue_dcmd(instance, cmd); + if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index b6c5dc5be5c7..c38fde0c9a6b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1115,7 +1115,7 @@ megasas_get_map_info(struct megasas_instance *instance) int megasas_sync_map_info(struct megasas_instance *instance) { - int ret = 0, i; + int i; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; u32 size_sync_info, num_lds; @@ -1184,7 +1184,7 @@ megasas_sync_map_info(struct megasas_instance *instance) instance->instancet->issue_dcmd(instance, cmd); - return ret; + return 0; } /* @@ -2981,7 +2981,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) * @cmd: mfi cmd pointer * */ -int +void megasas_issue_dcmd_fusion(struct megasas_instance *instance, struct megasas_cmd *cmd) { @@ -2990,7 +2990,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance, req_desc = build_mpt_cmd(instance, cmd); megasas_fire_cmd_fusion(instance, req_desc); - return DCMD_SUCCESS; + return; } /** -- cgit v1.2.3-59-g8ed1b From 15dd03811d99dcf828f4eeb2c2b6a02ddc1201c7 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:10 -0800 Subject: scsi: megaraid_sas: NVME Interface detection and prop settings Adding detection logic for NVME device attached behind Ventura controller. Driver set HostPageSize in IOC_INIT frame to inform about page size for NVME devices. Firmware reports NVME page size to the driver. PD INFO DCMD provide new interface type NVME_PD. Driver set property of NVME device. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 23 ++-- drivers/scsi/megaraid/megaraid_sas_base.c | 170 ++++++++++++++++++++-------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 6 +- drivers/scsi/megaraid/megaraid_sas_fusion.h | 2 +- 4 files changed, 142 insertions(+), 59 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index d9049d5054cc..f9efddf32641 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -733,7 +733,6 @@ struct megasas_pd_list { u16 tid; u8 driveType; u8 driveState; - u8 interface; } __packed; /* @@ -1530,8 +1529,8 @@ struct megasas_register_set { u32 outbound_scratch_pad ; /*00B0h*/ u32 outbound_scratch_pad_2; /*00B4h*/ u32 outbound_scratch_pad_3; /*00B8h*/ + u32 outbound_scratch_pad_4; /*00BCh*/ - u32 reserved_4; /*00BCh*/ u32 inbound_low_queue_port ; /*00C0h*/ @@ -1864,6 +1863,7 @@ union megasas_frame { struct MR_PRIV_DEVICE { bool is_tm_capable; bool tm_busy; + u8 interface_type; }; struct megasas_cmd; @@ -2055,17 +2055,24 @@ struct MR_DRV_SYSTEM_INFO { }; enum MR_PD_TYPE { - UNKNOWN_DRIVE = 0, - PARALLEL_SCSI = 1, - SAS_PD = 2, - SATA_PD = 3, - FC_PD = 4, + UNKNOWN_DRIVE = 0, + PARALLEL_SCSI = 1, + SAS_PD = 2, + SATA_PD = 3, + FC_PD = 4, + NVME_PD = 5, }; /* JBOD Queue depth definitions */ #define MEGASAS_SATA_QD 32 #define MEGASAS_SAS_QD 64 #define MEGASAS_DEFAULT_PD_QD 64 +#define MEGASAS_NVME_QD 32 + +#define MR_DEFAULT_NVME_PAGE_SIZE 4096 +#define MR_DEFAULT_NVME_PAGE_SHIFT 12 +#define MR_DEFAULT_NVME_MDTS_KB 128 +#define MR_NVME_PAGE_SIZE_MASK 0x000000FF struct megasas_instance { @@ -2209,6 +2216,7 @@ struct megasas_instance { bool is_ventura; bool msix_combined; u16 max_raid_mapsize; + u32 nvme_page_size; }; struct MR_LD_VF_MAP { u32 size; @@ -2428,6 +2436,7 @@ int megasas_get_ctrl_info(struct megasas_instance *instance); /* PD sequence */ int megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend); +void megasas_set_dynamic_target_properties(struct scsi_device *sdev); int megasas_set_crash_dump_params(struct megasas_instance *instance, u8 crash_buf_state); void megasas_free_host_crash_buffer(struct megasas_instance *instance); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 23fb78a631f2..f383bf2cc0a8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -116,8 +116,8 @@ static int megasas_ld_list_query(struct megasas_instance *instance, static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); -static int -megasas_get_pd_info(struct megasas_instance *instance, u16 device_id); +static void megasas_get_pd_info(struct megasas_instance *instance, + struct scsi_device *sdev); /* * PCI ID table for all supported controllers */ @@ -1738,16 +1738,21 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no) } /* -* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities +* megasas_set_dynamic_target_properties - +* Device property set by driver may not be static and it is required to be +* updated after OCR +* +* set tm_capable. +* set dma alignment (only for eedp protection enable vd). 
* * @sdev: OS provided scsi device * * Returns void */ -void megasas_update_sdev_properties(struct scsi_device *sdev) +void megasas_set_dynamic_target_properties(struct scsi_device *sdev) { - u16 pd_index = 0; - u32 device_id, ld; + u16 pd_index = 0, ld; + u32 device_id; struct megasas_instance *instance; struct fusion_context *fusion; struct MR_PRIV_DEVICE *mr_device_priv_data; @@ -1772,57 +1777,102 @@ void megasas_update_sdev_properties(struct scsi_device *sdev) raid = MR_LdRaidGet(ld, local_map_ptr); if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) - blk_queue_update_dma_alignment(sdev->request_queue, - 0x7); + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); mr_device_priv_data->is_tm_capable = raid->capability.tmCapable; } else if (instance->use_seqnum_jbod_fp) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + - sdev->id; + sdev->id; pd_sync = (void *)fusion->pd_seq_sync [(instance->pd_seq_map_id - 1) & 1]; mr_device_priv_data->is_tm_capable = - pd_sync->seq[pd_index].capability.tmCapable; + pd_sync->seq[pd_index].capability.tmCapable; } } -static void megasas_set_device_queue_depth(struct scsi_device *sdev) +/* + * megasas_set_nvme_device_properties - + * set nomerges=2 + * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). + * set maximum io transfer = MDTS of NVME device provided by MR firmware. + * + * MR firmware provides value in KB. Caller of this function converts + * kb into bytes. + * + * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, + * MR firmware provides value 128 as (32 * 4K) = 128K. + * + * @sdev: scsi device + * @max_io_size: maximum io transfer size + * + */ +static inline void +megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) { - u16 pd_index = 0; - int ret = DCMD_FAILED; struct megasas_instance *instance; + u32 mr_nvme_pg_size; - instance = megasas_lookup_instance(sdev->host->host_no); + instance = (struct megasas_instance *)sdev->host->hostdata; + mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { - pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; + blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); - if (instance->pd_info) { - mutex_lock(&instance->hba_mutex); - ret = megasas_get_pd_info(instance, pd_index); - mutex_unlock(&instance->hba_mutex); - } + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue); + blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); +} - if (ret != DCMD_SUCCESS) - return; - if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { +/* + * megasas_set_static_target_properties - + * Device property set by driver are static and it is not required to be + * updated after OCR. + * + * set io timeout + * set device queue depth + * set nvme device properties. 
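/*
 * Illustrative sketch (not part of the patch above): the unit conversions
 * behind megasas_set_nvme_device_properties().  Firmware reports the NVMe
 * maximum transfer size in KB (MR_DEFAULT_NVME_MDTS_KB == 128); the driver
 * shifts it to bytes (<< 10), divides by 512 for blk_queue_max_hw_sectors(),
 * and hands (nvme_page_size - 1) to blk_queue_virt_boundary().  Standalone
 * userspace C; the values and print-out are examples only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t mdts_kb = 128;                 /* MR_DEFAULT_NVME_MDTS_KB */
        uint32_t nvme_pg_size = 4096;           /* MR_DEFAULT_NVME_PAGE_SIZE */

        uint32_t max_io_bytes = mdts_kb << 10;          /* 131072 bytes */
        uint32_t max_hw_sectors = max_io_bytes / 512;   /* 256 sectors */
        uint32_t virt_boundary_mask = nvme_pg_size - 1; /* 0xfff */

        printf("max_io=%u bytes, hw_sectors=%u, boundary=0x%x\n",
               max_io_bytes, max_hw_sectors, virt_boundary_mask);
        return 0;
}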
see - megasas_set_nvme_device_properties + * + * @sdev: scsi device + * + */ +static void megasas_set_static_target_properties(struct scsi_device *sdev) +{ + u16 target_index = 0; + u8 interface_type; + u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; + u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; + struct megasas_instance *instance; + struct MR_PRIV_DEVICE *mr_device_priv_data; - switch (instance->pd_list[pd_index].interface) { - case SAS_PD: - scsi_change_queue_depth(sdev, MEGASAS_SAS_QD); - break; + instance = megasas_lookup_instance(sdev->host->host_no); + mr_device_priv_data = sdev->hostdata; + interface_type = mr_device_priv_data->interface_type; - case SATA_PD: - scsi_change_queue_depth(sdev, MEGASAS_SATA_QD); - break; + /* + * The RAID firmware may require extended timeouts. + */ + blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); - default: - scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD); - } - } + target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; + + switch (interface_type) { + case SAS_PD: + device_qd = MEGASAS_SAS_QD; + break; + case SATA_PD: + device_qd = MEGASAS_SATA_QD; + break; + case NVME_PD: + device_qd = MEGASAS_NVME_QD; + break; } + + if (instance->nvme_page_size && max_io_size_kb) + megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); + + scsi_change_queue_depth(sdev, device_qd); + } @@ -1841,14 +1891,18 @@ static int megasas_slave_configure(struct scsi_device *sdev) return -ENXIO; } } - megasas_set_device_queue_depth(sdev); - megasas_update_sdev_properties(sdev); - /* - * The RAID firmware may require extended timeouts. - */ - blk_queue_rq_timeout(sdev->request_queue, - scmd_timeout * HZ); + mutex_lock(&instance->hba_mutex); + /* Send DCMD to Firmware and cache the information */ + if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) + megasas_get_pd_info(instance, sdev); + + megasas_set_static_target_properties(sdev); + + mutex_unlock(&instance->hba_mutex); + + /* This sdev property may change post OCR */ + megasas_set_dynamic_target_properties(sdev); return 0; } @@ -3986,18 +4040,22 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) { return INITIATE_OCR; } -static int -megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) +static void +megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) { int ret; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; + struct MR_PRIV_DEVICE *mr_device_priv_data; + u16 device_id = 0; + + device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; cmd = megasas_get_cmd(instance); if (!cmd) { dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); - return -ENOMEM; + return; } dcmd = &cmd->frame->dcmd; @@ -4024,7 +4082,9 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) switch (ret) { case DCMD_SUCCESS: - instance->pd_list[device_id].interface = + mr_device_priv_data = sdev->hostdata; + le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); + mr_device_priv_data->interface_type = instance->pd_info->state.ddf.pdType.intf; break; @@ -4051,7 +4111,7 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); - return ret; + return; } /* * megasas_get_pd_list_info - Returns FW's pd_list structure @@ -5020,8 +5080,8 @@ skip_alloc: static int megasas_init_fw(struct megasas_instance *instance) { u32 max_sectors_1; - u32 max_sectors_2; - u32 tmp_sectors, msix_enable, scratch_pad_2, scratch_pad_3; + u32 
max_sectors_2, tmp_sectors, msix_enable; + u32 scratch_pad_2, scratch_pad_3, scratch_pad_4; resource_size_t base_addr; struct megasas_register_set __iomem *reg_set; struct megasas_ctrl_info *ctrl_info = NULL; @@ -5202,6 +5262,18 @@ static int megasas_init_fw(struct megasas_instance *instance) if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; + if (instance->is_ventura) { + scratch_pad_4 = + readl(&instance->reg_set->outbound_scratch_pad_4); + if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= + MR_DEFAULT_NVME_PAGE_SHIFT) + instance->nvme_page_size = + (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK)); + + dev_info(&instance->pdev->dev, + "NVME page size\t: (%d)\n", instance->nvme_page_size); + } + if (instance->msix_vectors ? megasas_setup_irqs_msix(instance, 1) : megasas_setup_irqs_ioapic(instance)) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index c38fde0c9a6b..a481854af0cb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -376,7 +376,8 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) fusion->sg_dma_pool = pci_pool_create("mr_sg", instance->pdev, - instance->max_chain_frame_sz, 4, 0); + instance->max_chain_frame_sz, + MR_DEFAULT_NVME_PAGE_SIZE, 0); /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ fusion->sense_dma_pool = pci_pool_create("mr_sense", instance->pdev, @@ -823,6 +824,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); IOCInitMessage->HostMSIxVectors = instance->msix_vectors; + IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; init_frame = (struct megasas_init_frame *)cmd->frame; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); @@ -3935,7 +3937,7 @@ transition_to_ready: megasas_setup_jbod_map(instance); shost_for_each_device(sdev, shost) - megasas_update_sdev_properties(sdev); + megasas_set_dynamic_target_properties(sdev); /* reset stream detection array */ if (instance->is_ventura) { diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 9d9658ea4e6e..9f19f000a4cd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -657,7 +657,7 @@ struct MPI2_IOC_INIT_REQUEST { __le16 HeaderVersion; /* 0x0E */ u32 Reserved5; /* 0x10 */ __le16 Reserved6; /* 0x14 */ - u8 Reserved7; /* 0x16 */ + u8 HostPageSize; /* 0x16 */ u8 HostMSIxVectors; /* 0x17 */ __le16 Reserved8; /* 0x18 */ __le16 SystemRequestFrameSize; /* 0x1A */ -- cgit v1.2.3-59-g8ed1b From 96188a89cc6d5ad3a0a5b7a6c4abc9f4a6de7678 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:11 -0800 Subject: scsi: megaraid_sas: NVME interface target prop added This patch fetch true values of NVME property from FW using New DCMD interface MR_DCMD_DEV_GET_TARGET_PROP Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
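/*
 * Illustrative sketch (not part of the patch above): decoding the Ventura
 * outbound_scratch_pad_4 value read in megasas_init_fw().  The low byte
 * (MR_NVME_PAGE_SIZE_MASK) carries a page shift; anything below
 * MR_DEFAULT_NVME_PAGE_SHIFT (12) is ignored, otherwise the driver's
 * nvme_page_size becomes 1 << shift.  Standalone C with a made-up register
 * value.
 */
#include <stdio.h>
#include <stdint.h>

#define MR_NVME_PAGE_SIZE_MASK          0x000000FF
#define MR_DEFAULT_NVME_PAGE_SHIFT      12

int main(void)
{
        uint32_t scratch_pad_4 = 0x0000000c;    /* hypothetical register value */
        uint32_t nvme_page_size = 0;

        if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
                nvme_page_size = 1u << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);

        printf("NVME page size: %u\n", nvme_page_size); /* 4096 */
        return 0;
}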
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 14 +++ drivers/scsi/megaraid/megaraid_sas_base.c | 144 +++++++++++++++++++++++++++- drivers/scsi/megaraid/megaraid_sas_fusion.h | 1 + 3 files changed, 155 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index f9efddf32641..a45ff102b49e 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -695,6 +695,18 @@ struct MR_PD_INFO { u8 reserved1[512-428]; } __packed; +/* + * Definition of structure used to expose attributes of VD or JBOD + * (this structure is to be filled by firmware when MR_DCMD_DRV_GET_TARGET_PROP + * is fired by driver) + */ +struct MR_TARGET_PROPERTIES { + u32 max_io_size_kb; + u32 device_qdepth; + u32 sector_size; + u8 reserved[500]; +} __packed; + /* * defines the physical drive address structure */ @@ -2090,6 +2102,8 @@ struct megasas_instance { dma_addr_t hb_host_mem_h; struct MR_PD_INFO *pd_info; dma_addr_t pd_info_h; + struct MR_TARGET_PROPERTIES *tgt_prop; + dma_addr_t tgt_prop_h; __le32 *reply_queue; dma_addr_t reply_queue_h; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index f383bf2cc0a8..51b35a619d99 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -118,6 +118,8 @@ static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); static void megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev); +static int megasas_get_target_prop(struct megasas_instance *instance, + struct scsi_device *sdev); /* * PCI ID table for all supported controllers */ @@ -1834,14 +1836,16 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) * set nvme device properties. see - megasas_set_nvme_device_properties * * @sdev: scsi device - * + * @is_target_prop true, if fw provided target properties. */ -static void megasas_set_static_target_properties(struct scsi_device *sdev) +static void megasas_set_static_target_properties(struct scsi_device *sdev, + bool is_target_prop) { u16 target_index = 0; u8 interface_type; u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; + u32 tgt_device_qd; struct megasas_instance *instance; struct MR_PRIV_DEVICE *mr_device_priv_data; @@ -1868,6 +1872,18 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev) break; } + if (is_target_prop) { + tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); + if (tgt_device_qd && + (tgt_device_qd <= instance->host->can_queue)) + device_qd = tgt_device_qd; + + /* max_io_size_kb will be set to non zero for + * nvme based vd and syspd. 
+ */ + max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); + } + if (instance->nvme_page_size && max_io_size_kb) megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); @@ -1880,6 +1896,8 @@ static int megasas_slave_configure(struct scsi_device *sdev) { u16 pd_index = 0; struct megasas_instance *instance; + int ret_target_prop = DCMD_FAILED; + bool is_target_prop = false; instance = megasas_lookup_instance(sdev->host->host_no); if (instance->pd_list_not_supported) { @@ -1897,7 +1915,14 @@ static int megasas_slave_configure(struct scsi_device *sdev) if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) megasas_get_pd_info(instance, sdev); - megasas_set_static_target_properties(sdev); + /* Some ventura firmware may not have instance->nvme_page_size set. + * Do not send MR_DCMD_DRV_GET_TARGET_PROP + */ + if ((instance->tgt_prop) && (instance->nvme_page_size)) + ret_target_prop = megasas_get_target_prop(instance, sdev); + + is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; + megasas_set_static_target_properties(sdev, is_target_prop); mutex_unlock(&instance->hba_mutex); @@ -5682,6 +5707,98 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, return 0; } +/* megasas_get_target_prop - Send DCMD with below details to firmware. + * + * This DCMD will fetch few properties of LD/system PD defined + * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. + * + * DCMD send by drivers whenever new target is added to the OS. + * + * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP + * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. + * 0 = system PD, 1 = LD. + * dcmd.mbox.s[1] - TargetID for LD/system PD. + * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. + * + * @instance: Adapter soft state + * @sdev: OS provided scsi device + * + * Returns 0 on success non-zero on failure. 
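/*
 * Illustrative sketch (not part of the patch above): how the per-target
 * queue depth ends up being chosen.  An interface-type default is picked
 * (MEGASAS_SAS_QD 64, MEGASAS_SATA_QD 32, MEGASAS_NVME_QD 32) and, when
 * MR_DCMD_DRV_GET_TARGET_PROP returned a non-zero device_qdepth that does
 * not exceed the host can_queue, the firmware value wins.  The fallback
 * default and the sample numbers below are placeholders.
 */
#include <stdio.h>
#include <stdint.h>

enum { UNKNOWN_DRIVE, PARALLEL_SCSI, SAS_PD, SATA_PD, FC_PD, NVME_PD };

static uint32_t pick_queue_depth(int interface_type, uint32_t fw_qdepth,
                                 uint32_t can_queue)
{
        uint32_t qd = 64;                       /* placeholder default */

        switch (interface_type) {
        case SAS_PD:  qd = 64; break;           /* MEGASAS_SAS_QD */
        case SATA_PD: qd = 32; break;           /* MEGASAS_SATA_QD */
        case NVME_PD: qd = 32; break;           /* MEGASAS_NVME_QD */
        }

        if (fw_qdepth && fw_qdepth <= can_queue)
                qd = fw_qdepth;                 /* firmware-provided override */
        return qd;
}

int main(void)
{
        printf("%u\n", pick_queue_depth(NVME_PD, 0, 1024));    /* 32 */
        printf("%u\n", pick_queue_depth(SAS_PD, 128, 1024));   /* 128 */
        return 0;
}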
+ */ +static int +megasas_get_target_prop(struct megasas_instance *instance, + struct scsi_device *sdev) +{ + int ret; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + u16 targetId = (sdev->channel % 2) + sdev->id; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_err(&instance->pdev->dev, + "Failed to get cmd %s\n", __func__); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); + + dcmd->mbox.s[1] = cpu_to_le16(targetId); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = + cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); + dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(instance->tgt_prop_h); + dcmd->sgl.sge32[0].length = + cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); + + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, + cmd, MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + switch (ret) { + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, + "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + break; + + default: + megasas_return_cmd(instance, cmd); + } + if (ret != DCMD_SUCCESS) + dev_err(&instance->pdev->dev, + "return from %s %d return value %d\n", + __func__, __LINE__, ret); + + return ret; +} + /** * megasas_start_aen - Subscribes to AEN during driver load time * @instance: Adapter soft state @@ -5958,9 +6075,17 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->pd_info = pci_alloc_consistent(pdev, sizeof(struct MR_PD_INFO), &instance->pd_info_h); + instance->pd_info = pci_alloc_consistent(pdev, + sizeof(struct MR_PD_INFO), &instance->pd_info_h); + instance->tgt_prop = pci_alloc_consistent(pdev, + sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h); + if (!instance->pd_info) dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); + if (!instance->tgt_prop) + dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n"); + instance->crash_dump_buf = pci_alloc_consistent(pdev, CRASH_DMA_BUF_SIZE, &instance->crash_dump_h); @@ -6105,6 +6230,10 @@ fail_alloc_dma_buf: pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); + if (instance->tgt_prop) + pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), + instance->tgt_prop, + instance->tgt_prop_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); @@ -6377,6 +6506,10 @@ fail_init_mfi: pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); + if (instance->tgt_prop) + pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), + instance->tgt_prop, + instance->tgt_prop_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); @@ -6535,11 +6668,14 @@ skip_firing_dcmds: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, 
instance->evt_detail_h); - if (instance->pd_info) pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); + if (instance->tgt_prop) + pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), + instance->tgt_prop, + instance->tgt_prop_h); if (instance->vf_affiliation) pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 9f19f000a4cd..9d02967b7857 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -694,6 +694,7 @@ struct MPI2_IOC_INIT_REQUEST { #define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102 +#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/ #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 -- cgit v1.2.3-59-g8ed1b From 33203bc4d61b33f1f7bb736eac0c6fdd20b92397 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:12 -0800 Subject: scsi: megaraid_sas: NVME fast path io support This patch provide true fast path IO support. Driver creates PRP for NVME drives and send Fast Path for performance. Certain h/w requirement needs to be taken care in driver. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 10 +- drivers/scsi/megaraid/megaraid_sas_fp.c | 55 +++-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 316 ++++++++++++++++++++++++++-- drivers/scsi/megaraid/megaraid_sas_fusion.h | 14 +- 4 files changed, 350 insertions(+), 45 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index a45ff102b49e..075e2e952353 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2172,6 +2172,11 @@ struct megasas_instance { atomic_t fw_outstanding; atomic_t ldio_outstanding; atomic_t fw_reset_no_pci_access; + atomic_t ieee_sgl; + atomic_t prp_sgl; + atomic_t sge_holes_type1; + atomic_t sge_holes_type2; + atomic_t sge_holes_type3; struct megasas_instance_template *instancet; struct tasklet_struct isr_tasklet; @@ -2443,7 +2448,9 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); __le16 get_updated_dev_handle(struct megasas_instance *instance, - struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *in_info, + struct MR_DRV_RAID_MAP_ALL *drv_map); void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); int megasas_get_ctrl_info(struct megasas_instance *instance); @@ -2472,4 +2479,5 @@ void megasas_update_sdev_properties(struct scsi_device *sdev); int megasas_reset_fusion(struct Scsi_Host *shost, int reason); int megasas_task_abort_fusion(struct scsi_cmnd *scmd); int megasas_reset_target_fusion(struct scsi_cmnd *scmd); +u32 mega_mod64(u64 dividend, u32 divisor); #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 24258afb207f..c3ef82ddcc0f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ 
b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -155,6 +155,11 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) return map->raidMap.devHndlInfo[pd].curDevHdl; } +static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) +{ + return map->raidMap.devHndlInfo[pd].interfaceType; +} + u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); @@ -929,6 +934,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u8 retval = TRUE; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; + u8 *pPdInterface = &io_info->pd_interface; u32 logArm, rowMod, armQ, arm; struct fusion_context *fusion; @@ -960,15 +966,18 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); /* get second pd also for raid 1/10 fast path writes*/ - if (raid->level == 1) { + if (instance->is_ventura && + (raid->level == 1) && + !io_info->isRead) { r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); if (r1_alt_pd != MR_PD_INVALID) io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map); } } else { - *pDevHandle = cpu_to_le16(MR_PD_INVALID); + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); if ((raid->level >= 5) && ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && @@ -977,8 +986,10 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, else if (raid->level == 1) { physArm = physArm + 1; pd = MR_ArPdGet(arRef, physArm, map); - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + } } } @@ -1025,6 +1036,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, u8 retval = TRUE; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; + u8 *pPdInterface = &io_info->pd_interface; struct fusion_context *fusion; fusion = instance->ctrl_context; @@ -1070,16 +1082,19 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, if (pd != MR_PD_INVALID) { /* Get dev handle from Pd. */ *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); /* get second pd also for raid 1/10 fast path writes*/ - if (raid->level == 1) { + if (instance->is_ventura && + (raid->level == 1) && + !io_info->isRead) { r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); if (r1_alt_pd != MR_PD_INVALID) io_info->r1_alt_dev_handle = - MR_PdDevHandleGet(r1_alt_pd, map); + MR_PdDevHandleGet(r1_alt_pd, map); } } else { /* set dev handle as invalid. */ - *pDevHandle = cpu_to_le16(MR_PD_INVALID); + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); if ((raid->level >= 5) && ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && @@ -1089,9 +1104,11 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, /* Get alternate Pd. 
*/ physArm = physArm + 1; pd = MR_ArPdGet(arRef, physArm, map); - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { /* Get dev handle from Pd */ *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + } } } @@ -1509,11 +1526,11 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, } u8 megasas_get_best_arm_pd(struct megasas_instance *instance, - struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *io_info, + struct MR_DRV_RAID_MAP_ALL *drv_map) { - struct fusion_context *fusion; struct MR_LD_RAID *raid; - struct MR_DRV_RAID_MAP_ALL *drv_map; u16 pd1_dev_handle; u16 pend0, pend1, ld; u64 diff0, diff1; @@ -1527,9 +1544,6 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, >> RAID_CTX_SPANARM_SPAN_SHIFT); arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK); - - fusion = instance->ctrl_context; - drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map); raid = MR_LdRaidGet(ld, drv_map); span_row_size = instance->UnevenSpanSupport ? @@ -1544,7 +1558,7 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map); - if (pd1_dev_handle == MR_PD_INVALID) { + if (pd1_dev_handle == MR_DEVHANDLE_INVALID) { bestArm = arm; } else { /* get the pending cmds for the data and mirror arms */ @@ -1581,19 +1595,18 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, } __le16 get_updated_dev_handle(struct megasas_instance *instance, - struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *io_info, + struct MR_DRV_RAID_MAP_ALL *drv_map) { u8 arm_pd; __le16 devHandle; - struct fusion_context *fusion; - struct MR_DRV_RAID_MAP_ALL *drv_map; - - fusion = instance->ctrl_context; - drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; /* get best new arm (PD ID) */ - arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info); + arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map); devHandle = MR_PdDevHandleGet(arm_pd, drv_map); + io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map); atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); + return devHandle; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a481854af0cb..379c723fba47 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1481,22 +1481,262 @@ map_cmd_status(struct fusion_context *fusion, } } +/** + * megasas_is_prp_possible - + * Checks if native NVMe PRPs can be built for the IO + * + * @instance: Adapter soft state + * @scmd: SCSI command from the mid-layer + * @sge_count: scatter gather element count. 
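/*
 * Illustrative sketch (not part of the patch above): a simplified version
 * of the RAID-1 read load balancing that get_updated_dev_handle() /
 * megasas_get_best_arm_pd() perform -- prefer the mirror arm with fewer
 * pending commands, and stay on the data arm when the alternate device
 * handle is invalid.  The real code also weighs distance from the last
 * accessed block; that refinement and the sentinel value used below are
 * simplifications.
 */
#include <stdio.h>
#include <stdint.h>

#define DEVHANDLE_INVALID 0xFFFF        /* assumed invalid-handle sentinel */

static int pick_arm(const uint16_t dev_handle[2], const uint32_t pending[2])
{
        if (dev_handle[1] == DEVHANDLE_INVALID)
                return 0;                       /* mirror missing: data arm */
        return (pending[1] < pending[0]) ? 1 : 0;
}

int main(void)
{
        uint16_t handles[2] = { 0x0011, 0x0012 };
        uint32_t pending[2] = { 8, 3 };

        printf("chosen arm: %d\n", pick_arm(handles, pending)); /* 1 */
        return 0;
}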
+ * + * Returns: true: PRPs can be built + * false: IEEE SGLs needs to be built + */ +static bool +megasas_is_prp_possible(struct megasas_instance *instance, + struct scsi_cmnd *scmd, int sge_count) +{ + struct fusion_context *fusion; + int i; + u32 data_length = 0; + struct scatterlist *sg_scmd; + bool build_prp = false; + u32 mr_nvme_pg_size; + + mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); + fusion = instance->ctrl_context; + data_length = scsi_bufflen(scmd); + sg_scmd = scsi_sglist(scmd); + + /* + * NVMe uses one PRP for each page (or part of a page) + * look at the data length - if 4 pages or less then IEEE is OK + * if > 5 pages then we need to build a native SGL + * if > 4 and <= 5 pages, then check physical address of 1st SG entry + * if this first size in the page is >= the residual beyond 4 pages + * then use IEEE, otherwise use native SGL + */ + + if (data_length > (mr_nvme_pg_size * 5)) { + build_prp = true; + } else if ((data_length > (mr_nvme_pg_size * 4)) && + (data_length <= (mr_nvme_pg_size * 5))) { + /* check if 1st SG entry size is < residual beyond 4 pages */ + if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4))) + build_prp = true; + } + +/* + * Below code detects gaps/holes in IO data buffers. + * What does holes/gaps mean? + * Any SGE except first one in a SGL starts at non NVME page size + * aligned address OR Any SGE except last one in a SGL ends at + * non NVME page size boundary. + * + * Driver has already informed block layer by setting boundary rules for + * bio merging done at NVME page size boundary calling kernel API + * blk_queue_virt_boundary inside slave_config. + * Still there is possibility of IO coming with holes to driver because of + * IO merging done by IO scheduler. + * + * With SCSI BLK MQ enabled, there will be no IO with holes as there is no + * IO scheduling so no IO merging. + * + * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and + * then sending IOs with holes. + * + * Though driver can request block layer to disable IO merging by calling- + * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but + * user may tune sysfs parameter- nomerges again to 0 or 1. + * + * If in future IO scheduling is enabled with SCSI BLK MQ, + * this algorithm to detect holes will be required in driver + * for SCSI BLK MQ enabled case as well. + * + * + */ + scsi_for_each_sg(scmd, sg_scmd, sge_count, i) { + if ((i != 0) && (i != (sge_count - 1))) { + if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) || + mega_mod64(sg_dma_address(sg_scmd), + mr_nvme_pg_size)) { + build_prp = false; + atomic_inc(&instance->sge_holes_type1); + break; + } + } + + if ((sge_count > 1) && (i == 0)) { + if ((mega_mod64((sg_dma_address(sg_scmd) + + sg_dma_len(sg_scmd)), + mr_nvme_pg_size))) { + build_prp = false; + atomic_inc(&instance->sge_holes_type2); + break; + } + } + + if ((sge_count > 1) && (i == (sge_count - 1))) { + if (mega_mod64(sg_dma_address(sg_scmd), + mr_nvme_pg_size)) { + build_prp = false; + atomic_inc(&instance->sge_holes_type3); + break; + } + } + } + + return build_prp; +} + +/** + * megasas_make_prp_nvme - + * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only + * + * @instance: Adapter soft state + * @scmd: SCSI command from the mid-layer + * @sgl_ptr: SGL to be filled in + * @cmd: Fusion command frame + * @sge_count: scatter gather element count. 
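/*
 * Illustrative sketch (not part of the patch above): the two decisions
 * megasas_is_prp_possible() makes, in standalone form.  First the 4-vs-5
 * page heuristic decides whether PRPs are worth building at all; then every
 * SGE except the first must start on an NVMe page boundary and every SGE
 * except the last must end on one, otherwise the buffer has a "hole" and
 * IEEE SGLs are used instead.  SGEs are modelled as plain structs here.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct sge { uint64_t addr; uint32_t len; };

static bool prp_possible(const struct sge *sg, int n, uint32_t data_len,
                         uint32_t pg)
{
        bool build_prp = false;
        int i;

        if (data_len > pg * 5)
                build_prp = true;
        else if (data_len > pg * 4 && sg[0].len < data_len - pg * 4)
                build_prp = true;

        if (!build_prp)
                return false;

        for (i = 0; i < n; i++) {
                if (i != 0 && (sg[i].addr % pg))                    /* must start aligned */
                        return false;
                if (i != n - 1 && ((sg[i].addr + sg[i].len) % pg))  /* must end aligned */
                        return false;
        }
        return true;
}

int main(void)
{
        struct sge sg[2] = { { 0x1000, 24576 }, { 0x20000, 8192 } };

        printf("PRP possible: %d\n", prp_possible(sg, 2, 32768, 4096)); /* 1 */
        return 0;
}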
+ * + * Returns: true: PRPs are built + * false: IEEE SGLs needs to be built + */ +static bool +megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, + struct megasas_cmd_fusion *cmd, int sge_count) +{ + int sge_len, offset, num_prp_in_chain = 0; + struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl; + u64 *ptr_sgl, *ptr_sgl_phys; + u64 sge_addr; + u32 page_mask, page_mask_result; + struct scatterlist *sg_scmd; + u32 first_prp_len; + bool build_prp = false; + int data_len = scsi_bufflen(scmd); + struct fusion_context *fusion; + u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); + + fusion = instance->ctrl_context; + + build_prp = megasas_is_prp_possible(instance, scmd, sge_count); + + if (!build_prp) + return false; + + /* + * Nvme has a very convoluted prp format. One prp is required + * for each page or partial page. Driver need to split up OS sg_list + * entries if it is longer than one page or cross a page + * boundary. Driver also have to insert a PRP list pointer entry as + * the last entry in each physical page of the PRP list. + * + * NOTE: The first PRP "entry" is actually placed in the first + * SGL entry in the main message as IEEE 64 format. The 2nd + * entry in the main message is the chain element, and the rest + * of the PRP entries are built in the contiguous pcie buffer. + */ + page_mask = mr_nvme_pg_size - 1; + ptr_sgl = (u64 *)cmd->sg_frame; + ptr_sgl_phys = (u64 *)cmd->sg_frame_phys_addr; + memset(ptr_sgl, 0, instance->max_chain_frame_sz); + + /* Build chain frame element which holds all prps except first*/ + main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) + ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); + + main_chain_element->Address = cpu_to_le64((uintptr_t)ptr_sgl_phys); + main_chain_element->NextChainOffset = 0; + main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | + IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; + + /* Build first prp, sge need not to be page aligned*/ + ptr_first_sgl = sgl_ptr; + sg_scmd = scsi_sglist(scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + + offset = (u32)(sge_addr & page_mask); + first_prp_len = mr_nvme_pg_size - offset; + + ptr_first_sgl->Address = cpu_to_le64(sge_addr); + ptr_first_sgl->Length = cpu_to_le32(first_prp_len); + + data_len -= first_prp_len; + + if (sge_len > first_prp_len) { + sge_addr += first_prp_len; + sge_len -= first_prp_len; + } else if (sge_len == first_prp_len) { + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + for (;;) { + offset = (u32)(sge_addr & page_mask); + + /* Put PRP pointer due to page boundary*/ + page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; + if (unlikely(!page_mask_result)) { + scmd_printk(KERN_NOTICE, + scmd, "page boundary ptr_sgl: 0x%p\n", + ptr_sgl); + ptr_sgl_phys++; + *ptr_sgl = + cpu_to_le64((uintptr_t)ptr_sgl_phys); + ptr_sgl++; + num_prp_in_chain++; + } + + *ptr_sgl = cpu_to_le64(sge_addr); + ptr_sgl++; + ptr_sgl_phys++; + num_prp_in_chain++; + + sge_addr += mr_nvme_pg_size; + sge_len -= mr_nvme_pg_size; + data_len -= mr_nvme_pg_size; + + if (data_len <= 0) + break; + + if (sge_len > 0) + continue; + + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + main_chain_element->Length = + cpu_to_le32(num_prp_in_chain * sizeof(u64)); + + atomic_inc(&instance->prp_sgl); + return 
build_prp; +} + /** * megasas_make_sgl_fusion - Prepares 32-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @sgl_ptr: SGL to be filled in * @cmd: cmd we are working on + * @sge_count sge count * - * If successful, this function returns the number of SG elements. */ -static int +static void megasas_make_sgl_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, - struct megasas_cmd_fusion *cmd) + struct megasas_cmd_fusion *cmd, int sge_count) { - int i, sg_processed, sge_count; + int i, sg_processed; struct scatterlist *os_sgl; struct fusion_context *fusion; @@ -1508,13 +1748,6 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, sgl_ptr_end->Flags = 0; } - sge_count = scsi_dma_map(scp); - - BUG_ON(sge_count < 0); - - if (sge_count > instance->max_num_sge || !sge_count) - return sge_count; - scsi_for_each_sg(scp, os_sgl, sge_count, i) { sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); @@ -1523,7 +1756,6 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, if (i == sge_count - 1) sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; sgl_ptr++; - sg_processed = i + 1; if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && @@ -1560,6 +1792,45 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, memset(sgl_ptr, 0, instance->max_chain_frame_sz); } } + atomic_inc(&instance->ieee_sgl); +} + +/** + * megasas_make_sgl - Build Scatter Gather List(SGLs) + * @scp: SCSI command pointer + * @instance: Soft instance of controller + * @cmd: Fusion command pointer + * + * This function will build sgls based on device type. + * For nvme drives, there is different way of building sgls in nvme native + * format- PRPs(Physical Region Page). 
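/*
 * Illustrative sketch (not part of the patch above): how many PRP entries
 * megasas_make_prp_nvme() emits for a transfer.  The first PRP covers the
 * remainder of the page the buffer starts in; every further NVMe page gets
 * one PRP, so the chain holds 1 + ceil((len - first_prp_len) / page_size)
 * entries (not counting the extra list-pointer entries the driver inserts
 * whenever the PRP list itself crosses a page boundary).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t prp_entries(uint64_t addr, uint32_t len, uint32_t pg)
{
        uint32_t offset = (uint32_t)(addr & (pg - 1));
        uint32_t first = pg - offset;           /* first, possibly partial, PRP */

        if (len <= first)
                return 1;
        len -= first;
        return 1 + (len + pg - 1) / pg;         /* remaining pages */
}

int main(void)
{
        /* 128 KiB starting 512 bytes into a 4 KiB page -> 1 + 32 = 33 PRPs */
        printf("%u\n", prp_entries(0x200, 131072, 4096));
        return 0;
}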
+ * + * Returns the number of sg lists actually used, zero if the sg lists + * is NULL, or -ENOMEM if the mapping failed + */ +static +int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd_fusion *cmd) +{ + int sge_count; + bool build_prp = false; + struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64; + + sge_count = scsi_dma_map(scp); + + if ((sge_count > instance->max_num_sge) || (sge_count <= 0)) + return sge_count; + + sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL; + if ((le16_to_cpu(cmd->io_request->IoFlags) & + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) && + (cmd->pd_interface == NVME_PD)) + build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64, + cmd, sge_count); + + if (!build_prp) + megasas_make_sgl_fusion(instance, scp, sgl_chain64, + cmd, sge_count); return sge_count; } @@ -2084,7 +2355,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.devHandle = get_updated_dev_handle(instance, &fusion->load_balance_info[device_id], - &io_info); + &io_info, local_map_ptr); scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; cmd->pd_r1_lb = io_info.pd_after_lb; if (instance->is_ventura) @@ -2111,6 +2382,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; io_request->DevHandle = io_info.devHandle; + cmd->pd_interface = io_info.pd_interface; /* populate the LUN field */ memcpy(io_request->LUN, raidLUN, 8); } else { @@ -2253,12 +2525,15 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct RAID_CONTEXT *pRAID_Context; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; + struct MR_PRIV_DEVICE *mr_device_priv_data; struct fusion_context *fusion = instance->ctrl_context; pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; device_id = MEGASAS_DEV_INDEX(scmd); pd_index = MEGASAS_PD_INDEX(scmd); os_timeout_value = scmd->request->timeout / HZ; + mr_device_priv_data = scmd->device->hostdata; + cmd->pd_interface = mr_device_priv_data->interface_type; io_request = cmd->io_request; /* get RAID_Context pointer */ @@ -2352,7 +2627,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { - u16 sge_count; + int sge_count; u8 cmd_type; struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; @@ -2398,15 +2673,12 @@ megasas_build_io_fusion(struct megasas_instance *instance, * Construct SGL */ - sge_count = - megasas_make_sgl_fusion(instance, scp, - (struct MPI25_IEEE_SGE_CHAIN64 *) - &io_request->SGL, cmd); + sge_count = megasas_make_sgl(instance, scp, cmd); - if (sge_count > instance->max_num_sge) { - dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds " - "max (0x%x) allowed\n", sge_count, - instance->max_num_sge); + if (sge_count > instance->max_num_sge || (sge_count < 0)) { + dev_err(&instance->pdev->dev, + "%s %d sge_count (%d) is out of range. 
Range is: 0-%d\n", + __func__, __LINE__, sge_count, instance->max_num_sge); return 1; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 9d02967b7857..c606ca000596 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -702,7 +702,7 @@ struct MPI2_IOC_INIT_REQUEST { struct MR_DEV_HANDLE_INFO { __le16 curDevHdl; u8 validHandles; - u8 reserved; + u8 interfaceType; __le16 devHandle[2]; }; @@ -914,6 +914,7 @@ struct IO_REQUEST_INFO { u16 ldTgtId; u8 isRead; __le16 devHandle; + u8 pd_interface; u64 pdBlock; u8 fpOkForIo; u8 IoforUnevenSpan; @@ -1025,6 +1026,16 @@ struct MR_FW_RAID_MAP_DYNAMIC { #define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) #define IEEE_SGE_FLAGS_END_OF_LIST (0x40) +#define MPI2_SGE_FLAGS_SHIFT (0x02) +#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0) +#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00) +#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02) + +#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) +#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) + struct megasas_register_set; struct megasas_instance; @@ -1061,6 +1072,7 @@ struct megasas_cmd_fusion { u32 index; u8 pd_r1_lb; struct completion done; + u8 pd_interface; u16 r1_alt_dev_handle; /* raid 1/10 only*/ bool cmd_completed; /* raid 1/10 fp writes status holder */ -- cgit v1.2.3-59-g8ed1b From a48ba0eca0456d45e920169930569caa3fc57124 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:13 -0800 Subject: scsi: megaraid_sas: raid 1 write performance for large io Avoid Host side PCI bandwidth bottleneck and hint FW to do Write buffering using RaidFlag MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT. Once IO is landed in FW with MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT, it will do single DMA from host and buffer the Write operation. On back end, FW will DMA same buffer to the Mirror and Data Arm. This will improve large block IO performance which bottleneck due to Host side PCI bandwidth limitation. Consistent ~4000MB T.P for 256K Block size is expected performance numbers. IOPS for small Block size should be on par with Disk performance. (E.g 42 SAS Disk in JBOD mode gives 3700MB T.P. Same Drive used in R1 WT mode, should give ~1800MB T.P) Using this patch 24 R1 VDs (HDD) gives below performance for Sequential Write. Without this patch, we cannot reach above 3200MB (Throughput is in MB.) Block Size 50% 256K and 50% 4K 100% 256K 4K 3100 2030 8K 3140 2740 16K 3140 3140 32K 3400 3240 64K 3500 3700 128K 3870 3870 256K 3920 3920 Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 5 +++++ drivers/scsi/megaraid/megaraid_sas_fusion.c | 32 +++++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 075e2e952353..bed8a3784120 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1409,6 +1409,8 @@ struct megasas_ctrl_info { #define SCAN_VD_CHANNEL 0x2 #define MEGASAS_KDUMP_QUEUE_DEPTH 100 +#define MR_LARGE_IO_MIN_SIZE (32 * 1024) +#define MR_R1_LDIO_PIGGYBACK_DEFAULT 4 enum MR_SCSI_CMD_TYPE { READ_WRITE_LDIO = 0, @@ -1875,6 +1877,7 @@ union megasas_frame { struct MR_PRIV_DEVICE { bool is_tm_capable; bool tm_busy; + atomic_t r1_ldio_hint; u8 interface_type; }; struct megasas_cmd; @@ -2235,6 +2238,8 @@ struct megasas_instance { bool is_ventura; bool msix_combined; u16 max_raid_mapsize; + /* preffered count to send as LDIO irrspective of FP capable.*/ + u8 r1_ldio_hint_default; u32 nvme_page_size; }; struct MR_LD_VF_MAP { diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 379c723fba47..edbecc531f33 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1383,6 +1383,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) } instance->flag_ieee = 1; + instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT; fusion->fast_path_io = 0; fusion->drv_map_pages = get_order(fusion->drv_map_sz); @@ -2110,7 +2111,7 @@ static void megasas_stream_detect(struct megasas_instance *instance, static void megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, struct MR_LD_RAID *raid, bool fp_possible, - u8 is_read) + u8 is_read, u32 scsi_buff_len) { u8 cpu_sel = MR_RAID_CTX_CPUSEL_0; struct RAID_CONTEXT_G35 *rctx_g35; @@ -2161,6 +2162,17 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, } rctx_g35->routing_flags.bits.cpu_sel = cpu_sel; + + /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT + * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS. + * IO Subtype is not bitmap. + */ + if ((raid->level == 1) && (!is_read)) { + if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) + praid_context->raid_context_g35.raid_flags = + (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + } } /** @@ -2303,6 +2315,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.isRead && io_info.ra_capable) fp_possible = false; + /* FP for Optimal raid level 1. + * All large RAID-1 writes (> 32 KiB, both WT and WB modes) + * are built by the driver as LD I/Os. + * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os + * (there is never a reason to process these as buffered writes) + * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os + * with the SLD bit asserted. 
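/*
 * Illustrative sketch (not part of the patch above): a simplified reading
 * of the RAID-1 write routing introduced here.  Writes larger than
 * MR_LARGE_IO_MIN_SIZE (32 KiB) are issued as firmware (LD) I/O with the
 * LDIO_BW_LIMIT raid flag so firmware pulls the buffer from the host once
 * and mirrors it internally, and they re-arm the per-device r1_ldio_hint
 * counter; small writes normally stay on the fast path, except while the
 * hint counter is still counting down.  The exact atomic handling in the
 * driver differs from this plain-integer model.
 */
#include <stdio.h>
#include <stdbool.h>

#define MR_LARGE_IO_MIN_SIZE            (32 * 1024)
#define MR_R1_LDIO_PIGGYBACK_DEFAULT    4

static bool r1_write_uses_fastpath(unsigned int io_bytes, int *r1_ldio_hint)
{
        if (io_bytes > MR_LARGE_IO_MIN_SIZE) {
                *r1_ldio_hint = MR_R1_LDIO_PIGGYBACK_DEFAULT;   /* re-arm hint */
                return false;                   /* large write -> LD I/O */
        }
        if (*r1_ldio_hint > 0) {
                (*r1_ldio_hint)--;              /* piggyback as LD I/O */
                return false;
        }
        return true;                            /* small write -> fast path */
}

int main(void)
{
        int hint = 0;
        unsigned int sizes[] = { 4096, 262144, 4096, 4096, 4096, 4096, 4096 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%7u bytes -> %s\n", sizes[i],
                       r1_write_uses_fastpath(sizes[i], &hint) ? "FP" : "LDIO");
        return 0;
}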
+ */ if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { mrdev_priv = scp->device->hostdata; @@ -2310,13 +2330,21 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, (instance->host->can_queue)) { fp_possible = false; atomic_dec(&instance->fw_outstanding); + } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || + atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { + fp_possible = false; + atomic_dec(&instance->fw_outstanding); + if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) + atomic_set(&mrdev_priv->r1_ldio_hint, + instance->r1_ldio_hint_default); } } /* If raid is NULL, set CPU affinity to default CPU0 */ if (raid) megasas_set_raidflag_cpu_affinity(praid_context, - raid, fp_possible, io_info.isRead); + raid, fp_possible, io_info.isRead, + scsi_buff_len); else praid_context->raid_context_g35.routing_flags.bits.cpu_sel = MR_RAID_CTX_CPUSEL_0; -- cgit v1.2.3-59-g8ed1b From 31d9a57b419d8ef8fa391009819f940778ce6245 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:14 -0800 Subject: scsi: megaraid_sas: set residual bytes count during IO completion Fixing issue of not setting residual bytes correctly. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index edbecc531f33..46286719c0aa 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1445,6 +1445,7 @@ map_cmd_status(struct fusion_context *fusion, struct scsi_cmnd *scmd, u8 status, u8 ext_status, u32 data_length, u8 *sense) { + int resid; switch (status) { @@ -1467,6 +1468,15 @@ map_cmd_status(struct fusion_context *fusion, SCSI_SENSE_BUFFERSIZE); scmd->result |= DRIVER_SENSE << 24; } + + /* + * If the IO request is partially completed, then MR FW will + * update "io_request->DataLength" field with actual number of + * bytes transferred.Driver will set residual bytes count in + * SCSI command structure. + */ + resid = (scsi_bufflen(scmd) - data_length); + scsi_set_resid(scmd, resid); break; case MFI_STAT_LD_OFFLINE: -- cgit v1.2.3-59-g8ed1b From def0eab3af8651e8951c5cf1b17ece0d26827636 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:15 -0800 Subject: scsi: megaraid_sas: enhance debug logs in OCR context Add additional logging from driver in OCR context. Add debug logs for partial completion of IOs is iodone context. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 3 +++ drivers/scsi/megaraid/megaraid_sas_base.c | 38 ++++++++++++++++++++++++++--- drivers/scsi/megaraid/megaraid_sas_fusion.c | 35 +++++++++++++++++++++++++- 3 files changed, 72 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index bed8a3784120..93da6dc196ad 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1405,6 +1405,9 @@ struct megasas_ctrl_info { #define VD_EXT_DEBUG 0 +/* Driver's internal Logging levels*/ +#define OCR_LOGS (1 << 0) + #define SCAN_PD_CHANNEL 0x1 #define SCAN_VD_CHANNEL 0x2 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 51b35a619d99..b41bbea3667f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2741,6 +2741,24 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) return BLK_EH_RESET_TIMER; } +/** + * megasas_dump_frame - This function will dump MPT/MFI frame + */ +static inline void +megasas_dump_frame(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + printk(KERN_INFO "IO request frame:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + printk("\n\t"); + printk("%08x ", le32_to_cpu(mfp[i])); + } + printk("\n"); +} + /** * megasas_reset_bus_host - Bus & host reset handler entry point */ @@ -2751,12 +2769,26 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; + scmd_printk(KERN_INFO, scmd, + "Controller reset is requested due to IO timeout\n" + "SCSI command pointer: (%p)\t SCSI host state: %d\t" + " SCSI host busy: %d\t FW outstanding: %d\n", + scmd, scmd->device->host->shost_state, + atomic_read((atomic_t *)&scmd->device->host->host_busy), + atomic_read(&instance->fw_outstanding)); + /* * First wait for all commands to complete */ - if (instance->ctrl_context) - ret = megasas_reset_fusion(scmd->device->host, 1); - else + if (instance->ctrl_context) { + struct megasas_cmd_fusion *cmd; + cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; + if (cmd) + megasas_dump_frame(cmd->io_request, + sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); + ret = megasas_reset_fusion(scmd->device->host, + SCSIIO_TIMEOUT_OCR); + } else ret = megasas_generic_reset(scmd); return ret; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 46286719c0aa..1252a3c08e1a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1445,8 +1445,10 @@ map_cmd_status(struct fusion_context *fusion, struct scsi_cmnd *scmd, u8 status, u8 ext_status, u32 data_length, u8 *sense) { + u8 cmd_type; int resid; + cmd_type = megasas_cmd_type(scmd); switch (status) { case MFI_STAT_OK: @@ -1477,6 +1479,13 @@ map_cmd_status(struct fusion_context *fusion, */ resid = (scsi_bufflen(scmd) - data_length); scsi_set_resid(scmd, resid); + + if (resid && + ((cmd_type == READ_WRITE_LDIO) || + (cmd_type == READ_WRITE_SYSPDIO))) + scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len" + " requested/completed 0x%x/0x%x\n", + status, scsi_bufflen(scmd), data_length); break; case MFI_STAT_LD_OFFLINE: @@ -3477,6 +3486,14 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, " will reset adapter scsi%d.\n", instance->host->host_no); megasas_complete_cmd_dpc_fusion((unsigned 
long)instance); + if (instance->requestorId && reason) { + dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT" + " state while polling during" + " I/O timeout handling for %d\n", + instance->host->host_no); + *convert = 1; + } + retval = 1; goto out; } @@ -3496,7 +3513,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, } /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ - if (instance->requestorId && reason) { + if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) { if (instance->hb_host_mem->HB.fwCounter != instance->hb_host_mem->HB.driverCounter) { instance->hb_host_mem->HB.driverCounter = @@ -3912,6 +3929,9 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; fusion = instance->ctrl_context; + scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd); + scsi_print_command(scmd); + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," "SCSI host:%d\n", instance->host->host_no); @@ -3992,6 +4012,9 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; fusion = instance->ctrl_context; + sdev_printk(KERN_INFO, scmd->device, + "target reset called for scmd(%p)\n", scmd); + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," "SCSI host:%d\n", instance->host->host_no); @@ -4151,6 +4174,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) if (convert) reason = 0; + if (megasas_dbg_lvl & OCR_LOGS) + dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n"); + /* Now return commands back to the OS */ for (i = 0 ; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; @@ -4161,6 +4187,13 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) } scmd_local = cmd_fusion->scmd; if (cmd_fusion->scmd) { + if (megasas_dbg_lvl & OCR_LOGS) { + sdev_printk(KERN_INFO, + cmd_fusion->scmd->device, "SMID: 0x%x\n", + cmd_fusion->index); + scsi_print_command(cmd_fusion->scmd); + } + scmd_local->result = megasas_check_mpio_paths(instance, scmd_local); -- cgit v1.2.3-59-g8ed1b From b4a42213a7eb8ec8556f27e6750bbc5c9193e86e Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:16 -0800 Subject: scsi: megaraid_sas: add print in device removal path Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b41bbea3667f..c8fa480f9686 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -7383,6 +7383,13 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, megasas_sysfs_set_dbg_lvl); +static inline void megasas_remove_scsi_device(struct scsi_device *sdev) +{ + sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); + scsi_remove_device(sdev); + scsi_device_put(sdev); +} + static void megasas_aen_polling(struct work_struct *work) { @@ -7487,10 +7494,8 @@ megasas_aen_polling(struct work_struct *work) else scsi_device_put(sdev1); } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } + if (sdev1) + megasas_remove_scsi_device(sdev1); } } } @@ -7507,10 +7512,8 @@ megasas_aen_polling(struct work_struct *work) else scsi_device_put(sdev1); } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } + if (sdev1) + megasas_remove_scsi_device(sdev1); } } } -- cgit v1.2.3-59-g8ed1b From 5fc499b612c5401a7ae0674086befcdf8b148516 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:17 -0800 Subject: scsi: megaraid_sas: reduce size of fusion_context and use vmalloc if kmalloc fails Currently the fusion context has a fixed-size load_balance_info array. Use dynamic allocation instead. In a few places the driver does not need physically contiguous memory; attempt to use vmalloc if physically contiguous memory is not available. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K.
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 2 + drivers/scsi/megaraid/megaraid_sas_base.c | 15 ++---- drivers/scsi/megaraid/megaraid_sas_fp.c | 3 +- drivers/scsi/megaraid/megaraid_sas_fusion.c | 71 +++++++++++++++++++++++++++-- drivers/scsi/megaraid/megaraid_sas_fusion.h | 3 +- 5 files changed, 76 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 93da6dc196ad..0a20fff04192 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2488,4 +2488,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason); int megasas_task_abort_fusion(struct scsi_cmnd *scmd); int megasas_reset_target_fusion(struct scsi_cmnd *scmd); u32 mega_mod64(u64 dividend, u32 divisor); +int megasas_alloc_fusion_context(struct megasas_instance *instance); +void megasas_free_fusion_context(struct megasas_instance *instance); #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index c8fa480f9686..b2da25764261 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -6029,18 +6029,12 @@ static int megasas_probe_one(struct pci_dev *pdev, case PCI_DEVICE_ID_LSI_CUTLASS_52: case PCI_DEVICE_ID_LSI_CUTLASS_53: { - instance->ctrl_context_pages = - get_order(sizeof(struct fusion_context)); - instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL, - instance->ctrl_context_pages); - if (!instance->ctrl_context) { - dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " - "memory for Fusion context info\n"); + if (megasas_alloc_fusion_context(instance)) { + megasas_free_fusion_context(instance); goto fail_alloc_dma_buf; } fusion = instance->ctrl_context; - memset(fusion, 0, - ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) fusion->adapter_type = THUNDERBOLT_SERIES; @@ -6683,8 +6677,7 @@ skip_firing_dcmds: fusion->pd_seq_sync[i], fusion->pd_seq_phys[i]); } - free_pages((ulong)instance->ctrl_context, - instance->ctrl_context_pages); + megasas_free_fusion_context(instance); } else { megasas_release_mfi(instance); pci_free_consistent(pdev, sizeof(u32), diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index c3ef82ddcc0f..d9b0f28cce8a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -489,7 +489,8 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) if (instance->UnevenSpanSupport) mr_update_span_set(drv_map, ldSpanInfo); - mr_update_load_balance_params(drv_map, lbInfo); + if (lbInfo) + mr_update_load_balance_params(drv_map, lbInfo); num_lds = le16_to_cpu(drv_map->raidMap.ldCount); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 1252a3c08e1a..9019b82b467f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -2397,8 +2398,9 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); } - if ((fusion->load_balance_info[device_id].loadBalanceFlag) && - (io_info.isRead)) { + if (fusion->load_balance_info && + (fusion->load_balance_info[device_id].loadBalanceFlag) && + (io_info.isRead)) { 
io_info.devHandle = get_updated_dev_handle(instance, &fusion->load_balance_info[device_id], @@ -4270,9 +4272,10 @@ transition_to_ready: retval = FAILED; } /* Reset load balance info */ - memset(fusion->load_balance_info, 0, - sizeof(struct LD_LOAD_BALANCE_INFO) - *MAX_LOGICAL_DRIVES_EXT); + if (fusion->load_balance_info) + memset(fusion->load_balance_info, 0, + (sizeof(struct LD_LOAD_BALANCE_INFO) * + MAX_LOGICAL_DRIVES_EXT)); if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); @@ -4426,6 +4429,64 @@ void megasas_fusion_ocr_wq(struct work_struct *work) megasas_reset_fusion(instance->host, 0); } +/* Allocate fusion context */ +int +megasas_alloc_fusion_context(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + + instance->ctrl_context_pages = get_order(sizeof(struct fusion_context)); + instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + instance->ctrl_context_pages); + if (!instance->ctrl_context) { + /* fall back to using vmalloc for fusion_context */ + instance->ctrl_context = vzalloc(sizeof(struct fusion_context)); + if (!instance->ctrl_context) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + fusion = instance->ctrl_context; + + fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT * + sizeof(struct LD_LOAD_BALANCE_INFO)); + fusion->load_balance_info = + (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + fusion->load_balance_info_pages); + if (!fusion->load_balance_info) { + fusion->load_balance_info = vzalloc(MAX_LOGICAL_DRIVES_EXT * + sizeof(struct LD_LOAD_BALANCE_INFO)); + if (!fusion->load_balance_info) + dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, " + "continuing without Load Balance support\n"); + } + + return 0; +} + +void +megasas_free_fusion_context(struct megasas_instance *instance) +{ + struct fusion_context *fusion = instance->ctrl_context; + + if (fusion) { + if (fusion->load_balance_info) { + if (is_vmalloc_addr(fusion->load_balance_info)) + vfree(fusion->load_balance_info); + else + free_pages((ulong)fusion->load_balance_info, + fusion->load_balance_info_pages); + } + + if (is_vmalloc_addr(fusion)) + vfree(fusion); + else + free_pages((ulong)fusion, + instance->ctrl_context_pages); + } +} + struct megasas_instance_template megasas_instance_template_fusion = { .enable_intr = megasas_enable_intr_fusion, .disable_intr = megasas_disable_intr_fusion, diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index c606ca000596..f25a26252790 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -1287,7 +1287,8 @@ struct fusion_context { struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT]; dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT]; u8 fast_path_io; - struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT]; + struct LD_LOAD_BALANCE_INFO *load_balance_info; + u32 load_balance_info_pages; LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; u8 adapter_type; struct LD_STREAM_DETECT **stream_detect_by_ld; -- cgit v1.2.3-59-g8ed1b From a09454ce5dd11184c5040ed536d323e2a302a579 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:18 -0800 Subject: scsi: megaraid_sas: In validate raid map, raid capability is not converted to cpu format for all lds On a host, if an ld is deleted there is a hole in the ld array returned by the FW. 
But in MR_ValidateMapInfo we are not accounting for holes in the ld array and traverse only up to index num_lds. This patch takes care of converting the capability field of all the valid lds in the ld raid map. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fp.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index d9b0f28cce8a..a0b0e68158ea 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -452,7 +452,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) struct LD_LOAD_BALANCE_INFO *lbInfo; PLD_SPAN_INFO ldSpanInfo; struct MR_LD_RAID *raid; - u16 ldCount, num_lds; + u16 num_lds, i; u16 ld; u32 expected_size; @@ -495,10 +495,17 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) num_lds = le16_to_cpu(drv_map->raidMap.ldCount); /*Convert Raid capability values to CPU arch */ - for (ldCount = 0; ldCount < num_lds; ldCount++) { - ld = MR_TargetIdToLdGet(ldCount, drv_map); + for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) { + ld = MR_TargetIdToLdGet(i, drv_map); + + /* For non existing VDs, iterate to next VD*/ + if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) + continue; + raid = MR_LdRaidGet(ld, drv_map); le32_to_cpus((u32 *)&raid->capability); + + num_lds--; } return 1; -- cgit v1.2.3-59-g8ed1b From d2d0358bcd09139a8e71afbca35bcd6b219dd1bf Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:19 -0800 Subject: scsi: megaraid_sas: MR_TargetIdToLdGet u8 to u16 and avoid invalid raid-map access Change the MR_TargetIdToLdGet return type from u8 to u16. An ld id range check is added at two places in this patch - @megasas_build_ldio_fusion and @megasas_build_ld_nonrw_fusion. Previous driver code used a different data type for the ld TargetId returned from MR_TargetIdToLdGet. Prior to this change, the above two functions were safeguarded because the function always returned u8, so the maximum ld id value returned was 255. In the check below, fw_supported_vd_count is today either 64 or 256, so the valid range to support is either 0-63 or 0-255. Ideally we want to avoid accessing the raid map for ld ids which are not valid. With the u16 change, the invalid ld id value is 0xFFFF and we would see a kernel panic due to random memory access in MR_LdRaidGet. The changes ensure we do not call MR_LdRaidGet if the ld id is beyond the size of the ldSpanMap array: if (ld < instance->fw_supported_vd_count) From the firmware's perspective, ld id 0xFF is invalid; even though the current driver code forwards such a command, firmware fails it with target not available. The ld target id issue occurs mainly whenever the driver loops to populate the raid map (e.g. MR_ValidateMapInfo). These are the only two places where we may see out-of-range target ids, and we want to protect raid map access based on the range provided by the firmware API. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Tomas Henzl Signed-off-by: Martin K.
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 2 +- drivers/scsi/megaraid/megaraid_sas_fp.c | 5 +++-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 25 ++++++++++++++----------- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 0a20fff04192..efc01a374a6e 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2448,7 +2448,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN); -u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index a0b0e68158ea..9d5d4851c0c4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -165,7 +165,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); } -u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) +u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.ldTgtIdToLd[ldTgtId]; } @@ -1151,7 +1151,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, { struct fusion_context *fusion; struct MR_LD_RAID *raid; - u32 ld, stripSize, stripe_mask; + u32 stripSize, stripe_mask; u64 endLba, endStrip, endRow, start_row, start_strip; u64 regStart; u32 regSize; @@ -1163,6 +1163,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, u8 retval = 0; u8 startlba_span = SPAN_INVALID; u64 *pdBlock = &io_info->pdBlock; + u16 ld; ldStartBlock = io_info->ldStartBlock; numBlocks = io_info->numBlocks; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 9019b82b467f..4aaf30769c94 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1121,7 +1121,8 @@ megasas_sync_map_info(struct megasas_instance *instance) int i; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; - u32 size_sync_info, num_lds; + u16 num_lds; + u32 size_sync_info; struct fusion_context *fusion; struct MR_LD_TARGET_SYNC *ci = NULL; struct MR_DRV_RAID_MAP_ALL *map; @@ -1870,7 +1871,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) { struct MR_LD_RAID *raid; - u32 ld; + u16 ld; u64 start_blk = io_info->pdBlock; u8 *cdb = io_request->CDB.CDB32; u32 num_blocks = io_info->numBlocks; @@ -2303,10 +2304,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; ld = MR_TargetIdToLdGet(device_id, local_map_ptr); - raid = MR_LdRaidGet(ld, local_map_ptr); - if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= - instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { + if (ld < instance->fw_supported_vd_count) + raid = MR_LdRaidGet(ld, local_map_ptr); + + if (!raid || (!fusion->fast_path_io)) { io_request->RaidContext.raid_context.reg_lock_flags = 0; fp_possible = false; } else { @@ -2478,12 +2480,12 @@ static void 
megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; - u16 pd_index = 0; + u16 pd_index = 0, ld; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct fusion_context *fusion = instance->ctrl_context; u8 span, physArm; __le16 devHandle; - u32 ld, arRef, pd; + u32 arRef, pd; struct MR_LD_RAID *raid; struct RAID_CONTEXT *pRAID_Context; u8 fp_possible = 1; @@ -2506,10 +2508,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, ld = MR_TargetIdToLdGet(device_id, local_map_ptr); if (ld >= instance->fw_supported_vd_count) fp_possible = 0; - - raid = MR_LdRaidGet(ld, local_map_ptr); - if (!(raid->capability.fpNonRWCapable)) - fp_possible = 0; + else { + raid = MR_LdRaidGet(ld, local_map_ptr); + if (!(raid->capability.fpNonRWCapable)) + fp_possible = 0; + } } else fp_possible = 0; -- cgit v1.2.3-59-g8ed1b From ff96f9251768f3fe1b4cd6f48f4021b3a1be269b Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:20 -0800 Subject: scsi: megaraid_sas: Big endian RDPQ mode fix When RDPQ mode is enabled and MR FW is deployed on a big endian host machine, the driver does not set up the reply address correctly. Fix this. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 4aaf30769c94..b26ee85ae11c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -580,7 +580,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance) } fusion->rdpq_virt[i].RDPQBaseAddress = - fusion->reply_frames_desc_phys[i]; + cpu_to_le64(fusion->reply_frames_desc_phys[i]); reply_desc = fusion->reply_frames_desc[i]; for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++) -- cgit v1.2.3-59-g8ed1b From a174118b7a97c52c3c3a4f1b8eee594502a55381 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:21 -0800 Subject: scsi: megaraid_sas: big endian support changes Endianness fixes for Ventura-specific code. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K.
Petersen --- drivers/scsi/megaraid/megaraid_sas_fp.c | 15 ++-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 70 ++++++++++------- drivers/scsi/megaraid/megaraid_sas_fusion.h | 115 +++++++++++++++++----------- 3 files changed, 122 insertions(+), 78 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 9d5d4851c0c4..68582d9f9e45 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -210,7 +210,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) le32_to_cpu(fw_map_dyn->desc_table_size), le32_to_cpu(fw_map_dyn->desc_table_num_elements)); dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n", - drv_map, fw_map_dyn->ld_count); + drv_map, le16_to_cpu(fw_map_dyn->ld_count)); #endif desc_table = (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset)); @@ -222,7 +222,8 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fp_pd_io_timeout_sec; - pDrvRaidMap->totalSize = sizeof(struct MR_DRV_RAID_MAP_ALL); + pDrvRaidMap->totalSize = + cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL)); /* point to actual data starting point*/ raid_map_data = (void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset) + @@ -234,11 +235,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) dev_dbg(&instance->pdev->dev, "desc table %p\n", desc_table); dev_dbg(&instance->pdev->dev, "raidmap type %d, raidmapOffset 0x%x\n", - desc_table->raid_map_desc_type, - desc_table->raid_map_desc_offset); + le32_to_cpu(desc_table->raid_map_desc_type), + le32_to_cpu(desc_table->raid_map_desc_offset)); dev_dbg(&instance->pdev->dev, "raid map number of elements 0%x, raidmapsize 0x%x\n", - desc_table->raid_map_desc_elements, - desc_table->raid_map_desc_buffer_size); + le32_to_cpu(desc_table->raid_map_desc_elements), + le32_to_cpu(desc_table->raid_map_desc_buffer_size)); #endif switch (le32_to_cpu(desc_table->raid_map_desc_type)) { case RAID_MAP_DESC_TYPE_DEVHDL_INFO: @@ -263,7 +264,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) #endif for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { pDrvRaidMap->ldTgtIdToLd[j] = - fw_map_dyn->ld_tgt_id_to_ld[j]; + le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]); #if VD_EXT_DEBUG dev_dbg(&instance->pdev->dev, " %d drv ldTgtIdToLd %d\n", j, pDrvRaidMap->ldTgtIdToLd[j]); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index b26ee85ae11c..baea4c2137d7 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2080,7 +2080,7 @@ static void megasas_stream_detect(struct megasas_instance *instance, */ continue; - cmd->io_request->RaidContext.raid_context_g35.stream_detected = true; + SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35); current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks; /* @@ -2154,7 +2154,8 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, /* Fast path cache by pass capable R0/R1 VD */ if ((raid->level <= 1) && (raid->capability.fp_cache_bypass_capable)) { - rctx_g35->routing_flags.bits.sld = 1; + rctx_g35->routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT); rctx_g35->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); @@ -2174,7 +2175,7 @@ 
megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, else if (raid->cpuAffinity.ldWrite.cpu1) cpu_sel = MR_RAID_CTX_CPUSEL_1; - if (rctx_g35->stream_detected && + if (is_stream_detected(rctx_g35) && (raid->level == 5) && (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) @@ -2182,7 +2183,8 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, } } - rctx_g35->routing_flags.bits.cpu_sel = cpu_sel; + rctx_g35->routing_flags |= + (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS. @@ -2333,7 +2335,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, /* In ventura if stream detected for a read and it is read ahead * capable make this IO as LDIO */ - if (praid_context->raid_context_g35.stream_detected && + if (is_stream_detected(&io_request->RaidContext.raid_context_g35) && io_info.isRead && io_info.ra_capable) fp_possible = false; @@ -2368,8 +2370,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, raid, fp_possible, io_info.isRead, scsi_buff_len); else - praid_context->raid_context_g35.routing_flags.bits.cpu_sel = - MR_RAID_CTX_CPUSEL_0; + praid_context->raid_context_g35.routing_flags |= + (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); } if (fp_possible) { @@ -2393,12 +2395,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); } else if (instance->is_ventura) { - io_request->RaidContext.raid_context_g35.type - = MPI2_TYPE_CUDA; - io_request->RaidContext.raid_context_g35.nseg = 0x1; - io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1; + io_request->RaidContext.raid_context_g35.nseg_type |= + (1 << RAID_CONTEXT_NSEG_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); + io_request->RaidContext.raid_context_g35.routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); io_request->IoFlags |= - cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); + cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); } if (fusion->load_balance_info && (fusion->load_balance_info[device_id].loadBalanceFlag) && @@ -2456,10 +2460,12 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, MR_RL_FLAGS_SEQ_NUM_ENABLE); io_request->RaidContext.raid_context.nseg = 0x1; } else if (instance->is_ventura) { - io_request->RaidContext.raid_context_g35.type - = MPI2_TYPE_CUDA; - io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1; - io_request->RaidContext.raid_context_g35.nseg = 0x1; + io_request->RaidContext.raid_context_g35.routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (1 << RAID_CONTEXT_NSEG_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); } io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); @@ -2609,17 +2615,23 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, pRAID_Context->virtual_disk_tgt_id = pd_sync->seq[pd_index].pd_target_id; else - pRAID_Context->virtual_disk_tgt_id = - cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); + pRAID_Context->virtual_disk_tgt_id = + cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; 
io_request->DevHandle = pd_sync->seq[pd_index].devHandle; - if (instance->is_ventura) - io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1; - else - pRAID_Context->reg_lock_flags |= - (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); - pRAID_Context->type = MPI2_TYPE_CUDA; - pRAID_Context->nseg = 0x1; + if (instance->is_ventura) { + io_request->RaidContext.raid_context_g35.routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (1 << RAID_CONTEXT_NSEG_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); + } else { + pRAID_Context->type = MPI2_TYPE_CUDA; + pRAID_Context->nseg = 0x1; + pRAID_Context->reg_lock_flags |= + (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); + } } else if (fusion->fast_path_io) { pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); pRAID_Context->config_seq_num = 0; @@ -2734,9 +2746,11 @@ megasas_build_io_fusion(struct megasas_instance *instance, return 1; } - if (instance->is_ventura) - io_request->RaidContext.raid_context_g35.num_sge = sge_count; - else { + if (instance->is_ventura) { + set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); + cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); + cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); + } else { /* numSGE store lower 8 bit of sge_count. * numSGEExt store higher 8 bit of sge_count */ diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index f25a26252790..1441c35c58a3 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -148,44 +148,13 @@ struct RAID_CONTEXT { * starts in MPT IO Frames */ struct RAID_CONTEXT_G35 { -#if defined(__BIG_ENDIAN_BITFIELD) - u16 resvd0:8; - u16 nseg:4; - u16 type:4; -#else - u16 type:4; /* 0x00 */ - u16 nseg:4; /* 0x00 */ - u16 resvd0:8; -#endif + #define RAID_CONTEXT_NSEG_MASK 0x00F0 + #define RAID_CONTEXT_NSEG_SHIFT 4 + #define RAID_CONTEXT_TYPE_MASK 0x000F + #define RAID_CONTEXT_TYPE_SHIFT 0 + u16 nseg_type; u16 timeout_value; /* 0x02 -0x03 */ - union { - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u16 set_divert:4; - u16 cpu_sel:4; - u16 log:1; - u16 rw:1; - u16 sbs:1; - u16 sqn:1; - u16 fwn:1; - u16 c2f:1; - u16 sld:1; - u16 reserved:1; -#else - u16 reserved:1; - u16 sld:1; - u16 c2f:1; - u16 fwn:1; - u16 sqn:1; - u16 sbs:1; - u16 rw:1; - u16 log:1; - u16 cpu_sel:4; - u16 set_divert:4; -#endif - } bits; - u16 s; - } routing_flags; /* 0x04 -0x05 routing flags */ + u16 routing_flags; // 0x04 -0x05 routing flags u16 virtual_disk_tgt_id; /* 0x06 -0x07 */ u64 reg_lock_row_lba; /* 0x08 - 0x0F */ u32 reg_lock_length; /* 0x10 - 0x13 */ @@ -200,18 +169,78 @@ struct RAID_CONTEXT_G35 { */ u8 span_arm; /* 0x1C span[7:5], arm[4:0] */ u16 config_seq_num; /* 0x1A -0x1B */ + union { + /* + * Bit format: + * --------------------------------- + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * --------------------------------- + * Byte0 | numSGE[7]- numSGE[0] | + * --------------------------------- + * Byte1 |SD | resvd | numSGE 8-11 | + * -------------------------------- + */ + #define NUM_SGE_MASK_LOWER 0xFF + #define NUM_SGE_MASK_UPPER 0x0F + #define NUM_SGE_SHIFT_UPPER 8 + #define STREAM_DETECT_SHIFT 7 + #define STREAM_DETECT_MASK 0x80 + struct { #if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */ - u16 stream_detected:1; - u16 reserved:3; - u16 num_sge:12; + u16 
stream_detected:1; + u16 reserved:3; + u16 num_sge:12; #else - u16 num_sge:12; - u16 reserved:3; - u16 stream_detected:1; + u16 num_sge:12; + u16 reserved:3; + u16 stream_detected:1; #endif + } bits; + u8 bytes[2]; + } u; u8 resvd2[2]; /* 0x1E-0x1F */ }; +#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1 +#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2 +#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3 +#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4 +#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5 +#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6 +#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7 +#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8 +#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00 +#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12 +#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000 + +static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35, + u16 sge_count) +{ + rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER); + rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER) + & NUM_SGE_MASK_UPPER); +} + +static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35) +{ + u16 sge_count; + + sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER) + << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0])); + return sge_count; +} + +#define SET_STREAM_DETECTED(rctx_g35) \ + (rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK) + +#define CLEAR_STREAM_DETECTED(rctx_g35) \ + (rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK)) + +static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35) +{ + return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK)); +} + union RAID_CONTEXT_UNION { struct RAID_CONTEXT raid_context; struct RAID_CONTEXT_G35 raid_context_g35; -- cgit v1.2.3-59-g8ed1b From 318aaef88353c09a73d26d3b87a74fab67ff9282 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:22 -0800 Subject: scsi: megaraid_sas: avoid unaligned access in ioctl path Fix kernel warning for accessing unaligned memory access in driver. Signed-off-by: Shivasharan S Signed-off-by: Kashyap Desai Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b2da25764261..5d7aa05c0a9c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -7007,7 +7008,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + ioc->sense_off); - if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), + if (copy_to_user((void __user *)((unsigned long) + get_unaligned((unsigned long *)sense_ptr)), sense, ioc->sense_len)) { dev_err(&instance->pdev->dev, "Failed to copy out to user " "sense data\n"); -- cgit v1.2.3-59-g8ed1b From 95c060869e6872ea03a4a9d15236adcffb1d8b07 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:23 -0800 Subject: scsi: megaraid_sas: latest controller OCR capability from FW before sending shutdown DCMD Fetch the latest controller OCR capability from FW before sending MR_DCMD_CTRL_SHUTDOWN When application sends a shutdown DCMD (MR_DCMD_CTRL_SHUTDOWN), driver will fetch latest controller information from firmware. This is to ensure that driver always has latest OCR capability of controller before sending the DCMD. 
Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5d7aa05c0a9c..cd9d223d4370 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -6900,6 +6900,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); + if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) { + if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { + megasas_return_cmd(instance, cmd); + return -1; + } + } + if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { error = megasas_set_crash_dump_params_ioctl(cmd); megasas_return_cmd(instance, cmd); -- cgit v1.2.3-59-g8ed1b From b41c0a4aa7c0fc1f98648c020358598498d48f06 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:24 -0800 Subject: scsi: megaraid_sas: set pd_after_lb from MR_BuildRaidContext and initialize pDevHandle to MR_DEVHANDLE_INVALID Issue is limited for Syncro firmware where pd_after_lb is not set but is accidentally used. Not a functional issue, but results in low performance due to improper load balancing between two LUNs. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 68582d9f9e45..a5517e72ad2f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -948,6 +948,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, struct fusion_context *fusion; fusion = instance->ctrl_context; + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); /*Get row and span from io_info for Uneven Span IO.*/ row = io_info->start_row; @@ -986,7 +987,6 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, MR_PdDevHandleGet(r1_alt_pd, map); } } else { - *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); if ((raid->level >= 5) && ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && @@ -1013,6 +1013,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = pRAID_Context->span_arm; } + io_info->pd_after_lb = pd; return retval; } @@ -1049,7 +1050,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, struct fusion_context *fusion; fusion = instance->ctrl_context; - + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); row = mega_div64_32(stripRow, raid->rowDataSize); @@ -1102,8 +1103,6 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, MR_PdDevHandleGet(r1_alt_pd, map); } } else { - /* set dev handle as invalid. 
*/ - *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); if ((raid->level >= 5) && ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && @@ -1132,6 +1131,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = pRAID_Context->span_arm; } + io_info->pd_after_lb = pd; return retval; } -- cgit v1.2.3-59-g8ed1b From 50b7f5a2d0126b893a4591336b1cdb4028a726b0 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:25 -0800 Subject: scsi: megaraid_sas: Change max_cmd from u32 to u16 in all functions Since maximum supported FW commands are all defined as u16, change all local variables referring to max_cmd from u32 to u16. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 10 +++++----- drivers/scsi/megaraid/megaraid_sas_fusion.c | 5 +++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index cd9d223d4370..06001b4f9cad 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1549,7 +1549,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance) struct megasas_io_frame *ldio; struct megasas_pthru_frame *pthru; u32 sgcount; - u32 max_cmd = instance->max_fw_cmds; + u16 max_cmd = instance->max_fw_cmds; dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); @@ -3467,7 +3467,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance) { struct megasas_cmd *cmd; int i; - u32 max_cmd = instance->max_fw_cmds; + u16 max_cmd = instance->max_fw_cmds; u32 defer_index; unsigned long flags; @@ -3843,7 +3843,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) static void megasas_teardown_frame_pool(struct megasas_instance *instance) { int i; - u32 max_cmd = instance->max_mfi_cmds; + u16 max_cmd = instance->max_mfi_cmds; struct megasas_cmd *cmd; if (!instance->frame_dma_pool) @@ -3887,7 +3887,7 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance) static int megasas_create_frame_pool(struct megasas_instance *instance) { int i; - u32 max_cmd; + u16 max_cmd; u32 sge_sz; u32 total_sz; u32 frame_count; @@ -4021,7 +4021,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance) { int i; int j; - u32 max_cmd; + u16 max_cmd; struct megasas_cmd *cmd; struct fusion_context *fusion; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index baea4c2137d7..2d6d979c6ca8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -367,7 +367,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) { int i; - u32 max_cmd; + u16 max_cmd; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; @@ -1274,7 +1274,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) { struct megasas_register_set __iomem *reg_set; struct fusion_context *fusion; - u32 max_cmd, scratch_pad_2; + u16 max_cmd; + u32 scratch_pad_2; int i = 0, count; fusion 
= instance->ctrl_context; -- cgit v1.2.3-59-g8ed1b From 013aec66afaa51581d4b41887fe418b94c54bc51 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:26 -0800 Subject: scsi: megaraid_sas: update can_queue only if the new value is less Minor Optimization: No need to update HBA can_queue value if the current max FW commands is equal to earlier value. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 2d6d979c6ca8..94b7a68d12cd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -259,7 +259,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c if (fw_boot_context == OCR_CONTEXT) { cur_max_fw_cmds = cur_max_fw_cmds - 1; - if (cur_max_fw_cmds <= instance->max_fw_cmds) { + if (cur_max_fw_cmds < instance->max_fw_cmds) { instance->cur_can_queue = cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS); -- cgit v1.2.3-59-g8ed1b From cf4e55e7be0a4c009908803ff4c3140c9822c033 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:27 -0800 Subject: scsi: megaraid_sas: max_fw_cmds are decremented twice, remove duplicate Fix to account for the reply_q_sz not exceeding the maximum commands that the firmware can support, instance->max_fw_cmds is already decremented in megasas_fusion_update_can_queue(). Remove the extra decrement logic in code. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 94b7a68d12cd..74cefaea18b2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1284,13 +1284,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) megasas_fusion_update_can_queue(instance, PROBE_CONTEXT); - /* - * Reduce the max supported cmds by 1. This is to ensure that the - * reply_q_sz (1 more than the max cmd that driver may send) - * does not exceed max cmds that the FW can support - */ - instance->max_fw_cmds = instance->max_fw_cmds-1; - /* * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames */ -- cgit v1.2.3-59-g8ed1b From 21c34006dce99949a83e9c46c543af28651db5db Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:28 -0800 Subject: scsi: megaraid_sas: megasas_return_cmd does not memset IO frame to zero Memset the IO frame to zero after release. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 1 + drivers/scsi/megaraid/megaraid_sas_base.c | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index efc01a374a6e..508516d412c9 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2238,6 +2238,7 @@ struct megasas_instance { u8 is_rdpq; bool dev_handle; bool fw_sync_cache_support; + u32 mfi_frame_size; bool is_ventura; bool msix_combined; u16 max_raid_mapsize; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 06001b4f9cad..30e390c14208 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -268,6 +268,8 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) cmd->scmd = NULL; cmd->frame_count = 0; cmd->flags = 0; + memset(cmd->frame, 0, instance->mfi_frame_size); + cmd->frame->io.context = cpu_to_le32(cmd->index); if (!fusion && reset_devices) cmd->frame->hdr.cmd = MFI_CMD_INVALID; list_add(&cmd->list, (&instance->cmd_pool)->next); @@ -3889,7 +3891,6 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) int i; u16 max_cmd; u32 sge_sz; - u32 total_sz; u32 frame_count; struct megasas_cmd *cmd; @@ -3917,12 +3918,13 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) * Total 192 byte (3 MFI frame of 64 byte) */ frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1); - total_sz = MEGAMFI_FRAME_SIZE * frame_count; + instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; /* * Use DMA pool facility provided by PCI layer */ instance->frame_dma_pool = pci_pool_create("megasas frame pool", - instance->pdev, total_sz, 256, 0); + instance->pdev, instance->mfi_frame_size, + 256, 0); if (!instance->frame_dma_pool) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); @@ -3966,7 +3968,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) return -ENOMEM; } - memset(cmd->frame, 0, total_sz); + memset(cmd->frame, 0, instance->mfi_frame_size); cmd->frame->io.context = cpu_to_le32(cmd->index); cmd->frame->io.pad_0 = 0; if (!instance->ctrl_context && reset_devices) -- cgit v1.2.3-59-g8ed1b From 25fb13dd2d6e9897c391315254d1b2f7c74e0482 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:29 -0800 Subject: scsi: megaraid_sas: Remove unused pd_index from megasas_build_ld_nonrw_fusion Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 74cefaea18b2..a9b66ceab796 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2480,7 +2480,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; - u16 pd_index = 0, ld; + u16 ld; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct fusion_context *fusion = instance->ctrl_context; u8 span, physArm; @@ -2492,7 +2492,6 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, io_request = cmd->io_request; device_id = MEGASAS_DEV_INDEX(scmd); - pd_index = MEGASAS_PD_INDEX(scmd); local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); /* get RAID_Context pointer */ -- cgit v1.2.3-59-g8ed1b From 1d6dbd1752fb4347a4a5db06c8f5cd35dd1919f4 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:30 -0800 Subject: scsi: megaraid_sas: Do not set fp_possible if TM capable for non-RW syspdIO, change fp_possible to bool FIX - firmware wants non-RW SYS PD IOs to avoid FastPath for better tracking and other functionalities if the device is task management capable. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a9b66ceab796..ba102e4139cd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2566,7 +2566,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, */ static void megasas_build_syspd_fusion(struct megasas_instance *instance, - struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible) + struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, + bool fp_possible) { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; @@ -2687,6 +2688,8 @@ megasas_build_io_fusion(struct megasas_instance *instance, int sge_count; u8 cmd_type; struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; + struct MR_PRIV_DEVICE *mr_device_priv_data; + mr_device_priv_data = scp->device->hostdata; /* Zero out some fields so they don't get reused */ memset(io_request->LUN, 0x0, 8); @@ -2715,12 +2718,14 @@ megasas_build_io_fusion(struct megasas_instance *instance, megasas_build_ld_nonrw_fusion(instance, scp, cmd); break; case READ_WRITE_SYSPDIO: + megasas_build_syspd_fusion(instance, scp, cmd, true); + break; case NON_READ_WRITE_SYSPDIO: - if (instance->secure_jbod_support && - (cmd_type == NON_READ_WRITE_SYSPDIO)) - megasas_build_syspd_fusion(instance, scp, cmd, 0); + if (instance->secure_jbod_support || + mr_device_priv_data->is_tm_capable) + megasas_build_syspd_fusion(instance, scp, cmd, false); else - megasas_build_syspd_fusion(instance, scp, cmd, 1); + megasas_build_syspd_fusion(instance, scp, cmd, true); break; default: break; -- cgit v1.2.3-59-g8ed1b From a6821ca39e0cac24b4c366b28b07c1a37ce8915d Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:31 -0800 Subject: scsi: megaraid_sas: During OCR, if 
get_ctrl_info fails do not continue with OCR Error handling: If controller reset is not able to recover, kill HBA and quit immediately. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ba102e4139cd..c0a58dcabc14 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -4285,6 +4285,7 @@ transition_to_ready: __func__, __LINE__); megaraid_sas_kill_hba(instance); retval = FAILED; + goto out; } /* Reset load balance info */ if (fusion->load_balance_info) -- cgit v1.2.3-59-g8ed1b From 7a7ae4f19280a4da2dfbdca3a414b0fd5fef2cb4 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:32 -0800 Subject: scsi: megaraid_sas: Change build_mpt_mfi_pass_thru to return void Code refactoring to build_mpt_mfi_pass_thru to return void. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index c0a58dcabc14..bafaf0376d51 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3229,7 +3229,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) * mfi_cmd: megasas_cmd pointer * */ -u8 +void build_mpt_mfi_pass_thru(struct megasas_instance *instance, struct megasas_cmd *mfi_cmd) { @@ -3279,8 +3279,6 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz); - - return 0; } /** @@ -3295,11 +3293,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; u16 index; - if (build_mpt_mfi_pass_thru(instance, cmd)) { - dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n"); - return NULL; - } - + build_mpt_mfi_pass_thru(instance, cmd); index = cmd->context.smid; req_desc = megasas_get_request_descriptor(instance, index - 1); -- cgit v1.2.3-59-g8ed1b From 72bff2d1d0c9cb8923a2ffdeaafa40deaed0f671 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:33 -0800 Subject: scsi: megaraid_sas: Bail out the driver load if ld_list_query fails Error handling: Bail out the driver load if key FW cmds (LD_LIST) are not return successful. Clean up error handling in megasas_init_fw. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 30e390c14208..0e7121d757c2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5352,7 +5352,7 @@ static int megasas_init_fw(struct megasas_instance *instance) (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); if (megasas_get_pd_list(instance) < 0) { dev_err(&instance->pdev->dev, "failed to get PD list\n"); - goto fail_get_pd_list; + goto fail_get_ld_pd_list; } memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); @@ -5388,7 +5388,7 @@ static int megasas_init_fw(struct megasas_instance *instance) if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) - megasas_get_ld_list(instance); + goto fail_get_ld_pd_list; /* * Compute the max allowed sectors per IO: The controller info has two @@ -5507,8 +5507,6 @@ static int megasas_init_fw(struct megasas_instance *instance) fail_get_ld_pd_list: instance->instancet->disable_intr(instance); -fail_get_pd_list: - instance->instancet->disable_intr(instance); fail_init_adapter: megasas_destroy_irqs(instance); fail_setup_irqs: @@ -5520,9 +5518,11 @@ fail_ready_state: instance->ctrl_info = NULL; iounmap(instance->reg_set); - fail_ioremap: +fail_ioremap: pci_release_selected_regions(instance->pdev, 1<bar); + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); return -EINVAL; } -- cgit v1.2.3-59-g8ed1b From 29206da1490a7065e8a03ec43f6de60c5c978cae Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:34 -0800 Subject: scsi: megaraid_sas: Use synchronize_irq to wait for IRQs to complete FIX - Do not use random delay to synchronize with IRQ. Use kernel API. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index bafaf0376d51..d8bfb87f456c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3144,6 +3144,22 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) return IRQ_HANDLED; } +/** + * megasas_sync_irqs - Synchronizes all IRQs owned by adapter + * @instance: Adapter soft state + */ +void megasas_sync_irqs(unsigned long instance_addr) +{ + u32 count, i; + struct megasas_instance *instance = + (struct megasas_instance *)instance_addr; + + count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; + + for (i = 0; i < count; i++) + synchronize_irq(pci_irq_vector(instance->pdev, i)); +} + /** * megasas_complete_cmd_dpc_fusion - Completes command * @instance: Adapter soft state @@ -3820,7 +3836,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, break; else { instance->instancet->disable_intr(instance); - msleep(1000); + megasas_sync_irqs((unsigned long)instance); megasas_complete_cmd_dpc_fusion ((unsigned long)instance); instance->instancet->enable_intr(instance); @@ -4174,7 +4190,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); instance->instancet->disable_intr(instance); - msleep(1000); + megasas_sync_irqs((unsigned long)instance); /* First try waiting for commands to complete */ if (megasas_wait_for_outstanding_fusion(instance, reason, -- cgit v1.2.3-59-g8ed1b From ec77959515e584cd5cb2301531a3ed06c97eeacf Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:35 -0800 Subject: scsi: megaraid_sas: Increase internal command pool Fix - increase internal command pool to 8. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 2 +- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 508516d412c9..42c0e1ff7177 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1460,7 +1460,7 @@ enum FW_BOOT_CONTEXT { */ #define MEGASAS_INT_CMDS 32 #define MEGASAS_SKINNY_INT_CMDS 5 -#define MEGASAS_FUSION_INTERNAL_CMDS 5 +#define MEGASAS_FUSION_INTERNAL_CMDS 8 #define MEGASAS_FUSION_IOCTL_CMDS 3 #define MEGASAS_MFI_IOCTL_CMDS 27 diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index d8bfb87f456c..773ce340c7d3 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1350,7 +1350,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fusion->last_reply_idx[i] = 0; /* - * For fusion adapters, 3 commands for IOCTL and 5 commands + * For fusion adapters, 3 commands for IOCTL and 8 commands * for driver's internal DCMDs. */ instance->max_scsi_cmds = instance->max_fw_cmds - -- cgit v1.2.3-59-g8ed1b From e00731bc5abb8f00f64e74fc8deb3feca580f22d Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:36 -0800 Subject: scsi: megaraid_sas: Cleanup VD_EXT_DEBUG and SPAN_DEBUG related debug prints Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 2 - drivers/scsi/megaraid/megaraid_sas_base.c | 15 -- drivers/scsi/megaraid/megaraid_sas_fp.c | 266 +--------------------------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 5 - 4 files changed, 2 insertions(+), 286 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 42c0e1ff7177..dc331e8224da 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1403,8 +1403,6 @@ struct megasas_ctrl_info { #define MEGASAS_FW_BUSY 1 -#define VD_EXT_DEBUG 0 - /* Driver's internal Logging levels*/ #define OCR_LOGS (1 << 0) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0e7121d757c2..5e0dea14287f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4601,17 +4601,6 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance) } /* irrespective of FW raid maps, driver raid map is constant */ fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); - -#if VD_EXT_DEBUG - dev_info(&instance->pdev->dev, "instance->max_raid_mapsize 0x%x\n ", - instance->max_raid_mapsize); - dev_info(&instance->pdev->dev, "new_map_sz = 0x%x, old_map_sz = 0x%x\n", - fusion->new_map_sz, fusion->old_map_sz); - dev_info(&instance->pdev->dev, "ventura_map_sz = 0x%x, current_map_sz = 0x%x\n", - ventura_map_sz, fusion->current_map_sz); - dev_info(&instance->pdev->dev, "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx\n", - fusion->drv_map_sz, sizeof(struct MR_DRV_RAID_MAP_ALL)); -#endif } /** @@ -5215,10 +5204,6 @@ static int megasas_init_fw(struct megasas_instance *instance) if (instance->is_ventura) { scratch_pad_3 = readl(&instance->reg_set->outbound_scratch_pad_3); -#if VD_EXT_DEBUG - dev_info(&instance->pdev->dev, "scratch_pad3 0x%x\n", - scratch_pad_3); -#endif instance->max_raid_mapsize = ((scratch_pad_3 >> MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & MR_MAX_RAID_MAP_SIZE_MASK); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index a5517e72ad2f..7dc7708a7957 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -77,7 +77,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " #endif #define TRUE 1 -#define SPAN_DEBUG 0 #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) #define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) #define SPAN_INVALID 0xff @@ -202,16 +201,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) if (instance->max_raid_mapsize) { fw_map_dyn = fusion->ld_map[(instance->map_id & 1)]; -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, "raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x\n", - le32_to_cpu(fw_map_dyn->raid_map_size), - le32_to_cpu(fw_map_dyn->desc_table_offset)); - dev_dbg(&instance->pdev->dev, "descTableSize 0x%x descTableNumElements 0x%x\n", - le32_to_cpu(fw_map_dyn->desc_table_size), - le32_to_cpu(fw_map_dyn->desc_table_num_elements)); - dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n", - drv_map, le16_to_cpu(fw_map_dyn->ld_count)); -#endif desc_table = (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset)); if (desc_table != fw_map_dyn->raid_map_desc_table) @@ -230,25 +219,10 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) 
le32_to_cpu(fw_map_dyn->desc_table_size); for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) { - -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, "desc table %p\n", - desc_table); - dev_dbg(&instance->pdev->dev, "raidmap type %d, raidmapOffset 0x%x\n", - le32_to_cpu(desc_table->raid_map_desc_type), - le32_to_cpu(desc_table->raid_map_desc_offset)); - dev_dbg(&instance->pdev->dev, "raid map number of elements 0%x, raidmapsize 0x%x\n", - le32_to_cpu(desc_table->raid_map_desc_elements), - le32_to_cpu(desc_table->raid_map_desc_buffer_size)); -#endif switch (le32_to_cpu(desc_table->raid_map_desc_type)) { case RAID_MAP_DESC_TYPE_DEVHDL_INFO: fw_map_dyn->dev_hndl_info = (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, "devHndlInfo address %p\n", - fw_map_dyn->dev_hndl_info); -#endif memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->dev_hndl_info, sizeof(struct MR_DEV_HANDLE_INFO) * @@ -258,28 +232,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) fw_map_dyn->ld_tgt_id_to_ld = (u16 *) (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, "ldTgtIdToLd address %p\n", - fw_map_dyn->ld_tgt_id_to_ld); -#endif for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { pDrvRaidMap->ldTgtIdToLd[j] = le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]); -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, " %d drv ldTgtIdToLd %d\n", - j, pDrvRaidMap->ldTgtIdToLd[j]); -#endif } break; case RAID_MAP_DESC_TYPE_ARRAY_INFO: fw_map_dyn->ar_map_info = (struct MR_ARRAY_INFO *) (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, "arMapInfo address %p\n", - fw_map_dyn->ar_map_info); -#endif - memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->ar_map_info, sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements)); @@ -291,34 +252,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->ld_span_map, sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements)); -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, "ldSpanMap address %p\n", - fw_map_dyn->ld_span_map); - dev_dbg(&instance->pdev->dev, "MR_LD_SPAN_MAP size 0x%lx\n", - sizeof(struct MR_LD_SPAN_MAP)); - for (j = 0; j < ld_count; j++) { - dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x\n", - j, j, fw_map_dyn->ld_span_map[j].ldRaid.targetId); - dev_dbg(&instance->pdev->dev, "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", - j, fw_map_dyn->ld_span_map[j].ldRaid.seqNum); - dev_dbg(&instance->pdev->dev, "fw_map_dyn->ld_span_map[%d].ldRaid.rowSize 0x%x\n", - j, (u32)fw_map_dyn->ld_span_map[j].ldRaid.rowSize); - - dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n", - j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId); - dev_dbg(&instance->pdev->dev, "DrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", - j, pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum); - dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n", - j, (u32)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize); - - dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n", - instance->unique_id, drv_map); - dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p/%p\n", - pDrvRaidMap, - &fw_map_dyn->ld_span_map[j].ldRaid, - 
&pDrvRaidMap->ldSpanMap[j].ldRaid); - } -#endif break; default: dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n", @@ -335,17 +268,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n"); return; } -#if VD_EXT_DEBUG - for (i = 0; i < ld_count; i++) { - dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :Index 0x%x\n", - instance->unique_id, i); - dev_dbg(&instance->pdev->dev, "Target Id 0x%x\n", - fw_map_ext->ldSpanMap[i].ldRaid.targetId); - dev_dbg(&instance->pdev->dev, "Seq Num 0x%x Size 0/%llx\n", - fw_map_ext->ldSpanMap[i].ldRaid.seqNum, - fw_map_ext->ldSpanMap[i].ldRaid.size); - } -#endif pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec; @@ -354,29 +276,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) (u16)fw_map_ext->ldTgtIdToLd[i]; memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, sizeof(struct MR_LD_SPAN_MAP) * ld_count); -#if VD_EXT_DEBUG - for (i = 0; i < ld_count; i++) { - dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x\n", - i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId); - dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", - i, fw_map_ext->ldSpanMap[i].ldRaid.seqNum); - dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.rowSize 0x%x\n", - i, (u32)fw_map_ext->ldSpanMap[i].ldRaid.rowSize); - - dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n", - i, i, pDrvRaidMap->ldSpanMap[i].ldRaid.targetId); - dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n", - i, pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum); - dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n", - i, (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize); - - dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n", - instance->unique_id, drv_map); - dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p %p\n", - pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid, - &pDrvRaidMap->ldSpanMap[i].ldRaid); - } -#endif memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo, sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT); memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo, @@ -393,18 +292,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) fusion->ld_map[(instance->map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); - -#if VD_EXT_DEBUG - for (i = 0; i < ld_count; i++) { - dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " - "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", - instance->unique_id, i, - fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId, - fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum, - fw_map_old->raidMap.ldSpanMap[i].ldRaid.size); - } -#endif - pDrvRaidMap->totalSize = pFwRaidMap->totalSize; pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; @@ -413,26 +300,6 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) (u8)pFwRaidMap->ldTgtIdToLd[i]; for (i = 0; i < ld_count; i++) { pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, - "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x " - "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x " - "size 0x%x\n", i, i, - pFwRaidMap->ldSpanMap[i].ldRaid.targetId, - 
pFwRaidMap->ldSpanMap[i].ldRaid.seqNum, - (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize); - dev_dbg(&instance->pdev->dev, - "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x " - "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x " - "size 0x%x\n", i, i, - pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, - pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum, - (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize); - dev_dbg(&instance->pdev->dev, "Driver raid map all %p " - "raid map %p LD RAID MAP %p/%p\n", drv_map, - pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid, - &pDrvRaidMap->ldSpanMap[i].ldRaid); -#endif } memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo, sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS); @@ -545,91 +412,6 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, return SPAN_INVALID; } -/* -****************************************************************************** -* -* Function to print info about span set created in driver from FW raid map -* -* Inputs : -* map - LD map -* ldSpanInfo - ldSpanInfo per HBA instance -*/ -#if SPAN_DEBUG -static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map, - PLD_SPAN_INFO ldSpanInfo) -{ - - u8 span; - u32 element; - struct MR_LD_RAID *raid; - LD_SPAN_SET *span_set; - struct MR_QUAD_ELEMENT *quad; - int ldCount; - u16 ld; - - for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { - ld = MR_TargetIdToLdGet(ldCount, map); - if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) - continue; - raid = MR_LdRaidGet(ld, map); - dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", - ld, raid->spanDepth); - for (span = 0; span < raid->spanDepth; span++) - dev_dbg(&instance->pdev->dev, "Span=%x," - " number of quads=%x\n", span, - le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements)); - for (element = 0; element < MAX_QUAD_DEPTH; element++) { - span_set = &(ldSpanInfo[ld].span_set[element]); - if (span_set->span_row_data_width == 0) - break; - - dev_dbg(&instance->pdev->dev, "Span Set %x:" - "width=%x, diff=%x\n", element, - (unsigned int)span_set->span_row_data_width, - (unsigned int)span_set->diff); - dev_dbg(&instance->pdev->dev, "logical LBA" - "start=0x%08lx, end=0x%08lx\n", - (long unsigned int)span_set->log_start_lba, - (long unsigned int)span_set->log_end_lba); - dev_dbg(&instance->pdev->dev, "span row start=0x%08lx," - " end=0x%08lx\n", - (long unsigned int)span_set->span_row_start, - (long unsigned int)span_set->span_row_end); - dev_dbg(&instance->pdev->dev, "data row start=0x%08lx," - " end=0x%08lx\n", - (long unsigned int)span_set->data_row_start, - (long unsigned int)span_set->data_row_end); - dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx," - " end=0x%08lx\n", - (long unsigned int)span_set->data_strip_start, - (long unsigned int)span_set->data_strip_end); - - for (span = 0; span < raid->spanDepth; span++) { - if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements) >= - element + 1) { - quad = &map->raidMap.ldSpanMap[ld]. - spanBlock[span].block_span_info. 
- quad[element]; - dev_dbg(&instance->pdev->dev, "Span=%x," - "Quad=%x, diff=%x\n", span, - element, le32_to_cpu(quad->diff)); - dev_dbg(&instance->pdev->dev, - "offset_in_span=0x%08lx\n", - (long unsigned int)le64_to_cpu(quad->offsetInSpan)); - dev_dbg(&instance->pdev->dev, - "logical start=0x%08lx, end=0x%08lx\n", - (long unsigned int)le64_to_cpu(quad->logStart), - (long unsigned int)le64_to_cpu(quad->logEnd)); - } - } - } - } - return 0; -} -#endif - /* ****************************************************************************** * @@ -743,19 +525,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance, else break; } -#if SPAN_DEBUG - dev_info(&instance->pdev->dev, "Strip 0x%llx," - "span_set_Strip 0x%llx, span_set_Row 0x%llx" - "data width 0x%llx span offset 0x%x\n", strip, - (unsigned long long)span_set_Strip, - (unsigned long long)span_set_Row, - (unsigned long long)span_set->span_row_data_width, - span_offset); - dev_info(&instance->pdev->dev, "For strip 0x%llx" - "row is 0x%llx\n", strip, - (unsigned long long) span_set->data_row_start + - (unsigned long long) span_set_Row + (span_offset - 1)); -#endif + retval = (span_set->data_row_start + span_set_Row + (span_offset - 1)); return retval; @@ -872,11 +642,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance, else break; } -#if SPAN_DEBUG - dev_info(&instance->pdev->dev, "get_arm_from_strip:" - "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld, - (long unsigned int)strip, (strip_offset - span_offset)); -#endif + retval = (strip_offset - span_offset); return retval; } @@ -1239,17 +1005,6 @@ MR_BuildRaidContext(struct megasas_instance *instance, } io_info->start_span = startlba_span; io_info->start_row = start_row; -#if SPAN_DEBUG - dev_dbg(&instance->pdev->dev, "Check Span number from %s %d" - "for row 0x%llx, start strip 0x%llx end strip 0x%llx" - " span 0x%x\n", __func__, __LINE__, - (unsigned long long)start_row, - (unsigned long long)start_strip, - (unsigned long long)endStrip, startlba_span); - dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx" - "Start span 0x%x\n", (unsigned long long)start_row, - (unsigned long long)endRow, startlba_span); -#endif } else { start_row = mega_div64_32(start_strip, raid->rowDataSize); endRow = mega_div64_32(endStrip, raid->rowDataSize); @@ -1383,12 +1138,6 @@ MR_BuildRaidContext(struct megasas_instance *instance, return TRUE; } } - -#if SPAN_DEBUG - /* Just for testing what arm we get for strip.*/ - if (io_info->IoforUnevenSpan) - get_arm_from_strip(instance, ld, start_strip, map); -#endif return TRUE; } @@ -1502,10 +1251,6 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, break; } } -#if SPAN_DEBUG - getSpanInfo(map, ldSpanInfo); -#endif - } void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, @@ -1594,13 +1339,6 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, } lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1; -#if SPAN_DEBUG - if (arm != bestArm) - dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance " - "occur - span 0x%x arm 0x%x bestArm 0x%x " - "io_info->span_arm 0x%x\n", - span, arm, bestArm, io_info->span_arm); -#endif return io_info->pd_after_lb; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 773ce340c7d3..c7f4cc7608d6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1062,11 +1062,6 @@ megasas_get_ld_map_info(struct megasas_instance *instance) memset(ci, 0, 
fusion->max_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, - "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n", - __func__, cpu_to_le32(size_map_info)); -#endif dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; -- cgit v1.2.3-59-g8ed1b From 41064f1bf8886bc43afbd9aa23a698f97bc65664 Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:37 -0800 Subject: scsi: megaraid_sas: Indentation and smatch warning fixes Fix indentation issues and smatch warning reported by Dan Carpenter for previous series as discussed below. http://www.spinics.net/lists/linux-scsi/msg103635.html http://www.spinics.net/lists/linux-scsi/msg103603.html Reported-by: Dan Carpenter Signed-off-by: Kashyap Desai Signed-off-by: Sasikumar Chandrasekaran Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 2 +- drivers/scsi/megaraid/megaraid_sas_base.c | 10 ++-- drivers/scsi/megaraid/megaraid_sas_fp.c | 57 +++++++++--------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 92 ++++++++++++++--------------- drivers/scsi/megaraid/megaraid_sas_fusion.h | 2 +- 5 files changed, 80 insertions(+), 83 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index dc331e8224da..8c06cbfd07b8 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1376,7 +1376,7 @@ struct megasas_ctrl_info { u16 reserved:8; #endif } adapter_operations4; - u8 pad[0x800-0x7FE]; /* 0x7FE pad to 2K for expansion */ + u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */ } __packed; /* diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5e0dea14287f..dc9f42e135bb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5343,14 +5343,14 @@ static int megasas_init_fw(struct megasas_instance *instance) memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); /* stream detection initialization */ - if (instance->is_ventura) { + if (instance->is_ventura && fusion) { fusion->stream_detect_by_ld = - kzalloc(sizeof(struct LD_STREAM_DETECT *) - * MAX_LOGICAL_DRIVES_EXT, - GFP_KERNEL); + kzalloc(sizeof(struct LD_STREAM_DETECT *) + * MAX_LOGICAL_DRIVES_EXT, + GFP_KERNEL); if (!fusion->stream_detect_by_ld) { dev_err(&instance->pdev->dev, - "unable to allocate stream detection for pool of LDs\n"); + "unable to allocate stream detection for pool of LDs\n"); goto fail_get_ld_pd_list; } for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 7dc7708a7957..62affa76133d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -197,7 +197,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) memset(drv_map, 0, fusion->drv_map_sz); memset(pDrvRaidMap->ldTgtIdToLd, - 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN)); + 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN)); if (instance->max_raid_mapsize) { fw_map_dyn = fusion->ld_map[(instance->map_id & 1)]; @@ -224,34 +224,37 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) fw_map_dyn->dev_hndl_info = (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); memcpy(pDrvRaidMap->devHndlInfo, - fw_map_dyn->dev_hndl_info, - sizeof(struct MR_DEV_HANDLE_INFO) * - 
le32_to_cpu(desc_table->raid_map_desc_elements)); + fw_map_dyn->dev_hndl_info, + sizeof(struct MR_DEV_HANDLE_INFO) * + le32_to_cpu(desc_table->raid_map_desc_elements)); break; case RAID_MAP_DESC_TYPE_TGTID_INFO: fw_map_dyn->ld_tgt_id_to_ld = - (u16 *) (raid_map_data + - le32_to_cpu(desc_table->raid_map_desc_offset)); - for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { - pDrvRaidMap->ldTgtIdToLd[j] = - le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]); - } + (u16 *)(raid_map_data + + le32_to_cpu(desc_table->raid_map_desc_offset)); + for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { + pDrvRaidMap->ldTgtIdToLd[j] = + le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]); + } break; case RAID_MAP_DESC_TYPE_ARRAY_INFO: fw_map_dyn->ar_map_info = - (struct MR_ARRAY_INFO *) - (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); + (struct MR_ARRAY_INFO *) + (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); memcpy(pDrvRaidMap->arMapInfo, - fw_map_dyn->ar_map_info, - sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements)); + fw_map_dyn->ar_map_info, + sizeof(struct MR_ARRAY_INFO) * + le32_to_cpu(desc_table->raid_map_desc_elements)); break; case RAID_MAP_DESC_TYPE_SPAN_INFO: fw_map_dyn->ld_span_map = - (struct MR_LD_SPAN_MAP *) - (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); + (struct MR_LD_SPAN_MAP *) + (raid_map_data + + le32_to_cpu(desc_table->raid_map_desc_offset)); memcpy(pDrvRaidMap->ldSpanMap, - fw_map_dyn->ld_span_map, - sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements)); + fw_map_dyn->ld_span_map, + sizeof(struct MR_LD_SPAN_MAP) * + le32_to_cpu(desc_table->raid_map_desc_elements)); break; default: dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n", @@ -262,7 +265,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) } else if (instance->supportmax256vd) { fw_map_ext = - (struct MR_FW_RAID_MAP_EXT *) fusion->ld_map[(instance->map_id & 1)]; + (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)]; ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); if (ld_count > MAX_LOGICAL_DRIVES_EXT) { dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n"); @@ -275,12 +278,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) pDrvRaidMap->ldTgtIdToLd[i] = (u16)fw_map_ext->ldTgtIdToLd[i]; memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, - sizeof(struct MR_LD_SPAN_MAP) * ld_count); + sizeof(struct MR_LD_SPAN_MAP) * ld_count); memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo, - sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT); + sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT); memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo, - sizeof(struct MR_DEV_HANDLE_INFO) * - MAX_RAIDMAP_PHYSICAL_DEVICES); + sizeof(struct MR_DEV_HANDLE_INFO) * + MAX_RAIDMAP_PHYSICAL_DEVICES); /* New Raid map will not set totalSize, so keep expected value * for legacy code in ValidateMapInfo @@ -347,7 +350,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x", le32_to_cpu(pDrvRaidMap->totalSize)); dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n", - (unsigned int) expected_size); + (unsigned int)expected_size); dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), le32_to_cpu(pDrvRaidMap->totalSize)); @@ -770,7 +773,7 @@ static u8 
mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); if (instance->is_ventura) { - ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm = + ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; @@ -888,7 +891,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); if (instance->is_ventura) { - ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm = + ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; @@ -1329,7 +1332,7 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, * keep driver in sync with Firmware */ if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || - (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) + (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) bestArm ^= 1; /* Update the last accessed block on the correct pd */ diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index c7f4cc7608d6..9928766445d6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -446,8 +446,6 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) return -ENOMEM; } - - for (i = 0; i < max_mpt_cmd; i++) { fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), GFP_KERNEL); @@ -1433,8 +1431,8 @@ fail_alloc_mfi_cmds: void map_cmd_status(struct fusion_context *fusion, - struct scsi_cmnd *scmd, u8 status, u8 ext_status, - u32 data_length, u8 *sense) + struct scsi_cmnd *scmd, u8 status, u8 ext_status, + u32 data_length, u8 *sense) { u8 cmd_type; int resid; @@ -2033,8 +2031,8 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, /** stream detection on read and and write IOs */ static void megasas_stream_detect(struct megasas_instance *instance, - struct megasas_cmd_fusion *cmd, - struct IO_REQUEST_INFO *io_info) + struct megasas_cmd_fusion *cmd, + struct IO_REQUEST_INFO *io_info) { struct fusion_context *fusion = instance->ctrl_context; u32 device_id = io_info->ldTgtId; @@ -2048,63 +2046,59 @@ static void megasas_stream_detect(struct megasas_instance *instance, struct STREAM_DETECT *current_sd; /* find possible stream */ for (i = 0; i < MAX_STREAMS_TRACKED; ++i) { - stream_num = - (*track_stream >> (i * BITS_PER_INDEX_STREAM)) & + stream_num = (*track_stream >> + (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK; current_sd = ¤t_ld_sd->stream_track[stream_num]; - /* if we found a stream, update the raid - * context and also update the mruBitMap - */ - /* boundary condition */ - if ((current_sd->next_seq_lba) && - (io_info->ldStartBlock >= current_sd->next_seq_lba) && - (io_info->ldStartBlock <= (current_sd->next_seq_lba+32)) && - (current_sd->is_read == io_info->isRead)) { - - if ((io_info->ldStartBlock != current_sd->next_seq_lba) - && ((!io_info->isRead) || (!is_read_ahead))) - /* - * Once the API availible we need to change this. 
- * At this point we are not allowing any gap - */ - continue; - - SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35); - current_sd->next_seq_lba = - io_info->ldStartBlock + io_info->numBlocks; - /* - * update the mruBitMap LRU + /* if we found a stream, update the raid + * context and also update the mruBitMap */ - shifted_values_mask = - (1 << i * BITS_PER_INDEX_STREAM) - 1; - shifted_values = ((*track_stream & shifted_values_mask) - << BITS_PER_INDEX_STREAM); - index_value_mask = - STREAM_MASK << i * BITS_PER_INDEX_STREAM; - unshifted_values = - *track_stream & ~(shifted_values_mask | - index_value_mask); - *track_stream = - unshifted_values | shifted_values | stream_num; - return; + /* boundary condition */ + if ((current_sd->next_seq_lba) && + (io_info->ldStartBlock >= current_sd->next_seq_lba) && + (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) && + (current_sd->is_read == io_info->isRead)) { + + if ((io_info->ldStartBlock != current_sd->next_seq_lba) && + ((!io_info->isRead) || (!is_read_ahead))) + /* + * Once the API availible we need to change this. + * At this point we are not allowing any gap + */ + continue; + SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35); + current_sd->next_seq_lba = + io_info->ldStartBlock + io_info->numBlocks; + /* + * update the mruBitMap LRU + */ + shifted_values_mask = + (1 << i * BITS_PER_INDEX_STREAM) - 1; + shifted_values = ((*track_stream & shifted_values_mask) + << BITS_PER_INDEX_STREAM); + index_value_mask = + STREAM_MASK << i * BITS_PER_INDEX_STREAM; + unshifted_values = + *track_stream & ~(shifted_values_mask | + index_value_mask); + *track_stream = + unshifted_values | shifted_values | stream_num; + return; } - } /* * if we did not find any stream, create a new one * from the least recently used */ - stream_num = - (*track_stream >> ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & - STREAM_MASK; + stream_num = (*track_stream >> + ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & + STREAM_MASK; current_sd = ¤t_ld_sd->stream_track[stream_num]; current_sd->is_read = io_info->isRead; current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks; - *track_stream = - (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num); + *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num); return; - } /** diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 1441c35c58a3..8e8c35f6b872 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -904,7 +904,7 @@ struct MR_LD_RAID { */ struct MR_IO_AFFINITY cpuAffinity; /* Bit definiations are specified by MR_IO_AFFINITY */ - u8 reserved3[0x80-0x40]; /* 0x40 - 0x7f */ + u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */ }; struct MR_LD_SPAN_MAP { -- cgit v1.2.3-59-g8ed1b From a73b0a4b5d17ee39b2cbac7638a97077189b80cc Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:38 -0800 Subject: scsi: megaraid_sas: Change RAID_1_10_RMW_CMDS to RAID_1_PEER_CMDS and set value to 2 For RAID1 FastPath writes, driver needs to allocate extra commands internally to accommodate for the extra peer command being sent. Currently driver is allocating 2 extra commands for each but only one extra command is necessary. Set RAID_1_10_RMW_CMDS to 2 and also change macro name to RAID_1_PEER_CMDS. Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- drivers/scsi/megaraid/megaraid_sas_fusion.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 9928766445d6..750090119f81 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -293,7 +293,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c if (instance->is_ventura) instance->max_mpt_cmds = - instance->max_fw_cmds * RAID_1_10_RMW_CMDS; + instance->max_fw_cmds * RAID_1_PEER_CMDS; else instance->max_mpt_cmds = instance->max_fw_cmds; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 8e8c35f6b872..d78d76112501 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -101,7 +101,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FP_CMD_LEN 16 #define MEGASAS_FUSION_IN_RESET 0 #define THRESHOLD_REPLY_COUNT 50 -#define RAID_1_10_RMW_CMDS 3 +#define RAID_1_PEER_CMDS 2 #define JBOD_MAPS_COUNT 2 enum MR_FUSION_ADAPTER_TYPE { -- cgit v1.2.3-59-g8ed1b From 156f8759bb708d7fd43cce9e2dc640b1ba01ecef Mon Sep 17 00:00:00 2001 From: Shivasharan S Date: Fri, 10 Feb 2017 00:59:39 -0800 Subject: scsi: megaraid_sas: driver version upgrade Signed-off-by: Kashyap Desai Signed-off-by: Shivasharan S Reviewed-by: Hannes Reinecke Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 8c06cbfd07b8..e7e5974e1a2c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.700.00.00-rc1" -#define MEGASAS_RELDATE "November 29, 2016" +#define MEGASAS_VERSION "07.701.16.00-rc1" +#define MEGASAS_RELDATE "February 2, 2017" /* * Device IDs -- cgit v1.2.3-59-g8ed1b From 8e305cb2bd8b6300ea29420f2458a2f245bdacbd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Feb 2017 15:21:21 +0100 Subject: scsi: snic: switch to pci_irq_alloc_vectors Signed-off-by: Christoph Hellwig Acked-by: Narsimhulu Musini Signed-off-by: Martin K. 
Petersen --- drivers/scsi/snic/snic.h | 1 - drivers/scsi/snic/snic_isr.c | 48 ++++++++++++++++++-------------------------- 2 files changed, 19 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h index 8ed778d4dbb9..de0ab5fc8474 100644 --- a/drivers/scsi/snic/snic.h +++ b/drivers/scsi/snic/snic.h @@ -299,7 +299,6 @@ struct snic { /* pci related */ struct pci_dev *pdev; - struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX]; struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX]; /* io related info */ diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c index f552003128c6..d859501e4ccd 100644 --- a/drivers/scsi/snic/snic_isr.c +++ b/drivers/scsi/snic/snic_isr.c @@ -93,7 +93,7 @@ snic_free_intr(struct snic *snic) /* ONLY interrupt mode MSIX is supported */ for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { if (snic->msix[i].requested) { - free_irq(snic->msix_entry[i].vector, + free_irq(pci_irq_vector(snic->pdev, i), snic->msix[i].devid); } } @@ -134,7 +134,7 @@ snic_request_intr(struct snic *snic) snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic; for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { - ret = request_irq(snic->msix_entry[i].vector, + ret = request_irq(pci_irq_vector(snic->pdev, i), snic->msix[i].isr, 0, snic->msix[i].devname, @@ -158,47 +158,37 @@ snic_set_intr_mode(struct snic *snic) { unsigned int n = ARRAY_SIZE(snic->wq); unsigned int m = SNIC_CQ_IO_CMPL_MAX; - unsigned int i; + unsigned int vecs = n + m + 1; /* * We need n WQs, m CQs, and n+m+1 INTRs * (last INTR is used for WQ/CQ errors and notification area */ - BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) > ARRAY_SIZE(snic->intr)); - SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1)); - - for (i = 0; i < (n + m + 1); i++) - snic->msix_entry[i].entry = i; - - if (snic->wq_count >= n && snic->cq_count >= (n + m)) { - if (!pci_enable_msix(snic->pdev, - snic->msix_entry, - (n + m + 1))) { - snic->wq_count = n; - snic->cq_count = n + m; - snic->intr_count = n + m + 1; - snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY; - - SNIC_ISR_DBG(snic->shost, - "Using MSI-X Interrupts\n"); - svnic_dev_set_intr_mode(snic->vdev, - VNIC_DEV_INTR_MODE_MSIX); - - return 0; - } - } - svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); + if (snic->wq_count < n || snic->cq_count < n + m) + goto fail; + if (pci_alloc_irq_vectors(snic->pdev, vecs, vecs, PCI_IRQ_MSIX) < 0) + goto fail; + + snic->wq_count = n; + snic->cq_count = n + m; + snic->intr_count = vecs; + snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY; + + SNIC_ISR_DBG(snic->shost, "Using MSI-X Interrupts\n"); + svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_MSIX); + return 0; +fail: + svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); return -EINVAL; } /* end of snic_set_intr_mode */ void snic_clear_intr_mode(struct snic *snic) { - pci_disable_msix(snic->pdev); - + pci_free_irq_vectors(snic->pdev); svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX); } -- cgit v1.2.3-59-g8ed1b From 40a4c2c392593b57a2e6f6438794492596279838 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Feb 2017 19:38:55 +0300 Subject: scsi: megaraid_sas: array overflow in megasas_dump_frame() The "sz" variable is in terms of bytes, but we're treating the buffer as an array of __le32 so we have to divide by 4. Fixes: def0eab3af86 ("scsi: megaraid_sas: enhance debug logs in OCR context") Signed-off-by: Dan Carpenter Acked-by: Sumit Saxena Signed-off-by: Martin K. 
Petersen --- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index dc9f42e135bb..7ac9a9ee9bd4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2754,7 +2754,7 @@ megasas_dump_frame(void *mpi_request, int sz) __le32 *mfp = (__le32 *)mpi_request; printk(KERN_INFO "IO request frame:\n\t"); - for (i = 0; i < sz; i++) { + for (i = 0; i < sz / sizeof(__le32); i++) { if (i && ((i % 8) == 0)) printk("\n\t"); printk("%08x ", le32_to_cpu(mfp[i])); -- cgit v1.2.3-59-g8ed1b From d1da522fb8a70b8c527d4ad15f9e62218cc00f2c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Feb 2017 22:21:51 +0100 Subject: scsi: megaraid_sas: handle dma_addr_t right on 32-bit When building with a dma_addr_t that is different from pointer size, we get this warning: drivers/scsi/megaraid/megaraid_sas_fusion.c: In function 'megasas_make_prp_nvme': drivers/scsi/megaraid/megaraid_sas_fusion.c:1654:17: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] It's better to not pretend that the dma address is a pointer and instead use a dma_addr_t consistently. Fixes: 33203bc4d61b ("scsi: megaraid_sas: NVME fast path io support") Signed-off-by: Arnd Bergmann Acked-by: Sumit Saxena Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 750090119f81..29650ba669da 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1619,7 +1619,8 @@ megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, { int sge_len, offset, num_prp_in_chain = 0; struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl; - u64 *ptr_sgl, *ptr_sgl_phys; + u64 *ptr_sgl; + dma_addr_t ptr_sgl_phys; u64 sge_addr; u32 page_mask, page_mask_result; struct scatterlist *sg_scmd; @@ -1651,14 +1652,14 @@ megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, */ page_mask = mr_nvme_pg_size - 1; ptr_sgl = (u64 *)cmd->sg_frame; - ptr_sgl_phys = (u64 *)cmd->sg_frame_phys_addr; + ptr_sgl_phys = cmd->sg_frame_phys_addr; memset(ptr_sgl, 0, instance->max_chain_frame_sz); /* Build chain frame element which holds all prps except first*/ main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); - main_chain_element->Address = cpu_to_le64((uintptr_t)ptr_sgl_phys); + main_chain_element->Address = cpu_to_le64(ptr_sgl_phys); main_chain_element->NextChainOffset = 0; main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | IEEE_SGE_FLAGS_SYSTEM_ADDR | @@ -1696,16 +1697,15 @@ megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, scmd_printk(KERN_NOTICE, scmd, "page boundary ptr_sgl: 0x%p\n", ptr_sgl); - ptr_sgl_phys++; - *ptr_sgl = - cpu_to_le64((uintptr_t)ptr_sgl_phys); + ptr_sgl_phys += 8; + *ptr_sgl = cpu_to_le64(ptr_sgl_phys); ptr_sgl++; num_prp_in_chain++; } *ptr_sgl = cpu_to_le64(sge_addr); ptr_sgl++; - ptr_sgl_phys++; + ptr_sgl_phys += 8; num_prp_in_chain++; sge_addr += mr_nvme_pg_size; -- cgit v1.2.3-59-g8ed1b
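
The dma_addr_t fix above rests on a general point: on 32-bit builds with 64-bit DMA addressing, dma_addr_t is wider than a pointer, so casting a bus address to a pointer (or doing pointer arithmetic on a fake pointer) silently truncates the upper bits. Below is a minimal, self-contained user-space sketch of the pattern the patch adopts — the CPU-visible cursor stays a real pointer while the device-visible cursor is kept as a plain integer and advanced by the entry size in bytes. The bus_addr_t typedef, the fill_prp_entries() helper and the example addresses are illustrative assumptions, not kernel or megaraid_sas API.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for dma_addr_t: 64 bits wide regardless of pointer size, so it
 * is kept as an integer and never cast through a pointer type. */
typedef uint64_t bus_addr_t;

/*
 * Toy analogue of the fixed pattern: 'virt' is the CPU-visible cursor into
 * the PRP buffer (real pointer arithmetic), 'bus' is the device-visible
 * address of the same slot (plain integer arithmetic, advanced by the
 * entry size in bytes).
 */
static bus_addr_t fill_prp_entries(uint64_t *virt, bus_addr_t bus,
				   bus_addr_t page, unsigned int count,
				   unsigned int page_size)
{
	for (unsigned int i = 0; i < count; i++) {
		*virt++ = page;              /* CPU writes the page address  */
		bus += sizeof(uint64_t);     /* bus address of the next slot */
		page += page_size;
	}
	return bus;                          /* slot just past the last entry */
}

int main(void)
{
	uint64_t prp[4];
	bus_addr_t prp_bus = 0x100000000ULL; /* hypothetical bus address > 4 GiB */
	bus_addr_t next;

	next = fill_prp_entries(prp, prp_bus, 0xabc000, 4, 4096);

	/* Casting prp_bus through a 32-bit pointer would have dropped the top
	 * 32 bits; keeping it as an integer preserves the full address. */
	printf("first entry 0x%llx, next slot at bus 0x%llx\n",
	       (unsigned long long)prp[0], (unsigned long long)next);
	return 0;
}

In the driver itself the same idea appears as ptr_sgl (a u64 *) paired with ptr_sgl_phys (a dma_addr_t advanced by 8), so the chain element's Address field always carries the full 64-bit bus address.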