Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
 drivers/scsi/qla2xxx/qla_iocb.c | 261 ++++++++++++++++++++++++++++-------
 1 file changed, 216 insertions(+), 45 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 70dbf53d9e0f..03b752632839 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -147,13 +147,6 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t guard = scsi_host_get_guard(cmd->device->host);
- /* We only support T10 DIF right now */
- if (guard != SHOST_DIX_GUARD_CRC) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
- "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
- return 0;
- }
-
/* We always use DIFF Bundling for best performance */
*fw_prot_opts = 0;
@@ -172,10 +165,11 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
*fw_prot_opts |= PO_MODE_DIF_REMOVE;
break;
case SCSI_PROT_READ_PASS:
- *fw_prot_opts |= PO_MODE_DIF_PASS;
- break;
case SCSI_PROT_WRITE_PASS:
- *fw_prot_opts |= PO_MODE_DIF_PASS;
+ if (guard & SHOST_DIX_GUARD_IP)
+ *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
+ else
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
break;
default: /* Normal Request */
*fw_prot_opts |= PO_MODE_DIF_PASS;
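
The hunk above stops rejecting non-CRC guards outright: on READ_PASS/WRITE_PASS the driver now selects PO_MODE_DIF_TCP_CKSUM whenever the host advertises an IP-checksum guard. For scsi_host_get_guard() to ever return SHOST_DIX_GUARD_IP, the host must register that capability at initialization; a minimal sketch, assuming the usual probe-time setup (exact placement, e.g. in qla2x00_probe_one(), is not shown in this diff):

	/* Sketch: advertise both guard types so scsi_host_get_guard()
	 * above can return SHOST_DIX_GUARD_IP. Placement is an assumption.
	 */
	scsi_host_set_prot(host, SHOST_DIF_TYPE1_PROTECTION |
	    SHOST_DIX_TYPE1_PROTECTION);
	scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);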
@@ -821,7 +815,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
unsigned int protcnt)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
switch (scsi_get_prot_type(cmd)) {
case SCSI_PROT_DIF_TYPE0:
@@ -891,12 +884,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
pkt->ref_tag_mask[3] = 0xff;
break;
}
-
- ql_dbg(ql_dbg_io, vha, 0x3009,
- "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
- "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
- pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
- scsi_get_prot_type(cmd), cmd);
}
struct qla2_sgx {
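
The ql_dbg() call removed in the hunk above only reported the tags already seeded by this switch; no behavior changes. For context, the Type 1 case seeds the tags along these lines (reconstructed from the surrounding context lines and the standard T10 DIF convention; an illustrative sketch, not the verbatim driver code):

	case SCSI_PROT_DIF_TYPE1:
		/* Ref tag comes from the low 32 bits of the LBA. */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->ref_tag_mask[0] = 0xff;	/* check all ref tag bytes */
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;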
@@ -1068,9 +1055,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int i;
uint16_t used_dsds = tot_dsds;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-
- uint8_t *cp;
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
@@ -1113,19 +1097,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- ql_dbg(ql_dbg_io, vha, 0x300a,
- "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
- i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
+
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
- if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
- cp = page_address(sg_page(sg)) + sg->offset;
- ql_dbg(ql_dbg_io, vha, 0x300b,
- "User data buffer=%p for cmd=%p.\n", cp, cmd);
- }
}
/* Null termination */
*cur_dsd++ = 0;
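
Each scatter-gather entry is emitted here as a three-word data segment descriptor: the 64-bit DMA address as little-endian low/high words, then a 32-bit byte length. A sketch of the layout being written (the struct name is illustrative; the driver writes raw uint32_t words):

	struct dsd_sketch {
		__le32 address_lo;	/* cpu_to_le32(LSD(sle_dma)) */
		__le32 address_hi;	/* cpu_to_le32(MSD(sle_dma)) */
		__le32 length;		/* cpu_to_le32(sg_dma_len(sg)) */
	};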
@@ -1148,8 +1125,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- uint8_t *cp;
cmd = GET_CMD_SP(sp);
scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
@@ -1193,23 +1168,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
- ql_dbg(ql_dbg_io, vha, 0x3027,
- "%s(): %p, sg_entry %d - "
- "addr=0x%x0x%x, len=%d.\n",
- __func__, cur_dsd, i,
- LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
- }
+
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
- if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
- cp = page_address(sg_page(sg)) + sg->offset;
- ql_dbg(ql_dbg_io, vha, 0x3028,
- "%s(): Protection Data buffer = %p.\n", __func__,
- cp);
- }
avail_dsds--;
}
/* Null termination */
@@ -1386,6 +1349,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
if (!qla2x00_hba_err_chk_enabled(sp))
fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+ /* HBA error checking enabled */
+ else if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
+ || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+ SCSI_PROT_DIF_TYPE2))
+ fw_prot_opts |= BIT_10;
+ else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+ SCSI_PROT_DIF_TYPE3)
+ fw_prot_opts |= BIT_11;
+ }
if (!bundling) {
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
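
With HBA error checking enabled, the branch added above lets PI-uninit-capable adapters relax checking of uninitialized protection data: BIT_10 for DIF Types 1 and 2, BIT_11 for Type 3 (the bit semantics are inferred from IS_PI_UNINIT_CAPABLE and are an assumption). An equivalent sketch that caches the protection type instead of calling scsi_get_prot_type(GET_CMD_SP(sp)) three times:

	int prot_type = scsi_get_prot_type(GET_CMD_SP(sp));

	if (!qla2x00_hba_err_chk_enabled(sp)) {
		fw_prot_opts |= 0x10;	/* disable guard tag checking */
	} else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if (prot_type == SCSI_PROT_DIF_TYPE1 ||
		    prot_type == SCSI_PROT_DIF_TYPE2)
			fw_prot_opts |= BIT_10;
		else if (prot_type == SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}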
@@ -1858,7 +1831,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
}
if (index == MAX_OUTSTANDING_COMMANDS) {
ql_log(ql_log_warn, vha, 0x700b,
- "No room on oustanding cmd array.\n");
+ "No room on outstanding cmd array.\n");
goto queuing_error;
}
@@ -2665,3 +2638,201 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
}
+
+static void
+qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
+ struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ uint32_t req_data_len = 0;
+ uint32_t rsp_data_len = 0;
+ struct scatterlist *sg;
+ int index;
+ int entry_count = 1;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+	/* Update entry type to indicate a bidir command */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
+
+	/* Set the transfer direction; in this case set both flags.
+	 * Also set the BD_WRAP_BACK flag: the firmware will take care
+	 * of assigning DID=SID for outgoing pkts.
+	 */
+ cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+ BD_WRAP_BACK);
+
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
+ cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
+ cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+
+ vha->bidi_stats.transfer_bytes += req_data_len;
+ vha->bidi_stats.io_count++;
+
+	/* Only one DSD is available in the bidirectional IOCB; the
+	 * remaining DSDs are bundled in continuation IOCBs.
+	 */
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+
+ index = 0;
+
+ for_each_sg(bsg_job->request_payload.sg_list, sg,
+ bsg_job->request_payload.sg_cnt, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets */
+ if (avail_dsds == 0) {
+			/* A continuation type 1 IOCB can accommodate
+			 * 5 DSDs.
+			 */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ entry_count++;
+ }
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+	/* For a read request the DSDs always go to a continuation IOCB
+	 * and follow the write DSDs. If there is room on the current IOCB
+	 * they are added to that IOCB; otherwise a new continuation IOCB
+	 * is allocated.
+	 */
+ for_each_sg(bsg_job->reply_payload.sg_list, sg,
+ bsg_job->reply_payload.sg_cnt, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets */
+ if (avail_dsds == 0) {
+			/* A continuation type 1 IOCB can accommodate
+			 * 5 DSDs.
+			 */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ entry_count++;
+ }
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+	/* This should equal the number of IOCBs required for this cmd */
+ cmd_pkt->entry_count = entry_count;
+}
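
Because the bidirectional IOCB itself carries one DSD and every continuation type 1 IOCB carries five, entry_count works out to 1 + ceil((tot_dsds - 1) / 5). A minimal helper sketching that arithmetic (the name is hypothetical; qla2x00_start_bidir() below reuses qla24xx_calc_iocbs() for this):

	static inline uint16_t bidir_calc_iocbs(uint32_t tot_dsds)
	{
		uint16_t iocbs = 1;	/* the bidir IOCB holds 1 DSD */

		if (tot_dsds > 1) {
			iocbs += (tot_dsds - 1) / 5;
			if ((tot_dsds - 1) % 5)
				iocbs++;	/* partial continuation */
		}
		return iocbs;
	}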
+
+int
+qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
+{
+
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ uint32_t handle;
+ uint32_t index;
+ uint16_t req_cnt;
+ uint16_t cnt;
+ uint32_t *clr_ptr;
+ struct cmd_bidir *cmd_pkt = NULL;
+ struct rsp_que *rsp;
+ struct req_que *req;
+ int rval = EXT_STATUS_OK;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
+
+ rval = QLA_SUCCESS;
+
+ rsp = ha->rsp_q_map[0];
+ req = vha->req;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ return EXT_STATUS_MAILBOX;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == MAX_OUTSTANDING_COMMANDS) {
+ rval = EXT_STATUS_BUSY;
+ goto queuing_error;
+ }
+
+	/* Calculate the number of IOCBs required */
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ /* Check for room on request queue. */
+ if (req->cnt < req_cnt + 2) {
+ if (ha->mqenable)
+ cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ else if (IS_QLA82XX(ha))
+ cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+ else if (IS_FWI2_CAPABLE(ha))
+ cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ else
+ cnt = qla2x00_debounce_register(
+ ISP_REQ_Q_OUT(ha, &reg->isp));
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < req_cnt + 2) {
+ rval = EXT_STATUS_BUSY;
+ goto queuing_error;
+ }
+
+ cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	/* Set NPORT-ID (of vha) */
+ cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
+ cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = vha->d_id.b.area;
+ cmd_pkt->port_id[2] = vha->d_id.b.domain;
+
+ qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ req->cnt -= req_cnt;
+
+ /* Send the command to the firmware */
+ wmb();
+ qla2x00_start_iocbs(vha, req);
+queuing_error:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return rval;
+}
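
The room-on-request-queue check in qla2x00_start_bidir() treats the ring as circular: when the producer index (req->ring_index) is below the firmware consumer index (cnt), the free space is cnt - ring_index; otherwise the count wraps around req->length. A standalone sketch of that computation (hypothetical helper, for illustration only):

/* Hypothetical helper mirroring the ring-space math above. */
static inline uint16_t ring_free_slots(uint16_t consumer, uint16_t producer,
	uint16_t length)
{
	if (producer < consumer)
		return consumer - producer;
	return length - (producer - consumer);
}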