Diffstat (limited to 'drivers/scsi/qla2xxx/qla_target.c')
 drivers/scsi/qla2xxx/qla_target.c | 902
 1 file changed, 756 insertions(+), 146 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..75514a15bea0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
/*
* Global Variables
*/
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
+/* This API intentionally takes dest as a parameter, rather than returning
+ * an int value, to avoid the caller forgetting to issue wmb() after the store */
+void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+ *dest = atomic_inc_return(&base_vha->generation_tick);
+ /* memory barrier */
+ wmb();
+}
+
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
struct qla_tgt *tgt,
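/*
 * Illustrative sketch, not part of the patch: generation values taken via
 * qlt_do_generation_tick() are compared with a signed subtraction (see
 * qlt_fc_port_deleted() further down), so the check stays correct even if
 * the 32-bit counter wraps, the same idiom used by time_after(). The helper
 * name below is hypothetical.
 */
static inline int qlt_gen_older(int req_gen, int sess_gen)
{
	/* non-zero when req_gen was sampled before sess_gen */
	return req_gen - sess_gen < 0;
}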
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_tgt *tgt = sess->tgt;
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ bool logout_started = false;
+ fc_port_t fcport;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+ "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
+ " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+ __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ sess->logout_on_delete, sess->keep_nport_handle,
+ sess->plogi_ack_needed);
BUG_ON(!tgt);
+
+ if (sess->logout_on_delete) {
+ int rc;
+
+ memset(&fcport, 0, sizeof(fcport));
+ fcport.loop_id = sess->loop_id;
+ fcport.d_id = sess->s_id;
+ memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
+ fcport.vha = vha;
+ fcport.tgt_session = sess;
+
+ rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
+
/*
* Release the target session for FC Nexus from fabric module code.
*/
if (sess->se_sess != NULL)
ha->tgt.tgt_ops->free_session(sess);
+ if (logout_started) {
+ bool traced = false;
+
+ while (!ACCESS_ONCE(sess->logout_completed)) {
+ if (!traced) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+ "%s: waiting for sess %p logout\n",
+ __func__, sess);
+ traced = true;
+ }
+ msleep(100);
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
+ "%s: sess %p logout completed\n",
+ __func__, sess);
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (sess->plogi_ack_needed)
+ qlt_send_notify_ack(vha, &sess->tm_iocb,
+ 0, 0, 0, 0, 0, 0);
+
+ list_del(&sess->sess_list_entry);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
"Unregistration of sess %p finished\n", sess);
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- list_del(&sess->sess_list_entry);
- if (sess->deleted)
- list_del(&sess->del_list_entry);
+ if (!list_empty(&sess->del_list_entry))
+ list_del_init(&sess->del_list_entry);
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
loop_id = le16_to_cpu(n->u.isp24.nport_handle);
if (loop_id == 0xFFFF) {
-#if 0 /* FIXME: Re-enable Global event handling.. */
/* Global event */
- atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
- qlt_clear_tgt_db(ha->tgt.qla_tgt);
+ atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+ qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+#if 0 /* FIXME: do we need to choose a session here? */
if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
struct qla_tgt *tgt = sess->tgt;
uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
- if (sess->deleted)
- return;
+ if (sess->deleted) {
+ /* Upgrade to unconditional deletion in case it was temporary */
+ if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
+ list_del(&sess->del_list_entry);
+ else
+ return;
+ }
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
- sess->deleted = 1;
- if (immediate)
+ if (immediate) {
dev_loss_tmo = 0;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ list_add(&sess->del_list_entry, &tgt->del_sess_list);
+ } else {
+ sess->deleted = QLA_SESS_DELETION_PENDING;
+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+ }
sess->expires = jiffies + dev_loss_tmo * HZ;
ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
- "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
- "deletion in %u secs (expires: %lu) immed: %d\n",
- sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
- sess->expires, immediate);
+ "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
+ " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
+ sess->vha->vp_idx, sess->port_name, sess->loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
+ sess->generation);
if (immediate)
- schedule_delayed_work(&tgt->sess_del_work, 0);
+ mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
else
schedule_delayed_work(&tgt->sess_del_work,
sess->expires - jiffies);
@@ -578,9 +663,9 @@ out_free_id_list:
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
- BUG_ON(!sess->deleted);
+ BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
- list_del(&sess->del_list_entry);
+ list_del_init(&sess->del_list_entry);
sess->deleted = 0;
}
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
del_list_entry);
elapsed = jiffies;
if (time_after_eq(elapsed, sess->expires)) {
- qlt_undelete_sess(sess);
+ /* No turning back */
+ list_del_init(&sess->del_list_entry);
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
fcport->d_id.b.al_pa, fcport->d_id.b.area,
fcport->loop_id);
+ /* Cannot undelete at this point */
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ return NULL;
+ }
+
if (sess->deleted)
qlt_undelete_sess(sess);
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
if (sess->local && !local)
sess->local = 0;
+
+ qlt_do_generation_tick(vha, &sess->generation);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
sess->s_id = fcport->d_id;
sess->loop_id = fcport->loop_id;
sess->local = local;
+ INIT_LIST_HEAD(&sess->del_list_entry);
+
+ /* Under normal circumstances we want to logout from firmware when
+ * session eventually ends and release corresponding nport handle.
+ * In the exception cases (e.g. when new PLOGI is waiting) corresponding
+ * code will adjust these flags as necessary. */
+ sess->logout_on_delete = 1;
+ sess->keep_nport_handle = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
"Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
spin_lock_irqsave(&ha->hardware_lock, flags);
list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
vha->vha_tgt.qla_tgt->sess_count++;
+ qlt_do_generation_tick(vha, &sess->generation);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
}
/*
- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ * Called from qla2x00_reg_remote_port()
*/
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ /* Point of no return */
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
} else {
kref_get(&sess->se_sess->sess_kref);
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+/*
+ * max_gen - specifies maximum session generation
+ * at which this deletion request is still valid
+ */
+void
+qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
- unsigned long flags;
if (!vha->hw->tgt.tgt_ops)
return;
- if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ if (!tgt)
return;
- spin_lock_irqsave(&ha->hardware_lock, flags);
if (tgt->tgt_stop) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
}
sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
if (!sess) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
+ if (max_gen - sess->generation < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
+ "Ignoring stale deletion request for se_sess %p / sess %p"
+ " for port %8phC, req_gen %d, sess_gen %d\n",
+ sess->se_sess, sess, sess->port_name, max_gen,
+ sess->generation);
return;
}
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
sess->local = 1;
qlt_schedule_sess_for_deletion(sess, false);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1023,7 +1141,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1081,7 +1199,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
resp->sof_type = abts->sof_type;
resp->exchange_address = abts->exchange_address;
resp->fcp_hdr_le = abts->fcp_hdr_le;
- f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
F_CTL_LAST_SEQ | F_CTL_END_SEQ |
F_CTL_SEQ_INITIATIVE);
p = (uint8_t *)&f_ctl;
@@ -1156,15 +1274,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ctio->entry_count = 1;
ctio->nport_handle = entry->nport_handle;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
ctio->exchange_addr = entry->exchange_addr_to_abort;
- ctio->u.status1.flags =
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_TERMINATE);
+ ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
/* Memory Barrier */
@@ -1175,6 +1292,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
FCP_TMF_CMPL, true);
}
+static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+
+ spin_lock(&vha->cmd_list_lock);
+
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ if (tag == op->atio.u.isp24.exchange_addr) {
+ op->aborted = true;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ if (tag == cmd->atio.u.isp24.exchange_addr) {
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
+ spin_unlock(&vha->cmd_list_lock);
+ return 0;
+}
+
+/* drop cmds for the given lun
+ * XXX only looks for cmds on the port through which lun reset was received
+ * XXX does not go through the lists of other ports (which may have cmds
+ * for the same lun)
+ */
+static void abort_cmds_for_lun(struct scsi_qla_host *vha,
+ uint32_t lun, uint8_t *s_id)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+ uint32_t key;
+
+ key = sid_to_key(s_id);
+ spin_lock(&vha->cmd_list_lock);
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ uint32_t op_key;
+ uint32_t op_lun;
+
+ op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ op_lun = scsilun_to_int(
+ (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+ if (op_key == key && op_lun == lun)
+ op->aborted = true;
+ }
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ uint32_t cmd_key;
+ uint32_t cmd_lun;
+
+ cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+ cmd_lun = scsilun_to_int(
+ (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
+ if (cmd_key == key && cmd_lun == lun)
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ }
+ spin_unlock(&vha->cmd_list_lock);
+}
+
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
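/*
 * Illustrative sketch, not part of the patch: the new abort helpers compare
 * initiators by packing the 3-byte FC source ID into one 24-bit key. The
 * function below is hypothetical and assumes sid_to_key() uses the same
 * domain/area/al_pa layout that abort_cmds_for_s_id() builds explicitly.
 */
static inline uint32_t example_sid_to_key(const uint8_t *s_id)
{
	return ((uint32_t)s_id[0] << 16) |	/* domain */
	       ((uint32_t)s_id[1] << 8) |	/* area */
	       (uint32_t)s_id[2];		/* al_pa */
}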
@@ -1199,8 +1380,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
}
spin_unlock(&se_sess->sess_cmd_lock);
- if (!found_lun)
- return -ENOENT;
+ /* cmd not in LIO lists, look in qla list */
+ if (!found_lun) {
+ if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+ /* send TASK_ABORT response immediately */
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+ return 0;
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
+ "unable to find cmd in driver or LIO for tag 0x%x\n",
+ abts->exchange_addr_to_abort);
+ return -ENOENT;
+ }
+ }
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1476,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
return;
}
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
rc = __qlt_24xx_handle_abts(vha, abts, sess);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1324,20 +1521,19 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
ctio->nport_handle = mcmd->sess->loop_id;
- ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = ha->vp_idx;
ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio->exchange_addr = atio->u.isp24.exchange_addr;
ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_SEND_STATUS);
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio->u.status1.ox_id = cpu_to_le16(temp);
ctio->u.status1.scsi_status =
- __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
- ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+ cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = cpu_to_le16(8);
ctio->u.status1.sense_data[0] = resp_code;
/* Memory Barrier */
@@ -1588,7 +1784,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
- pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -1726,20 +1922,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd = &cmd->se_cmd;
- if (unlikely(cmd->aborted)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
- "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
- vha->vp_idx, cmd, se_cmd, se_cmd->tag);
-
- cmd->state = QLA_TGT_STATE_ABORTED;
- cmd->cmd_flags |= BIT_6;
-
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
-
- /* !! At this point cmd could be already freed !! */
- return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
- }
-
prm->cmd = cmd;
prm->tgt = tgt;
prm->rq_result = scsi_status;
@@ -1903,10 +2085,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
{
prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
(uint32_t)sizeof(ctio->u.status1.sense_data));
- ctio->u.status0.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+ ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
- ctio->u.status0.flags |= __constant_cpu_to_le16(
+ ctio->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
}
@@ -1923,17 +2104,17 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
"non GOOD status\n");
goto skip_explict_conf;
}
- ctio->u.status1.flags |= __constant_cpu_to_le16(
+ ctio->u.status1.flags |= cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
}
skip_explict_conf:
ctio->u.status1.flags &=
- ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
ctio->u.status1.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
ctio->u.status1.scsi_status |=
- __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+ cpu_to_le16(SS_SENSE_LEN_VALID);
ctio->u.status1.sense_length =
cpu_to_le16(prm->sense_buffer_len);
for (i = 0; i < prm->sense_buffer_len/4; i++)
@@ -1953,9 +2134,9 @@ skip_explict_conf:
#endif
} else {
ctio->u.status1.flags &=
- ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
ctio->u.status1.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
ctio->u.status1.sense_length = 0;
memset(ctio->u.status1.sense_data, 0,
sizeof(ctio->u.status1.sense_data));
@@ -2077,7 +2258,6 @@ static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
uint32_t *cur_dsd;
- int sgc;
uint32_t transfer_length = 0;
uint32_t data_bytes;
uint32_t dif_bytes;
@@ -2094,7 +2274,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct atio_from_isp *atio = &prm->cmd->atio;
uint16_t t16;
- sgc = 0;
ha = vha->hw;
pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
@@ -2184,7 +2363,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
- pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -2200,9 +2379,9 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Set transfer direction */
if (cmd->dma_data_direction == DMA_TO_DEVICE)
- pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+ pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
- pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+ pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
pkt->dseg_count = prm->tot_dsds;
@@ -2254,11 +2433,11 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
- crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+ crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Walks data segments */
- pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+ pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
if (!bundling && prm->prot_seg_cnt) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
@@ -2301,6 +2480,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
unsigned long flags = 0;
int res;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ if (cmd->sess->logout_completed)
+ /* no need to terminate. FW already freed exchange. */
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ else
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
memset(&prm, 0, sizeof(prm));
qlt_check_srr_debug(cmd, &xmit_type);
@@ -2313,9 +2505,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
if (unlikely(res != 0)) {
- if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
- return 0;
-
return res;
}
@@ -2345,15 +2534,16 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_build_ctio_crc2_pkt(&prm, vha);
else
res = qlt_24xx_build_ctio_pkt(&prm, vha);
- if (unlikely(res != 0))
+ if (unlikely(res != 0)) {
+ vha->req->cnt += full_req_cnt;
goto out_unmap_unlock;
-
+ }
pkt = (struct ctio7_to_24xx *)prm.pkt;
if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
pkt->u.status0.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+ cpu_to_le16(CTIO7_FLAGS_DATA_IN |
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2365,11 +2555,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cpu_to_le16(prm.rq_result);
pkt->u.status0.residual =
cpu_to_le32(prm.residual);
- pkt->u.status0.flags |= __constant_cpu_to_le16(
+ pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_SEND_STATUS);
if (qlt_need_explicit_conf(ha, cmd, 0)) {
pkt->u.status0.flags |=
- __constant_cpu_to_le16(
+ cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
}
@@ -2397,12 +2587,12 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
ctio->entry_count = 1;
ctio->entry_type = CTIO_TYPE7;
ctio->dseg_count = 0;
- ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+ ctio->u.status1.flags &= ~cpu_to_le16(
CTIO7_FLAGS_DATA_IN);
/* Real finish is ctio_m1's finish */
pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
- pkt->u.status0.flags |= __constant_cpu_to_le16(
+ pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_DONT_RET_CTIO);
/* qlt_24xx_init_ctio_to_isp will correct
@@ -2461,7 +2651,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+ (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
/*
* Either a chip reset is active or this request was from
* previous life, just abort the processing.
@@ -2485,10 +2676,13 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
else
res = qlt_24xx_build_ctio_pkt(&prm, vha);
- if (unlikely(res != 0))
+ if (unlikely(res != 0)) {
+ vha->req->cnt += prm.req_cnt;
goto out_unlock_free_unmap;
+ }
+
pkt = (struct ctio7_to_24xx *)prm.pkt;
- pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+ pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2563,7 +2757,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* Update protection tag */
if (cmd->prot_sg_cnt) {
- uint32_t i, j = 0, k = 0, num_ent;
+ uint32_t i, k = 0, num_ent;
struct scatterlist *sg, *sgl;
@@ -2576,7 +2770,6 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
k += num_ent;
continue;
}
- j = blocks_done - k - 1;
k = blocks_done;
break;
}
@@ -2651,6 +2844,89 @@ out:
/* If hardware_lock held on entry, might drop it, then reaquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy)
+{
+ struct nack_to_isp *nack;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ int ret = 0;
+
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
+ "Sending TERM ELS CTIO (ha=%p)\n", ha);
+
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ if (pkt == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe080,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ pkt->entry_type = NOTIFY_ACK_TYPE;
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ nack = (struct nack_to_isp *)pkt;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+
+ /* terminate */
+ nack->u.isp24.flags |=
+ __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
+
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ return ret;
+}
+
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *imm, int ha_locked)
+{
+ unsigned long flags = 0;
+ int rc;
+
+ if (qlt_issue_marker(vha, ha_locked) < 0)
+ return;
+
+ if (ha_locked) {
+ rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0 /* Todo */
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+ goto done;
+ }
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0 /* Todo */
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+
+done:
+ if (!ha_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
@@ -2687,14 +2963,14 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
- ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
CTIO7_FLAGS_TERMINATE);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.ox_id = cpu_to_le16(temp);
@@ -2715,7 +2991,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
- unsigned long flags;
+ unsigned long flags = 0;
int rc;
if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3007,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
rc = __qlt_send_term_exchange(vha, cmd, atio);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
!cmd->cmd_sent_to_fw)) {
- if (!ha_locked && !in_interrupt())
- msleep(250); /* just in case */
-
- qlt_unmap_sg(vha, cmd);
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
}
+
+ if (!ha_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
return;
}
@@ -2792,6 +3069,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
}
+void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+{
+ struct qla_tgt *tgt = cmd->tgt;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): terminating exchange for aborted cmd=%p "
+ "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
+ se_cmd->tag);
+
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->cmd_flags |= BIT_6;
+
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+}
+EXPORT_SYMBOL(qlt_abort_cmd);
+
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt_sess *sess = cmd->sess;
@@ -2915,7 +3210,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
term = !(c->flags &
- __constant_cpu_to_le16(OF_TERM_EXCH));
+ cpu_to_le16(OF_TERM_EXCH));
} else
term = 1;
@@ -3015,7 +3310,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
dump_stack();
}
- cmd->cmd_flags |= BIT_12;
+ cmd->cmd_flags |= BIT_17;
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3063,7 +3358,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
{
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd;
- const struct target_core_fabric_ops *tfo;
struct qla_tgt_cmd *cmd;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
@@ -3081,7 +3375,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
se_cmd = &cmd->se_cmd;
- tfo = se_cmd->se_tfo;
cmd->cmd_sent_to_fw = 0;
qlt_unmap_sg(vha, cmd);
@@ -3177,23 +3470,21 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- ;
+ cmd->cmd_flags |= BIT_12;
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- int rx_status = 0;
-
cmd->state = QLA_TGT_STATE_DATA_IN;
- if (unlikely(status != CTIO_SUCCESS))
- rx_status = -EIO;
- else
+ if (status == CTIO_SUCCESS)
cmd->write_data_transferred = 1;
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ cmd->cmd_flags |= BIT_18;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
"Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
} else {
+ cmd->cmd_flags |= BIT_19;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
"qla_target(%d): A command in state (%d) should "
"not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3496,6 @@ skip_term:
dump_stack();
}
-
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3263,6 +3553,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
if (tgt->tgt_stop)
goto out_term;
+ if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
+ "cmd with tag %u is aborted\n",
+ cmd->atio.u.isp24.exchange_addr);
+ goto out_term;
+ }
+
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3613,12 @@ out_term:
static void qlt_do_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ scsi_qla_host_t *vha = cmd->vha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&cmd->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
__qlt_do_work(cmd);
}
@@ -3345,6 +3648,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
+ cmd->cmd_flags = 0;
+ cmd->jiffies_at_alloc = get_jiffies_64();
+
+ cmd->reset_count = vha->hw->chip_reset;
+
return cmd;
}
@@ -3362,14 +3670,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
unsigned long flags;
uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&op->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+ if (op->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
+ "sess_op with tag %u is aborted\n",
+ op->atio.u.isp24.exchange_addr);
+ goto out_term;
+ }
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
if (op->atio.u.raw.entry_count > 1) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
+ "Dropping multy entry atio %p\n", &op->atio);
goto out_term;
}
@@ -3434,10 +3753,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
memcpy(&op->atio, atio, sizeof(*atio));
op->vha = vha;
+
+ spin_lock(&vha->cmd_list_lock);
+ list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
+ spin_unlock(&vha->cmd_list_lock);
+
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
return 0;
}
+
+ /* Another WWN used to have our s_id. Our PLOGI scheduled its
+ * session deletion, but it's still in sess_del_work wq */
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ ql_dbg(ql_dbg_io, vha, 0x3061,
+ "New command while old session %p is being deleted\n",
+ sess);
+ return -EFAULT;
+ }
+
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
@@ -3451,13 +3785,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- cmd->cmd_flags = 0;
- cmd->jiffies_at_alloc = get_jiffies_64();
-
- cmd->reset_count = vha->hw->chip_reset;
-
cmd->cmd_in_wq = 1;
cmd->cmd_flags |= BIT_0;
+
+ spin_lock(&vha->cmd_list_lock);
+ list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
+ spin_unlock(&vha->cmd_list_lock);
+
INIT_WORK(&cmd->work, qlt_do_work);
queue_work(qla_tgt_wq, &cmd->work);
return 0;
@@ -3471,6 +3805,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
int res;
uint8_t tmr_func;
@@ -3511,6 +3846,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
"qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
tmr_func = TMR_LUN_RESET;
+ abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
break;
case QLA_TGT_CLEAR_TS:
@@ -3580,12 +3916,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct qla_tgt *tgt;
struct qla_tgt_sess *sess;
uint32_t lun, unpacked_lun;
- int lun_size, fn;
+ int fn;
tgt = vha->vha_tgt.qla_tgt;
lun = a->u.isp24.fcp_cmnd.lun;
- lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
a->u.isp24.fcp_hdr.s_id);
@@ -3599,6 +3934,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
sizeof(struct atio_from_isp));
}
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+ return -EFAULT;
+
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
@@ -3664,22 +4002,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
return __qlt_abort_task(vha, iocb, sess);
}
+void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
+{
+ if (fcport->tgt_session) {
+ if (rc != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+ "%s: se_sess %p / sess %p from"
+ " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+ " LOGO failed: %#x\n",
+ __func__,
+ fcport->tgt_session->se_sess,
+ fcport->tgt_session,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, rc);
+ }
+
+ fcport->tgt_session->logout_completed = 1;
+ }
+}
+
+static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
+ struct imm_ntfy_from_isp *b)
+{
+ struct imm_ntfy_from_isp tmp;
+ memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
+ memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
+ memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
+}
+
+/*
+* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
+*
+* Schedules sessions with matching port_id/loop_id but different wwn for
+* deletion. Returns existing session with matching wwn if present.
+* Null otherwise.
+*/
+static struct qla_tgt_sess *
+qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
+ port_id_t port_id, uint16_t loop_id)
+{
+ struct qla_tgt_sess *sess = NULL, *other_sess;
+ uint64_t other_wwn;
+
+ list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+
+ other_wwn = wwn_to_u64(other_sess->port_name);
+
+ if (wwn == other_wwn) {
+ WARN_ON(sess);
+ sess = other_sess;
+ continue;
+ }
+
+ /* find other sess with nport_id collision */
+ if (port_id.b24 == other_sess->s_id.b24) {
+ if (loop_id != other_sess->loop_id) {
+ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+ /*
+ * logout_on_delete is set by default, but another
+ * session that has the same s_id/loop_id combo
+ * might have cleared it when requesting this
+ * session's deletion, so don't touch it
+ */
+ qlt_schedule_sess_for_deletion(other_sess, true);
+ } else {
+ /*
+ * Another wwn used to have our s_id/loop_id
+ * combo - kill the session, but don't log out
+ */
+ sess->logout_on_delete = 0;
+ qlt_schedule_sess_for_deletion(other_sess,
+ true);
+ }
+ continue;
+ }
+
+ /* find other sess with nport handle collision */
+ if (loop_id == other_sess->loop_id) {
+ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+ /* Same loop_id but different s_id
+ * Ok to kill and logout */
+ qlt_schedule_sess_for_deletion(other_sess, true);
+ }
+ }
+
+ return sess;
+}
+
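/*
 * Illustrative sketch, not part of the patch: qlt_find_sess_invalidate_other()
 * above distinguishes three ways an existing session can collide with the
 * incoming PLOGI/PRLI; the hypothetical enum below just names those cases.
 */
enum qlt_sess_collision {
	QLT_SESS_SAME_WWN,	/* matching WWN, returned to the caller */
	QLT_SESS_SAME_PORT_ID,	/* different WWN holding our S_ID, scheduled for deletion */
	QLT_SESS_SAME_LOOP_ID,	/* different WWN holding our nport handle, scheduled for deletion */
};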
+/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+ uint32_t key;
+ int count = 0;
+
+ key = (((u32)s_id->b.domain << 16) |
+ ((u32)s_id->b.area << 8) |
+ ((u32)s_id->b.al_pa));
+
+ spin_lock(&vha->cmd_list_lock);
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ if (op_key == key) {
+ op->aborted = true;
+ count++;
+ }
+ }
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+ if (cmd_key == key) {
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ count++;
+ }
+ }
+ spin_unlock(&vha->cmd_list_lock);
+
+ return count;
+}
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id;
+ uint16_t wd3_lo;
int res = 0;
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
"qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+ /* res = 1 means ack at the end of thread
+ * res = 0 means ack async/later.
+ */
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
- case ELS_FLOGI:
+
+ /* Mark all stale commands in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn)
+ sess = qlt_find_sess_invalidate_other(tgt, wwn,
+ port_id, loop_id);
+
+ if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+ res = 1;
+ break;
+ }
+
+ if (sess->plogi_ack_needed) {
+ /*
+ * Initiator sent another PLOGI before last PLOGI could
+ * finish. Swap plogi iocbs and terminate old one
+ * without acking, new one will get acked when session
+ * deletion completes.
+ */
+ ql_log(ql_log_warn, sess->vha, 0xf094,
+ "sess %p received double plogi.\n", sess);
+
+ qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
+
+ qlt_send_term_imm_notif(vha, iocb, 1);
+
+ res = 0;
+ break;
+ }
+
+ res = 0;
+
+ /*
+ * Save immediate Notif IOCB for Ack when sess is done
+ * and being deleted.
+ */
+ memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
+ sess->plogi_ack_needed = 1;
+
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+ * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->s_id.b24 == port_id.b24));
+ qlt_schedule_sess_for_deletion(sess, true);
+ break;
+
case ELS_PRLI:
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wwn)
+ sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
+ loop_id);
+
+ if (sess != NULL) {
+ if (sess->deleted) {
+ /*
+ * Impatient initiator sent PRLI before last
+ * PLOGI could finish. Force the initiator to retry
+ * while the previous one finishes.
+ */
+ ql_log(ql_log_warn, sess->vha, 0xf095,
+ "sess %p PRLI received, before plogi ack.\n",
+ sess);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
+
+ /*
+ * This shouldn't happen under normal circumstances,
+ * since we have deleted the old session during PLOGI
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
+ "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
+ sess->loop_id, sess, iocb->u.isp24.nport_handle);
+
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ sess->s_id = port_id;
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ }
+ res = 1; /* send notify ack */
+
+ /* Make session global (not used in fabric mode) */
+ if (ha->current_topology != ISP_CFG_F) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /* todo: else - create sess here. */
+ res = 1; /* send notify ack */
+ }
+
+ break;
+
case ELS_LOGO:
case ELS_PRLO:
res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4293,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ case ELS_FLOGI: /* should never happen */
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
"qla_target(%d): Unsupported ELS command %x "
@@ -3968,16 +4565,20 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
+#ifndef __CHECKER__
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
NOTIFY_ACK_SRR_FLAGS_REJECT,
NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+#ifndef __CHECKER__
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#endif
kfree(imm);
}
@@ -4321,14 +4922,14 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
ctio24->nport_handle = sess->loop_id;
- ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- __constant_cpu_to_le16(
+ cpu_to_le16(
CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
CTIO7_FLAGS_DONT_RET_CTIO);
/*
@@ -4656,7 +5257,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
int rc;
if (atio->u.isp2x.status !=
- __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+ cpu_to_le16(ATIO_CDB_VALID)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05e,
"qla_target(%d): ATIO with error "
"status %x received\n", vha->vp_idx,
@@ -4730,7 +5331,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
le16_to_cpu(entry->u.isp2x.status));
tgt->notify_ack_expected--;
if (entry->u.isp2x.status !=
- __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+ cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe061,
"qla_target(%d): NOTIFY_ACK "
"failed %x\n", vha->vp_idx,
@@ -5012,6 +5613,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ sess = NULL;
+ goto out_term;
+ }
+
kref_get(&sess->se_sess->sess_kref);
}
@@ -5044,7 +5650,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
uint8_t *s_id = NULL; /* to hide compiler warnings */
int rc;
uint32_t lun, unpacked_lun;
- int lun_size, fn;
+ int fn;
void *iocb;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5066,12 +5672,16 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ sess = NULL;
+ goto out_term;
+ }
+
kref_get(&sess->se_sess->sess_kref);
}
iocb = a;
lun = a->u.isp24.fcp_cmnd.lun;
- lun_size = sizeof(lun);
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
@@ -5552,6 +6162,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
/* Adjust ring index */
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+ RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
}
void
@@ -5594,19 +6205,19 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+ nv->exchange_count = cpu_to_le16(0xFFFF);
/* Enable target mode */
- nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
if (!qla_ini_mode_enabled(vha))
- nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Enable initial LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
if (ql2xtgt_tape_enable)
/* Enable FC Tape support */
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -5615,9 +6226,9 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
/* Disable Full Login after LIP */
- nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_14);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -5639,12 +6250,12 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_8);
} else {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
- nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
}
}
@@ -5656,7 +6267,7 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
if (ha->tgt.node_name_set) {
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
- icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
}
@@ -5681,20 +6292,19 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+ nv->exchange_count = cpu_to_le16(0xFFFF);
/* Enable target mode */
- nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
if (!qla_ini_mode_enabled(vha))
- nv->firmware_options_1 |=
- __constant_cpu_to_le32(BIT_5);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Enable initial LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
if (ql2xtgt_tape_enable)
/* Enable FC tape support */
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -5703,9 +6313,9 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
/* Disable Full Login after LIP */
- nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_14);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -5727,12 +6337,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_8);
} else {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
- nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
}
}
@@ -5747,7 +6357,7 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
if (ha->tgt.node_name_set) {
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
- icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
}
@@ -5793,7 +6403,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {