Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/Makefile           |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h             |  273
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c        |  723
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c         |  735
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h         |   14
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h        |   47
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c          | 1754
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c     |  103
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h     |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h        |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c         | 2499
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c     |  437
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h          |  211
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h         |   93
-rw-r--r--  drivers/scsi/lpfc/lpfc_ids.h         |   34
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c        |  975
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h      |   10
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c        |  205
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c         |   11
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c   |  185
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c        |  210
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h        |    8
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c       |  210
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c        | 1010
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h        |    6
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c         | 3380
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h         |   60
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h        |    9
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h     |    6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vmid.c        |  286
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c       |  185
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h       |    6
32 files changed, 6683 insertions(+), 7012 deletions(-)
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 092a971d066b..bbd1faf41e80 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -33,4 +33,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
- lpfc_nvme.o lpfc_nvmet.o
+ lpfc_nvme.o lpfc_nvmet.o lpfc_vmid.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2f8e6d0a926f..9ad233b40a9e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -48,9 +48,6 @@ struct lpfc_sli2_slim;
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
-#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
- cmnd for menlo needs nearly twice as for firmware
- downloads using bsg */
#define LPFC_DEFAULT_XPSGL_SIZE 256
#define LPFC_MAX_SG_TABLESIZE 0xffff
@@ -71,8 +68,6 @@ struct lpfc_sli2_slim;
#define LPFC_MIN_TGT_QDEPTH 10
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
-#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
- collection. */
/*
* Following time intervals are used of adjusting SCSI device
* queue depths when there are driver resource error or Firmware
@@ -408,6 +403,7 @@ struct lpfc_trunk_link {
link1,
link2,
link3;
+ u32 phy_lnk_speed;
};
/* Format of congestion module parameters */
@@ -496,52 +492,50 @@ struct lpfc_cgn_info {
__le32 cgn_alarm_hr[24];
__le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS];
- /* Start of congestion statistics */
- uint8_t cgn_stat_npm; /* Notifications per minute */
-
- /* Start Time */
- uint8_t cgn_stat_month;
- uint8_t cgn_stat_day;
- uint8_t cgn_stat_year;
- uint8_t cgn_stat_hour;
- uint8_t cgn_stat_minute;
- uint8_t cgn_pad2[2];
-
- __le32 cgn_notification;
- __le32 cgn_peer_notification;
- __le32 link_integ_notification;
- __le32 delivery_notification;
-
- uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
- uint8_t cgn_stat_cgn_day;
- uint8_t cgn_stat_cgn_year;
- uint8_t cgn_stat_cgn_hour;
- uint8_t cgn_stat_cgn_min;
- uint8_t cgn_stat_cgn_sec;
-
- uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
- uint8_t cgn_stat_peer_day;
- uint8_t cgn_stat_peer_year;
- uint8_t cgn_stat_peer_hour;
- uint8_t cgn_stat_peer_min;
- uint8_t cgn_stat_peer_sec;
-
- uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
- uint8_t cgn_stat_lnk_day;
- uint8_t cgn_stat_lnk_year;
- uint8_t cgn_stat_lnk_hour;
- uint8_t cgn_stat_lnk_min;
- uint8_t cgn_stat_lnk_sec;
-
- uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
- uint8_t cgn_stat_del_day;
- uint8_t cgn_stat_del_year;
- uint8_t cgn_stat_del_hour;
- uint8_t cgn_stat_del_min;
- uint8_t cgn_stat_del_sec;
-#define LPFC_CGN_STAT_SIZE 48
-#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \
- LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+ struct_group(cgn_stat,
+ uint8_t cgn_stat_npm; /* Notifications per minute */
+
+ /* Start Time */
+ uint8_t cgn_stat_month;
+ uint8_t cgn_stat_day;
+ uint8_t cgn_stat_year;
+ uint8_t cgn_stat_hour;
+ uint8_t cgn_stat_minute;
+ uint8_t cgn_pad2[2];
+
+ __le32 cgn_notification;
+ __le32 cgn_peer_notification;
+ __le32 link_integ_notification;
+ __le32 delivery_notification;
+
+ uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
+ uint8_t cgn_stat_cgn_day;
+ uint8_t cgn_stat_cgn_year;
+ uint8_t cgn_stat_cgn_hour;
+ uint8_t cgn_stat_cgn_min;
+ uint8_t cgn_stat_cgn_sec;
+
+ uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
+ uint8_t cgn_stat_peer_day;
+ uint8_t cgn_stat_peer_year;
+ uint8_t cgn_stat_peer_hour;
+ uint8_t cgn_stat_peer_min;
+ uint8_t cgn_stat_peer_sec;
+
+ uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
+ uint8_t cgn_stat_lnk_day;
+ uint8_t cgn_stat_lnk_year;
+ uint8_t cgn_stat_lnk_hour;
+ uint8_t cgn_stat_lnk_min;
+ uint8_t cgn_stat_lnk_sec;
+
+ uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
+ uint8_t cgn_stat_del_day;
+ uint8_t cgn_stat_del_year;
+ uint8_t cgn_stat_del_hour;
+ uint8_t cgn_stat_del_min;
+ uint8_t cgn_stat_del_sec;
+ );
__le32 cgn_info_crc;
#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
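[Annotation] The struct_group() conversion above replaces the hand-maintained LPFC_CGN_STAT_SIZE constant: the congestion-statistics members now form a named sub-struct, cgn_stat, whose size the compiler tracks automatically. A minimal sketch of the payoff, with clear_cgn_stats() as a hypothetical consumer (the real users live elsewhere in the driver; only the field/group names come from this hunk):

static void clear_cgn_stats(struct lpfc_cgn_info *cp)
{
	/* One bounded access covers the whole statistics region, and
	 * FORTIFY_SOURCE can verify the write stays inside the group,
	 * which the old hand-counted byte arithmetic defeated.
	 */
	memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
}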
@@ -594,6 +588,7 @@ struct lpfc_vport {
#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -612,6 +607,7 @@ struct lpfc_vport {
#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */
#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
+#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
struct list_head fc_nodes;
@@ -669,8 +665,6 @@ struct lpfc_vport {
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
- int unreg_vpi_cmpl;
-
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
@@ -716,6 +710,7 @@ struct lpfc_vport {
#define LPFC_VMID_QFPA_CMPL 0x4
#define LPFC_VMID_QOS_ENABLED 0x8
#define LPFC_VMID_TIMER_ENBLD 0x10
+#define LPFC_VMID_TYPE_PRIO 0x20
struct fc_qfpa_res *qfpa_res;
struct fc_vport *fc_vport;
@@ -736,14 +731,11 @@ struct lpfc_vport {
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
- uint8_t stat_data_enabled;
- uint8_t stat_data_blocked;
struct list_head rcv_buffer_list;
unsigned long rcv_buffer_time_stamp;
uint32_t vport_flag;
-#define STATIC_VPORT 1
-#define FAWWPN_SET 2
-#define FAWWPN_PARAM_CHG 4
+#define STATIC_VPORT 0x1
+#define FAWWPN_PARAM_CHG 0x2
uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask;
@@ -900,6 +892,11 @@ enum lpfc_irq_chann_mode {
NHT_MODE,
};
+enum lpfc_hba_bit_flags {
+ FABRIC_COMANDS_BLOCKED,
+ HBA_PCI_ERR,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
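[Annotation] FABRIC_COMANDS_BLOCKED (formerly a bare #define next to bit_flags, removed in a later hunk) and the relocated HBA_PCI_ERR are now enum values naming bit positions in phba->bit_flags, which is intended for the atomic bitops API rather than the locked read-modify-write used on hba_flag. A hedged sketch of the access pattern this enables:

	/* Sketch only: bit numbers from enum lpfc_hba_bit_flags are used
	 * with the lockless atomic bitop helpers on phba->bit_flags.
	 */
	if (test_bit(HBA_PCI_ERR, &phba->bit_flags))
		return -ENODEV;		/* PCI slot offline: fail fast */

	set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);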
@@ -920,6 +917,10 @@ struct lpfc_hba {
(struct lpfc_vport *vport,
struct lpfc_io_buf *lpfc_cmd,
uint8_t tmo);
+ int (*lpfc_scsi_prep_task_mgmt_cmd)
+ (struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
@@ -931,8 +932,6 @@ struct lpfc_hba {
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
- IOCB_t * (*lpfc_get_iocb_from_iocbq)
- (struct lpfc_iocbq *);
void (*lpfc_scsi_cmd_iocb_cmpl)
(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
@@ -965,7 +964,24 @@ struct lpfc_hba {
int (*lpfc_bg_scsi_prep_dma_buf)
(struct lpfc_hba *, struct lpfc_io_buf *);
- /* Add new entries here */
+
+ /* Prep SLI WQE/IOCB jump table entries */
+ void (*__lpfc_sli_prep_els_req_rsp)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd,
+ u8 tmo, u8 expect_rsp);
+ void (*__lpfc_sli_prep_gen_req)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u32 num_entry, u8 tmo);
+ void (*__lpfc_sli_prep_xmit_seq64)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u16 ox_id, u32 num_entry, u8 rctl,
+ u8 last_seq, u8 cr_cx_cmd);
+ void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag,
+ u8 ulp_class, u16 cqid, bool ia,
+ bool wqec);
/* expedite pool */
struct lpfc_epd_pool epd_pool;
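[Annotation] The four new __lpfc_sli_prep_* pointers extend the jump-table pattern already used throughout struct lpfc_hba: setup installs an SLI-3 or SLI-4 implementation once, and callers build requests without branching on the revision at every site. A sketch of the dispatch, where the _s3/_s4 names and the thin wrapper are assumptions modeled on the driver's existing jump-table entries:

	/* Installed once at setup (illustrative): */
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->__lpfc_sli_prep_gen_req = lpfc_sli_prep_gen_req_s4;
	else
		phba->__lpfc_sli_prep_gen_req = lpfc_sli_prep_gen_req_s3;

/* Callers such as lpfc_bsg_send_mgmt_cmd (below) then go through a
 * rev-agnostic wrapper of roughly this shape:
 */
static void lpfc_sli_prep_gen_req(struct lpfc_hba *phba,
				  struct lpfc_iocbq *cmdiocbq,
				  struct lpfc_dmabuf *bmp, u16 rpi,
				  u32 num_entry, u8 tmo)
{
	phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
}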
@@ -1005,6 +1021,7 @@ struct lpfc_hba {
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
+#define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -1023,15 +1040,13 @@ struct lpfc_hba {
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
-#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
-#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
+#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
@@ -1039,7 +1054,9 @@ struct lpfc_hba {
#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
+#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */
+ struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -1139,8 +1156,6 @@ struct lpfc_hba {
uint32_t cfg_nvme_seg_cnt;
uint32_t cfg_scsi_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
- uint64_t cfg_soft_wwnn;
- uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
@@ -1165,6 +1180,16 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
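[Annotation] Relocating the LPFC_ENABLE_* values next to cfg_enable_fc4_type and deriving the new MAX/DEF macros from CONFIG_NVME_FC makes the enable_fc4_type module-parameter range config-dependent. Worked out against the LPFC_ATTR_R() change later in lpfc_attr.c:

/* LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
 *             LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE, ...)
 * now resolves, per the #if above, to:
 *   CONFIG_NVME_FC=y/m: default 3 (FCP+NVME), valid range [1,3]
 *   CONFIG_NVME_FC=n:   default 1 (FCP only), valid range [1,1]
 * so requesting NVMe on a kernel without NVMe-over-FC fails parameter
 * validation instead of silently degrading. IS_ENABLED() folds to a
 * compile-time constant, so this costs nothing at run time.
 */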
@@ -1186,9 +1211,6 @@ struct lpfc_hba {
uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP 1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
@@ -1265,7 +1287,6 @@ struct lpfc_hba {
#define VPD_PORT 0x8 /* valid vpd port data */
#define VPD_MASK 0xf /* mask for any vpd data */
- uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
struct timer_list eratt_poll;
@@ -1331,7 +1352,6 @@ struct lpfc_hba {
atomic_t fabric_iocb_count;
struct timer_list fabric_block_timer;
unsigned long bit_flags;
-#define FABRIC_COMANDS_BLOCKED 0
atomic_t num_rsrc_err;
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
@@ -1413,15 +1433,6 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- int wait_4_mlo_maint_flg;
- wait_queue_head_t wait_4_mlo_m_q;
- /* data structure used for latency data collection */
-#define LPFC_NO_BUCKET 0
-#define LPFC_LINEAR_BUCKET 1
-#define LPFC_POWER2_BUCKET 2
- uint8_t bucket_type;
- uint32_t bucket_base;
- uint32_t bucket_step;
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
@@ -1449,8 +1460,6 @@ struct lpfc_hba {
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
- uint8_t menlo_flag; /* menlo generic flags */
-#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
@@ -1545,16 +1554,13 @@ struct lpfc_hba {
/* cgn_reg_signal and cgn_init_reg_signal use
* enum fc_edc_cg_signal_cap_types
*/
- u16 cgn_fpin_frequency;
+ u16 cgn_fpin_frequency; /* In units of msecs */
#define LPFC_FPIN_INIT_FREQ 0xffff
u32 cgn_sig_freq;
u32 cgn_acqe_cnt;
/* RX monitor handling for CMF */
- struct rxtable_entry *rxtable; /* RX_monitor information */
- atomic_t rxtable_idx_head;
-#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73)
- atomic_t rxtable_idx_tail;
+ struct lpfc_rx_info_monitor *rx_monitor;
atomic_t rx_max_read_cnt; /* Maximum read bytes */
uint64_t rx_block_cnt;
@@ -1591,10 +1597,11 @@ struct lpfc_hba {
char os_host_name[MAXHOSTNAMELEN];
- /* SCSI host template information - for physical port */
- struct scsi_host_template port_template;
- /* SCSI host template information - for all vports */
- struct scsi_host_template vport_template;
+ /* LD Signaling */
+ u32 degrade_activate_threshold;
+ u32 degrade_deactivate_threshold;
+ u32 fec_degrade_interval;
+
atomic_t dbg_log_idx;
atomic_t dbg_log_cnt;
atomic_t dbg_log_dmping;
@@ -1603,7 +1610,8 @@ struct lpfc_hba {
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32
-struct rxtable_entry {
+struct rx_info_entry {
+ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
uint64_t total_bytes; /* Total no of read bytes requested */
uint64_t rcv_bytes; /* Total no of read bytes completed */
uint64_t avg_io_size;
@@ -1617,6 +1625,13 @@ struct rxtable_entry {
uint32_t timer_interval;
};
+struct lpfc_rx_info_monitor {
+ struct rx_info_entry *ring; /* info organized in a circular buffer */
+ u32 head_idx, tail_idx; /* index to head/tail of ring */
+ spinlock_t lock; /* spinlock for ring */
+ u32 entries; /* storing number entries/size of ring */
+};
+
static inline struct Scsi_Host *
lpfc_shost_from_vport(struct lpfc_vport *vport)
{
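[Annotation] struct lpfc_rx_info_monitor replaces the old open-coded rxtable plus two atomic head/tail indices (removed in an earlier hunk) with a conventional locked ring: ring[] is sized at allocation and the indices wrap modulo entries. A hedged sketch of how a sample would be recorded under the new scheme; the helper's name and exact overwrite policy are assumptions, only the struct fields come from the patch:

static void rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
			      struct rx_info_entry *entry)
{
	struct rx_info_entry *ring = rx_monitor->ring;
	unsigned long flags;

	spin_lock_irqsave(&rx_monitor->lock, flags);
	memcpy(&ring[rx_monitor->tail_idx], entry, sizeof(*entry));
	rx_monitor->tail_idx = (rx_monitor->tail_idx + 1) %
			       rx_monitor->entries;
	/* Ring full: advance head so it keeps pointing at the oldest. */
	if (rx_monitor->tail_idx == rx_monitor->head_idx)
		rx_monitor->head_idx = (rx_monitor->head_idx + 1) %
				       rx_monitor->entries;
	spin_unlock_irqrestore(&rx_monitor->lock, flags);
}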
@@ -1796,3 +1811,75 @@ static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
{
return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
}
+
+static inline
+u8 get_job_ulpstatus(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
+ else
+ return iocbq->iocb.ulpStatus;
+}
+
+static inline
+u32 get_job_word4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.parameter;
+ else
+ return iocbq->iocb.un.ulpWord[4];
+}
+
+static inline
+u8 get_job_cmnd(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpCommand;
+}
+
+static inline
+u16 get_job_ulpcontext(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_ctxt_tag, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpContext;
+}
+
+static inline
+u16 get_job_rcvoxid(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_rcvoxid, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.unsli3.rcvsli3.ox_id;
+}
+
+static inline
+u32 get_job_data_placed(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.total_data_placed;
+ else
+ return iocbq->iocb.un.genreq64.bdl.bdeSize;
+}
+
+static inline
+u32 get_job_abtsiotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wqe.abort_cmd.wqe_com.abort_tag;
+ else
+ return iocbq->iocb.un.acxri.abortIoTag;
+}
+
+static inline
+u32 get_job_els_rsp64_did(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_els_did, &iocbq->wqe.els_req.wqe_dest);
+ else
+ return iocbq->iocb.un.elsreq64.remoteID;
+}
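[Annotation] These get_job_* accessors are the core of the lpfc.h rework: completion and request fields live in different places for SLI-3 (IOCB_t words) and SLI-4 (WQE/WCQE), and the helpers let upper layers stop branching on sli_rev at every use. The effect is visible throughout the lpfc_bsg.c hunks below; a condensed before/after sketch built from that code:

	/* Before (SLI-3-only view, wrong layout on SLI-4):
	 *	if (rsp->ulpStatus == IOSTAT_LS_RJT)
	 *		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
	 *
	 * After (rev-agnostic, as in lpfc_bsg_rport_els_cmp below):
	 */
	u32 ulp_status = get_job_ulpstatus(phba, rspiocbq);
	u32 ulp_word4  = get_job_word4(phba, rspiocbq);
	u8 *rjt_data;

	if (ulp_status == IOSTAT_LS_RJT)
		rjt_data = (u8 *)&ulp_word4;	/* LS_RJT payload is word 4 */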
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index dd4c51b6ef4e..ef1481326fd7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -922,25 +922,6 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the Menlo Maintenance sli flag.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- (phba->sli.sli_flag & LPFC_MENLO_MAINT));
-}
-
-/**
* lpfc_vportnum_show - Return the port number in ascii of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -1109,10 +1090,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Menlo Maint Mode\n");
- else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -1120,12 +1098,22 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
- if (vport->fc_flag & FC_FABRIC)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Fabric\n");
- else
+ if (vport->fc_flag & FC_FABRIC) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag &
+ LPFC_FAWWPN_FABRIC)
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ " Fabric FA-PWWN\n");
+ else
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ " Fabric\n");
+ } else {
len += scnprintf(buf + len, PAGE_SIZE-len,
" Point-2-Point\n");
+ }
}
}
@@ -1315,6 +1303,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
+ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
@@ -1709,25 +1700,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
before_fc_flag = phba->pport->fc_flag;
sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
- /* Disable SR-IOV virtual functions if enabled */
- if (phba->cfg_sriov_nr_virtfn) {
- pci_disable_sriov(pdev);
- phba->cfg_sriov_nr_virtfn = 0;
- }
+ if (opcode == LPFC_FW_DUMP) {
+ init_completion(&online_compl);
+ phba->fw_dump_cmpl = &online_compl;
+ } else {
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
- if (opcode == LPFC_FW_DUMP)
- phba->hba_flag |= HBA_FW_DUMP_OP;
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (status != 0)
+ return status;
- if (status != 0) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return status;
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
}
- /* wait for the device to be quiesced before firmware reset */
- msleep(100);
-
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
@@ -1756,24 +1747,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
return rc;
}
/* keep the original port state */
- if (before_fc_flag & FC_OFFLINE_MODE)
- goto out;
-
- init_completion(&online_compl);
- job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (!job_posted)
+ if (before_fc_flag & FC_OFFLINE_MODE) {
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
goto out;
+ }
- wait_for_completion(&online_compl);
+ /* Firmware dump will trigger an HA_ERATT event, and
+ * lpfc_handle_eratt_s4 routine already handles bringing the port back
+ * online.
+ */
+ if (opcode == LPFC_FW_DUMP) {
+ wait_for_completion(phba->fw_dump_cmpl);
+ } else {
+ init_completion(&online_compl);
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
+ wait_for_completion(&online_compl);
+ }
out:
/* in any case, restore the virtual functions enabled as before */
if (sriov_nr_virtfn) {
+ /* If fw_dump was performed, first disable to clean up */
+ if (opcode == LPFC_FW_DUMP) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+
sriov_err =
lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
if (!sriov_err)
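[Annotation] The rework above converts LPFC_FW_DUMP from a flag-driven path (HBA_FW_DUMP_OP, now deleted) to a completion-based handshake: the requester parks a struct completion in phba->fw_dump_cmpl and sleeps, and the error-attention path that already brings the port back online signals it when the dump finishes. A stripped-down sketch of both sides; the completer half is an assumption about lpfc_handle_eratt_s4, which the comment in the hunk names:

	/* Requester side (as in lpfc_sli4_pdev_reg_request above): */
	init_completion(&online_compl);
	phba->fw_dump_cmpl = &online_compl;
	/* ... trigger the dump via the PDEV control register ... */
	wait_for_completion(phba->fw_dump_cmpl);

	/* Completer side, sketched: once the eratt handler has restored
	 * the port, it would do something like
	 */
	if (phba->fw_dump_cmpl) {
		complete(phba->fw_dump_cmpl);
		phba->fw_dump_cmpl = NULL;
	}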
@@ -2796,7 +2805,6 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
-static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR_RO(lpfc_drvr_version);
static DEVICE_ATTR_RO(lpfc_enable_fip);
@@ -2817,7 +2825,6 @@ static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
-static char *lpfc_soft_wwn_key = "C99G71SL8032A";
#define WWN_SZ 8
/**
* lpfc_wwn_set - Convert string to the 8 byte WWN value.
@@ -2861,229 +2868,7 @@ lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
}
return 0;
}
-/**
- * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: containing the string lpfc_soft_wwn_key.
- * @count: must be size of lpfc_soft_wwn_key.
- *
- * Returns:
- * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
- * length of buf indicates success
- **/
-static ssize_t
-lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- unsigned int cnt = count;
- uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
- u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
- /*
- * We're doing a simple sanity check for soft_wwpn setting.
- * We require that the user write a specific key to enable
- * the soft_wwpn attribute to be settable. Once the attribute
- * is written, the enable key resets. If further updates are
- * desired, the key must be written again to re-enable the
- * attribute.
- *
- * The "key" is not secret - it is a hardcoded string shown
- * here. The intent is to protect against the random user or
- * application that is just writing attributes.
- */
- if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0051 lpfc soft wwpn can not be enabled: "
- "fawwpn is enabled\n");
- return -EINVAL;
- }
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if ((cnt != strlen(lpfc_soft_wwn_key)) ||
- (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
- return -EINVAL;
-
- phba->soft_wwn_enable = 1;
-
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- "lpfc%d: soft_wwpn assignment has been enabled.\n",
- phba->brd_no);
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- " The soft_wwpn feature is not supported by Broadcom.");
-
- return count;
-}
-static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
-
-/**
- * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwpn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwpn);
-}
-
-/**
- * lpfc_soft_wwpn_store - Set the ww port name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: contains the wwpn in hexadecimal.
- * @count: number of wwpn bytes in buf
- *
- * Returns:
- * -EACCES hba reset not enabled, adapter over temp
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid
- * -EIO error taking adapter offline or online
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- struct completion online_compl;
- int stat1 = 0, stat2 = 0;
- unsigned int cnt = count;
- u8 wwpn[WWN_SZ];
- int rc;
-
- if (!phba->cfg_enable_hba_reset)
- return -EACCES;
- spin_lock_irq(&phba->hbalock);
- if (phba->over_temp_state == HBA_OVER_TEMP) {
- spin_unlock_irq(&phba->hbalock);
- return -EACCES;
- }
- spin_unlock_irq(&phba->hbalock);
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
-
- /* lock setting wwpn, wwnn down */
- phba->soft_wwn_enable = 0;
-
- rc = lpfc_wwn_set(buf, cnt, wwpn);
- if (rc) {
- /* not able to set wwpn, unlock it */
- phba->soft_wwn_enable = 1;
- return rc;
- }
-
- phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
- fc_host_port_name(shost) = phba->cfg_soft_wwpn;
- if (phba->cfg_soft_wwnn)
- fc_host_node_name(shost) = phba->cfg_soft_wwnn;
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
-
- stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- if (stat1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0463 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat1);
- init_completion(&online_compl);
- rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
- LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
-
- wait_for_completion(&online_compl);
- if (stat2)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0464 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat2);
- return (stat1 || stat2) ? -EIO : count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwpn);
-
-/**
- * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwnn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwnn);
-}
-
-/**
- * lpfc_soft_wwnn_store - sets the ww node name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: contains the ww node name in hexadecimal.
- * @count: number of wwnn bytes in buf.
- *
- * Returns:
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- unsigned int cnt = count;
- u8 wwnn[WWN_SZ];
- int rc;
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
-
- rc = lpfc_wwn_set(buf, cnt, wwnn);
- if (rc) {
- /* Allow wwnn to be set many times, as long as the enable
- * is set. However, once the wwpn is set, everything locks.
- */
- return rc;
- }
-
- phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: soft_wwnn set. Value will take effect upon "
- "setting of the soft_wwpn\n", phba->brd_no);
-
- return count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwnn);
/**
* lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
@@ -3960,8 +3745,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
- LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
"Enable FC4 Protocol support - FCP / NVME");
/*
@@ -4308,333 +4093,6 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
*/
static DEVICE_ATTR_RO(lpfc_static_vport);
-/**
- * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- * @count: Size of the data buffer.
- *
- * This function get called when a user write to the lpfc_stat_data_ctrl
- * sysfs file. This function parse the command written to the sysfs file
- * and take appropriate action. These commands are used for controlling
- * driver statistical data collection.
- * Following are the command this function handles.
- *
- * setbucket <bucket_type> <base> <step>
- * = Set the latency buckets.
- * destroybucket = destroy all the buckets.
- * start = start data collection
- * stop = stop data collection
- * reset = reset the collected data
- **/
-static ssize_t
-lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-#define LPFC_MAX_DATA_CTRL_LEN 1024
- static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
- unsigned long i;
- char *str_ptr, *token;
- struct lpfc_vport **vports;
- struct Scsi_Host *v_shost;
- char *bucket_type_str, *base_str, *step_str;
- unsigned long base, step, bucket_type;
-
- if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
- if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
- return -EINVAL;
-
- strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
- str_ptr = &bucket_data[0];
- /* Ignore this token - this is command token */
- token = strsep(&str_ptr, "\t ");
- if (!token)
- return -EINVAL;
-
- bucket_type_str = strsep(&str_ptr, "\t ");
- if (!bucket_type_str)
- return -EINVAL;
-
- if (!strncmp(bucket_type_str, "linear", strlen("linear")))
- bucket_type = LPFC_LINEAR_BUCKET;
- else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
- bucket_type = LPFC_POWER2_BUCKET;
- else
- return -EINVAL;
-
- base_str = strsep(&str_ptr, "\t ");
- if (!base_str)
- return -EINVAL;
- base = simple_strtoul(base_str, NULL, 0);
-
- step_str = strsep(&str_ptr, "\t ");
- if (!step_str)
- return -EINVAL;
- step = simple_strtoul(step_str, NULL, 0);
- if (!step)
- return -EINVAL;
-
- /* Block the data collection for every vport */
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(v_shost->host_lock);
- /* Block and reset data collection */
- vports[i]->stat_data_blocked = 1;
- if (vports[i]->stat_data_enabled)
- lpfc_vport_reset_stat_data(vports[i]);
- spin_unlock_irq(v_shost->host_lock);
- }
-
- /* Set the bucket attributes */
- phba->bucket_type = bucket_type;
- phba->bucket_base = base;
- phba->bucket_step = step;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
-
- /* Unblock data collection */
- spin_lock_irq(v_shost->host_lock);
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(v_shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
- vports[i]->stat_data_blocked = 1;
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- phba->bucket_type = LPFC_NO_BUCKET;
- phba->bucket_base = 0;
- phba->bucket_step = 0;
- return strlen(buf);
- }
-
- if (!strncmp(buf, "start", strlen("start"))) {
- /* If no buckets configured return error */
- if (phba->bucket_type == LPFC_NO_BUCKET)
- return -EINVAL;
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_alloc_bucket(vport);
- vport->stat_data_enabled = 1;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "stop", strlen("stop"))) {
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled == 0) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "reset", strlen("reset"))) {
- if ((phba->bucket_type == LPFC_NO_BUCKET)
- || !vport->stat_data_enabled)
- return strlen(buf);
- spin_lock_irq(shost->host_lock);
- vport->stat_data_blocked = 1;
- lpfc_vport_reset_stat_data(vport);
- vport->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- return -EINVAL;
-}
-
-
-/**
- * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- *
- * This function is the read call back function for
- * lpfc_stat_data_ctrl sysfs file. This function report the
- * current statistical data collection state.
- **/
-static ssize_t
-lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int index = 0;
- int i;
- char *bucket_type;
- unsigned long bucket_value;
-
- switch (phba->bucket_type) {
- case LPFC_LINEAR_BUCKET:
- bucket_type = "linear";
- break;
- case LPFC_POWER2_BUCKET:
- bucket_type = "power2";
- break;
- default:
- bucket_type = "No Bucket";
- break;
- }
-
- sprintf(&buf[index], "Statistical Data enabled :%d, "
- "blocked :%d, Bucket type :%s, Bucket base :%d,"
- " Bucket step :%d\nLatency Ranges :",
- vport->stat_data_enabled, vport->stat_data_blocked,
- bucket_type, phba->bucket_base, phba->bucket_step);
- index = strlen(buf);
- if (phba->bucket_type != LPFC_NO_BUCKET) {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- if (phba->bucket_type == LPFC_LINEAR_BUCKET)
- bucket_value = phba->bucket_base +
- phba->bucket_step * i;
- else
- bucket_value = phba->bucket_base +
- (1 << i) * phba->bucket_step;
-
- if (index + 10 > PAGE_SIZE)
- break;
- sprintf(&buf[index], "%08ld ", bucket_value);
- index = strlen(buf);
- }
- }
- sprintf(&buf[index], "\n");
- return strlen(buf);
-}
-
-/*
- * Sysfs attribute to control the statistical data collection.
- */
-static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
-
-/*
- * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
- */
-
-/*
- * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
- * for each target.
- */
-#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
-#define MAX_STAT_DATA_SIZE_PER_TARGET \
- STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
-
-
-/**
- * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
- * @filp: sysfs file
- * @kobj: Pointer to the kernel object
- * @bin_attr: Attribute object
- * @buf: Buffer pointer
- * @off: File offset
- * @count: Buffer size
- *
- * This function is the read call back function for lpfc_drvr_stat_data
- * sysfs file. This function export the statistical data to user
- * applications.
- **/
-static ssize_t
-sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *dev = container_of(kobj, struct device,
- kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int i = 0, index = 0;
- unsigned long nport_index;
- struct lpfc_nodelist *ndlp = NULL;
- nport_index = (unsigned long)off /
- MAX_STAT_DATA_SIZE_PER_TARGET;
-
- if (!vport->stat_data_enabled || vport->stat_data_blocked
- || (phba->bucket_type == LPFC_NO_BUCKET))
- return 0;
-
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!ndlp->lat_data)
- continue;
-
- if (nport_index > 0) {
- nport_index--;
- continue;
- }
-
- if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
- > count)
- break;
-
- if (!ndlp->lat_data)
- continue;
-
- /* Print the WWN */
- sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
-
- index = strlen(buf);
-
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- sprintf(&buf[index], "%010u,",
- ndlp->lat_data[i].cmd_count);
- index = strlen(buf);
- }
- sprintf(&buf[index], "\n");
- index = strlen(buf);
- }
- spin_unlock_irq(shost->host_lock);
- return index;
-}
-
-static struct bin_attribute sysfs_drvr_stat_data_attr = {
- .attr = {
- .name = "lpfc_drvr_stat_data",
- .mode = S_IRUSR,
- },
- .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
- .read = sysfs_drvr_stat_data_read,
- .write = NULL,
-};
-
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
@@ -6412,7 +5870,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_option_rom_version.attr,
&dev_attr_link_state.attr,
&dev_attr_num_discovered_ports.attr,
- &dev_attr_menlo_mgmt_mode.attr,
&dev_attr_lpfc_drvr_version.attr,
&dev_attr_lpfc_enable_fip.attr,
&dev_attr_lpfc_temp_sensor.attr,
@@ -6477,9 +5934,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_nvme_enable_fb.attr,
&dev_attr_lpfc_nvmet_fb_size.attr,
&dev_attr_lpfc_enable_bg.attr,
- &dev_attr_lpfc_soft_wwnn.attr,
- &dev_attr_lpfc_soft_wwpn.attr,
- &dev_attr_lpfc_soft_wwn_enable.attr,
&dev_attr_lpfc_enable_hba_reset.attr,
&dev_attr_lpfc_enable_hba_heartbeat.attr,
&dev_attr_lpfc_EnableXLane.attr,
@@ -6492,7 +5946,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_xlane_priority.attr,
&dev_attr_lpfc_sg_seg_cnt.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_aer_support.attr,
&dev_attr_lpfc_aer_state_cleanup.attr,
&dev_attr_lpfc_sriov_nr_virtfn.attr,
@@ -6551,7 +6004,6 @@ static struct attribute *lpfc_vport_attrs[] = {
&dev_attr_npiv_info.attr,
&dev_attr_lpfc_enable_da_id.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_static_vport.attr,
&dev_attr_cmf_info.attr,
NULL,
@@ -6764,17 +6216,14 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
- error = sysfs_create_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
-
/* Virtual ports do not need ctrl_reg and mbox */
- if (error || vport->port_type == LPFC_NPIV_PORT)
- goto out;
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return 0;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
- goto out_remove_stat_attr;
+ goto out;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
@@ -6784,9 +6233,6 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
-out_remove_stat_attr:
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
out:
return error;
}
@@ -6799,8 +6245,7 @@ void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
+
/* Virtual ports do not need ctrl_reg and mbox */
if (vport->port_type == LPFC_NPIV_PORT)
return;
@@ -7083,17 +6528,34 @@ lpfc_get_stats(struct Scsi_Host *shost)
memset(hs, 0, sizeof (struct fc_host_statistics));
hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+ hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+
/*
- * The MBX_READ_STATUS returns tx_k_bytes which has to
- * converted to words
+ * The MBX_READ_STATUS returns tx_k_bytes which has to be
+ * converted to words.
+ *
+ * Check if extended byte flag is set, to know when to collect upper
+ * bits of 64 bit wide statistics counter.
*/
- hs->tx_words = (uint64_t)
- ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
- * (uint64_t)256);
- hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
- hs->rx_words = (uint64_t)
- ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
- * (uint64_t)256);
+ if (pmb->un.varRdStatus.xkb & RD_ST_XKB) {
+ hs->tx_words = (u64)
+ ((((u64)(pmb->un.varRdStatus.xmit_xkb &
+ RD_ST_XMIT_XKB_MASK) << 32) |
+ (u64)pmb->un.varRdStatus.xmitByteCnt) *
+ (u64)256);
+ hs->rx_words = (u64)
+ ((((u64)(pmb->un.varRdStatus.rcv_xkb &
+ RD_ST_RCV_XKB_MASK) << 32) |
+ (u64)pmb->un.varRdStatus.rcvByteCnt) *
+ (u64)256);
+ } else {
+ hs->tx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+ * (uint64_t)256);
+ hs->rx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+ * (uint64_t)256);
+ }
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
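[Annotation] When the firmware sets the RD_ST_XKB flag, the kilobyte counters are 64 bits wide, split across two mailbox words; the hunk splices them together before the existing conversion to words (1 KB = 1024 bytes = 256 four-byte words, hence the * 256 in both branches). A worked example of the tx_words math, with the mask widths assumed from the RD_ST_*_XKB_MASK definitions in lpfc_hw.h, which this diff does not show:

	/* Assume xmitByteCnt = 0x11223344 (low 32 bits of the KB count)
	 * and (xmit_xkb & RD_ST_XMIT_XKB_MASK) = 0x5 (upper bits), then:
	 *
	 *   kb    = (0x5ULL << 32) | 0x11223344 = 0x511223344 KB
	 *   words = kb * 256
	 */
	u64 kb = ((u64)(pmb->un.varRdStatus.xmit_xkb &
			RD_ST_XMIT_XKB_MASK) << 32) |
		 (u64)pmb->un.varRdStatus.xmitByteCnt;
	u64 words = kb * 256;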
@@ -7574,7 +7036,6 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
case PCI_DEVICE_ID_LANCER_FCOE:
case PCI_DEVICE_ID_LANCER_FCOE_VF:
case PCI_DEVICE_ID_ZEPHYR_DCSP:
- case PCI_DEVICE_ID_HORNET:
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
@@ -7709,8 +7170,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_irq_chann = phba->cfg_hdw_queue;
- phba->cfg_soft_wwnn = 0L;
- phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_aer_support_init(phba, lpfc_aer_support);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index fdf08cb57207..852b025e2fec 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -88,17 +88,9 @@ struct lpfc_bsg_mbox {
uint32_t outExtWLen; /* from app */
};
-#define MENLO_DID 0x0000FC0E
-
-struct lpfc_bsg_menlo {
- struct lpfc_iocbq *cmdiocbq;
- struct lpfc_dmabuf *rmp;
-};
-
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
-#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
struct bsg_job *set_job; /* job waiting for this iocb to finish */
@@ -106,7 +98,6 @@ struct bsg_job_data {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
- struct lpfc_bsg_menlo menlo;
} context_un;
};
@@ -303,15 +294,14 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
struct lpfc_bsg_iocb *iocb;
unsigned long flags;
- unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -325,22 +315,24 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
- ndlp = iocb->cmdiocbq->context_un.ndlp;
+ ndlp = iocb->cmdiocbq->ndlp;
rmp = iocb->rmp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
/* Copy the completed data or set the error status */
if (job) {
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -355,10 +347,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
+ total_data_placed, 0);
}
}
@@ -388,22 +379,21 @@ static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
- uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
- IOCB_t *cmd;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
+ int request_nseg, reply_nseg;
+ u32 num_entry;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
+ u16 ulp_context;
/* in case no data is transferred */
bsg_reply->reply_payload_rcv_len = 0;
@@ -426,8 +416,6 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto free_dd;
}
- cmd = &cmdiocbq->iocb;
-
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
@@ -461,34 +449,24 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto free_cmp;
}
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = ndlp->nlp_rpi;
+ num_entry = request_nseg + reply_nseg;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- cmd->ulpOwner = OWN_CHIP;
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
+
+ lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
+ phba->fc_ratov * 2);
+
+ cmdiocbq->num_bdes = num_entry;
cmdiocbq->vport = phba->pport;
- cmdiocbq->context3 = bmp;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- timeout = phba->fc_ratov * 2;
- cmd->ulpTimeout = timeout;
+ cmdiocbq->cmd_dmabuf = cmp;
+ cmdiocbq->bpl_dmabuf = bmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+ cmdiocbq->context_un.dd_data = dd_data;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
@@ -506,8 +484,8 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
- if (!cmdiocbq->context_un.ndlp) {
+ cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->ndlp) {
rc = -ENODEV;
goto free_rmp;
}
@@ -516,9 +494,9 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
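[Annotation] The LPFC_IO_CMD_OUTSTANDING handling here (and again in the ELS path below) is the driver's "abort window": the bit is raised under hbalock only after the iocb was issued successfully and is still marked LPFC_IO_LIBDFC, so the bsg timeout handler knows the command is genuinely in flight and may abort it, while the completion handler closes the window under the same lock before touching the job. A condensed restatement of the protocol from this hunk and the completion hunk above:

	/* Issuer (after IOCB_SUCCESS): open the abort window. */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)	/* not completed yet */
		cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Completion handler: close it before releasing resources. */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);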
@@ -575,7 +553,6 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
struct fc_bsg_ctels_reply *els_reply;
@@ -583,10 +560,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
unsigned long flags;
unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
ndlp = dd_data->context_un.iocb.ndlp;
- cmdiocbq->context1 = ndlp;
+ cmdiocbq->ndlp = ndlp;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -600,11 +578,13 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
- rsp = &rspiocbq->iocb;
- pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
+ pcmd = cmdiocbq->cmd_dmabuf;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
/* Copy the completed job data or determine the job status if job is
@@ -612,24 +592,28 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
*/
if (job) {
- if (rsp->ulpStatus == IOSTAT_SUCCESS) {
- rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+ if (ulp_status == IOSTAT_SUCCESS) {
+ rsp_size = total_data_placed;
bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
- } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ } else if (ulp_status == IOSTAT_LS_RJT) {
bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
- rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+ rjt_data = (uint8_t *)&ulp_word4;
els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
els_reply->rjt_data.reason_explanation = rjt_data[1];
els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT) {
+ rc = -ETIMEDOUT;
} else {
rc = -EIO;
}
@@ -706,7 +690,6 @@ lpfc_bsg_rport_els(struct bsg_job *job)
* we won't be dma into memory that is no longer allocated to for the
* request.
*/
-
cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
@@ -717,19 +700,20 @@ lpfc_bsg_rport_els(struct bsg_job *job)
/* Transfer the request payload to allocated command dma buffer */
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
- ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+ cmdiocbq->cmd_dmabuf->virt,
cmdsize);
rpi = ndlp->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
- cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+ bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
+ phba->sli4_hba.rpi_ids[rpi]);
else
cmdiocbq->iocb.ulpContext = rpi;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context_un.ndlp = ndlp;
- cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->context_un.dd_data = dd_data;
+ cmdiocbq->ndlp = ndlp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -747,19 +731,13 @@ lpfc_bsg_rport_els(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- cmdiocbq->context1 = lpfc_nlp_get(ndlp);
- if (!cmdiocbq->context1) {
- rc = -EIO;
- goto linkdown_err;
- }
-
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -916,20 +894,21 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
+ IOCB_t *iocb = NULL;
size_t offset = 0;
struct list_head head;
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
- struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
- struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
- struct lpfc_hbq_entry *hbqe;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
struct lpfc_sli_ct_request *ct_req;
struct bsg_job *job = NULL;
struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
+ u32 bde_count = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
@@ -959,12 +938,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
- evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
+ else
+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
} else {
list_for_each_entry(iocbq, &head, list) {
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+ iocb = &iocbq->iocb;
+ for (i = 0; i < iocb->ulpBdeCount; i++)
evt_dat->len +=
- iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+ iocb->un.cont64[i].tus.f.bdeSize;
}
}
@@ -984,22 +968,21 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_for_each_entry(iocbq, &head, list) {
size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- bdeBuf1 = iocbq->context2;
- bdeBuf2 = iocbq->context3;
+ bdeBuf1 = iocbq->cmd_dmabuf;
+ bdeBuf2 = iocbq->bpl_dmabuf;
}
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocbq->wcqe_cmpl.word3;
+ else
+ bde_count = iocbq->iocb.ulpBdeCount;
+ for (i = 0; i < bde_count; i++) {
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.un.ulpWord[0];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
dmabuf = bdeBuf1;
} else if (i == 1) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.unsli3.
- sli3Words[4];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->unsol_rcv_len;
dmabuf = bdeBuf2;
}
if ((offset + size) > evt_dat->len)
@@ -1053,17 +1036,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_in_buf_free(phba,
dmabuf);
} else {
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
}
break;
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
break;
}
}
@@ -1086,14 +1069,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->ct_ctx[
evt_dat->immed_dat].SID);
phba->ct_ctx[evt_dat->immed_dat].rxid =
- piocbq->iocb.ulpContext;
+ get_job_ulpcontext(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].oxid =
- piocbq->iocb.unsli3.rcvsli3.ox_id;
+ get_job_rcvoxid(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].SID =
- piocbq->iocb.un.rcvels.remoteID;
+ bf_get(wqe_els_did,
+ &piocbq->wqe.xmit_els_rsp.wqe_dest);
phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
- evt_dat->immed_dat = piocbq->iocb.ulpContext;
+ evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
@@ -1376,13 +1360,13 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
unsigned long flags;
int rc = 0;
+ u32 ulp_status, ulp_word4;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1395,21 +1379,23 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
ndlp = dd_data->context_un.iocb.ndlp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
/* Copy the completed job data or set the error status */
if (job) {
bsg_reply = job->reply;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -1459,13 +1445,13 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
- IOCB_t *icmd;
struct lpfc_iocbq *ctiocb = NULL;
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
+ u16 ulp_context, iotag;
ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
if (!ndlp) {
@@ -1492,70 +1478,44 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
goto no_ctiocb;
}
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
- icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- /* Fill in rest of iocb */
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
- icmd->ulpContext = phba->ct_ctx[tag].rxid;
- icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
- ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
- if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "2721 ndlp null for oxid %x SID %x\n",
- icmd->ulpContext,
- phba->ct_ctx[tag].SID);
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
- /* get a refernece count so the ndlp doesn't go away while
- * we respond
- */
- if (!lpfc_nlp_get(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ phba->ct_ctx[tag].oxid, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].valid = UNSOL_INVALID;
- } else
- icmd->ulpContext = (ushort) tag;
+ iotag = get_wqe_reqtag(ctiocb);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ ctiocb->num_bdes = num_entry;
+ iotag = ctiocb->iocb.ulpIoTag;
+ }
- icmd->ulpTimeout = phba->fc_ratov * 2;
+ ulp_context = get_job_ulpcontext(phba, ctiocb);
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
- icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ ulp_context, iotag, tag, phba->link_state);
- ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+ ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
- ctiocb->context1 = dd_data;
- ctiocb->context2 = cmp;
- ctiocb->context3 = bmp;
- ctiocb->context_un.ndlp = ndlp;
- ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+ ctiocb->context_un.dd_data = dd_data;
+ ctiocb->cmd_dmabuf = cmp;
+ ctiocb->bpl_dmabuf = bmp;
+ ctiocb->ndlp = ndlp;
+ ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
@@ -1582,9 +1542,9 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -2017,8 +1977,6 @@ lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- int rc;
-
if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
@@ -2028,8 +1986,7 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
phba->vpi_ids[phba->pport->vpi]);
return -EINVAL;
}
- rc = lpfc_issue_reg_vfi(phba->pport);
- return rc;
+ return lpfc_issue_reg_vfi(phba->pport);
}
/**
@@ -2625,7 +2582,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
*
* This function obtains the transmit and receive ids required to send
* an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
- * flags are used to the unsolicted response handler is able to process
+ * flags are used so that the unsolicited response handler is able to process
* the ct command sent on the same port.
**/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
@@ -2633,7 +2590,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
{
struct lpfc_bsg_event *evt;
struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
struct lpfc_dmabuf *dmabuf;
struct ulp_bde64 *bpl = NULL;
struct lpfc_sli_ct_request *ctreq = NULL;
@@ -2641,6 +2597,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
int time_left;
int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
+ u32 status;
*txxri = 0;
*rxxri = 0;
@@ -2684,9 +2641,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
goto err_get_xri_exit;
}
- cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
-
memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
@@ -2696,36 +2650,24 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;
-
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
-
- cmd->un.xseq64.w5.hcsw.Fctl = LA;
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = rpi;
-
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->bpl_dmabuf = dmabuf;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+ cmdiocbq->cmd_cmpl = NULL;
+
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
+ FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
- rspiocbq,
- (phba->fc_ratov * 2)
- + LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
+ rspiocbq, (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+
+ status = get_job_ulpstatus(phba, rspiocbq);
+ if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
ret_val = -EIO;
goto err_get_xri_exit;
}
- *txxri = rsp->ulpContext;
+ *txxri = get_job_ulpcontext(phba, rspiocbq);
evt->waiting = 1;
evt->wait_time_stamp = jiffies;
@@ -2926,16 +2868,16 @@ out:
}
/**
- * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
* @phba: Pointer to HBA context object
* @rxxri: Receive exchange id
* @len: Number of data bytes
*
* This function allocates and posts a data buffer of sufficient size to receive
- * an unsolicted CT command.
+ * an unsolicited CT command.
**/
-static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
- size_t len)
+static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+ size_t len)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
@@ -2972,7 +2914,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
/* Queue buffers for the receive exchange */
num_bde = (uint32_t)rxbuffer->flag;
dmp = &rxbuffer->dma;
-
cmd = &cmdiocbq->iocb;
i = 0;
@@ -3040,7 +2981,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
ret_val = -EIO;
goto err_post_rxbufs_exit;
}
-
cmd = &cmdiocbq->iocb;
i = 0;
}
@@ -3092,7 +3032,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
- IOCB_t *cmd, *rsp = NULL;
+ union lpfc_wqe128 *cmdwqe, *rspwqe;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
@@ -3185,7 +3125,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
goto loopback_test_exit;
}
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
if (rc) {
lpfcdiag_loop_self_unreg(phba, rpi);
goto loopback_test_exit;
@@ -3228,9 +3168,12 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
goto err_loopback_test_exit;
}
- cmd = &cmdiocbq->iocb;
- if (phba->sli_rev < LPFC_SLI_REV4)
- rsp = &rspiocbq->iocb;
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(union lpfc_wqe));
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rspwqe = &rspiocbq->wqe;
+ memset(rspwqe, 0, sizeof(union lpfc_wqe));
+ }
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
@@ -3262,41 +3205,32 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
/* Build the XMIT_SEQUENCE iocb */
num_bde = (uint32_t)txbuffer->flag;
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
-
- cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
+ cmdiocbq->num_bdes = num_bde;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->cmd_cmpl = NULL;
+ cmdiocbq->bpl_dmabuf = txbmp;
if (phba->sli_rev < LPFC_SLI_REV4) {
- cmd->ulpContext = txxri;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
+ num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+
} else {
- cmd->un.xseq64.bdl.ulpIoTag32 = 0;
- cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
- cmdiocbq->context3 = txbmp;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
+ phba->sli4_hba.rpi_ids[rpi], 0xffff,
+ full_size, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
cmdiocbq->sli4_xritag = NO_XRI;
- cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
- cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
-
- if ((iocb_stat != IOCB_SUCCESS) ||
- ((phba->sli_rev < LPFC_SLI_REV4) &&
- (rsp->ulpStatus != IOSTAT_SUCCESS))) {
+ if (iocb_stat != IOCB_SUCCESS ||
+ (phba->sli_rev < LPFC_SLI_REV4 &&
+ (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
@@ -3429,7 +3363,7 @@ job_error:
* This is completion handler function for mailbox commands issued from
* lpfc_bsg_issue_mbox function. This function is called by the
* mailbox event handler function with no lock held. This function
- * will wake up thread waiting on the wait queue pointed by context1
+ * will wake up the thread waiting on the wait queue pointed to by dd_data
* of the mailbox.
**/
static void
@@ -3556,15 +3490,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
- if ((mb->un.varWords[0] == SETVAR_MLOMNT)
- && (mb->un.varWords[1] == 1)) {
- phba->wait_4_mlo_maint_flg = 1;
- } else if (mb->un.varWords[0] == SETVAR_MLORST) {
- spin_lock_irq(&phba->hbalock);
- phba->link_flag &= ~LS_LOOPBACK_MODE;
- spin_unlock_irq(&phba->hbalock);
- phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
- }
break;
case MBX_READ_SPARM64:
case MBX_REG_LOGIN:
@@ -5046,283 +4971,6 @@ lpfc_bsg_mbox_cmd(struct bsg_job *job)
return rc;
}
-/**
- * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
- * @phba: Pointer to HBA context object.
- * @cmdiocbq: Pointer to command iocb.
- * @rspiocbq: Pointer to response iocb.
- *
- * This function is the completion handler for iocbs issued using
- * lpfc_menlo_cmd function. This function is called by the
- * ring event handler function without any lock held. This function
- * can be called from both worker thread context and interrupt
- * context. This function also can be called from another thread which
- * cleans up the SLI layer objects.
- * This function copies the contents of the response iocb to the
- * response iocb memory object provided by the caller of
- * lpfc_sli_issue_iocb_wait and then wakes up the thread which
- * sleeps for the iocb completion.
- **/
-static void
-lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdiocbq,
- struct lpfc_iocbq *rspiocbq)
-{
- struct bsg_job_data *dd_data;
- struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
- struct lpfc_dmabuf *bmp, *cmp, *rmp;
- struct lpfc_bsg_menlo *menlo;
- unsigned long flags;
- struct menlo_response *menlo_resp;
- unsigned int rsp_size;
- int rc = 0;
-
- dd_data = cmdiocbq->context1;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- menlo = &dd_data->context_un.menlo;
- rmp = menlo->rmp;
- rsp = &rspiocbq->iocb;
-
- /* Determine if job has been aborted */
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
- job = dd_data->set_job;
- if (job) {
- bsg_reply = job->reply;
- /* Prevent timeout handling from trying to abort job */
- job->dd_data = NULL;
- }
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- /* Copy the job data or set the failing status for the job */
-
- if (job) {
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a
- * continuation of the exchange.
- */
-
- menlo_resp = (struct menlo_response *)
- bsg_reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- } else {
- rc = -EACCES;
- }
- } else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- bsg_reply->reply_payload_rcv_len =
- lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
- }
-
- }
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- lpfc_free_bsg_buffers(phba, cmp);
- lpfc_free_bsg_buffers(phba, rmp);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
- kfree(dd_data);
-
- /* Complete the job if active */
-
- if (job) {
- bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
- }
-
- return;
-}
-
-/**
- * lpfc_menlo_cmd - send an ioctl for menlo hardware
- * @job: fc_bsg_job to handle
- *
- * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
- * all the command completions will return the xri for the command.
- * For menlo data requests a gen request 64 CX is used to continue the exchange
- * supplied in the menlo request header xri field.
- **/
-static int
-lpfc_menlo_cmd(struct bsg_job *job)
-{
- struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct fc_bsg_request *bsg_request = job->request;
- struct fc_bsg_reply *bsg_reply = job->reply;
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq;
- IOCB_t *cmd;
- int rc = 0;
- struct menlo_command *menlo_cmd;
- struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
- struct bsg_job_data *dd_data;
- struct ulp_bde64 *bpl = NULL;
-
- /* in case no data is returned return just the return code */
- bsg_reply->reply_payload_rcv_len = 0;
-
- if (job->request_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct menlo_command)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2784 Received MENLO_CMD request below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (job->reply_len < sizeof(*bsg_reply) +
- sizeof(struct menlo_response)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2785 Received MENLO_CMD reply below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2786 Adapter does not support menlo "
- "commands\n");
- rc = -EPERM;
- goto no_dd_data;
- }
-
- menlo_cmd = (struct menlo_command *)
- bsg_request->rqst_data.h_vendor.vendor_cmd;
-
- /* allocate our bsg tracking structure */
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2787 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto no_dd_data;
- }
-
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- goto free_dd;
- }
-
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
- rc = -ENOMEM;
- goto free_bmp;
- }
-
- INIT_LIST_HEAD(&bmp->list);
-
- bpl = (struct ulp_bde64 *)bmp->virt;
- request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
- cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
- 1, bpl, &request_nseg);
- if (!cmp) {
- rc = -ENOMEM;
- goto free_bmp;
- }
- lpfc_bsg_copy_data(cmp, &job->request_payload,
- job->request_payload.payload_len, 1);
-
- bpl += request_nseg;
- reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
- rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
- bpl, &reply_nseg);
- if (!rmp) {
- rc = -ENOMEM;
- goto free_cmp;
- }
-
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
- rc = -ENOMEM;
- goto free_rmp;
- }
-
- cmd = &cmdiocbq->iocb;
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
- cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
- cmd->ulpBdeCount = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpOwner = OWN_CHIP;
- cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
- /* We want the firmware to timeout before we do */
- cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
- if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->ulpPU = MENLO_PU; /* 3 */
- cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
- cmd->ulpContext = MENLO_CONTEXT; /* 0 */
- } else {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
- cmd->ulpPU = 1;
- cmd->un.ulpWord[4] = 0;
- cmd->ulpContext = menlo_cmd->xri;
- }
-
- dd_data->type = TYPE_MENLO;
- dd_data->set_job = job;
- dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rmp = rmp;
- job->dd_data = dd_data;
-
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
- MENLO_TIMEOUT - 5);
- if (rc == IOCB_SUCCESS)
- return 0; /* done for now */
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
-
-free_rmp:
- lpfc_free_bsg_buffers(phba, rmp);
-free_cmp:
- lpfc_free_bsg_buffers(phba, cmp);
-free_bmp:
- if (bmp->virt)
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
-free_dd:
- kfree(dd_data);
-no_dd_data:
- /* make error code available to userspace */
- bsg_reply->result = rc;
- job->dd_data = NULL;
- return rc;
-}
-
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
@@ -5877,10 +5525,6 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
- case LPFC_BSG_VENDOR_MENLO_CMD:
- case LPFC_BSG_VENDOR_MENLO_DATA:
- rc = lpfc_menlo_cmd(job);
- break;
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
@@ -6001,7 +5645,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O abort window is still open */
- if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return -EAGAIN;
}
@@ -6033,31 +5677,6 @@ lpfc_bsg_timeout(struct bsg_job *job)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
- case TYPE_MENLO:
- /* Check to see if IOCB was issued to the port or not. If not,
- * remove it from the txq queue and call cancel iocbs.
- * Otherwise, call abort iotag.
- */
- cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
- list) {
- if (check_iocb == cmdiocb) {
- list_move_tail(&check_iocb->list, &completions);
- break;
- }
- }
- if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- if (!list_empty(&completions)) {
- lpfc_sli_cancel_iocbs(phba, &completions,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
- }
- break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 749d6c43cfce..3c04ca2d7455 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -33,8 +33,6 @@
#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
@@ -131,16 +129,6 @@ struct dfc_mbox_req {
uint32_t extSeqNum;
};
-/* Used for menlo command or menlo data. The xri is only used for menlo data */
-struct menlo_command {
- uint32_t cmd;
- uint32_t xri;
-};
-
-struct menlo_response {
- uint32_t xri; /* return the xri of the iocb exchange */
-};
-
/*
* macros and data structures for handling sli-config mailbox command
* pass-through support, this header file is shared between user and
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 89e36bf14d8f..d2d207791056 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -32,7 +32,9 @@ int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-
+int lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox);
+void lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+ enum lpfc_mbox_ctx locked);
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -76,6 +78,7 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, int count, int idx);
+int lpfc_read_lds_params(struct lpfc_hba *phba);
uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
void lpfc_cmf_signal_init(struct lpfc_hba *phba);
void lpfc_cmf_start(struct lpfc_hba *phba);
@@ -90,6 +93,14 @@ void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
void lpfc_unblock_requests(struct lpfc_hba *phba);
void lpfc_block_requests(struct lpfc_hba *phba);
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries);
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor);
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry);
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries);
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
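The rx_monitor prototypes above arrive without callers in this hunk. A hypothetical round trip might look like the following; only the four prototypes come from the patch, while the ring size, buffer, zero-filled sample, and the 0-on-success assumption for create_ring are ours:

	static int example_rx_monitor_roundtrip(struct lpfc_hba *phba,
						struct lpfc_rx_info_monitor *mon)
	{
		struct rx_info_entry entry = { 0 };
		char buf[512];

		/* assumed: returns 0 on success, nonzero on allocation failure */
		if (lpfc_rx_monitor_create_ring(mon, 256))	/* 256 entries: assumed */
			return -ENOMEM;

		lpfc_rx_monitor_record(mon, &entry);		/* store one sample */

		/* format up to 32 entries into buf for debugfs-style reporting */
		lpfc_rx_monitor_report(phba, mon, buf, sizeof(buf), 32);

		lpfc_rx_monitor_destroy_ring(mon);
		return 0;
	}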
@@ -129,6 +140,7 @@ void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_cleanup(struct lpfc_vport *);
+void lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd);
void lpfc_disc_timeout(struct timer_list *);
int lpfc_unregister_fcf_prep(struct lpfc_hba *);
@@ -190,6 +202,7 @@ void lpfc_els_timeout_handler(struct lpfc_vport *);
struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
uint8_t, struct lpfc_nodelist *,
uint32_t, uint32_t);
+void lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job);
void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -211,7 +224,7 @@ int lpfc_sli4_refresh_params(struct lpfc_hba *phba);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+int lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -351,6 +364,22 @@ int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe);
int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb, void *cmpl);
+void lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp);
+void lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry,
+ u8 tmo);
+void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq,
+ u8 cr_cx_cmd);
+void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia, bool wqec);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
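The lpfc_sli_prep_* helpers declared above absorb the SLI-3/SLI-4 split that the bsg and ct paths used to open-code field by field. The calling convention, taken from the converted lpfc_ct_reject_event site later in this patch (caller locals phba, cmdiocbq, bmp, ndlp, ox_id, and ulp_context in scope):

	if (phba->sli_rev == LPFC_SLI_REV4)
		/* SLI-4: address by RPI, carry the received OX_ID, build a WQE */
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);
	else
		/* SLI-3: address by exchange context, build a legacy IOCB */
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);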
@@ -400,8 +429,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
-void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
- struct lpfc_wcqe_complete *w);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -414,6 +441,7 @@ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+void lpfc_setup_fdmi_mask(struct lpfc_vport *vport);
int lpfc_link_reset(struct lpfc_vport *vport);
/* Function prototypes. */
@@ -435,6 +463,7 @@ extern const struct attribute_group *lpfc_hba_groups[];
extern const struct attribute_group *lpfc_vport_groups[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_nvme;
+extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
@@ -609,7 +638,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl);
+ struct lpfc_iocbq *rspiocb);
void lpfc_create_multixri_pools(struct lpfc_hba *phba);
void lpfc_create_destroy_pools(struct lpfc_hba *phba);
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
@@ -650,5 +679,11 @@ int lpfc_vmid_cmd(struct lpfc_vport *vport,
int lpfc_vmid_hash_fn(const char *vmid, int len);
struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
uint32_t hash, uint8_t *buf);
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index dfcb7d4bd7fa..e941a99aa965 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -87,12 +87,12 @@ lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0146 Ignoring unsolicited CT No HBQ "
"status = x%x\n",
- piocbq->iocb.ulpStatus);
+ get_job_ulpstatus(phba, piocbq));
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0145 Ignoring unsolicted CT HBQ Size:%d "
+ "0145 Ignoring unsolicited CT HBQ Size:%d "
"status = x%x\n",
- size, piocbq->iocb.ulpStatus);
+ size, get_job_ulpstatus(phba, piocbq));
}
static void
@@ -118,22 +118,22 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *mp, *bmp;
- ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ ndlp = cmdiocb->ndlp;
if (ndlp)
lpfc_nlp_put(ndlp);
- mp = cmdiocb->context2;
- bmp = cmdiocb->context3;
+ mp = cmdiocb->rsp_dmabuf;
+ bmp = cmdiocb->bpl_dmabuf;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- cmdiocb->context2 = NULL;
+ cmdiocb->rsp_dmabuf = NULL;
}
if (bmp) {
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
- cmdiocb->context3 = NULL;
+ cmdiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -143,7 +143,7 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
* @ndlp: pointer to a node-list data structure.
* @ct_req: pointer to the CT request data structure.
- * @rx_id: rx_id of the received UNSOL CT command
+ * @ulp_context: context of received UNSOL CT command
* @ox_id: ox_id of the UNSOL CT command
*
* This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
@@ -152,7 +152,7 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
static void
lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
struct lpfc_sli_ct_request *ct_req,
- u16 rx_id, u16 ox_id)
+ u16 ulp_context, u16 ox_id)
{
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
@@ -161,8 +161,8 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
struct lpfc_dmabuf *bmp = NULL;
struct lpfc_dmabuf *mp = NULL;
struct ulp_bde64 *bpl;
- IOCB_t *icmd;
u8 rc = 0;
+ u32 tmo;
/* fill in BDEs for command */
mp = kmalloc(sizeof(*mp), GFP_KERNEL);
@@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
memset(bpl, 0, sizeof(struct ulp_bde64));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
bpl->tus.w = le32_to_cpu(bpl->tus.w);
@@ -220,43 +220,40 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
goto ct_free_bmpvirt;
}
- icmd = &cmdiocbq->iocb;
- icmd->un.genreq64.bdl.ulpIoTag32 = 0;
- icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.genreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.genreq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.genreq64.w5.hcsw.Dfctl = 0;
- icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ }
/* Save for completion so we can release these resources */
- cmdiocbq->context1 = lpfc_nlp_get(ndlp);
- cmdiocbq->context2 = (uint8_t *)mp;
- cmdiocbq->context3 = (uint8_t *)bmp;
- cmdiocbq->iocb_cmpl = lpfc_ct_unsol_cmpl;
- icmd->ulpContext = rx_id; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = ox_id;
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- icmd->ulpTimeout = (3 * phba->fc_ratov);
+ cmdiocbq->rsp_dmabuf = mp;
+ cmdiocbq->bpl_dmabuf = bmp;
+ cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
+ tmo = (3 * phba->fc_ratov);
cmdiocbq->retry = 0;
cmdiocbq->vport = vport;
- cmdiocbq->context_un.ndlp = NULL;
- cmdiocbq->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
+
+ cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->ndlp)
+ goto ct_no_ndlp;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (!rc)
- return;
+ if (rc) {
+ lpfc_nlp_put(ndlp);
+ goto ct_no_ndlp;
+ }
+ return;
+ct_no_ndlp:
rc = 6;
- lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, cmdiocbq);
ct_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -286,25 +283,17 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
{
struct lpfc_sli_ct_request *ct_req;
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_vport *vport = NULL;
- IOCB_t *icmd = &ctiocbq->iocb;
- u32 mi_cmd, vpi;
- u32 did = 0;
-
- vpi = ctiocbq->iocb.unsli3.rcvsli3.vpi;
- vport = lpfc_find_vport_by_vpid(phba, vpi);
- if (!vport) {
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "6437 Unsol CT: VPORT NULL vpi : x%x\n",
- vpi);
- return;
- }
-
- did = ctiocbq->iocb.un.rcvels.remoteID;
- if (icmd->ulpStatus) {
+ struct lpfc_vport *vport = ctiocbq->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
+ u32 ulp_word4 = get_job_word4(phba, ctiocbq);
+ u32 did;
+ u32 mi_cmd;
+
+ did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6438 Unsol CT: status:x%x/x%x did : x%x\n",
- icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
return;
}
@@ -320,15 +309,16 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
return;
}
- ct_req = ((struct lpfc_sli_ct_request *)
- (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+ ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
lpfc_ct_reject_event(ndlp, ct_req,
- ctiocbq->iocb.ulpContext,
- ctiocbq->iocb.unsli3.rcvsli3.ox_id);
+ bf_get(wqe_ctxt_tag,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com),
+ bf_get(wqe_rcvoxid,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com));
}
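On SLI-4 the receive metadata now comes out of the WQE image with bf_get() instead of the legacy IOCB words. Condensed from the calls above (variable names and widths are ours):

	u16 rx_id = bf_get(wqe_ctxt_tag, &ctiocbq->wqe.xmit_els_rsp.wqe_com);
	u16 ox_id = bf_get(wqe_rcvoxid, &ctiocbq->wqe.xmit_els_rsp.wqe_com);
	u32 sid   = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);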
/**
@@ -351,55 +341,51 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *icmd = &ctiocbq->iocb;
int i;
struct lpfc_iocbq *iocbq;
+ struct lpfc_iocbq *iocb;
dma_addr_t dma_addr;
uint32_t size;
struct list_head head;
struct lpfc_sli_ct_request *ct_req;
- struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2;
- struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3;
-
- ctiocbq->context1 = NULL;
- ctiocbq->context2 = NULL;
- ctiocbq->context3 = NULL;
+ struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
+ u32 status, parameter, bde_count = 0;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
+
+ ctiocbq->cmd_dmabuf = NULL;
+ ctiocbq->rsp_dmabuf = NULL;
+ ctiocbq->bpl_dmabuf = NULL;
+
+ wcqe_cmpl = &ctiocbq->wcqe_cmpl;
+ status = get_job_ulpstatus(phba, ctiocbq);
+ parameter = get_job_word4(phba, ctiocbq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = icmd->ulpBdeCount;
- if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+ if (unlikely(status == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if ((status == IOSTAT_LOCAL_REJECT) &&
+ ((parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 2);
+ lpfc_sli3_post_buffer(phba, pring, 2);
return;
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- ctiocbq->context2 = bdeBuf1;
- if (icmd->ulpBdeCount == 2)
- ctiocbq->context3 = bdeBuf2;
- } else {
- dma_addr = getPaddr(icmd->un.cont64[0].addrHigh,
- icmd->un.cont64[0].addrLow);
- ctiocbq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
- dma_addr);
- if (icmd->ulpBdeCount == 2) {
- dma_addr = getPaddr(icmd->un.cont64[1].addrHigh,
- icmd->un.cont64[1].addrLow);
- ctiocbq->context3 = lpfc_sli_ringpostbuf_get(phba,
- pring,
- dma_addr);
- }
- }
+ ctiocbq->cmd_dmabuf = bdeBuf1;
+ if (bde_count == 2)
+ ctiocbq->bpl_dmabuf = bdeBuf2;
- ct_req = ((struct lpfc_sli_ct_request *)
- (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+ ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
@@ -412,19 +398,29 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
INIT_LIST_HEAD(&head);
list_add_tail(&head, &ctiocbq->list);
- list_for_each_entry(iocbq, &head, list) {
- icmd = &iocbq->iocb;
- if (icmd->ulpBdeCount == 0)
+ list_for_each_entry(iocb, &head, list) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocb->wcqe_cmpl.word3;
+ else
+ bde_count = iocb->iocb.ulpBdeCount;
+
+ if (!bde_count)
continue;
- bdeBuf1 = iocbq->context2;
- iocbq->context2 = NULL;
- size = icmd->un.cont64[0].tus.f.bdeSize;
+ bdeBuf1 = iocb->cmd_dmabuf;
+ iocb->cmd_dmabuf = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
+ else
+ size = iocb->iocb.un.cont64[0].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
lpfc_in_buf_free(phba, bdeBuf1);
- if (icmd->ulpBdeCount == 2) {
- bdeBuf2 = iocbq->context3;
- iocbq->context3 = NULL;
- size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
+ if (bde_count == 2) {
+ bdeBuf2 = iocb->bpl_dmabuf;
+ iocb->bpl_dmabuf = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->unsol_rcv_len;
+ else
+ size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
size);
lpfc_in_buf_free(phba, bdeBuf2);
@@ -447,7 +443,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
- lpfc_post_buffer(phba, pring, i);
+ lpfc_sli3_post_buffer(phba, pring, i);
}
list_del(&head);
}
@@ -551,24 +547,25 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
struct lpfc_dmabuf *buf_ptr;
- /* I/O job is complete so context is now invalid*/
- ctiocb->context_un.ndlp = NULL;
- if (ctiocb->context1) {
- buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+ /* IOCBQ job structure gets cleaned during release. Just release
+ * the dma buffers here.
+ */
+ if (ctiocb->cmd_dmabuf) {
+ buf_ptr = ctiocb->cmd_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context1 = NULL;
+ ctiocb->cmd_dmabuf = NULL;
}
- if (ctiocb->context2) {
- lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
- ctiocb->context2 = NULL;
+ if (ctiocb->rsp_dmabuf) {
+ lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
+ ctiocb->rsp_dmabuf = NULL;
}
- if (ctiocb->context3) {
- buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+ if (ctiocb->bpl_dmabuf) {
+ buf_ptr = ctiocb->bpl_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context3 = NULL;
+ ctiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, ctiocb);
return 0;
@@ -588,15 +585,15 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
static int
lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
- void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *),
+ void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
int rc;
+ u16 ulp_context;
/* Allocate buffer for command iocb */
geniocb = lpfc_sli_get_iocbq(phba);
@@ -604,65 +601,45 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
if (geniocb == NULL)
return 1;
- icmd = &geniocb->iocb;
- icmd->un.genreq64.bdl.ulpIoTag32 = 0;
- icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ /* Update the num_entry bde count */
+ geniocb->num_bdes = num_entry;
- geniocb->context3 = (uint8_t *) bmp;
+ geniocb->bpl_dmabuf = bmp;
/* Save for completion so we can release these resources */
- geniocb->context1 = (uint8_t *) inp;
- geniocb->context2 = (uint8_t *) outp;
+ geniocb->cmd_dmabuf = inp;
+ geniocb->rsp_dmabuf = outp;
geniocb->event_tag = event_tag;
- /* Fill in payload, bp points to frame payload */
- icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
-
- /* Fill in rest of iocb */
- icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- icmd->un.genreq64.w5.hcsw.Dfctl = 0;
- icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
-
if (!tmo) {
/* FC spec states we need 3 * ratov for CT requests */
tmo = (3 * phba->fc_ratov);
}
- icmd->ulpTimeout = tmo;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
- /* For GEN_REQUEST64_CR, use the RPI */
- icmd->ulpCt_h = 0;
- icmd->ulpCt_l = 0;
- }
+ lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0119 Issue GEN REQ IOCB to NPORT x%x "
"Data: x%x x%x\n",
- ndlp->nlp_DID, icmd->ulpIoTag,
+ ndlp->nlp_DID, geniocb->iotag,
vport->port_state);
- geniocb->iocb_cmpl = cmpl;
- geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ geniocb->cmd_cmpl = cmpl;
+ geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
- geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
- if (!geniocb->context_un.ndlp)
+ geniocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!geniocb->ndlp)
goto out;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
- geniocb->context_un.ndlp = NULL;
lpfc_nlp_put(ndlp);
goto out;
}
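The issue path above also tightens the node reference discipline: take the reference before issuing, and drop it locally only when the issue fails, since on success the completion handler owns the put. Condensed from lpfc_gen_req:

	geniocb->ndlp = lpfc_nlp_get(ndlp);
	if (!geniocb->ndlp)
		goto out;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_nlp_put(ndlp);	/* completion will not run; undo the get */
		goto out;
	}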
@@ -938,26 +915,26 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc, type;
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ cmdiocb->rsp_iocb = rspiocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_FT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+ ulp_status, ulp_word4, vport->fc_ns_retry);
/* Ignore response if link flipped after this request was made */
if (cmdiocb->event_tag != phba->fc_eventTag) {
@@ -981,11 +958,17 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0226 NS query failed due to link event\n");
+ "0226 NS query failed due to link event: "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "port_state x%x gidft_inp x%x\n",
+ ulp_status, ulp_word4, vport->fc_flag,
+ vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
goto out;
}
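Note the lpfc_error_lost_link() signature change implied above: callers now hand over the extracted status pair instead of a raw IOCB pointer. Roughly the before/after shape, with the return type assumed; the real declaration lives in the lpfc headers:

	/* before: tied to the SLI-3 IOCB layout */
	int lpfc_error_lost_link(IOCB_t *irsp);

	/* after: SLI-revision neutral */
	int lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4);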
@@ -1013,11 +996,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
@@ -1040,7 +1023,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0257 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *) inp->virt;
@@ -1054,12 +1037,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
@@ -1154,26 +1137,26 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc;
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *)cmdiocb->context1;
- outp = (struct lpfc_dmabuf *)cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ cmdiocb->rsp_iocb = rspiocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_PT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_ns_retry);
/* Ignore response if link flipped after this request was made */
@@ -1198,11 +1181,17 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4166 NS query failed due to link event\n");
+ "4166 NS query failed due to link event: "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "port_state x%x gidft_inp x%x\n",
+ ulp_status, ulp_word4, vport->fc_flag,
+ vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
goto out;
}
@@ -1230,11 +1219,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
@@ -1253,7 +1242,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4103 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *)inp->virt;
@@ -1267,12 +1256,12 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t)(irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
@@ -1367,20 +1356,21 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did, rc, retry;
uint8_t fbits;
struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFF_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
if (cmdiocb->event_tag != phba->fc_eventTag) {
@@ -1389,7 +1379,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto iocb_free;
}
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
@@ -1419,8 +1409,8 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
retry = 1;
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] &
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 &
IOERR_PARAM_MASK)) {
case IOERR_NO_RESOURCES:
@@ -1446,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->retry, did);
if (rc == 0) {
/* success */
- free_ndlp = cmdiocb->context_un.ndlp;
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
return;
@@ -1456,7 +1446,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
- did, irsp->ulpStatus, irsp->un.ulpWord[4],
+ did, ulp_status, ulp_word4,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
@@ -1503,7 +1493,7 @@ out:
}
iocb_free:
- free_ndlp = cmdiocb->context_un.ndlp;
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
return;
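Throughout these hunks, direct IOCB_t reads (irsp->ulpStatus, irsp->un.ulpWord[4]) give way to SLI-revision-neutral accessors. A minimal sketch of what such an accessor can look like, assuming the SLI4 completion is carried in the iocbq's wcqe_cmpl and the SLI3 one in the legacy iocb; the field and macro names follow the lpfc headers but are shown here only as an illustration, not as the patch's literal definition:

    /* Sketch only: return the ULP status regardless of SLI revision. */
    static inline u32 get_job_ulpstatus(struct lpfc_hba *phba,
                                        struct lpfc_iocbq *iocbq)
    {
            if (phba->sli_rev == LPFC_SLI_REV4)
                    return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
            return iocbq->iocb.ulpStatus;
    }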
@@ -1511,36 +1501,34 @@ iocb_free:
static void
lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
- struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did;
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_nodelist *ns_ndlp = NULL;
+ struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
uint32_t fc4_data_0, fc4_data_1;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
- if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) {
+ if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"9046 Event tag mismatch. Ignoring NS rsp\n");
goto out;
}
- /* Preserve the nameserver node to release the reference. */
- ns_ndlp = cmdiocb->context_un.ndlp;
-
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
@@ -1601,7 +1589,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+ "3065 GFT_ID failed x%08x\n", ulp_status);
out:
lpfc_ct_free_iocb(phba, cmdiocb);
@@ -1615,22 +1603,22 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
- IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
int cmdcode, rc;
uint8_t retry;
uint32_t latt;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
CommandResponse.bits.CmdRsp);
@@ -1638,28 +1626,28 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
latt = lpfc_els_chk_latt(vport);
- /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+ /* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0209 CT Request completes, latt %d, "
- "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
- latt, irsp->ulpStatus,
+ "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+ latt, ulp_status,
CTrsp->CommandResponse.bits.CmdRsp,
- cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
+ get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"CT cmd cmpl: status:x%x/x%x cmd:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+ ulp_status, ulp_word4, cmdcode);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0268 NS cmd x%x Error (x%x x%x)\n",
- cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
+ cmdcode, ulp_status, ulp_word4);
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_DOWN) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_ABORTED)))
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_DOWN) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED)))
goto out;
retry = cmdiocb->retry;
@@ -1684,15 +1672,15 @@ static void
lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFT_ID;
@@ -1705,14 +1693,14 @@ static void
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1726,15 +1714,15 @@ static void
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RSPN_ID;
@@ -1747,14 +1735,14 @@ static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1780,15 +1768,15 @@ static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFF_ID;
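Each of the five registration completions above (RFT_ID, RNN_ID, RSPN_ID, RSNN_NN, RFF_ID) records success as one bit in vport->ct_flags. A hedged sketch of a combined mask makes the accounting explicit; the mask itself is hypothetical and not part of the patch, though the individual FC_CT_* bits are real:

    /* Hypothetical convenience mask over the real FC_CT_* bits. */
    #define FC_CT_NS_ALL_REG (FC_CT_RFT_ID | FC_CT_RNN_ID | FC_CT_RSPN_ID | \
                              FC_CT_RSNN_NN | FC_CT_RFF_ID)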
@@ -1884,7 +1872,7 @@ lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
struct lpfc_dmabuf *mp;
uint32_t type;
- mp = cmdiocb->context1;
+ mp = cmdiocb->cmd_dmabuf;
if (mp == NULL)
return 0;
CtReq = (struct lpfc_sli_ct_request *)mp->virt;
@@ -2037,28 +2025,30 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_RFT_ID);
- CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
+ CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);
+
+ /* Register Application Services type if vmid enabled. */
+ if (phba->cfg_vmid_app_header)
+ CtReq->un.rft.app_serv_reg =
+ cpu_to_be32(RFT_APP_SERV_REG);
/* Register FC4 FCP type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
- CtReq->un.rft.fcpReg = 1;
+ CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
- /* Register NVME type if enabled. Defined LE and swapped.
- * rsvd[0] is used as word1 because of the hard-coded
- * word0 usage in the ct_request data structure.
- */
+ /* Register NVME type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
- CtReq->un.rft.rsvd[0] =
- cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
+ CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
ptr = (uint32_t *)CtReq;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6433 Issue RFT (%s %s): %08x %08x %08x %08x "
- "%08x %08x %08x %08x\n",
- CtReq->un.rft.fcpReg ? "FCP" : " ",
- CtReq->un.rft.rsvd[0] ? "NVME" : " ",
+ "6433 Issue RFT (%s %s %s): %08x %08x %08x "
+ "%08x %08x %08x %08x %08x\n",
+ CtReq->un.rft.fcp_reg ? "FCP" : " ",
+ CtReq->un.rft.nvme_reg ? "NVME" : " ",
+ CtReq->un.rft.app_serv_reg ? "APPS" : " ",
*ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
*(ptr + 4), *(ptr + 5),
*(ptr + 6), *(ptr + 7));
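The renamed rft fields (fcp_reg, nvme_reg, app_serv_reg) are individual big-endian words of the standard FC-GS FC-4 Types bitmap that follows the port ID in an RFT_ID payload. As a sketch of the general rule behind those RFT_*_REG constants, FC-4 type t sets bit t % 32 of word t / 32, so FCP (type 0x08) lands in word 0 and NVMe (type 0x28) in word 1; rft_set_fc4_type below is a hypothetical helper, not part of the patch:

    /* Hypothetical helper: set FC-4 type 't' in the 8-word RFT_ID bitmap. */
    static inline void rft_set_fc4_type(__be32 *bitmap, u8 t)
    {
            u32 w = be32_to_cpu(bitmap[t >> 5]);

            w |= 1u << (t & 0x1f);
            bitmap[t >> 5] = cpu_to_be32(w);
    }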
@@ -2175,6 +2165,41 @@ ns_cmd_exit:
}
/**
+ * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
+ * @phba: Pointer to HBA context object.
+ * @mask: Initial port attributes mask
+ *
+ * This function checks whether any vports have deferred their FDMI RPRT.
+ * A vport's RPRT may be deferred if it is issued before the primary
+ * port's RHBA completes.
+ */
+static void
+lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ int i;
+
+ phba->hba_flag |= HBA_RHBA_CMPL;
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ vport = vports[i];
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ continue;
+ if (vport->ct_flags & FC_CT_RPRT_DEFER) {
+ vport->ct_flags &= ~FC_CT_RPRT_DEFER;
+ vport->fdmi_port_mask = mask;
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
* lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to the command IOCBQ.
@@ -2188,26 +2213,27 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct lpfc_dmabuf *inp = cmdiocb->context1;
- struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTcmd = inp->virt;
struct lpfc_sli_ct_request *CTrsp = outp->virt;
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
uint32_t latt, cmd, err;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
latt = lpfc_els_chk_latt(vport);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"FDMI cmpl: status:x%x/x%x latt:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+ ulp_status, ulp_word4, latt);
- if (latt || irsp->ulpStatus) {
+ if (latt || ulp_status) {
/* Look for a retryable error */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
case IOERR_SLI_DOWN:
/* Driver aborted this IO. No retry as error
@@ -2237,12 +2263,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0229 FDMI cmd %04x failed, latt = %d "
- "ulpStatus: x%x, rid x%x\n",
- be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ "ulp_status: x%x, rid x%x\n",
+ be16_to_cpu(fdmi_cmd), latt, ulp_status,
+ ulp_word4);
}
- free_ndlp = cmdiocb->context_un.ndlp;
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
@@ -2254,15 +2280,19 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = be16_to_cpu(fdmi_cmd);
if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
"0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
/* Should we fallback to FDMI-2 / FDMI-1 ? */
switch (cmd) {
case SLI_MGMT_RHBA:
if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
- /* Fallback to FDMI-1 */
+ /* Fallback to FDMI-1 for HBA attributes */
vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
+
+ /* If HBA attributes fall back to FDMI-1, port
+ * attributes should too, for consistency.
+ */
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
@@ -2270,6 +2300,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
case SLI_MGMT_RPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
+ }
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
/* Fallback to FDMI-1 */
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
@@ -2290,9 +2325,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->link_flag &= ~LS_CT_VEN_RPA;
if (phba->cmf_active_mode == LPFC_CFG_OFF)
return;
- lpfc_printf_log(phba, KERN_ERR,
+ lpfc_printf_log(phba, KERN_WARNING,
LOG_DISCOVERY | LOG_ELS,
- "6460 VEN FDMI RPA failure\n");
+ "6460 VEN FDMI RPA RJT\n");
return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
@@ -2319,6 +2354,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
switch (cmd) {
case SLI_MGMT_RHBA:
+ /* Check for any RPRTs deferred till after RHBA completes */
+ lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);
+
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
break;
@@ -2327,10 +2365,26 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case SLI_MGMT_DPRT:
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
- else
- lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ } else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
+
+ /* Only issue an RPRT for the vport if the RHBA
+ * for the physical port completes successfully.
+ * We may have to defer the RPRT accordingly.
+ */
+ if (phba->hba_flag & HBA_RHBA_CMPL) {
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "6078 RPRT deferred\n");
+ vport->ct_flags |= FC_CT_RPRT_DEFER;
+ }
+ }
break;
case SLI_MGMT_RPA:
if (vport->port_type == LPFC_PHYSICAL_PORT &&
@@ -2345,7 +2399,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY | LOG_CGN_MGMT,
"6210 Issue Vendor MI FDMI %x\n",
phba->sli4_hba.pc_sli4_params.mi_ver);
@@ -2414,6 +2469,9 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
phba->link_flag &= ~LS_CT_VEN_RPA;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
} else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
}
@@ -2435,939 +2493,625 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
} else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
LPFC_FDMI_PORT_ATTR_num_disc);
}
}
-/* Routines for all individual HBA attributes */
-static int
-lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_fdmi_attr_u32 *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ ae->value_u32 = cpu_to_be32(attrval);
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NODENAME);
return size;
}
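Each lpfc_fdmi_set_attr_* helper now writes one complete attribute entry and returns its total length; for the u32 variant, sizeof(*ae) implies an 8-byte entry. A sketch of the struct shape the helper assumes (the actual definition lives in the lpfc headers; this is an illustration of the layout, not the literal source):

    struct lpfc_fdmi_attr_u32 {
            __be16 type;            /* attribute tag, e.g. RPRT_PORT_ID */
            __be16 len;             /* total entry length incl. header: 8 */
            __be32 value_u32;       /* attribute payload */
    };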
-static int
-lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+
+static inline int
+lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_wwn *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(ae->name, wwn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- /* This string MUST be consistent with other FC platforms
- * supported by Broadcom.
- */
- strncpy(ae->un.AttrString,
- "Emulex Corporation",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
+ struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fullwwn *ae = attr;
+ u8 *nname = ae->nname;
+ u8 *pname = ae->pname;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(nname, wwnn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
+ memcpy(pname, wwpn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- strncpy(ae->un.AttrString, phba->SerialNumber,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_string *ae = attr;
+ int len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ /*
+ * We trust the caller: if an FDMI string field is capped at
+ * 64 bytes, the caller passes in a string of 64 bytes or less.
+ */
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+ strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
+ len = strnlen(ae->value_string, sizeof(ae->value_string));
+ /* round string length up to a 32-bit boundary; ensure room for a NUL */
len += (len & 3) ? (4 - (len & 3)) : 4;
+ /* size is Type/Len (4 bytes) plus string length */
size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL);
+
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
return size;
}
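The rounding above always adds at least one byte, so a terminating NUL survives even when the string length is already a multiple of four. A standalone demonstration of the same arithmetic (not driver code, just the rounding rule in isolation):

    #include <stdio.h>

    /* Same rounding as the helper: up to a 32-bit boundary, never zero pad. */
    static unsigned int fdmi_pad(unsigned int len)
    {
            return len + ((len & 3) ? (4 - (len & 3)) : 4);
    }

    int main(void)
    {
            printf("%u\n", fdmi_pad(18)); /* "Emulex Corporation" -> 20 */
            printf("%u\n", fdmi_pad(16)); /* exact multiple still grows -> 20 */
            return 0;
    }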
-static int
-lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+/* Bitfields for FC4 Types that can be reported */
+#define ATTR_FC4_CT 0x00000001
+#define ATTR_FC4_FCP 0x00000002
+#define ATTR_FC4_NVME 0x00000004
+
+static inline int
+lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fc4types *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
+ if (typemask & ATTR_FC4_FCP)
+ ae->value_types[2] = 0x01; /* Type 0x8 - FCP */
+
+ if (typemask & ATTR_FC4_CT)
+ ae->value_types[7] = 0x01; /* Type 0x20 - CT */
+
+ if (typemask & ATTR_FC4_NVME)
+ ae->value_types[6] = 0x01; /* Type 0x28 - NVME */
- strncpy(ae->un.AttrString, phba->ModelDesc,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
return size;
}
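The three hard-coded byte indexes follow from storing the 256-bit FC-4 Types map as big-endian 32-bit words: type t occupies bit t % 32 of word t / 32, which is byte 4*(t/32) + (3 - ((t%32)/8)) of the array. A hypothetical generic form of the table above, shown only to make the indexing explicit:

    /* Hypothetical generic form of the fixed indexes used above. */
    static void fc4_map_set(u8 *vec, u8 t)
    {
            unsigned int word = t >> 5;
            unsigned int bit = t & 0x1f;

            vec[word * 4 + (3 - (bit >> 3))] |= 1u << (bit & 7);
    }
    /* fc4_map_set(vec, 0x08) -> vec[2] |= 0x01   (FCP)
     * fc4_map_set(vec, 0x20) -> vec[7] |= 0x01   (CT)
     * fc4_map_set(vec, 0x28) -> vec[6] |= 0x01   (NVMe)
     */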
+/* Routines for all individual HBA attributes */
static int
-lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- lpfc_vpd_t *vp = &phba->vpd;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t i, j, incr, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- /* Convert JEDEC ID to ascii for hardware version */
- incr = vp->rev.biuRev;
- for (i = 0; i < 8; i++) {
- j = (incr & 0xf);
- if (j <= 9)
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x30 +
- (uint8_t) j);
- else
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x61 +
- (uint8_t) (j - 10));
- incr = (incr >> 4);
- }
- size = FOURBYTES + 8;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+ */
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
+ "Emulex Corporation");
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
+{
+ struct lpfc_hba *phba = vport->phba;
- strncpy(ae->un.AttrString, lpfc_release_version,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
+ phba->SerialNumber);
}
static int
-lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- else
- strncpy(ae->un.AttrString, phba->OptionROMVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
+ phba->ModelDesc);
}
static int
-lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ lpfc_vpd_t *vp = &phba->vpd;
+ char buf[16] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
- init_utsname()->sysname,
- init_utsname()->release,
- init_utsname()->version);
+ return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
+}
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
+ lpfc_release_version);
}
static int
-lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_decode_firmware_rev(phba, buf, 1);
- ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ buf);
+ }
+
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ phba->OptionROMVersion);
}
static int
-lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_decode_firmware_rev(phba, buf, 1);
- len = lpfc_vport_symbolic_node_name(vport,
- ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
+ snprintf(buf, sizeof(buf), "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
- /* Nothing is defined for this currently */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_INFO);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
- /* Each driver instance corresponds to a single port */
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NUM_PORTS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
+ LPFC_MAX_CT_SIZE);
}
static int
-lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));
- memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FABRIC_WWNN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}
static int
-lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
+{
+ /* Each driver instance corresponds to a single port */
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
+}
- strlcat(ae->un.AttrString, phba->BIOSVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
+ &vport->fabric_nodename);
}
static int
-lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
- ae = &ad->AttrValue;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
+ phba->BIOSVersion);
+}
+static int
+lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
+{
/* Driver doesn't have access to this information */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}
static int
-lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "EMULEX",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_ID);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}
-/* Routines for all individual PORT attributes */
+/*
+ * Routines for all individual PORT attributes
+ */
+
static int
-lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ u32 fc4types;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if Firmware supports NVME and on physical port */
if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
phba->sli4_hba.pc_sli4_params.nvme)
- ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ struct lpfc_hba *phba = vport->phba;
+ u32 speeds = 0;
+ u32 tcfg;
+ u8 i, cnt;
- ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- if (phba->lmt & LMT_256Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
- if (phba->lmt & LMT_128Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
- if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
- if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
- if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
- if (phba->lmt & LMT_10Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
- if (phba->lmt & LMT_8Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
- if (phba->lmt & LMT_4Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
- if (phba->lmt & LMT_2Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
- if (phba->lmt & LMT_1Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+ cnt = 0;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tcfg = phba->sli4_hba.conf_trunk;
+ for (i = 0; i < 4; i++, tcfg >>= 1)
+ if (tcfg & 1)
+ cnt++;
+ }
+
+ if (cnt > 2) { /* 4 lane trunk group */
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ } else if (cnt) { /* 2 lane trunk group */
+ if (phba->lmt & LMT_128Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_32GFC;
+ } else {
+ if (phba->lmt & LMT_256Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_128Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_32GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_16GFC;
+ if (phba->lmt & LMT_10Gb)
+ speeds |= HBA_PORTSPEED_10GFC;
+ if (phba->lmt & LMT_8Gb)
+ speeds |= HBA_PORTSPEED_8GFC;
+ if (phba->lmt & LMT_4Gb)
+ speeds |= HBA_PORTSPEED_4GFC;
+ if (phba->lmt & LMT_2Gb)
+ speeds |= HBA_PORTSPEED_2GFC;
+ if (phba->lmt & LMT_1Gb)
+ speeds |= HBA_PORTSPEED_1GFC;
+ }
} else {
/* FCoE links support only one speed */
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}
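With trunking configured, the supported-speed attribute reports the aggregate of the trunk group rather than the per-lane rate, so each LMT lane-speed bit is promoted by the lane count (cnt counts configured lanes from conf_trunk; cnt > 2 is treated as a 4-lane group). Under those assumptions, the promotion works out as:

    /* Promotion applied above (per-lane LMT bit -> reported FDMI speed):
     *   no trunk:  LMT_16Gb -> HBA_PORTSPEED_16GFC
     *   2 lanes:   LMT_16Gb -> HBA_PORTSPEED_32GFC   (2 x 16G)
     *   4 lanes:   LMT_16Gb -> HBA_PORTSPEED_64GFC   (4 x 16G)
     */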
static int
-lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ u32 speeds = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_1GFC;
+ speeds = HBA_PORTSPEED_1GFC;
break;
case LPFC_LINK_SPEED_2GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_2GFC;
+ speeds = HBA_PORTSPEED_2GFC;
break;
case LPFC_LINK_SPEED_4GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_4GFC;
+ speeds = HBA_PORTSPEED_4GFC;
break;
case LPFC_LINK_SPEED_8GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_8GFC;
+ speeds = HBA_PORTSPEED_8GFC;
break;
case LPFC_LINK_SPEED_10GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_10GFC;
+ speeds = HBA_PORTSPEED_10GFC;
break;
case LPFC_LINK_SPEED_16GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_16GFC;
+ speeds = HBA_PORTSPEED_16GFC;
break;
case LPFC_LINK_SPEED_32GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_32GFC;
+ speeds = HBA_PORTSPEED_32GFC;
break;
case LPFC_LINK_SPEED_64GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_64GFC;
+ speeds = HBA_PORTSPEED_64GFC;
break;
case LPFC_LINK_SPEED_128GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_128GFC;
+ speeds = HBA_PORTSPEED_128GFC;
break;
case LPFC_LINK_SPEED_256GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+ speeds = HBA_PORTSPEED_256GFC;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
} else {
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}
static int
-lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
- struct serv_parm *hsp;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;
- ae = &ad->AttrValue;
-
- hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
- (uint32_t) hsp->cmn.bbRcvSizeLsb;
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
+ (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t)hsp->cmn.bbRcvSizeLsb);
}
static int
-lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
+ shost->host_no);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
- "/sys/class/scsi_host/host%d", shost->host_no);
- len = strnlen((char *)ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);
- scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- vport->phba->os_host_name);
-
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));
- len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}
static int
-lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
- ae = &ad->AttrValue;
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
- else
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NPORT);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
+ LPFC_FDMI_PORTTYPE_NLPORT :
+ LPFC_FDMI_PORTTYPE_NPORT);
}
static int
-lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
+ FC_COS_CLASS2 | FC_COS_CLASS3);
}
static int
-lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
+ &vport->fabric_portname);
}
static int
-lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ struct lpfc_hba *phba = vport->phba;
+ u32 fc4types;
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if NVME is configured or not */
- if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+ if (vport == phba->pport &&
+ phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- /* Link Up - operational */
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
+ LPFC_FDMI_PORTSTATE_ONLINE);
}
static int
-lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
- ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
+ vport->fdmi_num_disc);
}
static int
-lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}
static int
-lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Initiator",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SERVICE);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
+ "Smart SAN Initiator");
}
static int
-lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy((((uint8_t *)&ae->un.AttrString) +
- sizeof(struct lpfc_name)),
- &vport->fc_sparam.portName, sizeof(struct lpfc_name));
- size = FOURBYTES + (2 * sizeof(struct lpfc_name));
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_GUID);
- return size;
+ return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
+ &vport->fc_sparam.nodeName,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
+ "Smart SAN Version 2.0");
}
static int
-lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_MODEL);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
/* SRIOV (type 3) is not supported */
- if (vport->vpi)
- ae->un.AttrInt = cpu_to_be32(2); /* NPIV */
- else
- ae->un.AttrInt = cpu_to_be32(1); /* Physical */
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
+ (vport->vpi) ? 2 /* NPIV */ : 1 /* Physical */);
}
static int
-lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_QOS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
}
static int
-lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
}
static int
-lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- char mibrevision[16];
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
- sprintf(mibrevision, "ELXE2EM:%04d",
- phba->sli4_hba.pc_sli4_params.mi_ver);
- strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_VENDOR_MI);
- return size;
+ char buf[32] = { 0 };
+
+ sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);
+
+ return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
}
/* RHBA attribute jump table */
int (*lpfc_fdmi_hba_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */
lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */
@@ -3391,7 +3135,7 @@ int (*lpfc_fdmi_hba_action[])
/* RPA / RPRT attribute jump table */
int (*lpfc_fdmi_port_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */
lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */
@@ -3433,20 +3177,20 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int cmdcode, uint32_t new_mask)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_dmabuf *rq, *rsp;
struct lpfc_sli_ct_request *CtReq;
- struct ulp_bde64 *bpl;
+ struct ulp_bde64_le *bde;
uint32_t bit_pos;
- uint32_t size;
+ uint32_t size, addsz;
uint32_t rsp_size;
uint32_t mask;
struct lpfc_fdmi_reg_hba *rh;
struct lpfc_fdmi_port_entry *pe;
- struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
struct lpfc_fdmi_attr_block *ab = NULL;
- int (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad);
- void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
+ int (*func)(struct lpfc_vport *vport, void *attrbuf);
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
if (!ndlp)
return 0;
@@ -3455,34 +3199,39 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ rq = kmalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
goto fdmi_cmd_exit;
- mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
- if (!mp->virt)
- goto fdmi_cmd_free_mp;
+ rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
+ if (!rq->virt)
+ goto fdmi_cmd_free_rq;
/* Allocate buffer for Buffer ptr list */
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp)
- goto fdmi_cmd_free_mpvirt;
+ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ goto fdmi_cmd_free_rqvirt;
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
- if (!bmp->virt)
- goto fdmi_cmd_free_bmp;
+ rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
+ if (!rsp->virt)
+ goto fdmi_cmd_free_rsp;
- INIT_LIST_HEAD(&mp->list);
- INIT_LIST_HEAD(&bmp->list);
+ INIT_LIST_HEAD(&rq->list);
+ INIT_LIST_HEAD(&rsp->list);
+
+ /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
+ memset(rq->virt, 0, LPFC_BPL_SIZE);
+ rsp_size = LPFC_BPL_SIZE;
/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0218 FDMI Request Data: x%x x%x x%x\n",
- vport->fc_flag, vport->port_state, cmdcode);
- CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
+ cmdcode, new_mask, vport->fdmi_port_mask,
+ vport->fc_flag, vport->port_state);
+
+ CtReq = (struct lpfc_sli_ct_request *)rq->virt;
/* First populate the CT_IU preamble */
- memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
CtReq->RevisionId.bits.InId = 0;
@@ -3490,17 +3239,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
- rsp_size = LPFC_BPL_SIZE;
+
size = 0;
/* Next fill in the specific FDMI cmd information */
switch (cmdcode) {
case SLI_MGMT_RHAT:
case SLI_MGMT_RHBA:
- rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
/* HBA Identifier */
memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_fdmi_hba_ident);
if (cmdcode == SLI_MGMT_RHBA) {
/* Registered Port List */
@@ -3509,16 +3259,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&rh->rpl.pe.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
-
- /* point to the HBA attribute block */
- size = 2 * sizeof(struct lpfc_name) +
- FOURBYTES;
- } else {
- size = sizeof(struct lpfc_name);
+ size += sizeof(struct lpfc_fdmi_reg_port_list);
}
+
ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
ab->EntryCnt = 0;
- size += FOURBYTES;
+ size += FOURBYTES; /* add length of EntryCnt field */
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3529,11 +3276,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_hba_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)rh + size));
- ab->EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)rh + size));
+ if (addsz) {
+ ab->EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto hba_out;
}
@@ -3543,27 +3292,40 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hba_out:
ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
/* Total size */
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_RPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return 0;
+ }
+ fallthrough;
case SLI_MGMT_RPA:
- pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
+ /* Store base ptr right after preamble */
+ base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;
+
if (cmdcode == SLI_MGMT_RPRT) {
- rh = (struct lpfc_fdmi_reg_hba *)pab;
+ rh = (struct lpfc_fdmi_reg_hba *)base;
/* HBA Identifier */
memcpy(&rh->hi.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
pab = (struct lpfc_fdmi_reg_portattr *)
- ((uint8_t *)pab + sizeof(struct lpfc_name));
+ ((uint8_t *)base + sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_name);
+ } else {
+ pab = base;
}
memcpy((uint8_t *)&pab->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
- size += sizeof(struct lpfc_name) + FOURBYTES;
pab->ab.EntryCnt = 0;
+ /* add length of name and EntryCnt field */
+ size += sizeof(struct lpfc_name) + FOURBYTES;
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3574,11 +3336,13 @@ hba_out:
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_port_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)pab + size));
- pab->ab.EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)base + size));
+ if (addsz) {
+ pab->ab.EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto port_out;
}
@@ -3587,10 +3351,7 @@ hba_out:
}
port_out:
pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
- /* Total size */
- if (cmdcode == SLI_MGMT_RPRT)
- size += sizeof(struct lpfc_name);
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_GHAT:
@@ -3599,7 +3360,7 @@ port_out:
fallthrough;
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3611,8 +3372,14 @@ port_out:
rsp_size = FC_MAX_NS_RSP;
fallthrough;
case SLI_MGMT_DPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return 0;
+ }
+ fallthrough;
case SLI_MGMT_DPA:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3625,31 +3392,32 @@ port_out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0298 FDMI cmdcode x%x not supported\n",
cmdcode);
- goto fdmi_cmd_free_bmpvirt;
+ goto fdmi_cmd_free_rspvirt;
}
CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
- bpl = (struct ulp_bde64 *)bmp->virt;
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = size;
+ bde = (struct ulp_bde64_le *)rsp->virt;
+ bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
+ bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
+ bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
+ ULP_BDE64_TYPE_SHIFT);
+ bde->type_size |= cpu_to_le32(size);
/*
* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
* to hold ndlp reference for the corresponding callback function.
*/
- if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
+ if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
return 0;
-fdmi_cmd_free_bmpvirt:
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-fdmi_cmd_free_bmp:
- kfree(bmp);
-fdmi_cmd_free_mpvirt:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-fdmi_cmd_free_mp:
- kfree(mp);
+fdmi_cmd_free_rspvirt:
+ lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
+fdmi_cmd_free_rsp:
+ kfree(rsp);
+fdmi_cmd_free_rqvirt:
+ lpfc_mbuf_free(phba, rq->virt, rq->phys);
+fdmi_cmd_free_rq:
+ kfree(rq);
fdmi_cmd_exit:
/* Issue FDMI request failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3798,12 +3566,13 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct lpfc_dmabuf *inp = cmdiocb->context1;
- struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *ctcmd = inp->virt;
struct lpfc_sli_ct_request *ctrsp = outp->virt;
u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
struct app_id_object *app;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
u32 cmd, hash, bucket;
struct lpfc_vmid *vmp, *cur;
u8 *data = outp->virt;
@@ -3813,9 +3582,9 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (cmd == SLI_CTAS_DALLAPP_ID)
lpfc_ct_free_iocb(phba, cmdiocb);
- if (lpfc_els_chk_latt(vport) || rspiocb->iocb.ulpStatus) {
+ if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
if (cmd != SLI_CTAS_DALLAPP_ID)
- return;
+ goto free_res;
}
/* Check for a CT LS_RJT response */
if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
@@ -3830,7 +3599,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* If DALLAPP_ID failed retry later */
if (cmd == SLI_CTAS_DALLAPP_ID)
vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
- return;
+ goto free_res;
}
}
@@ -3844,7 +3613,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
app->obj.entity_id_len);
if (app->obj.entity_id_len == 0 || app->port_id == 0)
- return;
+ goto free_res;
hash = lpfc_vmid_hash_fn(app->obj.entity_id,
app->obj.entity_id_len);
@@ -3891,6 +3660,9 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
"8857 Invalid command code\n");
}
+free_res:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
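
The fdmi_cmd() rework above also switches the BDE fill from the mixed-endian ulp_bde64 accessors to the explicitly little-endian ulp_bde64_le layout, packing an 8-bit descriptor type above a 24-bit size in a single word. A standalone model of that packing; the type value, the shift of 24, and the field order are assumptions matching the usage above, and the driver additionally wraps each store in cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

#define ULP_BDE64_TYPE_BDE_64	0x0u	/* assumed type value */
#define ULP_BDE64_TYPE_SHIFT	24	/* assumed bit position */

struct bde64_le_model {
	uint32_t type_size;	/* descriptor type 31:24, buffer size 23:0 */
	uint32_t addr_low;
	uint32_t addr_high;
};

static void bde_fill(struct bde64_le_model *bde, uint64_t paddr,
		     uint32_t size)
{
	/* on a little-endian host these plain stores match the driver's
	 * cpu_to_le32() wrapped stores */
	bde->addr_high = (uint32_t)(paddr >> 32);
	bde->addr_low = (uint32_t)paddr;
	bde->type_size = (ULP_BDE64_TYPE_BDE_64 << ULP_BDE64_TYPE_SHIFT) |
			 (size & 0xffffffu);
}

int main(void)
{
	struct bde64_le_model bde;

	bde_fill(&bde, 0x123456789aULL, 1024);
	printf("type_size=0x%08x low=0x%08x high=0x%08x\n",
	       bde.type_size, bde.addr_low, bde.addr_high);
	return 0;
}
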
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 08b2e85dcd7d..f5252e45a48a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2607,8 +2607,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2688,8 +2688,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2826,8 +2826,8 @@ lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 63)
- nbytes = 63;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -3060,8 +3060,8 @@ lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
char *pbuf;
int i;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -5156,7 +5156,7 @@ error_out:
static int
lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
- uint16_t ext_cnt, ext_size;
+ uint16_t ext_cnt = 0, ext_size = 0;
len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nAvailable Extents Information:\n");
@@ -5484,7 +5484,7 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"Truncated . . .\n");
- break;
+ goto out;
}
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"%03x: %08x %08x %08x %08x "
@@ -5495,6 +5495,17 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
cnt += 32;
ptr += 8;
}
+ if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Truncated . . .\n");
+ goto out;
+ }
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Parameter Data\n");
+ ptr = (uint32_t *)&phba->cgn_p;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "%08x %08x %08x %08x\n",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3));
out:
return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
}
@@ -5520,7 +5531,7 @@ lpfc_rx_monitor_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
+ debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE);
if (!debug->buffer) {
kfree(debug);
goto out;
@@ -5541,55 +5552,18 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
struct lpfc_rx_monitor_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
char *buffer = debug->buffer;
- struct rxtable_entry *entry;
- int i, len = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
- /* Table is getting updated */
- msleep(20);
- head = atomic_read(&phba->rxtable_idx_head);
- }
- tail = atomic_xchg(&phba->rxtable_idx_tail, head);
- if (!phba->rxtable || head == tail) {
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "Rxtable is empty\n");
- goto out;
- }
- last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
- start = tail;
-
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- " MaxBPI\t Total Data Cmd Total Data Cmpl "
- " Latency(us) Avg IO Size\tMax IO Size IO cnt "
- "Info BWutil(ms)\n");
-get_table:
- for (i = start; i < last; i++) {
- entry = &phba->rxtable[i];
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "%3d:%12lld %12lld\t%12lld\t"
- "%8lldus\t%8lld\t%10lld "
- "%8d %2d %2d(%2d)\n",
- i, entry->max_bytes_per_interval,
- entry->total_bytes,
- entry->rcv_bytes,
- entry->avg_io_latency,
- entry->avg_io_size,
- entry->max_read_cnt,
- entry->io_cnt,
- entry->cmf_info,
- entry->timer_utilization,
- entry->timer_interval);
+ if (!phba->rx_monitor) {
+ scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE,
+ "Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer,
+ MAX_DEBUGFS_RX_INFO_SIZE,
+ LPFC_MAX_RXMONITOR_ENTRY);
}
- if (head != last) {
- start = 0;
- last = head;
- goto get_table;
- }
-out:
- return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer,
+ strlen(buffer));
}
static int
@@ -6259,9 +6233,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->slow_ring_trc) {
- phba->slow_ring_trc = kmalloc(
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc),
+ phba->slow_ring_trc = kcalloc(
+ lpfc_debugfs_max_slow_ring_trc,
+ sizeof(struct lpfc_debugfs_trc),
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -6270,9 +6244,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
- memset(phba->slow_ring_trc, 0,
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc));
}
snprintf(name, sizeof(name), "nvmeio_trc");
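
The debugfs write fixes above all replace a hard-coded clamp of 64 (or 63) bytes with sizeof(mybuf) - 1. The old clamp let a write fill mybuf[64] completely, leaving no room for a NUL terminator before the string parsing that follows. A host-side model of the corrected handler, with memcpy standing in for copy_from_user(); tying the bound to sizeof(mybuf) also keeps it correct if the buffer is ever resized:

#include <stddef.h>
#include <string.h>

/* dst models a stack buffer such as char mybuf[64]; returns bytes kept */
static size_t clamped_write(char *dst, size_t dstsz,
			    const char *src, size_t nbytes)
{
	if (nbytes > dstsz - 1)		/* always leave room for the NUL */
		nbytes = dstsz - 1;

	memset(dst, 0, dstsz);		/* zero fill doubles as termination */
	memcpy(dst, src, nbytes);	/* stands in for copy_from_user() */
	return nbytes;
}

int main(void)
{
	char mybuf[64];

	return clamped_write(mybuf, sizeof(mybuf), "lpfc", 4) == 4 ? 0 : 1;
}
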
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index a5bf71b34972..8d2e8d05bbc0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -282,7 +282,7 @@ struct lpfc_idiag {
void *ptr_private;
};
-#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
+#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
struct lpfc_rx_monitor_debug {
char *i_private;
char *buffer;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 37a4b79010bf..f82615d87c4b 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -149,7 +149,6 @@ struct lpfc_nodelist {
uint32_t cmd_qdepth;
unsigned long last_change_time;
unsigned long *active_rrqs_xri_bitmap;
- struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
/* flags to keep ndlp alive until special conditions are met */
@@ -188,7 +187,6 @@ struct lpfc_node_rrq {
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
-#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e83453bea2ae..863b2125fed6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -134,9 +134,9 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
/**
* lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
* @vport: pointer to a host virtual N_Port data structure.
- * @expectRsp: flag indicating whether response is expected.
- * @cmdSize: size of the ELS command.
- * @retry: number of retries to the command IOCB when it fails.
+ * @expect_rsp: flag indicating whether response is expected.
+ * @cmd_size: size of the ELS command.
+ * @retry: number of retries to the command when it fails.
* @ndlp: pointer to a node-list data structure.
* @did: destination identifier.
* @elscmd: the ELS command code.
@@ -152,7 +152,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* Buffer Descriptor Entries (BDEs), allocates buffers for both command
* payload and response payload (if expected). The reference count on the
* ndlp is incremented by 1 and the reference to the ndlp is put into
- * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * ndlp of the IOCB data structure for this IOCB to hold the ndlp
* reference for the command's callback function to access later.
*
* Return code
@@ -160,25 +160,23 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* NULL - when els iocb data structure allocation/preparation failed
**/
struct lpfc_iocbq *
-lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
- uint16_t cmdSize, uint8_t retry,
- struct lpfc_nodelist *ndlp, uint32_t did,
- uint32_t elscmd)
+lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
+ u16 cmd_size, u8 retry,
+ struct lpfc_nodelist *ndlp, u32 did,
+ u32 elscmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
- struct ulp_bde64 *bpl;
- IOCB_t *icmd;
-
+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
+ struct ulp_bde64_le *bpl;
+ u32 timeout = 0;
if (!lpfc_is_link_up(phba))
return NULL;
/* Allocate buffer for command iocb */
elsiocb = lpfc_sli_get_iocbq(phba);
-
- if (elsiocb == NULL)
+ if (!elsiocb)
return NULL;
/*
@@ -186,35 +184,33 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
* in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
*/
if ((did == Fabric_DID) &&
- (phba->hba_flag & HBA_FIP_SUPPORT) &&
- ((elscmd == ELS_CMD_FLOGI) ||
- (elscmd == ELS_CMD_FDISC) ||
- (elscmd == ELS_CMD_LOGO)))
+ (phba->hba_flag & HBA_FIP_SUPPORT) &&
+ ((elscmd == ELS_CMD_FLOGI) ||
+ (elscmd == ELS_CMD_FDISC) ||
+ (elscmd == ELS_CMD_LOGO)))
switch (elscmd) {
case ELS_CMD_FLOGI:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_FDISC:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_LOGO:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
}
else
- elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
-
- icmd = &elsiocb->iocb;
+ elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
if (pcmd)
pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
if (!pcmd || !pcmd->virt)
@@ -223,19 +219,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
INIT_LIST_HEAD(&pcmd->list);
/* Allocate buffer for response payload */
- if (expectRsp) {
- prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (expect_rsp) {
+ prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
if (!prsp || !prsp->virt)
goto els_iocb_free_prsp_exit;
INIT_LIST_HEAD(&prsp->list);
- } else
+ } else {
prsp = NULL;
+ }
/* Allocate buffer for Buffer ptr list */
- pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
if (pbuflist)
pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&pbuflist->phys);
@@ -244,90 +241,61 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
INIT_LIST_HEAD(&pbuflist->list);
- if (expectRsp) {
- icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
-
- icmd->un.elsreq64.remoteID = did; /* DID */
- icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
- if (elscmd == ELS_CMD_FLOGI)
- icmd->ulpTimeout = FF_DEF_RATOV * 2;
- else if (elscmd == ELS_CMD_LOGO)
- icmd->ulpTimeout = phba->fc_ratov;
- else
- icmd->ulpTimeout = phba->fc_ratov * 2;
- } else {
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
- icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
- }
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
-
- /*
- * If we have NPIV enabled, we want to send ELS traffic by VPI.
- * For SLI4, since the driver controls VPIs we also want to include
- * all ELS pt2pt protocol traffic as well.
- */
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
- ((phba->sli_rev == LPFC_SLI_REV4) &&
- (vport->fc_flag & FC_PT2PT))) {
-
- if (expectRsp) {
- icmd->un.elsreq64.myID = vport->fc_myDID;
-
- /* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = phba->vpi_ids[vport->vpi];
+ if (expect_rsp) {
+ switch (elscmd) {
+ case ELS_CMD_FLOGI:
+ timeout = FF_DEF_RATOV * 2;
+ break;
+ case ELS_CMD_LOGO:
+ timeout = phba->fc_ratov;
+ break;
+ default:
+ timeout = phba->fc_ratov * 2;
}
- icmd->ulpCt_h = 0;
- /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
- if (elscmd == ELS_CMD_ECHO)
- icmd->ulpCt_l = 0; /* context = invalid RPI */
- else
- icmd->ulpCt_l = 1; /* context = VPI */
+ /* Two BDEs (cmd and rsp) for the SGE fill code to map */
+ elsiocb->num_bdes = 2;
}
- bpl = (struct ulp_bde64 *) pbuflist->virt;
- bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
- bpl->tus.f.bdeSize = cmdSize;
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bmp = pcmd;
+ else
+ bmp = pbuflist;
+
+ lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
+ elscmd, timeout, expect_rsp);
+
+ bpl = (struct ulp_bde64_le *)pbuflist->virt;
+ bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
+ bpl->type_size = cpu_to_le32(cmd_size);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
- if (expectRsp) {
+ if (expect_rsp) {
bpl++;
- bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
- bpl->tus.f.bdeSize = FCELSSIZE;
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
+ bpl->type_size = cpu_to_le32(FCELSSIZE);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
}
- elsiocb->context2 = pcmd;
- elsiocb->context3 = pbuflist;
+ elsiocb->cmd_dmabuf = pcmd;
+ elsiocb->bpl_dmabuf = pbuflist;
elsiocb->retry = retry;
elsiocb->vport = vport;
elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
- if (prsp) {
+ if (prsp)
list_add(&prsp->list, &pcmd->list);
- }
- if (expectRsp) {
+ if (expect_rsp) {
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n",
+ "rpi x%x fc_flag:x%x\n",
elscmd, did, elsiocb->iotag,
vport->port_state, ndlp->nlp_rpi,
- vport->fc_flag, ndlp->nlp_flag, vport);
+ vport->fc_flag);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -335,13 +303,14 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
"NPORT x%x I/O tag: x%x, size: x%x "
"port_state x%x rpi x%x fc_flag x%x\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
- cmdSize, vport->port_state,
+ cmd_size, vport->port_state,
ndlp->nlp_rpi, vport->fc_flag);
}
+
return elsiocb;
els_iocb_free_pbuf_exit:
- if (expectRsp)
+ if (expect_rsp)
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pbuflist);
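
lpfc_prep_els_iocb() above and the reworked lpfc_fdmi_cmd() both follow the staged-allocation idiom visible in these unwind labels: each allocation gets a matching label, so a failure at step N frees steps N-1 back to 1 in reverse order. The skeleton, with malloc standing in for the kmalloc/lpfc_mbuf_alloc pairs and all names illustrative:

#include <stdlib.h>

struct demo_dmabuf {
	void *virt;
};

static int prep_demo(void)
{
	struct demo_dmabuf *pcmd, *pbuflist;

	pcmd = malloc(sizeof(*pcmd));
	if (!pcmd)
		goto fail;
	pcmd->virt = malloc(1024);	/* models lpfc_mbuf_alloc() */
	if (!pcmd->virt)
		goto free_pcmd;
	pbuflist = malloc(sizeof(*pbuflist));
	if (!pbuflist)
		goto free_pcmd_virt;

	/* success: in the driver, ownership now passes to the completion */
	free(pbuflist);			/* demo only: reclaim before exit */
	free(pcmd->virt);
	free(pcmd);
	return 0;

free_pcmd_virt:
	free(pcmd->virt);
free_pcmd:
	free(pcmd);
fail:
	return -1;
}

int main(void)
{
	return prep_demo() ? 1 : 0;
}
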
@@ -376,7 +345,6 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct serv_parm *sp;
int rc;
@@ -426,7 +394,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp) {
err = 6;
- goto fail_no_ndlp;
+ goto fail_free_mbox;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -442,13 +410,8 @@ fail_issue_reg_login:
* for the failed mbox command.
*/
lpfc_nlp_put(ndlp);
-fail_no_ndlp:
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
fail_free_mbox:
- mempool_free(mbox, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -496,45 +459,37 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
/* Supply CSPs only if this is a fabric or pt-to-pt connection */
if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!dmabuf) {
- rc = -ENOMEM;
- goto fail;
- }
- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
- if (!dmabuf->virt) {
+ rc = lpfc_mbox_rsrc_prep(phba, mboxq);
+ if (rc) {
rc = -ENOMEM;
- goto fail;
+ goto fail_mbox;
}
+ dmabuf = mboxq->ctx_buf;
memcpy(dmabuf->virt, &phba->fc_fabparam,
sizeof(struct serv_parm));
}
vport->port_state = LPFC_FABRIC_CFG_LINK;
- if (dmabuf)
+ if (dmabuf) {
lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
- else
+ /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
+ mboxq->ctx_buf = dmabuf;
+ } else {
lpfc_reg_vfi(mboxq, vport, 0);
+ }
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
- mboxq->ctx_buf = dmabuf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = -ENXIO;
- goto fail;
+ goto fail_mbox;
}
return 0;
+fail_mbox:
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
- if (mboxq)
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (dmabuf) {
- if (dmabuf->virt)
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- }
-
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0289 Issue Register VFI failed: Err %d\n", rc);
@@ -650,7 +605,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
* @sp: pointer to service parameter data structure.
- * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ * @ulp_word4: command response value
*
* This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
* function to handle the completion of a Fabric Login (FLOGI) into a fabric
@@ -667,7 +622,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
**/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp, IOCB_t *irsp)
+ struct serv_parm *sp, uint32_t ulp_word4)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
@@ -692,7 +647,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
}
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
ndlp->nlp_class_sup = 0;
@@ -903,10 +858,12 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (rc)
vport->fc_myDID = PT2PT_LocalID;
- /* Decrement ndlp reference count indicating that ndlp can be
- * safely released when other references to it are done.
+ /* If not registered with a transport, decrement ndlp reference
+ * count indicating that ndlp can be safely released when other
+ * references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
if (!ndlp) {
@@ -943,11 +900,12 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto fail;
}
} else {
- /* This side will wait for the PLOGI, decrement ndlp reference
- * count indicating that ndlp can be released when other
- * references to it are done.
+ /* This side will wait for the PLOGI. If not registered with
+ * a transport, decrement node reference count indicating that
+ * ndlp can be released when other references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
/* Start discovery - this should just do CLEAR_LA */
lpfc_disc_start(vport);
@@ -987,28 +945,40 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp = cmdiocb->context1;
- struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ IOCB_t *irsp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
struct serv_parm *sp;
uint16_t fcf_index;
int rc;
+ u32 ulp_status, ulp_word4, tmo;
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
* trigger the release of the node
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ lpfc_nlp_put(ndlp);
goto out;
}
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FLOGI cmpl: status:x%x/x%x state:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->port_state);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/*
* In case of FIP mode, perform roundrobin FCF failover
* due to new FCF discovery
@@ -1019,8 +989,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto stop_rr_fcf_flogi;
if ((phba->fcoe_cvl_eventtag_attn ==
phba->fcoe_cvl_eventtag) &&
- (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ (ulp_status == IOSTAT_LOCAL_REJECT) &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_SLI_ABORTED))
goto stop_rr_fcf_flogi;
else
@@ -1031,8 +1001,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"status:x%x/x%x, tmo:x%x, perform "
"roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
+ ulp_status, ulp_word4, tmo);
lpfc_sli4_set_fcf_flogi_fail(phba,
phba->fcf.current_rec.fcf_indx);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
@@ -1043,15 +1012,14 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2858 FLOGI failure Status:x%x/x%x TMO"
":x%x Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ ulp_status, ulp_word4, tmo,
+ phba->hba_flag, phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
@@ -1060,19 +1028,25 @@ stop_rr_fcf_flogi:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout,
- kref_read(&ndlp->kref));
+ ulp_status, ulp_word4, cmdiocb->sli4_xritag,
+ tmo, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_LOOP_OPEN_FAILURE)))
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE))) {
+ /* FLOGI failure */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0100 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x\n",
+ ulp_status, ulp_word4, tmo);
goto flogifail;
+ }
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+ FC_PT2PT_NO_NVME);
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
@@ -1098,7 +1072,7 @@ stop_rr_fcf_flogi:
}
/* Do not register VFI if the driver aborted FLOGI */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
@@ -1122,16 +1096,17 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0101 FLOGI completes successfully, I/O tag:x%x, "
+ "0101 FLOGI completes successfully, I/O tag:x%x "
"xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
- irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+ ulp_word4, sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
vport->port_state, vport->fc_flag,
sp->cmn.priority_tagging, kref_read(&ndlp->kref));
if (sp->cmn.priority_tagging)
- vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
+ vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
+ LPFC_VMID_TYPE_PRIO);
if (vport->port_state == LPFC_FLOGI) {
/*
@@ -1139,7 +1114,8 @@ stop_rr_fcf_flogi:
* we are point to point, if Fport we are Fabric.
*/
if (sp->cmn.fPort)
- rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
+ rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
+ ulp_word4);
else if (!(phba->hba_flag & HBA_FCOE_MODE))
rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
else {
@@ -1206,16 +1182,16 @@ flogifail:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- if (!lpfc_error_lost_link(irsp)) {
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
- } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ (((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_ABORTED) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_DOWN))) &&
(phba->link_state != LPFC_CLEAR_LA)) {
/* If FLOGI failed enable link interrupt. */
@@ -1239,22 +1215,24 @@ static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
uint32_t *pcmd;
uint32_t cmd;
+ u32 ulp_status, ulp_word4;
- pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+ pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
cmd = *pcmd;
- irsp = &rspiocb->iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"6445 ELS completes after LINK_DOWN: "
" Status %x/%x cmd x%x flg x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
- cmdiocb->iocb_flag);
+ ulp_status, ulp_word4, cmd,
+ cmdiocb->cmd_flag);
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
lpfc_els_free_iocb(phba, cmdiocb);
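
Across this patch, completion handlers stop reading rspiocb->iocb fields directly and call get_job_ulpstatus()/get_job_word4(), which hide whether completion status lives in the SLI-4 WCQE or the SLI-3 IOCB. A standalone model of that dispatch; the struct and field names are simplified stand-ins for the driver's wcqe_cmpl and iocb members:

#include <stdint.h>
#include <stdio.h>

enum { SLI_REV3 = 3, SLI_REV4 = 4 };

struct model_iocbq {
	uint32_t sli3_ulp_status;	/* stand-in for iocb.ulpStatus */
	uint32_t wcqe_status;		/* stand-in for the WCQE status bits */
};

static uint32_t model_get_job_ulpstatus(int sli_rev,
					const struct model_iocbq *q)
{
	/* one accessor hides the SLI-3 vs SLI-4 completion layout */
	return sli_rev == SLI_REV4 ? q->wcqe_status : q->sli3_ulp_status;
}

int main(void)
{
	struct model_iocbq q = { .sli3_ulp_status = 3, .wcqe_status = 0 };

	/* a completion routine asks once up front and never touches
	 * revision-specific fields again */
	printf("SLI-3 status %u, SLI-4 status %u\n",
	       model_get_job_ulpstatus(SLI_REV3, &q),
	       model_get_job_ulpstatus(SLI_REV4, &q));
	return 0;
}
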
@@ -1274,7 +1252,7 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* out FLOGI ELS command with one outstanding fabric IOCB at a time.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the FLOGI ELS command.
*
* Return code
@@ -1287,10 +1265,11 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
- IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
+ IOCB_t *icmd = NULL;
struct lpfc_iocbq *elsiocb;
struct lpfc_iocbq defer_flogi_acc;
- uint8_t *pcmd;
+ u8 *pcmd, ct;
uint16_t cmdsize;
uint32_t tmo, did;
int rc;
@@ -1302,8 +1281,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
+ wqe = &elsiocb->wqe;
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
icmd = &elsiocb->iocb;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For FLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
@@ -1336,12 +1316,15 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
- elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
- elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
/* FLOGI needs to be 3 for WQE FCFI */
+ ct = SLI4_CT_FCFI;
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+
/* Set the fcfi to the fcfi we registered with */
- elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->fcf.fcfi);
}
+
/* Can't do SLI4 class2 without support sequence coalescing */
sp->cls2.classValid = 0;
sp->cls2.seqDelivery = 0;
@@ -1354,13 +1337,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
- } else
+ } else {
sp->cmn.request_multiple_Nport = 0;
- }
+ }
- if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
+ if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+ }
}
tmo = phba->fc_ratov;
@@ -1369,14 +1353,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->fc_ratov = tmo;
phba->fc_stat.elsXmitFLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FLOGI: opt:x%x",
phba->sli3_options, 0, 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -1390,16 +1374,34 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
/* Check for a deferred FLOGI ACC condition */
if (phba->defer_flogi_acc_flag) {
+ /* Look up the ndlp for the received FLOGI */
+ ndlp = lpfc_findnode_did(vport, 0);
+ if (!ndlp)
+ return 0;
+
did = vport->fc_myDID;
vport->fc_myDID = Fabric_DID;
memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
- defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
- defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
- phba->defer_flogi_acc_ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_ctxt_tag,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_rx_id);
+ bf_set(wqe_rcvoxid,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_ox_id);
+ } else {
+ icmd = &defer_flogi_acc.iocb;
+ icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+ icmd->unsli3.rcvsli3.ox_id =
+ phba->defer_flogi_acc_ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
@@ -1412,8 +1414,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp, NULL);
phba->defer_flogi_acc_flag = false;
-
vport->fc_myDID = did;
+
+ /* Decrement ndlp reference count to indicate the node can be
+ * released when other references are removed.
+ */
+ lpfc_nlp_put(ndlp);
}
return 0;
@@ -1439,7 +1445,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_nodelist *ndlp;
- IOCB_t *icmd;
+ u32 ulp_command;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
@@ -1456,13 +1462,13 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
- ndlp = (struct lpfc_nodelist *)(iocb->context1);
+ ulp_command = get_job_cmnd(phba, iocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
+ ndlp = iocb->ndlp;
if (ndlp && ndlp->nlp_DID == Fabric_DID) {
if ((phba->pport->fc_flag & FC_PT2PT) &&
!(phba->pport->fc_flag & FC_PT2PT_PLOGI))
- iocb->fabric_iocb_cmpl =
+ iocb->fabric_cmd_cmpl =
lpfc_ignore_els_cmpl;
lpfc_sli_issue_abort_iotag(phba, pring, iocb,
NULL);
@@ -1515,11 +1521,16 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
lpfc_enqueue_node(vport, ndlp);
}
+ /* Reset the Fabric flag; a topology change may have occurred */
+ vport->fc_flag &= ~FC_FABRIC;
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
- /* This decrement of reference count to node shall kick off
- * the release of the node.
+ /* A node reference should be retained while registered with a
+ * transport or dev-loss-evt work is pending.
+ * Otherwise, decrement node reference to trigger release.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ lpfc_nlp_put(ndlp);
return 0;
}
return 1;
@@ -1562,10 +1573,13 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
}
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
- /* decrement node reference count to trigger the release of
- * the node.
+ /* A node reference should be retained while registered with a
+ * transport or dev-loss-evt work is pending.
+ * Otherwise, decrement node reference to trigger release.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ lpfc_nlp_put(ndlp);
return 0;
}
return 1;
@@ -1776,18 +1790,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
- /* The new_ndlp is replacing ndlp totally, so we need
- * to put ndlp on UNUSED list and try to free it.
+ /* The ndlp doesn't have a portname yet, but does have an
+ * NPort ID. The new_ndlp portname matches the Rport's
+ * portname. Reinstantiate the new_ndlp and reset the ndlp.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
/* Two ndlps cannot have the same did on the nodelist.
- * Note: for this case, ndlp has a NULL WWPN so setting
- * the nlp_fc4_type isn't required.
+ * The keepDID and keep_nlp_fc4_type need to be swapped
+ * because ndlp is in flight with no WWPN.
*/
ndlp->nlp_DID = keepDID;
+ ndlp->nlp_fc4_type = keep_nlp_fc4_type;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
@@ -1802,9 +1818,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_unreg_rpi(vport, ndlp);
- /* Two ndlps cannot have the same did and the fc4
- * type must be transferred because the ndlp is in
- * flight.
+ /* The ndlp and new_ndlp both have WWPNs but are swapping
+ * NPort IDs and attributes.
*/
ndlp->nlp_DID = keepDID;
ndlp->nlp_fc4_type = keep_nlp_fc4_type;
@@ -1891,43 +1906,43 @@ lpfc_end_rscn(struct lpfc_vport *vport)
static void
lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
- struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_node_rrq *rrq;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* we pass cmdiocb to state machine which needs rspiocb as well */
rrq = cmdiocb->context_un.rrq;
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"RRQ cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4,
+ get_job_els_rsp64_did(phba, cmdiocb));
+
/* rrq completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2880 RRQ completes to DID x%x "
"Data: x%x x%x x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
/* RRQ failed. Don't print the vport-to-vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2881 RRQ failure DID:%06X Status:"
"x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
}
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
@@ -1966,24 +1981,33 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *prsp;
int disc;
struct serv_parm *sp = NULL;
+ u32 ulp_status, ulp_word4, did, iotag;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ }
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PLOGI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
- ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpIoTag);
+ did, ulp_status, ulp_word4, iotag);
goto out_freeiocb;
}
@@ -2000,7 +2024,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0102 PLOGI completes to NPort x%06x "
"Data: x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_fc4_type,
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
@@ -2011,7 +2035,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
@@ -2023,17 +2047,18 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
/* PLOGI failed. Don't print the vport-to-vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ "2753 PLOGI failure DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
@@ -2046,23 +2071,24 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&ndlp->lock);
goto out;
}
- spin_unlock_irq(&ndlp->lock);
/* No PLOGI collision and the node is not registered with the
* scsi or nvme transport. It is no longer an active node. Just
* start the device remove process.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
- spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- }
} else {
/* Good status, call state machine */
- prsp = list_entry(((struct lpfc_dmabuf *)
- cmdiocb->context2)->list.next,
+ prsp = list_entry(cmdiocb->cmd_dmabuf->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
@@ -2107,7 +2133,7 @@ out:
out_freeiocb:
/* Release the reference on the original I/O request. */
- free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ free_ndlp = cmdiocb->ndlp;
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
@@ -2127,7 +2153,7 @@ out_freeiocb:
* the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding
- * the ndlp and the reference to ndlp will be stored into the context1 field
+ * the ndlp and the reference to ndlp will be stored into the ndlp field
* of the IOCB for the completion callback function to the PLOGI ELS command.
*
* Return code
@@ -2174,11 +2200,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
-
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
@@ -2225,13 +2247,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
}
phba->fc_stat.elsXmitPLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x refcnt %d",
did, kref_read(&ndlp->kref), 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -2264,16 +2286,21 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
char *mode;
u32 loglevel;
+ u32 ulp_status;
+ u32 ulp_word4;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
@@ -2284,21 +2311,21 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PRLI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
"Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID, ulp_status, ulp_word4,
vport->num_disc_nodes, ndlp->fc4_prli_sent);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
@@ -2321,11 +2348,11 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
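lpfc_error_lost_link() now takes the extracted (ulp_status, ulp_word4) pair instead of an SLI-3 IOCB_t, so one predicate serves both queue formats. Its body is not part of this hunk; the sketch below is an assumption pieced together from how the call sites use IOSTAT_LOCAL_REJECT and IOERR_PARAM_MASK, and IOERR_LINK_DOWN here is an illustrative stand-in, not the driver's constant:

#include <stdbool.h>
#include <stdint.h>

#define IOSTAT_LOCAL_REJECT	0x03	/* as used by the callers above */
#define IOERR_PARAM_MASK	0x00ff
#define IOERR_LINK_DOWN		0x65	/* illustrative value only */

static bool error_lost_link(uint32_t ulp_status, uint32_t ulp_word4)
{
	/* A local reject whose parameter names a link-level error means
	 * the link itself is gone and a retry cannot succeed.
	 */
	return ulp_status == IOSTAT_LOCAL_REJECT &&
	       (ulp_word4 & IOERR_PARAM_MASK) == IOERR_LINK_DOWN;
}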
@@ -2341,14 +2368,18 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* it is no longer an active node. Otherwise devloss
* handles the final cleanup.
*/
+ spin_lock_irq(&ndlp->lock);
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
!ndlp->fc4_prli_sent) {
- spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- }
} else {
/* Good status, call state machine. However, if another
* PRLI is outstanding, don't call the state machine
@@ -2378,7 +2409,7 @@ out:
* routine lpfc_sli_issue_iocb() to send out PRLI command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the PRLI ELS command.
*
* Return code
@@ -2452,7 +2483,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PRLI request, remainder of payload is service parameters */
memset(pcmd, 0, cmdsize);
@@ -2484,7 +2515,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
/* Remove FCP type - processed. */
local_nlp_type &= ~NLP_FC4_FCP;
@@ -2518,41 +2549,40 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
- elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
/* Remove NVME type - processed. */
local_nlp_type &= ~NLP_FC4_NVME;
}
phba->fc_stat.elsXmitPRLI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_PRLI_SND;
-
- /* The vport counters are used for lpfc_scan_finished, but
- * the ndlp is used to track outstanding PRLIs for different
- * FC4 types.
- */
- vport->fc_prli_sent++;
- ndlp->fc4_prli_sent++;
- spin_unlock_irq(&ndlp->lock);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PRLI: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
- goto err;
+ return 1;
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
- goto err;
+ return 1;
}
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_PRLI_SND;
+ vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
+ spin_unlock_irq(&ndlp->lock);
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2562,12 +2592,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto send_next_prli;
else
return 0;
-
-err:
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(&ndlp->lock);
- return 1;
}
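Worth noting in the hunk above: NLP_PRLI_SND and the PRLI counters are now set only after lpfc_sli_issue_iocb() has succeeded, which is what allows the old err: rollback label to disappear. A compact sketch of the two orderings, with placeholder names:

struct port { int prli_sent; };

static int submit(void) { return 0; }	/* 0 on success */

/* Old shape: optimistic bookkeeping that must be undone on failure. */
static int issue_old(struct port *p)
{
	p->prli_sent++;
	if (submit()) {
		p->prli_sent--;		/* rollback, easy to get wrong */
		return 1;
	}
	return 0;
}

/* New shape: bookkeeping happens once failure is no longer possible. */
static int issue_new(struct port *p)
{
	if (submit())
		return 1;		/* nothing to undo */
	p->prli_sent++;
	return 0;
}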
/**
@@ -2719,16 +2743,27 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
int disc;
+ u32 ulp_status, ulp_word4, tmo;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ADISC cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* Since ndlp can be freed in the disc state machine, note if this node
@@ -2742,8 +2777,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0104 ADISC completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, disc, vport->num_disc_nodes);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
spin_lock_irq(&ndlp->lock);
@@ -2752,7 +2787,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
@@ -2767,23 +2802,26 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ADISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
-
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+ NLP_EVT_CMPL_ADISC);
/* As long as this node is not registered with the SCSI or NVMe
* transport, it is no longer an active node. Otherwise
* devloss handles the final cleanup.
*/
+ spin_lock_irq(&ndlp->lock);
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
- spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- }
} else
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2810,7 +2848,7 @@ out:
* to issue the ADISC ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the ADISC ELS command.
*
* Return code
@@ -2834,7 +2872,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For ADISC request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
@@ -2848,12 +2886,12 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ap->DID = be32_to_cpu(vport->fc_myDID);
phba->fc_stat.elsXmitADISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto err;
}
@@ -2861,6 +2899,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue ADISC: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -2892,17 +2931,29 @@ static void
lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = ndlp->vport;
IOCB_t *irsp;
unsigned long flags;
uint32_t skip_recovery = 0;
int wake_up_waiter = 0;
+ u32 ulp_status;
+ u32 ulp_word4;
+ u32 tmo;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
- irsp = &(rspiocb->iocb);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
@@ -2913,7 +2964,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* LOGO completes to NPort <nlp_DID> */
@@ -2921,8 +2972,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0105 LOGO completes to NPort x%x "
"refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes);
if (lpfc_els_chk_latt(vport)) {
skip_recovery = 1;
@@ -2934,14 +2985,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* all acceptable. Note the failure and move forward with
* discovery. The PLOGI will retry.
*/
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
- /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp)) {
+ "2756 LOGO failure, No Retry DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
+
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
skip_recovery = 1;
goto out;
}
@@ -2961,18 +3013,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
- /* Presume the node was released. */
- return;
+ goto out_rsrc_free;
}
out:
- /* Driver is done with the IO. */
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
* on link up processing. For a LOGO / PLOGI initiated by the
@@ -2996,9 +3040,13 @@ out:
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3187 LOGO completes to NPort x%x: Start "
"Recovery Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, tmo,
vport->num_disc_nodes);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
lpfc_disc_start(vport);
return;
}
@@ -3015,6 +3063,10 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
+out_rsrc_free:
+ /* Driver is done with the I/O. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
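The LOGO completion now funnels the device-remove path and the normal exit through a single out_rsrc_free label, so the iocb free and the final node put appear exactly once. That is the standard kernel single-exit idiom; a self-contained sketch with placeholder names (note the node pointer is saved before the request is freed, as free_ndlp is in lpfc_cmpl_els_cmd below):

struct node { int refcnt; };
struct els_req { struct node *ndlp; int status; };

static void free_req(struct els_req *req) { (void)req; }
static void node_put(struct node *n) { if (n) n->refcnt--; }
static void start_recovery(void) { }

static void logo_complete(struct els_req *req)
{
	struct node *ndlp = req->ndlp;

	if (req->status)
		goto out_rsrc_free;	/* failure: release and leave */

	start_recovery();		/* success-path work */

out_rsrc_free:
	/* Every path releases the request and the node reference
	 * exactly once.
	 */
	free_req(req);
	node_put(ndlp);
}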
/**
@@ -3029,7 +3081,7 @@ out:
* lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the LOGO ELS command.
*
* Callers of this routine are expected to unregister the RPI first
@@ -3061,7 +3113,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
@@ -3071,13 +3123,13 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
phba->fc_stat.elsXmitLOGO++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto err;
}
@@ -3085,6 +3137,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue LOGO: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -3127,24 +3180,34 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_nodelist *free_ndlp;
IOCB_t *irsp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
- irsp = &rspiocb->iocb;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ iotag, ulp_status, ulp_word4, tmo);
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
- free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ free_ndlp = cmdiocb->ndlp;
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
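This completion shows the split that recurs through the rest of the file: ulp_status/ulp_word4 always come from the get_job_*() wrappers, while the timeout and request tag are read from the WQE on SLI-4 and from the IOCB on SLI-3. A sketch of that revision-keyed accessor style; the layouts are stand-ins, not struct lpfc_iocbq:

#include <stdint.h>

enum sli_rev { SLI_REV3 = 3, SLI_REV4 = 4 };

struct iocb { uint32_t ulpTimeout; uint32_t ulpIoTag; };	/* SLI-3 view */
struct wqe  { uint32_t tmo; uint32_t reqtag; };			/* SLI-4 view */

struct job {
	enum sli_rev rev;
	struct iocb iocb;
	struct wqe  wqe;
};

static uint32_t job_tmo(const struct job *j)
{
	return j->rev == SLI_REV4 ? j->wqe.tmo : j->iocb.ulpTimeout;
}

static uint32_t job_reqtag(const struct job *j)
{
	return j->rev == SLI_REV4 ? j->wqe.reqtag : j->iocb.ulpIoTag;
}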
@@ -3171,7 +3234,6 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ns_ndlp;
LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *mp;
if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
return rc;
@@ -3208,7 +3270,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
if (!mbox->ctx_ndlp) {
rc = -ENOMEM;
- goto out_mem;
+ goto out;
}
mbox->vport = vport;
@@ -3216,21 +3278,15 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
if (rc == MBX_NOT_FINISHED) {
rc = -ENODEV;
lpfc_nlp_put(fc_ndlp);
- goto out_mem;
+ goto out;
}
/* Success path. Exit. */
lpfc_nlp_set_state(vport, fc_ndlp,
NLP_STE_REG_LOGIN_ISSUE);
return 0;
- out_mem:
- fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-
out:
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0938 %s: failed to format reg_login "
"Data: x%x x%x x%x x%x\n", __func__,
@@ -3260,23 +3316,32 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd, *prsp;
u32 *pdata;
u32 cmd;
- struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
- irsp = &rspiocb->iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
+
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
- "x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
- cmdiocb->retry);
+ "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
+ iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
if (!pcmd)
goto out;
@@ -3286,8 +3351,8 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = *pdata;
/* Allow only one retry, and only for an ELS timeout */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_SEQUENCE_TIMEOUT)) {
cmdiocb->retry++;
if (cmdiocb->retry <= 1) {
@@ -3299,7 +3364,6 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_issue_els_edc(vport, cmdiocb->retry);
break;
case ELS_CMD_RDF:
- cmdiocb->context1 = NULL; /* save ndlp refcnt */
lpfc_issue_els_rdf(vport, cmdiocb->retry);
break;
}
@@ -3312,11 +3376,11 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
return;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* ELS discovery cmd completes with error */
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
"4203 ELS cmd x%x error: x%x x%X\n", cmd,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
@@ -3341,7 +3405,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"4677 Fabric RDF Notification Grant "
"Data: 0x%08x Reg: %x %x\n",
be32_to_cpu(
- prdf->reg_d1.desc_tags[i]),
+ prdf->reg_d1.desc_tags[i]),
phba->cgn_reg_signal,
phba->cgn_reg_fpin);
}
@@ -3367,7 +3431,7 @@ out:
* routine is invoked to send the SCR IOCB.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the SCR ELS command.
*
* Return code
@@ -3409,7 +3473,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
}
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
pcmd += sizeof(uint32_t);
@@ -3423,9 +3487,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitSCR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -3456,7 +3520,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
* replay the RSCN to registered recipients.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RSCN ELS command.
*
* Return code
@@ -3506,7 +3570,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
if (!elsiocb)
return 1;
- event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ event = elsiocb->cmd_dmabuf->virt;
event->rscn.rscn_cmd = ELS_RSCN;
event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
@@ -3520,9 +3584,9 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
event->portid.rscn_fid[2] = nportid & 0x000000FF;
phba->fc_stat.elsXmitRSCN++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -3538,11 +3602,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
- lpfc_nlp_put(ndlp);
return 0;
}
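The conditional lpfc_nlp_put() after a successful submit is deleted because the reference taken for elsiocb->ndlp is owned by the completion handler (lpfc_cmpl_els_cmd), which always drops it; a second put here risked releasing the node early. lpfc counts node references with a struct kref (the traces above print kref_read(&ndlp->kref)); a minimal kref example of that ownership, with a stand-in node type (the real lpfc_nlp_get() additionally refuses to revive a node whose count already reached zero):

#include <linux/kref.h>
#include <linux/slab.h>

struct my_node {
	struct kref kref;
};

static void my_node_release(struct kref *kref)
{
	struct my_node *n = container_of(kref, struct my_node, kref);

	kfree(n);
}

static struct my_node *my_node_get(struct my_node *n)
{
	if (n)
		kref_get(&n->kref);	/* one get per outstanding I/O */
	return n;
}

static void my_node_put(struct my_node *n)
{
	if (n)
		kref_put(&n->kref, my_node_release);
}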
@@ -3560,7 +3619,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
* lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the FARPR ELS command.
*
* Return code
@@ -3591,11 +3650,11 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_RNID);
+ ndlp->nlp_DID, ELS_CMD_FARPR);
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
pcmd += sizeof(uint32_t);
@@ -3624,9 +3683,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitFARPR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -3657,7 +3716,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
* for diagnostic functions.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RDF ELS command.
*
* Return code
@@ -3694,8 +3753,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
return -ENOMEM;
/* Configure the payload for the supported FPIN events. */
- prdf = (struct lpfc_els_rdf_req *)
- (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
memset(prdf, 0, cmdsize);
prdf->rdf.fpin_cmd = ELS_RDF;
prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
@@ -3715,9 +3773,9 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
phba->cgn_reg_fpin);
phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return -EIO;
}
@@ -3788,9 +3846,6 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
{
u32 rsp_sig_cap = 0, drv_sig_cap = 0;
u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
- struct lpfc_cgn_info *cp;
- u32 crc;
- u16 sig_freq;
/* Get rsp signal and frequency capabilities. */
rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
@@ -3846,25 +3901,7 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
}
}
- if (!phba->cgn_i)
- return;
-
- /* Update signal frequency in congestion info buffer */
- cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
-
- /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
- * are received by the HBA
- */
- sig_freq = phba->cgn_sig_freq;
-
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
- cp->cgn_warn_freq = cpu_to_le16(sig_freq);
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- cp->cgn_alarm_freq = cpu_to_le16(sig_freq);
- cp->cgn_warn_freq = cpu_to_le16(sig_freq);
- }
- crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
- cp->cgn_info_crc = cpu_to_le32(crc);
+ /* We are NOT recording signal frequency in congestion info buffer */
return;
out_no_support:
@@ -3893,7 +3930,7 @@ static void
lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
+ IOCB_t *irsp_iocb;
struct fc_els_edc_resp *edc_rsp;
struct fc_tlv_desc *tlv;
struct fc_diag_cg_sig_desc *pcgd;
@@ -3904,22 +3941,33 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int desc_cnt = 0, bytes_remain;
bool rcv_cap_desc = false;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ndlp = cmdiocb->ndlp;
- irsp = &rspiocb->iocb;
- ndlp = cmdiocb->context1;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(rspiocb);
+ iotag = get_wqe_reqtag(rspiocb);
+ } else {
+ irsp_iocb = &rspiocb->iocb;
+ tmo = irsp_iocb->ulpTimeout;
+ iotag = irsp_iocb->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
"EDC cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ iotag, ulp_status, ulp_word4, tmo);
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
if (!pcmd)
goto out;
@@ -3928,7 +3976,7 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* Need to clear signal values, send features MB and RDF with FPIN. */
- if (irsp->ulpStatus)
+ if (ulp_status)
goto out;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
@@ -3940,7 +3988,8 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* Fabric EDC Rsp completes */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"4676 Fabric EDC Rsp: "
"0x%02x, 0x%08x\n",
edc_rsp->acc_hdr.la_cmd,
@@ -3977,18 +4026,18 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6462 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
- lpfc_printf_log(
- phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
"4617 Link Fault Desc Data: 0x%08x 0x%08x "
"0x%08x 0x%08x 0x%08x\n",
be32_to_cpu(plnkflt->desc_tag),
@@ -4068,8 +4117,26 @@ out:
}
static void
-lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
+lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
{
+ struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
+
+ lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
+ lft->desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
+
+ lft->degrade_activate_threshold =
+ cpu_to_be32(phba->degrade_activate_threshold);
+ lft->degrade_deactivate_threshold =
+ cpu_to_be32(phba->degrade_deactivate_threshold);
+ lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
+}
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
+
/* We are assuming cgd was zeroed before calling this routine */
/* Configure the congestion detection capability */
@@ -4113,6 +4180,23 @@ lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
}
+static bool
+lpfc_link_is_lds_capable(struct lpfc_hba *phba)
+{
+ if (!(phba->lmt & LMT_64Gb))
+ return false;
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return false;
+
+ if (phba->sli4_hba.conf_trunk) {
+ if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
+ return true;
+ } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
+ return true;
+ }
+ return false;
+}
+
/**
* lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
* @vport: pointer to a host virtual N_Port data structure.
@@ -4140,12 +4224,12 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_els_edc_req *edc_req;
- struct fc_diag_cg_sig_desc *cgn_desc;
+ struct fc_els_edc *edc_req;
+ struct fc_tlv_desc *tlv;
u16 cmdsize;
struct lpfc_nodelist *ndlp;
u8 *pcmd = NULL;
- u32 edc_req_size, cgn_desc_size;
+ u32 cgn_desc_size, lft_desc_size;
int rc;
if (vport->port_type == LPFC_NPIV_PORT)
@@ -4155,40 +4239,48 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
return -ENODEV;
- /* If HBA doesn't support signals, drop into RDF */
- if (!phba->cgn_init_reg_signal)
+ cgn_desc_size = (phba->cgn_init_reg_signal) ?
+ sizeof(struct fc_diag_cg_sig_desc) : 0;
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize = cgn_desc_size + lft_desc_size;
+
+ /* Skip EDC if no applicable descriptors */
+ if (!cmdsize)
goto try_rdf;
- edc_req_size = sizeof(struct fc_els_edc);
- cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
- cmdsize = edc_req_size + cgn_desc_size;
+ cmdsize += sizeof(struct fc_els_edc);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_EDC);
if (!elsiocb)
goto try_rdf;
/* Configure the payload for the supported Diagnostics capabilities. */
- pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_req = (struct lpfc_els_edc_req *)pcmd;
- edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
- edc_req->edc.edc_cmd = ELS_EDC;
-
- cgn_desc = &edc_req->cgn_desc;
+ edc_req = (struct fc_els_edc *)pcmd;
+ edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
+ edc_req->edc_cmd = ELS_EDC;
+ tlv = edc_req->desc;
- lpfc_format_edc_cgn_desc(phba, cgn_desc);
+ if (cgn_desc_size) {
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ tlv = fc_tlv_next_desc(tlv);
+ }
- phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"4623 Xmit EDC to remote "
"NPORT x%x reg_sig x%x reg_fpin:x%x\n",
ndlp->nlp_DID, phba->cgn_reg_signal,
phba->cgn_reg_fpin);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return -EIO;
}
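Both this request builder and the response parser above treat the EDC payload as a list of TLV descriptors advanced with fc_tlv_next_desc(). The walk is plain pointer arithmetic over a big-endian tag/length header; a self-contained userspace sketch of the same idea (the kernel helper works on struct fc_tlv_desc and uses be32_to_cpu()):

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(); in the kernel this is be32_to_cpu() */

struct tlv {
	uint32_t tag;	/* big-endian descriptor tag */
	uint32_t len;	/* big-endian length of value[], header excluded */
	uint8_t  value[];
};

static struct tlv *tlv_next(struct tlv *t)
{
	return (struct tlv *)((uint8_t *)t + sizeof(*t) + ntohl(t->len));
}

/* Walk 'total' bytes of descriptors, bailing out on truncation the
 * same way the 6462 length check above does.
 */
static int tlv_walk(struct tlv *t, uint32_t total,
		    void (*handle)(struct tlv *))
{
	while (total >= sizeof(*t)) {
		uint32_t sz = sizeof(*t) + ntohl(t->len);

		if (sz > total)
			return -1;	/* truncated descriptor */
		handle(t);
		total -= sz;
		t = tlv_next(t);
	}
	return 0;
}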
@@ -4465,9 +4557,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ union lpfc_wqe128 *irsp = &rspiocb->wqe;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *elscmd;
struct ls_rjt stat;
int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
@@ -4475,9 +4567,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint32_t cmd = 0;
uint32_t did;
int link_reset = 0, rc;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- /* Note: context2 may be 0 for internal driver abort
+ /* Note: cmd_dmabuf may be 0 for internal driver abort
* of a delayed ELS command.
*/
@@ -4490,7 +4584,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
did = ndlp->nlp_DID;
else {
/* We should only hit this case for retrying PLOGI */
- did = irsp->un.elsreq64.remoteID;
+ did = get_job_els_rsp64_did(phba, rspiocb);
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp && (cmd != ELS_CMD_PLOGI))
return 0;
@@ -4498,9 +4592,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Retry ELS: wd7:x%x wd4:x%x did:x%x",
- *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did);
+ *(((uint32_t *)irsp) + 7), ulp_word4, did);
- switch (irsp->ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_FCP_RSP_ERROR:
break;
case IOSTAT_REMOTE_STOP:
@@ -4514,17 +4608,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
break;
case IOSTAT_LOCAL_REJECT:
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_FLOGI) {
- if (PCI_DEVICE_ID_HORNET ==
- phba->pcidev->device) {
- phba->fc_topology = LPFC_TOPOLOGY_LOOP;
- phba->pport->fc_myDID = 0;
- phba->alpa_map[0] = 0;
- phba->alpa_map[1] = 0;
- }
- }
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
@@ -4595,7 +4680,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP) {
retry = 1;
break;
}
@@ -4608,53 +4693,75 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
/* Added for Vendor specific support
* Just keep retrying for these Rsn / Exp codes
*/
+ if ((vport->fc_flag & FC_PT2PT) &&
+ cmd == ELS_CMD_NVMEPRLI) {
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ case LSRJT_INVALID_CMD:
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_CMD_UNSUPPORTED:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0168 NVME PRLI LS_RJT "
+ "reason %x port doesn't "
+ "support NVME, disabling NVME\n",
+ stat.un.b.lsRjtRsnCode);
+ retry = 0;
+ vport->fc_flag |= FC_PT2PT_NO_NVME;
+ goto out_retry;
+ }
+ }
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
- /* The driver has a VALID PLOGI but the rport has
- * rejected the PRLI - can't do it now. Delay
- * for 1 second and try again.
- *
- * However, if explanation is REQ_UNSUPPORTED there's
- * no point to retry PRLI.
+ /* Special case for PRLI LS_RJTs. Recall that lpfc
+ * uses a single routine to issue both PRLI FC4 types.
+ * If the PRLI is rejected because that FC4 type
+ * isn't really supported, don't retry and cause
+ * multiple transport registrations. Otherwise, parse
+ * the reason code/reason code explanation and take the
+ * appropriate action.
*/
- if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
- stat.un.b.lsRjtRsnCodeExp !=
- LSEXP_REQ_UNSUPPORTED) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
- break;
- }
-
- /* Legacy bug fix code for targets with PLOGI delays. */
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CMD_IN_PROGRESS) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS | LOG_NODE,
+ "0153 ELS cmd x%x LS_RJT by x%x. "
+ "RsnCode x%x RsnCodeExp x%x\n",
+ cmd, did, stat.un.b.lsRjtRsnCode,
+ stat.un.b.lsRjtRsnCodeExp);
+
+ switch (stat.un.b.lsRjtRsnCodeExp) {
+ case LSEXP_CANT_GIVE_DATA:
+ case LSEXP_CMD_IN_PROGRESS:
if (cmd == ELS_CMD_PLOGI) {
delay = 1000;
maxretry = 48;
}
retry = 1;
break;
- }
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CANT_GIVE_DATA) {
- if (cmd == ELS_CMD_PLOGI) {
+ case LSEXP_REQ_UNSUPPORTED:
+ case LSEXP_NO_RSRC_ASSIGN:
+ /* These explanation codes get no retry. */
+ if (cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI)
+ break;
+ fallthrough;
+ default:
+ /* Restrict the delay and retry action to a limited
+ * cmd set. There are other ELS commands where
+ * a retry is not expected.
+ */
+ if (cmd == ELS_CMD_PLOGI ||
+ cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI) {
delay = 1000;
- maxretry = 48;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
}
- retry = 1;
- break;
- }
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
break;
}
+
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
@@ -4734,12 +4841,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* on this rport.
*/
if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
- retry = 0;
- goto out_retry;
+ LSEXP_REQ_UNSUPPORTED) {
+ if (cmd == ELS_CMD_PRLI)
+ goto out_retry;
}
break;
}
@@ -4771,7 +4875,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
- !lpfc_error_lost_link(irsp)) {
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
@@ -4784,7 +4888,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
delay = 5000;
else if (cmdiocb->retry >= 32)
delay = 1000;
- } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ } else if ((cmd == ELS_CMD_FDISC) &&
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* retry FDISCs every second up to devloss */
retry = 1;
maxretry = vport->cfg_devloss_tmo;
@@ -4821,8 +4926,8 @@ out_retry:
cmd, did, cmdiocb->retry, delay);
if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
- ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES))) {
/* Don't reset timer for no resources */
@@ -4894,15 +4999,15 @@ out_retry:
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0137 No retry ELS command x%x to remote "
"NPORT x%x: Out of Resources: Error:x%x/%x\n",
- cmd, did, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, ulp_status,
+ ulp_word4);
}
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0108 No retry ELS command x%x to remote "
"NPORT x%x Retried:%d Error:x%x/%x\n",
- cmd, did, cmdiocb->retry, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, cmdiocb->retry, ulp_status,
+ ulp_word4);
}
return 0;
}
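A short sketch of the delay/maxretry contract the switch above feeds: a command is retried only while its attempt count stays under maxretry, never across a lost link, and PLOGI-class commands are paced with a one-second delay. Placeholder names; the real decision also weighs the LS_RJT reason and explanation codes shown above:

#include <stdbool.h>

struct els_cmd {
	int retry;	/* attempts so far */
};

static bool should_retry(struct els_cmd *c, int maxretry,
			 bool lost_link, int *delay_ms)
{
	if (lost_link)
		return false;	/* no use retrying into a dead link */
	if (c->retry >= maxretry)
		return false;

	c->retry++;
	/* Pace repeated attempts, as the PLOGI cases above do. */
	*delay_ms = (c->retry > 1) ? 1000 : 0;
	return true;
}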
@@ -4968,10 +5073,10 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
* command IOCB data structure contains references to various associated
* resources; these fields must be set to NULL if the associated reference
* is not present:
- * context1 - reference to ndlp
- * context2 - reference to cmd
- * context2->next - reference to rsp
- * context3 - reference to bpl
+ * cmd_dmabuf - reference to cmd
+ * cmd_dmabuf->next - reference to rsp
+ * rsp_dmabuf - unused
+ * bpl_dmabuf - reference to bpl
*
* It first properly decrements the reference count held on ndlp for the
* IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
@@ -4991,19 +5096,19 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- /* The I/O iocb is complete. Clear the context1 data. */
- elsiocb->context1 = NULL;
+ /* The I/O iocb is complete. Clear the node and first dmabuf */
+ elsiocb->ndlp = NULL;
- /* context2 = cmd, context2->next = rsp, context3 = bpl */
- if (elsiocb->context2) {
- if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+ /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
+ if (elsiocb->cmd_dmabuf) {
+ if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
/* Firmware could still be DMAing the payload,
* so don't free the data buffer until after
* a heartbeat.
*/
- elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
- buf_ptr = elsiocb->context2;
- elsiocb->context2 = NULL;
+ elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
+ buf_ptr = elsiocb->cmd_dmabuf;
+ elsiocb->cmd_dmabuf = NULL;
if (buf_ptr) {
buf_ptr1 = NULL;
spin_lock_irq(&phba->hbalock);
@@ -5022,16 +5127,16 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
spin_unlock_irq(&phba->hbalock);
}
} else {
- buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ buf_ptr1 = elsiocb->cmd_dmabuf;
lpfc_els_free_data(phba, buf_ptr1);
- elsiocb->context2 = NULL;
+ elsiocb->cmd_dmabuf = NULL;
}
}
- if (elsiocb->context3) {
- buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+ if (elsiocb->bpl_dmabuf) {
+ buf_ptr = elsiocb->bpl_dmabuf;
lpfc_els_free_bpl(phba, buf_ptr);
- elsiocb->context3 = NULL;
+ elsiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
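The LPFC_DELAY_MEM_FREE branch above parks the command dmabuf on a driver list instead of freeing it inline, because the hardware may still be reading it; a later, quiescent context reaps the list after a heartbeat. A self-contained kernel-style sketch of that deferred-free idiom, with placeholder names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct dbuf {
	struct list_head list;
	void *virt;
};

static LIST_HEAD(delayed_free_list);
static DEFINE_SPINLOCK(delayed_free_lock);

/* Called where an inline free could race with in-flight DMA. */
static void defer_free(struct dbuf *buf)
{
	spin_lock_irq(&delayed_free_lock);
	list_add_tail(&buf->list, &delayed_free_list);
	spin_unlock_irq(&delayed_free_lock);
}

/* Called later, once the hardware can no longer touch the buffers. */
static void reap_deferred(void (*do_free)(struct dbuf *))
{
	struct dbuf *buf, *next;
	LIST_HEAD(reap);

	spin_lock_irq(&delayed_free_lock);
	list_splice_init(&delayed_free_list, &reap);
	spin_unlock_irq(&delayed_free_lock);

	list_for_each_entry_safe(buf, next, &reap, list) {
		list_del(&buf->list);
		do_free(buf);
	}
}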
@@ -5047,7 +5152,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
* the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
* release the ndlp if it has the last reference remaining (reference count
- * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
+ * is 1). If it succeeds (meaning the ndlp was released), it sets the iocb ndlp
* field to NULL to inform the following lpfc_els_free_iocb() routine no
* ndlp reference count needs to be decremented. Otherwise, the ndlp
* reference use-count shall be decremented by the lpfc_els_free_iocb()
@@ -5058,14 +5163,16 @@ static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ACC LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
+ ulp_status, ulp_word4, ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0109 ACC to LOGO completes to NPort x%x refcnt %d "
@@ -5083,7 +5190,6 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-
/* If PLOGI is being retried, PLOGI completion will clean up the
* node. The NLP_NPR_2B_DISC flag needs to be retained to make
* progress on nodes discovered from last RSCN.
@@ -5103,7 +5209,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Indicate the node has already been released. Do not
* reference it from within lpfc_els_free_iocb.
*/
- cmdiocb->context1 = NULL;
+ cmdiocb->ndlp = NULL;
}
}
out:
@@ -5131,14 +5237,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
u32 mbx_flag = pmb->mbox_flag;
u32 mbx_cmd = pmb->u.mb.mbxCommand;
- pmb->ctx_buf = NULL;
- pmb->ctx_ndlp = NULL;
-
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0006 rpi x%x DID:%x flg:%x %d x%px "
@@ -5161,10 +5263,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_drop_node(ndlp->vport, ndlp);
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
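lpfc_mbox_rsrc_cleanup() consolidates the open-coded teardown the deleted lines show (free the mailbox's mbuf, then return the mailbox to its mempool); MBOX_THD_UNLOCKED indicates no mailbox lock is held on entry. The helper's body is not part of this hunk, so the following is only a plausible reconstruction from the call sites, with the lock-flag handling elided:

/* Plausible shape only; not lpfc's implementation. */
static void mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_dmabuf *mp = mbox->ctx_buf;

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	mempool_free(mbox, phba->mbox_mem_pool);
}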
/**
@@ -5184,14 +5283,12 @@ static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
IOCB_t *irsp;
LPFC_MBOXQ_t *mbox = NULL;
- struct lpfc_dmabuf *mp = NULL;
-
- irsp = &rspiocb->iocb;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
if (!vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -5201,37 +5298,44 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
+
/* Check to see if link went down during discovery */
if (!ndlp || lpfc_els_chk_latt(vport)) {
- if (mbox) {
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
- }
+ if (mbox)
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
goto out;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ELS rsp cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->iocb.un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0110 ELS response tag x%x completes "
- "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
- cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
+ iotag, ulp_status, ulp_word4, tmo,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
+ ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
if (mbox) {
- if ((rspiocb->iocb.ulpStatus == 0) &&
- (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ if (ulp_status == 0 &&
+ (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
(!(vport->fc_flag & FC_PT2PT))) {
- if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_state ==
+ NLP_STE_REG_LOGIN_ISSUE) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0314 PLOGI recov "
@@ -5241,14 +5345,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_state,
ndlp->nlp_rpi,
ndlp->nlp_flag);
- mp = mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt,
- mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
+ goto out_free_mbox;
}
}
@@ -5257,7 +5354,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
- goto out;
+ goto out_free_mbox;
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
@@ -5289,12 +5386,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
}
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
+out_free_mbox:
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
}
out:
if (ndlp && shost) {
@@ -5312,12 +5405,15 @@ out:
(vport && vport->port_type == LPFC_NPIV_PORT) &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
ndlp->nlp_flag & NLP_RELEASE_RPI) {
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
- spin_unlock_irq(&ndlp->lock);
- lpfc_drop_node(vport, ndlp);
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_drop_node(vport, ndlp);
+ }
}
/* Release the originating I/O reference. */
@@ -5343,7 +5439,7 @@ out:
* mailbox command to the HBA later when callback is invoked.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the corresponding
* response ELS IOCB command.
*
@@ -5359,6 +5455,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
+ union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
struct serv_parm *sp;
@@ -5367,8 +5465,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ELS_PKT *els_pkt_ptr;
struct fc_els_rdf_resp *rdf_resp;
- oldcmd = &oldiocb->iocb;
-
switch (flag) {
case ELS_CMD_ACC:
cmdsize = sizeof(uint32_t);
@@ -5381,10 +5477,26 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
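The SLI-4/SLI-3 copy of the XRI and OX_ID from the received command that opens this hunk is repeated for every ACC variant below, which makes it read like a candidate for a small helper. lpfc does not define one in this patch; the factoring below is only a sketch assembled from the pattern above:

static void els_rsp_copy_rcv_ids(struct lpfc_hba *phba,
				 struct lpfc_iocbq *rsp,
				 struct lpfc_iocbq *rcv)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		union lpfc_wqe128 *wqe = &rsp->wqe;
		union lpfc_wqe128 *oldwqe = &rcv->wqe;

		/* XRI / rx_id */
		bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
		       bf_get(wqe_ctxt_tag, &oldwqe->xmit_els_rsp.wqe_com));
		/* OX_ID */
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       bf_get(wqe_rcvoxid, &oldwqe->xmit_els_rsp.wqe_com));
	} else {
		rsp->iocb.ulpContext = rcv->iocb.ulpContext;
		rsp->iocb.unsli3.rcvsli3.ox_id =
			rcv->iocb.unsli3.rcvsli3.ox_id;
	}
}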
@@ -5400,10 +5512,26 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
if (mbox)
elsiocb->context_un.mbox = mbox;
@@ -5462,12 +5590,28 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *) elsiocb->cmd_dmabuf->virt;
- memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+ memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
sizeof(uint32_t) + sizeof(PRLO));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
els_pkt_ptr = (ELS_PKT *) pcmd;
@@ -5484,10 +5628,26 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
rdf_resp = (struct fc_els_rdf_resp *)pcmd;
memset(rdf_resp, 0, sizeof(*rdf_resp));
rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
@@ -5509,14 +5669,14 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
spin_unlock_irq(&ndlp->lock);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
} else {
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5553,7 +5713,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
* to issue to the HBA later.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the reject response
* ELS IOCB command.
*
@@ -5570,6 +5730,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -5580,11 +5741,20 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
pcmd += sizeof(uint32_t);
@@ -5599,16 +5769,16 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
rejectError, elsiocb->iotag,
- elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue LS_RJT: did:x%x flg:x%x err:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5654,44 +5824,65 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_els_edc_rsp *edc_rsp;
+ struct fc_els_edc_resp *edc_rsp;
+ struct fc_tlv_desc *tlv;
struct lpfc_iocbq *elsiocb;
IOCB_t *icmd, *cmd;
+ union lpfc_wqe128 *wqe;
+ u32 cgn_desc_size, lft_desc_size;
+ u16 cmdsize;
uint8_t *pcmd;
- int cmdsize, rc;
+ int rc;
- cmdsize = sizeof(struct lpfc_els_edc_rsp);
+ cmdsize = sizeof(struct fc_els_edc_resp);
+ cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize += cgn_desc_size + lft_desc_size;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- cmd = &cmdiocb->iocb;
- icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ cmd = &cmdiocb->iocb;
+ icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
- edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC;
- edc_rsp->edc_rsp.desc_list_len = cpu_to_be32(
- FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp));
- edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
- edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32(
+ edc_rsp = (struct fc_els_edc_resp *)pcmd;
+ edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
+ edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
+ cgn_desc_size + lft_desc_size);
+ edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
+ edc_rsp->lsri.desc_len = cpu_to_be32(
FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
- edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC;
- lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc);
+ edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
+ tlv = edc_rsp->desc;
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue EDC ACC: did:x%x flg:x%x refcnt %d",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
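
The EDC ACC built above is the one variable-length response in this series: a fixed LS_ACC/LSRI header followed by one or two TLV descriptors. Stripped of setup and error handling, the sizing and append logic reduces to the sketch below (all names are taken from the hunk itself):

	u32 cgn_desc_size, lft_desc_size;
	u16 cmdsize;
	struct fc_tlv_desc *tlv;

	/* Link Fault descriptor is included only on LDS-capable links */
	cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
	lft_desc_size = lpfc_link_is_lds_capable(phba) ?
			sizeof(struct fc_diag_lnkflt_desc) : 0;
	cmdsize = sizeof(struct fc_els_edc_resp) +
		  cgn_desc_size + lft_desc_size;

	/* Descriptors are laid out back to back; fc_tlv_next_desc()
	 * advances by the length the previous descriptor advertises
	 * in its own header.
	 */
	tlv = edc_rsp->desc;
	lpfc_format_edc_cgn_desc(phba, tlv);
	tlv = fc_tlv_next_desc(tlv);
	if (lft_desc_size)
		lpfc_format_edc_lft_desc(phba, tlv);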
@@ -5726,7 +5917,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the ADISC Accept response
* ELS IOCB command.
*
@@ -5741,10 +5932,12 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(ADISC);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -5752,19 +5945,32 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0130 Xmit ADISC ACC response iotag x%x xri: "
"x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -5780,9 +5986,9 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5794,14 +6000,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
return 1;
}
- /* Xmit ELS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
- "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
- rc, elsiocb->iotag, elsiocb->sli4_xritag,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, vport->fc_flag);
return 0;
}
@@ -5816,7 +6014,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the PRLI Accept response
* ELS IOCB command.
*
@@ -5834,18 +6032,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
lpfc_vpd_t *vpd;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
uint32_t prli_fc4_req, *req_payload;
struct lpfc_dmabuf *req_buf;
int rc;
- u32 elsrspcmd;
+ u32 elsrspcmd, ulp_context;
/* Need the incoming PRLI payload to determine if the ACC is for an
* FC4 or NVME PRLI type. The PRLI type is at word 1.
*/
- req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
+ req_buf = oldiocb->cmd_dmabuf;
req_payload = (((uint32_t *)req_buf->virt) + 1);
/* PRLI type payload is at byte 3 for FCP or NVME. */
@@ -5858,7 +6057,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (prli_fc4_req == PRLI_FCP_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
} else {
@@ -5866,23 +6065,34 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
}
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
- ndlp->nlp_DID, elsrspcmd);
+ ndlp->nlp_DID, elsrspcmd);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0131 Xmit PRLI ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
*((uint32_t *)(pcmd)) = elsrspcmd;
@@ -5910,7 +6120,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
@@ -5954,9 +6164,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5984,7 +6194,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
* issue the response.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function.
*
* Return code
@@ -5998,10 +6208,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
struct lpfc_hba *phba = vport->phba;
RNID *rn;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
@@ -6013,16 +6225,27 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0132 Xmit RNID ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ elsiocb->iotag, ulp_context);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -6055,9 +6278,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -6092,7 +6315,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
struct lpfc_node_rrq *prrq;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+ pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
pcmd += sizeof(uint32_t);
rrq = (struct RRQ *)pcmd;
rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
@@ -6104,7 +6327,8 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
be32_to_cpu(bf_get(rrq_did, rrq)),
bf_get(rrq_oxid, rrq),
rxid,
- iocb->iotag, iocb->iocb.ulpContext);
+ get_wqe_reqtag(iocb),
+ get_job_ulpcontext(phba, iocb));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
@@ -6135,12 +6359,18 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
- cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
+ else
+ cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
/* The accumulated length can exceed the BPL_SIZE. For
* now, use this as the limit
@@ -6152,14 +6382,27 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
if (!elsiocb)
return 1;
- elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ECHO ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ elsiocb->iotag, ulp_context);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
@@ -6169,9 +6412,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -6754,12 +6997,14 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
struct lpfc_iocbq *elsiocb;
struct ulp_bde64 *bpl;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct ls_rjt *stat;
struct fc_rdp_res_frame *rdp_res;
uint32_t cmdsize, len;
uint16_t *flag_ptr;
int rc;
+ u32 ulp_context;
if (status != SUCCESS)
goto error;
@@ -6768,24 +7013,33 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
cmdsize = sizeof(struct fc_rdp_res_frame);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
- lpfc_max_els_tries, rdp_context->ndlp,
- rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
+ lpfc_max_els_tries, rdp_context->ndlp,
+ rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2171 Xmit RDP response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- rdp_res = (struct fc_rdp_res_frame *)
- (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -6833,18 +7087,17 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
rdp_context->page_a0, vport);
rdp_res->length = cpu_to_be32(len - 8);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
/* Now that we know the true size of the payload, update the BPL */
- bpl = (struct ulp_bde64 *)
- (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
+ bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
bpl->tus.f.bdeSize = len;
bpl->tus.f.bdeFlags = 0;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto free_rdp_context;
}
@@ -6864,19 +7117,30 @@ error:
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto free_rdp_context;
}
@@ -6889,7 +7153,7 @@ error:
free_rdp_context:
/* This reference put is for the original unsolicited RDP. If the
- * iocb prep failed, there is no reference to remove.
+ * prep failed, there is no reference to remove.
*/
lpfc_nlp_put(ndlp);
kfree(rdp_context);
@@ -6909,18 +7173,19 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
}
if (lpfc_sli4_dump_page_a0(phba, mbox))
- goto prep_mbox_fail;
+ goto rdp_fail;
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
- goto issue_mbox_fail;
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ return 1;
+ }
return 0;
-prep_mbox_fail:
-issue_mbox_fail:
+rdp_fail:
mempool_free(mbox, phba->mbox_mem_pool);
return 1;
}
@@ -6951,7 +7216,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
struct fc_rdp_req_frame *rdp_req;
struct lpfc_rdp_context *rdp_context;
- IOCB_t *cmd = NULL;
+ union lpfc_wqe128 *cmd = NULL;
struct ls_rjt stat;
if (phba->sli_rev < LPFC_SLI_REV4 ||
@@ -6968,7 +7233,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
goto error;
}
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -6993,15 +7258,17 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
goto error;
}
- cmd = &cmdiocb->iocb;
+ cmd = &cmdiocb->wqe;
rdp_context->ndlp = lpfc_nlp_get(ndlp);
if (!rdp_context->ndlp) {
kfree(rdp_context);
rjt_err = LSRJT_UNABLE_TPC;
goto error;
}
- rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
- rdp_context->rx_id = cmd->ulpContext;
+ rdp_context->ox_id = bf_get(wqe_rcvoxid,
+ &cmd->xmit_els_rsp.wqe_com);
+ rdp_context->rx_id = bf_get(wqe_ctxt_tag,
+ &cmd->xmit_els_rsp.wqe_com);
rdp_context->cmpl = lpfc_els_rdp_cmpl;
if (lpfc_get_rdp_info(phba, rdp_context)) {
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
@@ -7031,6 +7298,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
@@ -7077,26 +7345,33 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!elsiocb)
goto free_lcb_context;
- lcb_res = (struct fc_lcb_res_frame *)
- (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
- pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->lcb_sub_command = lcb_context->sub_command;
lcb_res->lcb_type = lcb_context->type;
lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
lcb_res->lcb_duration = lcb_context->duration;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto out;
}
@@ -7113,16 +7388,24 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
error:
cmdsize = sizeof(struct fc_lcb_res_frame);
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
- lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_lcb_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
- pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
@@ -7131,10 +7414,10 @@ error:
if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto free_lcb_context;
}
@@ -7246,7 +7529,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
u8 state, rjt_err = 0;
struct ls_rjt stat;
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint8_t *)pcmd->virt;
beacon = (struct fc_lcb_request_frame *)pcmd->virt;
@@ -7286,8 +7569,8 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
lcb_context->duration = beacon->lcb_duration;
- lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
- lcb_context->rx_id = cmdiocb->iocb.ulpContext;
+ lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
+ lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
lcb_context->ndlp = lpfc_nlp_get(ndlp);
if (!lcb_context->ndlp) {
rjt_err = LSRJT_UNABLE_TPC;
@@ -7443,10 +7726,10 @@ return_did_out:
static int
lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
- struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *ndlp = NULL, *n;
/* Move all nodes affected by pending RSCNs to NPR state. */
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
@@ -7492,7 +7775,7 @@ lpfc_send_rscn_event(struct lpfc_vport *vport,
uint32_t payload_len;
struct lpfc_rscn_event_header *rscn_event_data;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
payload_ptr = (uint32_t *) pcmd->virt;
payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
@@ -7552,7 +7835,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
int rscn_id = 0, hba_id = 0;
int i, tmo;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
@@ -7628,6 +7911,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
ndlp, NULL);
+ /* Restart disctmo if it's already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies +
+ msecs_to_jiffies(1000 * tmo));
+ }
return 0;
}
}
@@ -7647,7 +7937,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Get the array count after successfully have the token */
rscn_cnt = vport->fc_rscn_id_cnt;
/* If we are already processing an RSCN, save the received
- * RSCN payload buffer, cmdiocb->context2 to process later.
+ * RSCN payload buffer, cmdiocb->cmd_dmabuf, to process later.
*/
if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -7680,10 +7970,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
} else {
vport->fc_rscn_id_list[rscn_cnt] = pcmd;
vport->fc_rscn_id_cnt++;
- /* If we zero, cmdiocb->context2, the calling
+ /* If we zero cmdiocb->cmd_dmabuf, the calling
* routine will not try to free it.
*/
- cmdiocb->context2 = NULL;
+ cmdiocb->cmd_dmabuf = NULL;
}
/* Deferred RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -7720,10 +8010,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Indicate we are done walking fc_rscn_id_list on this vport */
vport->fc_rscn_flush = 0;
/*
- * If we zero, cmdiocb->context2, the calling routine will
+ * If we zero cmdiocb->cmd_dmabuf, the calling routine will
* not try to free it.
*/
- cmdiocb->context2 = NULL;
+ cmdiocb->cmd_dmabuf = NULL;
lpfc_set_disctmo(vport);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
@@ -7847,9 +8137,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *lp = (uint32_t *) pcmd->virt;
- IOCB_t *icmd = &cmdiocb->iocb;
+ union lpfc_wqe128 *wqe = &cmdiocb->wqe;
struct serv_parm *sp;
LPFC_MBOXQ_t *mbox;
uint32_t cmd, did;
@@ -7857,6 +8147,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t fc_flag = 0;
uint32_t port_state = 0;
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
cmd = *lp++;
sp = (struct serv_parm *) lp;
@@ -7866,7 +8159,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* We should never receive a FLOGI in loop mode; ignore it */
- did = icmd->un.elsreq64.remoteID;
+ did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
@@ -7908,6 +8201,12 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
}
+ /* External loopback plug insertion detected */
+ phba->link_flag |= LS_EXTERNAL_LOOPBACK;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
+ "1119 External Loopback plug detected\n");
+
/* abort the flogi coming back to ourselves
* due to external loopback on the port.
*/
@@ -7962,9 +8261,10 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Defer ACC response until AFTER we issue a FLOGI */
if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
- phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
- phba->defer_flogi_acc_ox_id =
- cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com);
+ phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
+ &wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
@@ -8013,7 +8313,7 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
RNID *rn;
struct ls_rjt stat;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
lp++;
@@ -8054,7 +8354,7 @@ lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
uint8_t *pcmd;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+ pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
/* skip over first word of echo command to find echo data */
pcmd += sizeof(uint32_t);
@@ -8130,7 +8430,7 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* response to the RLS.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RLS Accept Response
* ELS IOCB command.
*
@@ -8141,6 +8441,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int rc = 0;
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct RLS_RSP *rls_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
@@ -8148,10 +8449,11 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t oxid;
uint16_t rxid;
uint32_t cmdsize;
+ u32 ulp_context;
mb = &pmb->u.mb;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
pmb->ctx_buf = NULL;
@@ -8175,11 +8477,19 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rxid;
- icmd->unsli3.rcvsli3.ox_id = oxid;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
+ }
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
rls_rsp = (struct RLS_RSP *)pcmd;
@@ -8195,13 +8505,13 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
"2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return;
}
@@ -8239,6 +8549,8 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
+ u32 ctx = get_job_ulpcontext(phba, cmdiocb);
+ u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
@@ -8249,8 +8561,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
mbox->ctx_buf = (void *)((unsigned long)
- ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
- cmdiocb->iocb.ulpContext)); /* rx_id */
+ (ox_id << 16 | ctx));
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
goto node_err;
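
Rather than allocating a context structure, the RLS path packs both exchange IDs of the unsolicited frame into the mailbox context pointer; lpfc_els_rsp_rls_acc() above unpacks them. A sketch of the round trip, assuming an unsigned long of at least 32 bits:

	u16 rxid, oxid;

	/* request side: ox_id in the high 16 bits, rx_id in the low */
	mbox->ctx_buf = (void *)(unsigned long)((ox_id << 16) | ctx);

	/* completion side: recover both IDs from pmb->ctx_buf */
	rxid = (u16)((unsigned long)pmb->ctx_buf & 0xffff);
	oxid = (u16)(((unsigned long)pmb->ctx_buf >> 16) & 0xffff);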
@@ -8291,7 +8602,7 @@ reject_out:
* Value (RTV) unsolicited IOCB event.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RTV Accept Response
* ELS IOCB command.
*
@@ -8303,13 +8614,15 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
int rc = 0;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_hba *phba = vport->phba;
struct ls_rjt stat;
struct RTV_RSP *rtv_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
uint32_t cmdsize;
-
+ u32 ulp_context;
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
@@ -8324,13 +8637,23 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
/* use the command's xri in the response */
- elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
+ }
rtv_rsp = (struct RTV_RSP *)pcmd;
@@ -8346,14 +8669,14 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
"Data: x%x x%x x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi,
rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 0;
}
@@ -8409,7 +8732,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For RRQ request, remainder of payload is Exchange IDs */
*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
@@ -8427,19 +8750,21 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue RRQ: did:x%x",
did, rrq->xritag, rrq->rxid);
elsiocb->context_un.rrq = rrq;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
- lpfc_nlp_get(ndlp);
- elsiocb->context1 = ndlp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
+ goto io_err;
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (ret == IOCB_ERROR)
+ if (ret == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
goto io_err;
+ }
return 0;
io_err:
lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
return 1;
}
@@ -8481,7 +8806,7 @@ lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
* It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RPL Accept Response
* ELS command.
*
@@ -8495,10 +8820,12 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
{
int rc = 0;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd, *oldcmd;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
+ u32 ulp_context;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
@@ -8506,12 +8833,21 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
+ }
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint16_t);
*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
@@ -8530,13 +8866,13 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
"0120 Xmit ELS RPL ACC response tag x%x "
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -8591,7 +8927,7 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
rpl = (RPL *) (lp + 1);
maxsize = be32_to_cpu(rpl->maxsize);
@@ -8639,13 +8975,11 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
FARP *fp;
uint32_t cnt, did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
lp++;
@@ -8712,13 +9046,11 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
uint32_t did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
+ pcmd = cmdiocb->cmd_dmabuf;
+ lp = (uint32_t *)pcmd->virt;
lp++;
/* FARP-RSP received from DID <did> */
@@ -8758,7 +9090,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
FAN *fp;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
- lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
fp = (FAN *) ++lp;
/* FAN received; Fan does not have a reply sequence */
if ((vport == phba->pport) &&
@@ -8805,15 +9137,16 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t *ptr, dtag;
const char *dtag_nm;
int desc_cnt = 0, bytes_remain;
- bool rcv_cap_desc = false;
+ struct fc_diag_lnkflt_desc *plnkflt;
- payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ payload = cmdiocb->cmd_dmabuf->virt;
edc_req = (struct fc_els_edc *)payload;
bytes_remain = be32_to_cpu(edc_req->desc_len);
ptr = (uint32_t *)payload;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"3319 Rcv EDC payload len %d: x%x x%x x%x\n",
bytes_remain, be32_to_cpu(*ptr),
be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
@@ -8832,9 +9165,10 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* cycle through the EDC diagnostic descriptors, acting on each
* supported descriptor type
*/
- while (bytes_remain && !rcv_cap_desc) {
+ while (bytes_remain) {
if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6464 Truncated TLV hdr on "
"Diagnostic descriptor[%d]\n",
desc_cnt);
@@ -8847,16 +9181,27 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6465 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
- /* No action for Link Fault descriptor for now */
+ plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
+ "4626 Link Fault Desc Data: x%08x len x%x "
+ "da x%x dd x%x interval x%x\n",
+ be32_to_cpu(plnkflt->desc_tag),
+ be32_to_cpu(plnkflt->desc_len),
+ be32_to_cpu(
+ plnkflt->degrade_activate_threshold),
+ be32_to_cpu(
+ plnkflt->degrade_deactivate_threshold),
+ be32_to_cpu(plnkflt->fec_degrade_interval));
break;
case ELS_DTAG_CG_SIGNAL_CAP:
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
@@ -8883,11 +9228,11 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_least_capable_settings(
phba, (struct fc_diag_cg_sig_desc *)tlv);
- rcv_cap_desc = true;
break;
default:
dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6467 unknown Diagnostic "
"Descriptor[%d]: tag x%x (%s)\n",
desc_cnt, dtag, dtag_nm);
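
With the rcv_cap_desc flag gone, the receive loop now walks every TLV in the EDC payload instead of stopping at the first congestion-signaling descriptor. Its overall shape is sketched below, with the per-descriptor length validation shown above elided; ELS_DTAG_CG_SIGNAL_CAP appears in the hunk, and edc_req->desc is assumed to mirror the flexible desc[] member of the response structure:

	struct fc_tlv_desc *tlv = edc_req->desc;
	int bytes_remain = be32_to_cpu(edc_req->desc_len);
	int desc_cnt = 0;
	u32 dtag;

	while (bytes_remain >= FC_TLV_DESC_HDR_SZ) {
		dtag = be32_to_cpu(tlv->desc_tag);
		switch (dtag) {
		case ELS_DTAG_CG_SIGNAL_CAP:
			/* negotiate least capable congestion settings */
			break;
		default:
			/* unknown and link-fault descriptors are logged */
			break;
		}
		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
		desc_cnt++;
	}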
@@ -8955,6 +9300,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
LIST_HEAD(abort_list);
+ u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
timeout = (uint32_t)(phba->fc_ratov << 1);
@@ -8971,17 +9317,27 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
+ ulp_context = get_job_ulpcontext(phba, piocb);
+ did = get_job_els_rsp64_did(phba, piocb);
- if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
- piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(piocb);
+ } else {
+ cmd = &piocb->iocb;
+ iotag = cmd->ulpIoTag;
+ }
+
+ if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
+ ulp_command == CMD_ABORT_XRI_CX ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN)
continue;
if (piocb->vport != vport)
continue;
- pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ pcmd = piocb->cmd_dmabuf;
if (pcmd)
els_command = *(uint32_t *) (pcmd->virt);
@@ -8999,11 +9355,11 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
remote_ID = 0xffffffff;
- if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
- remote_ID = cmd->un.elsreq64.remoteID;
- else {
+ if (ulp_command != CMD_GEN_REQUEST64_CR) {
+ remote_ID = did;
+ } else {
struct lpfc_nodelist *ndlp;
- ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+ ndlp = __lpfc_findnode_rpi(vport, ulp_context);
if (ndlp)
remote_ID = ndlp->nlp_DID;
}
@@ -9014,11 +9370,11 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- cmd = &piocb->iocb;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
- remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+ remote_ID, ulp_command, iotag);
+
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
@@ -9061,7 +9417,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
- IOCB_t *cmd = NULL;
+ u32 ulp_command;
unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
@@ -9086,20 +9442,20 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
if (piocb->vport != vport)
continue;
- if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
continue;
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
- cmd = &piocb->iocb;
- if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ulp_command = get_job_cmnd(phba, piocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
list_add_tail(&piocb->dlist, &abort_list);
/* If the link is down when flushing ELS commands
@@ -9110,9 +9466,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* and avoid any retry logic.
*/
if (phba->link_state == LPFC_LINK_DOWN)
- piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+ piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
}
- if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+ if (ulp_command == CMD_GEN_REQUEST64_CR)
list_add_tail(&piocb->dlist, &abort_list);
}
@@ -9143,17 +9499,17 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* just queue them up for lpfc_sli_cancel_iocbs
*/
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
- if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
- }
/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
- if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
- cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
- cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmd->ulpCommand == CMD_ABORT_XRI_CN)
+ if (ulp_command == CMD_QUE_RING_BUF_CN ||
+ ulp_command == CMD_QUE_RING_BUF64_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CX)
continue;
if (piocb->vport != vport)
@@ -9167,7 +9523,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (vport == phba->pport) {
list_for_each_entry_safe(piocb, tmp_iocb,
&phba->fabric_iocb_list, list) {
- cmd = &piocb->iocb;
list_del_init(&piocb->list);
list_add_tail(&piocb->list, &abort_list);
}
@@ -9235,22 +9590,25 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
struct ls_rjt stat;
struct lpfc_nodelist *ndlp;
uint32_t *pcmd;
+ u32 ulp_status, ulp_word4;
- ndlp = cmdiocbp->context1;
+ ndlp = cmdiocbp->ndlp;
if (!ndlp)
return;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+ ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ ulp_word4 = get_job_word4(phba, rspiocbp);
+
+ if (ulp_status == IOSTAT_LS_RJT) {
lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
sizeof(struct lpfc_name));
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- cmdiocbp->context2)->virt);
+ pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
- stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
fc_host_post_vendor_event(shost,
@@ -9260,10 +9618,10 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
LPFC_NL_VENDOR_ID);
return;
}
- if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
- (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+ if (ulp_status == IOSTAT_NPORT_BSY ||
+ ulp_status == IOSTAT_FABRIC_BSY) {
fabric_event.event_type = FC_REG_FABRIC_EVENT;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+ if (ulp_status == IOSTAT_NPORT_BSY)
fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
else
fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
@@ -9589,11 +9947,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
/* Take action here for an Alarm event */
if (phba->cmf_active_mode != LPFC_CFG_OFF) {
if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
- /* Track of alarm cnt for cgn_info */
- atomic_inc(&phba->cgn_fabric_alarm_cnt);
/* Keep track of alarm cnt for SYNC_WQE */
atomic_inc(&phba->cgn_sync_alarm_cnt);
}
+ /* Track alarm cnt for cgn_info regardless
+ * of whether CMF is configured for Signals
+ * or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_alarm_cnt);
goto cleanup;
}
break;
@@ -9601,11 +9962,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
/* Take action here for a Warning event */
if (phba->cmf_active_mode != LPFC_CFG_OFF) {
if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
- /* Track of warning cnt for cgn_info */
- atomic_inc(&phba->cgn_fabric_warn_cnt);
/* Keep track of warning cnt for SYNC_WQE */
atomic_inc(&phba->cgn_sync_warn_cnt);
}
+ /* Track warning cnt and freq for cgn_info
+ * regardless of whether CMF is configured for
+ * Signals or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_warn_cnt);
cleanup:
/* Save frequency in ms */
phba->cgn_fpin_frequency =
@@ -9614,14 +9978,10 @@ cleanup:
if (phba->cgn_i) {
cp = (struct lpfc_cgn_info *)
phba->cgn_i->virt;
- if (phba->cgn_reg_fpin &
- LPFC_CGN_FPIN_ALARM)
- cp->cgn_alarm_freq =
- cpu_to_le16(value);
- if (phba->cgn_reg_fpin &
- LPFC_CGN_FPIN_WARN)
- cp->cgn_warn_freq =
- cpu_to_le16(value);
+ cp->cgn_alarm_freq =
+ cpu_to_le16(value);
+ cp->cgn_warn_freq =
+ cpu_to_le16(value);
crc = lpfc_cgn_calc_crc32
(cp,
LPFC_CGN_INFO_SZ,
@@ -9776,27 +10136,32 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
- uint32_t *payload, payload_len;
- uint32_t cmd, did, newnode;
+ u32 *payload, payload_len;
+ u32 cmd = 0, did = 0, newnode, status = 0;
uint8_t rjt_exp, rjt_err = 0, init_link = 0;
- IOCB_t *icmd = &elsiocb->iocb;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
LPFC_MBOXQ_t *mbox;
- if (!vport || !(elsiocb->context2))
+ if (!vport || !elsiocb->cmd_dmabuf)
goto dropit;
newnode = 0;
- payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
- payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
+ payload = elsiocb->cmd_dmabuf->virt;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ payload_len = wcqe_cmpl->total_data_placed;
+ else
+ payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
+ status = get_job_ulpstatus(phba, elsiocb);
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
- lpfc_post_buffer(phba, pring, 1);
+ lpfc_sli3_post_buffer(phba, pring, 1);
- did = icmd->un.rcvels.remoteID;
- if (icmd->ulpStatus) {
+ did = get_job_els_rsp64_did(phba, elsiocb);
+ if (status) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV Unsol ELS: status:x%x/x%x did:x%x",
- icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ status, get_job_word4(phba, elsiocb), did);
goto dropit;
}
@@ -9843,8 +10208,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
goto dropit;
elsiocb->vport = vport;
@@ -9882,7 +10247,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* the vfi. This is done in lpfc_rcv_plogi but
* that is called after the reg_vfi.
*/
- vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+ vport->fc_myDID =
+ bf_get(els_rsp64_sid,
+ &elsiocb->wqe.xmit_els_rsp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3312 Remote port assigned DID x%x "
"%x\n", vport->fc_myDID,
@@ -9935,6 +10302,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
+ /* retain node if our response is deferred */
+ if (phba->defer_flogi_acc_flag)
+ break;
if (newnode)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
@@ -10195,8 +10565,8 @@ lsrjt:
}
/* Release the reference on this elsiocb, not the ndlp. */
- lpfc_nlp_put(elsiocb->context1);
- elsiocb->context1 = NULL;
+ lpfc_nlp_put(elsiocb->ndlp);
+ elsiocb->ndlp = NULL;
/* Special case. Driver received an unsolicited command that
* is unsupportable given the driver's current state. Reset the
@@ -10224,8 +10594,9 @@ dropit:
if (vport && !(vport->load_flag & FC_UNLOADING))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0111 Dropping received ELS cmd "
- "Data: x%x x%x x%x\n",
- icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
+ "Data: x%x x%x x%x x%x\n",
+ cmd, status, get_job_word4(phba, elsiocb), did);
+
phba->fc_stat.elsRcvDrop++;
}
@@ -10245,77 +10616,93 @@ void
lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *elsiocb)
{
- struct lpfc_vport *vport = phba->pport;
- IOCB_t *icmd = &elsiocb->iocb;
+ struct lpfc_vport *vport = elsiocb->vport;
+ u32 ulp_command, status, parameter, bde_count = 0;
+ IOCB_t *icmd;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
+ struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
dma_addr_t paddr;
- struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
- struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
- elsiocb->context1 = NULL;
- elsiocb->context2 = NULL;
- elsiocb->context3 = NULL;
+ elsiocb->cmd_dmabuf = NULL;
+ elsiocb->rsp_dmabuf = NULL;
+ elsiocb->bpl_dmabuf = NULL;
+
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
+ ulp_command = get_job_cmnd(phba, elsiocb);
+ status = get_job_ulpstatus(phba, elsiocb);
+ parameter = get_job_word4(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = elsiocb->iocb.ulpBdeCount;
- if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+ if (status == IOSTAT_NEED_BUFFER) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if (status == IOSTAT_LOCAL_REJECT &&
+ (parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING) {
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 0);
+ lpfc_sli3_post_buffer(phba, pring, 0);
return;
}
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
- icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
- if (icmd->unsli3.rcvsli3.vpi == 0xffff)
- vport = phba->pport;
- else
- vport = lpfc_find_vport_by_vpid(phba,
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ icmd = &elsiocb->iocb;
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
+ ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+ vport = phba->pport;
+ else
+ vport = lpfc_find_vport_by_vpid(phba,
icmd->unsli3.rcvsli3.vpi);
+ }
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
- /* type of ELS cmd is first 32bit word
- * in packet
- */
+ /* Account for SLI2 or SLI3 and later unsolicited buffering */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- elsiocb->context2 = bdeBuf1;
+ elsiocb->cmd_dmabuf = bdeBuf1;
+ if (bde_count == 2)
+ elsiocb->bpl_dmabuf = bdeBuf2;
} else {
+ icmd = &elsiocb->iocb;
paddr = getPaddr(icmd->un.cont64[0].addrHigh,
icmd->un.cont64[0].addrLow);
- elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
- paddr);
+ elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ if (bde_count == 2) {
+ paddr = getPaddr(icmd->un.cont64[1].addrHigh,
+ icmd->un.cont64[1].addrLow);
+ elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/*
* The different unsolicited event handlers would tell us
- * if they are done with "mp" by setting context2 to NULL.
+ * if they are done with "mp" by setting cmd_dmabuf to NULL.
*/
- if (elsiocb->context2) {
- lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
- elsiocb->context2 = NULL;
- }
-
- /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
- if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
- icmd->ulpBdeCount == 2) {
- elsiocb->context2 = bdeBuf2;
- lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
- /* free mp if we are done with it */
- if (elsiocb->context2) {
- lpfc_in_buf_free(phba, elsiocb->context2);
- elsiocb->context2 = NULL;
- }
+ if (elsiocb->cmd_dmabuf) {
+ lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
+ elsiocb->cmd_dmabuf = NULL;
}
+
+ if (elsiocb->bpl_dmabuf) {
+ lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
+ elsiocb->bpl_dmabuf = NULL;
+ }
+
}
static void
@@ -10426,7 +10813,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
MAILBOX_t *mb = &pmb->u.mb;
int rc;
@@ -10655,9 +11042,11 @@ lpfc_fabric_login_reqd(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
- (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+ if (ulp_status != IOSTAT_FABRIC_RJT ||
+ ulp_word4 != RJT_LOGIN_REQUIRED)
return 0;
else
return 1;
@@ -10689,18 +11078,21 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
- struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
struct serv_parm *sp;
uint8_t fabric_param_changed;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_prevDID);
/* Since all FDISCs are being single threaded, we
* must reset the discovery timer for ALL vports
@@ -10712,9 +11104,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FDISC cmpl: status:x%x/x%x prevdid:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+ ulp_status, ulp_word4, vport->fc_prevDID);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
lpfc_retry_pport_discovery(phba);
@@ -10727,7 +11119,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0126 FDISC failed. (x%x/x%x)\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto fdisc_failed;
}
@@ -10741,7 +11133,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_flag |= FC_PUBLIC_LOOP;
spin_unlock_irq(shost->host_lock);
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -10828,7 +11220,7 @@ out:
* IOCB will be sent off HBA at any given time.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the FDISC ELS command.
*
* Return code
@@ -10841,6 +11233,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
struct lpfc_iocbq *elsiocb;
struct serv_parm *sp;
uint8_t *pcmd;
@@ -10860,20 +11253,19 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
-
- /*
- * SLI3 ports require a different context type value than SLI4.
- * Catch SLI3 ports here and override the prep.
- */
- if (phba->sli_rev == LPFC_SLI_REV3) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
}
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
pcmd += sizeof(uint32_t); /* CSP Word 1 */
memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
@@ -10899,21 +11291,18 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_set_disctmo(vport);
phba->fc_stat.elsXmitFDISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FDISC: did:x%x",
did, 0, 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
- lpfc_els_free_iocb(phba, elsiocb);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
goto err_out;
- }
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
goto err_out;
}
@@ -10922,6 +11311,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
err_out:
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0256 Issue FDISC: Cannot send IOCB\n");
@@ -10950,23 +11340,36 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ u32 ulp_status, ulp_word4, did, tmo;
+
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ did = get_job_els_rsp64_did(phba, rspiocb);
+ tmo = irsp->ulpTimeout;
+ }
- ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO npiv cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+ ulp_status, ulp_word4, did);
/* NPIV LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2928 NPIV LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes,
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes,
kref_read(&ndlp->kref), ndlp->nlp_flag,
ndlp->fc4_xpt_flags);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
vport->fc_flag &= ~FC_FABRIC;
@@ -10974,10 +11377,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
+ if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ /* Wake up lpfc_vport_delete if waiting...*/
+ if (ndlp->logo_waitq)
+ wake_up(ndlp->logo_waitq);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
/* Safe to release resources now. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
}
/**
@@ -10988,7 +11400,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* This routine issues a LOGO ELS command to an @ndlp off a @vport.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the LOGO ELS command.
*
* Return codes
@@ -11010,7 +11422,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
@@ -11023,12 +11435,12 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"Issue LOGO npiv did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto err;
}
@@ -11093,7 +11505,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
struct lpfc_iocbq *iocb;
unsigned long iflags;
int ret;
- IOCB_t *cmd;
repeat:
iocb = NULL;
@@ -11108,24 +11519,23 @@ repeat:
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched1: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched1: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmd = &iocb->iocb;
- cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
- cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- iocb->iocb_cmpl(phba, iocb, iocb);
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
+ iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ iocb->cmd_cmpl(phba, iocb, iocb);
atomic_dec(&phba->fabric_iocb_count);
goto repeat;
@@ -11181,26 +11591,27 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
* @rspiocb: pointer to lpfc response iocb data structure.
*
* This routine is the callback function that is put to the fabric iocb's
- * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
- * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
* function first restores and invokes the original iocb's callback function
* and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
* fabric bound iocb from the driver internal fabric iocb list onto the wire.
**/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct ls_rjt stat;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
+ WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
- switch (rspiocb->iocb.ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP)
lpfc_block_fabric_iocbs(phba);
- }
break;
case IOSTAT_NPORT_BSY:
@@ -11209,8 +11620,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError =
- be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be =
+ cpu_to_be32(ulp_word4);
if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
lpfc_block_fabric_iocbs(phba);
@@ -11219,10 +11630,10 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
- cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
- cmdiocb->fabric_iocb_cmpl = NULL;
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+ cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
+ cmdiocb->fabric_cmd_cmpl = NULL;
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
atomic_dec(&phba->fabric_iocb_count);
if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
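The doc comment above describes a save/substitute/restore trampoline: the issuing path stashes the caller's completion in fabric_cmd_cmpl and installs lpfc_cmpl_fabric_iocb, which undoes the substitution before chaining to the original handler. A minimal sketch of the pattern, using the field names from the hunks above but a hypothetical function name:

/* Hypothetical condensed form of the completion trampoline above. */
static void sketch_fabric_trampoline_cmpl(struct lpfc_hba *phba,
					  struct lpfc_iocbq *cmdiocb,
					  struct lpfc_iocbq *rspiocb)
{
	/* Restore the original completion first so the callback sees
	 * the iocb exactly as it was submitted.
	 */
	cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
	cmdiocb->fabric_cmd_cmpl = NULL;
	cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
}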
@@ -11273,20 +11684,20 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
atomic_inc(&phba->fabric_iocb_count);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ready) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched2: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched2: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
} else {
@@ -11588,11 +11999,12 @@ lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *prsp = NULL;
struct lpfc_vmid_priority_range *vmid_range = NULL;
u32 *data;
- struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
- IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
u8 *pcmd, max_desc;
u32 len, i;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -11606,10 +12018,10 @@ lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
data[0], data[1]);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
"6529 QFPA failed with status x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
@@ -11688,15 +12100,15 @@ int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
if (!elsiocb)
return -ENOMEM;
- pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
*((u32 *)(pcmd)) = ELS_CMD_QFPA;
pcmd += 4;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(vport->phba, elsiocb);
return -ENXIO;
}
@@ -11743,7 +12155,7 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
vmid_context->nlp = ndlp;
vmid_context->instantiated = instantiated;
elsiocb->vmid_tag.vmid_context = vmid_context;
- pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
@@ -11776,10 +12188,10 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
}
inst_desc->word6 = cpu_to_be32(inst_desc->word6);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(vport->phba, elsiocb);
goto out;
}
@@ -11805,11 +12217,12 @@ lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
struct lpfc_dmabuf *prsp = NULL;
struct lpfc_vmid_context *vmid_context =
icmdiocb->vmid_tag.vmid_context;
- struct lpfc_nodelist *ndlp = icmdiocb->context1;
+ struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
u8 *pcmd;
u32 *data;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
struct lpfc_vmid *vmid;
vmid = vmid_context->vmp;
@@ -11826,10 +12239,10 @@ lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
"4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
"4533 UVEM error status %x: %x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
spin_lock(&phba->hbalock);
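Across the lpfc_els.c hunks, direct reads of irsp->ulpStatus and irsp->un.ulpWord[4] are replaced by get_job_ulpstatus()/get_job_word4(), which hide whether a completion came back as an SLI-3 IOCB or an SLI-4 WCQE. A sketch of the accessor idea, assuming the SLI-4 status is carried in the iocb's embedded wcqe_cmpl; this is illustrative, not the driver's exact definition:

/* Illustrative SLI-revision-agnostic status accessor (assumed layout). */
static inline u32 sketch_get_ulpstatus(struct lpfc_hba *phba,
				       struct lpfc_iocbq *iocbq)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
	return iocbq->iocb.ulpStatus;
}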
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9fe6e5b386ce..d38ebd7281b9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
ndlp = rdata->pnode;
if (!rdata->pnode) {
- pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
- __func__, rport, rport->scsi_target_id);
+ pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
return -EINVAL;
}
@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d\n",
+ "load_flag x%x refcnt %d state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
- vport->load_flag, kref_read(&ndlp->kref));
+ vport->load_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down.
* The teardown process cleans up the node via lpfc_drop_node.
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ /* clear the NLP_XPT_REGD if the node is not registered
+ * with nvme-fc
+ */
+ if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
/* Remove the node reference from remote_port_add now.
* The driver will not call remote_port_delete.
@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- /* We need to hold the node by incrementing the reference
- * count until this queued work is done
- */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (phba->worker_thread) {
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3188 worker thread is stopped %s x%06x, "
+ " rport x%px flg x%x load_flag x%x refcnt "
+ "%d\n", __func__, ndlp->nlp_DID,
+ ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* Node is in dev loss. No further transaction. */
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
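The reworked dev_loss callback queues the event only when a worker thread exists, and otherwise clears NLP_IN_DEV_LOSS and drives the node state machine inline. On the queued path the node is pinned with lpfc_nlp_get() so the ndlp cannot be freed before the work item runs. A condensed sketch of that queue-with-reference idiom, with a hypothetical helper name:

/* Hypothetical helper: hold a node reference across deferred work;
 * the work handler drops it when done.
 */
static int sketch_queue_dev_loss_evt(struct lpfc_hba *phba,
				     struct lpfc_work_evt *evtp,
				     struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	evtp->evt_arg1 = lpfc_nlp_get(ndlp);	/* pin the node */
	if (!evtp->evt_arg1)
		return -ENODEV;			/* node already going away */

	spin_lock_irqsave(&phba->hbalock, iflags);
	evtp->evt = LPFC_EVT_DEV_LOSS;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
}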
@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ ndlp->nlp_state, ndlp->nlp_rpi,
+ kref_read(&ndlp->kref));
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
int free_evt;
int fcf_inuse;
uint32_t nlp_did;
+ bool hba_pci_err;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
spin_unlock_irq(&phba->hbalock);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- lpfc_els_retry_delay_handler(ndlp);
- free_evt = 0; /* evt is part of ndlp */
+ if (!hba_pci_err) {
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ }
/* decrement the node reference count held
* for this queued work
*/
@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_RECOVER_PORT:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
- free_evt = 0;
+ if (!hba_pci_err) {
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ }
/* decrement the node reference count held for
* this queued work
*/
@@ -859,20 +890,30 @@ lpfc_work_done(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
+ bool hba_pci_err;
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ if (hba_pci_err)
+ ha_copy = 0;
/* First, try to post the next mailbox command to SLI4 device */
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
- if (ha_copy & HA_ERATT)
+ if (ha_copy & HA_ERATT) {
/* Handle the error attention event */
lpfc_handle_eratt(phba);
+ if (phba->fw_dump_cmpl) {
+ complete(phba->fw_dump_cmpl);
+ phba->fw_dump_cmpl = NULL;
+ }
+ }
+
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
@@ -880,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_latt(phba);
/* Handle VMID Events */
- if (lpfc_is_vmid_enabled(phba)) {
+ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
if (phba->pport->work_port_events &
WORKER_CHECK_VMID_ISSUE_QFPA) {
lpfc_check_vmid_qfpa_issue(phba);
@@ -930,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
+ if (hba_pci_err)
+ continue;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
@@ -1140,6 +1183,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
@@ -1157,6 +1201,13 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
vport->fc_flag &= ~FC_DISC_DELAYED;
spin_unlock_irq(shost->host_lock);
del_timer_sync(&vport->delayed_disc_tmo);
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+ /* Assume success on link up */
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+ }
}
int
@@ -1167,15 +1218,20 @@ lpfc_linkdown(struct lpfc_hba *phba)
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
+ int offline;
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ offline = pci_channel_offline(phba->pcidev);
phba->defer_flogi_acc_flag = false;
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
@@ -1186,6 +1242,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->trunk_link.phy_lnk_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
phba->sli4_hba.link_state.logical_speed =
LPFC_LINK_SPEED_UNKNOWN;
}
@@ -1213,7 +1271,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any SLI3 firmware default rpi's */
- if (phba->sli_rev > LPFC_SLI_REV3)
+ if (phba->sli_rev > LPFC_SLI_REV3 || offline)
goto skip_unreg_did;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1297,11 +1355,17 @@ lpfc_linkup_port(struct lpfc_vport *vport)
FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ if (phba->defer_flogi_acc_flag)
+ vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ else
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
+ FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
+ lpfc_setup_fdmi_mask(vport);
lpfc_linkup_cleanup_nodes(vport);
}
@@ -1333,9 +1397,8 @@ lpfc_linkup(struct lpfc_hba *phba)
phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
- /* reinitialize initial FLOGI flag */
- phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
- phba->defer_flogi_acc_flag = false;
+ /* reinitialize initial HBA flag */
+ phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
return 0;
}
@@ -1413,7 +1476,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
LPFC_MBOXQ_t *sparam_mb;
- struct lpfc_dmabuf *sparam_mp;
u16 status = pmb->u.mb.mbxStatus;
int rc;
@@ -1462,13 +1524,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- sparam_mp = (struct lpfc_dmabuf *)
- sparam_mb->ctx_buf;
- lpfc_mbuf_free(phba, sparam_mp->virt,
- sparam_mp->phys);
- kfree(sparam_mp);
- sparam_mb->ctx_buf = NULL;
- mempool_free(sparam_mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
+ MBOX_THD_UNLOCKED);
goto sparam_out;
}
@@ -2099,8 +2156,8 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* This function makes a running random selection decision on an FCF record to
* use through a sequence of @fcf_cnt eligible FCF records with equal
* probability. To perform integer manipulation of random numbers with
- * size unit32_t, the lower 16 bits of the 32-bit random number returned
- * from prandom_u32() are taken as the random random number generated.
+ * size uint32_t, a 16-bit random number returned from get_random_u16() is
+ * taken as the random number generated.
*
* Returns true when outcome is for the newly read FCF record should be
* chosen; otherwise, return false when outcome is for keeping the previously
@@ -2112,7 +2169,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
uint32_t rand_num;
/* Get 16-bit uniform random number */
- rand_num = 0xFFFF & prandom_u32();
+ rand_num = get_random_u16();
/* Decision with probability 1/fcf_cnt */
if ((fcf_cnt * rand_num) < 0xFFFF)
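The comparison implements keep-with-probability-1/fcf_cnt: rand_num is uniform on [0, 0xFFFF], so fcf_cnt * rand_num < 0xFFFF holds for roughly one draw in fcf_cnt, which is the classic reservoir-sampling rule for a running selection over a sequence of unknown length. An illustrative restatement:

/* Illustrative reservoir-sampling step: keep the k-th eligible record
 * with probability ~1/k, so every record ends up equally likely.
 */
static bool sketch_keep_kth_record(u32 k)
{
	u32 r = get_random_u16();	/* uniform on [0, 0xFFFF] */

	return (k * r) < 0xFFFF;	/* true ~1/k of the time */
}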
@@ -2913,7 +2970,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, stop the roundrobin failover process */
@@ -3018,7 +3075,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, no need to proceed */
@@ -3267,7 +3324,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
- struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -3348,12 +3404,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
out_free_mem:
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (dmabuf) {
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- }
- return;
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}
static void
@@ -3398,9 +3449,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Check if sending the FLOGI is being deferred to after we get
* up to date CSPs from MBX_READ_SPARAM.
@@ -3412,12 +3461,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
out:
- pmb->ctx_buf = NULL;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_issue_clear_la(phba, vport);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
}
static void
@@ -3427,7 +3472,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
struct Scsi_Host *shost;
int i;
- struct lpfc_dmabuf *mp;
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
@@ -3555,10 +3599,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
goto out;
}
@@ -3727,18 +3768,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
phba->link_events++;
- if ((attn_type == LPFC_ATT_LINK_UP) &&
- !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ if (attn_type == LPFC_ATT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3752,21 +3783,22 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ "Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
bf_get(lpfc_mbx_read_top_alpa_granted,
la),
bf_get(lpfc_mbx_read_top_link_spd, la),
phba->alpa_map[0],
- bf_get(lpfc_mbx_read_top_mm, la),
- bf_get(lpfc_mbx_read_top_fa, la),
- phba->wait_4_mlo_maint_flg);
+ bf_get(lpfc_mbx_read_top_fa, la));
}
lpfc_mbx_process_link_up(phba, la);
if (phba->cmf_active_mode != LPFC_CFG_OFF)
lpfc_cmf_signal_init(phba);
+ if (phba->lmt & LMT_64Gb)
+ lpfc_read_lds_params(phba);
+
} else if (attn_type == LPFC_ATT_LINK_DOWN ||
attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
@@ -3780,64 +3812,28 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
- attn_type == LPFC_ATT_LINK_UP) {
- if (phba->link_state != LPFC_LINK_DOWN) {
- phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1312 Link Down Event x%x received "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- lpfc_mbx_issue_link_down(phba);
- } else
- lpfc_enable_la(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1310 Menlo Maint Mode Link up Event x%x rcvd "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- /*
- * The cmnd that triggered this will be waiting for this
- * signal.
- */
- /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
- if (phba->wait_4_mlo_maint_flg) {
- phba->wait_4_mlo_maint_flg = 0;
- wake_up_interruptible(&phba->wait_4_mlo_m_q);
- }
- }
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- bf_get(lpfc_mbx_read_top_fa, la)) {
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- lpfc_issue_clear_la(phba, vport);
+ bf_get(lpfc_mbx_read_top_fa, la))
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_cmpl_read_topology_free_mbuf:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
@@ -3850,9 +3846,13 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ /* The driver calls the state machine with the pmb pointer
+ * but wants to make sure a stale ctx_buf isn't acted on.
+ * The ctx_buf is restored later and cleaned up.
+ */
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
@@ -3889,10 +3889,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Call state machine */
lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ pmb->ctx_buf = mp;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
/* decrement the node reference count held for this callback
* function.
*/
@@ -3928,7 +3927,6 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
/*
@@ -3958,7 +3956,6 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;
@@ -4061,11 +4058,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
- /* free dma buffer from previous round */
+ /* Each while-loop iteration frees the DMA buffer left over from
+ * the previous pass because the mbox is reused and the dump
+ * routine is a single-use construct.
+ */
if (pmb->ctx_buf) {
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
+ pmb->ctx_buf = NULL;
}
if (lpfc_dump_static_vport(phba, pmb, offset))
goto out;
@@ -4150,16 +4151,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
out:
kfree(vport_info);
- if (mbx_wait_rc != MBX_TIMEOUT) {
- if (pmb->ctx_buf) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(pmb, phba->mbox_mem_pool);
- }
-
- return;
+ if (mbx_wait_rc != MBX_TIMEOUT)
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
@@ -4173,22 +4166,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct Scsi_Host *shost;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0258 Register Fabric login error: 0x%x\n",
mb->mbxStatus);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -4230,9 +4217,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_do_scr_ns_plogi(phba, vport);
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Drop the reference count from the mbox at the end after
* all the current reference to the ndlp have been done.
@@ -4326,12 +4311,10 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
int rc;
- pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
vport->gidft_inp = 0;
@@ -4345,9 +4328,7 @@ out:
* callback function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* If the node is not registered with the scsi or nvme
* transport, remove the fabric node. The failed reg_login
@@ -4417,8 +4398,11 @@ out:
rc = lpfc_issue_els_edc(vport, 0);
lpfc_printf_log(phba, KERN_INFO,
LOG_INIT | LOG_ELS | LOG_DISCOVERY,
- "4220 EDC issue error x%x, Data: x%x\n",
+ "4220 Issue EDC status x%x Data x%x\n",
rc, phba->cgn_init_reg_signal);
+ } else if (phba->lmt & LMT_64Gb) {
+ /* may send link fault capability descriptor */
+ lpfc_issue_els_edc(vport, 0);
} else {
lpfc_issue_els_rdf(vport, 0);
}
@@ -4436,10 +4420,7 @@ out:
* callback function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
@@ -4453,13 +4434,9 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
-
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0933 %s: Register FC login error: 0x%x\n",
@@ -4483,9 +4460,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
out:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Drop the reference count from the mbox at the end after
* all the current reference to the ndlp have been done.
@@ -4708,6 +4683,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0999 %s Not regd: ndlp x%px rport x%px DID "
+ "x%x FLG x%x XPT x%x\n",
+ __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
return;
}
@@ -4718,6 +4698,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
+ } else if (!ndlp->rport) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+ " XPT x%x refcnt %d\n",
+ __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
}
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
@@ -4813,22 +4800,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
new_state == NLP_STE_UNMAPPED_NODE)
lpfc_nlp_reg_node(vport, ndlp);
- if ((new_state == NLP_STE_MAPPED_NODE) &&
- (vport->stat_data_enabled)) {
- /*
- * A new target is discovered, if there is no buffer for
- * statistical data collection allocate buffer.
- */
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_KERNEL);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0286 lpfc_nlp_state_cleanup failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
/*
* If the node just added to Mapped list was an FCP target,
* but the remote port registration failed or assigned a target
@@ -5055,7 +5026,8 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Turn off discovery timer if its running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (vport->fc_flag & FC_DISC_TMO ||
+ timer_pending(&vport->fc_disctmo)) {
spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag &= ~FC_DISC_TMO;
spin_unlock_irqrestore(shost->host_lock, iflags);
@@ -5084,24 +5056,30 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb,
struct lpfc_nodelist *ndlp)
{
- IOCB_t *icmd = &iocb->iocb;
- struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_vport *vport = ndlp->vport;
+ u8 ulp_command;
+ u16 ulp_context;
+ u32 remote_id;
if (iocb->vport != vport)
return 0;
+ ulp_command = get_job_cmnd(phba, iocb);
+ ulp_context = get_job_ulpcontext(phba, iocb);
+ remote_id = get_job_els_rsp64_did(phba, iocb);
+
if (pring->ringno == LPFC_ELS_RING) {
- switch (icmd->ulpCommand) {
+ switch (ulp_command) {
case CMD_GEN_REQUEST64_CR:
- if (iocb->context_un.ndlp == ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
fallthrough;
case CMD_ELS_REQUEST64_CR:
- if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+ if (remote_id == ndlp->nlp_DID)
return 1;
fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
- if (iocb->context1 == (uint8_t *) ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
}
} else if (pring->ringno == LPFC_FCP_RING) {
@@ -5110,9 +5088,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
return 0;
}
- if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+ if (ulp_context == ndlp->nlp_rpi)
return 1;
- }
}
return 0;
}
@@ -5212,7 +5189,6 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
- mempool_free(pmb, phba->mbox_mem_pool);
/* Check to see if there are any deferred events to process */
if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
@@ -5239,6 +5215,13 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
spin_unlock_irq(&ndlp->lock);
}
+
+ /* The node has an outstanding reference for the unreg. Now
+ * that the LOGO action and cleanup are finished, release
+ * resources.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(pmb, phba->mbox_mem_pool);
}
/*
@@ -5361,6 +5344,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
+ lpfc_nlp_put(ndlp);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
@@ -5507,7 +5491,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
- struct lpfc_dmabuf *mp;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5545,16 +5528,11 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
(ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
list_del(&mb->list);
- mempool_free(mb, phba->mbox_mem_pool);
- /* We shall not invoke the lpfc_nlp_put to decrement
- * the ndlp reference count as we are in the process
- * of lpfc_nlp_release.
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
+
+ /* Don't invoke lpfc_nlp_put. The driver is in
+ * lpfc_nlp_release context.
*/
}
}
@@ -6023,9 +6001,9 @@ static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
+ u32 ulp_command;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
@@ -6036,12 +6014,13 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
- (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
list_move_tail(&iocb->list, &completions);
}
@@ -6049,12 +6028,13 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
- icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
}
@@ -6085,12 +6065,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state. When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and the transport is notified that the node is gone.
+ * Return Code:
+ * none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
+}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
+ if (pci_channel_offline(vport->phba->pcidev))
+ lpfc_notify_xport_npr(vport);
}
/*****************************************************************************/
@@ -6304,8 +6306,9 @@ restart_disc:
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
- "x%x\n",
- vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+ "x%x x%x x%x\n",
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
+ vport->port_state, vport->gidft_inp);
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
@@ -6375,11 +6378,9 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
- pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
if (phba->sli_rev < LPFC_SLI_REV4)
@@ -6410,10 +6411,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
@@ -6646,7 +6644,6 @@ lpfc_nlp_release(struct kref *kref)
ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- kfree(ndlp->lat_data);
if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
mempool_free(ndlp->active_rrqs_xri_bitmap,
ndlp->phba->active_rrq_pool);
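The dominant cleanup in lpfc_hbadisc.c collapses the repeated lpfc_mbuf_free()/kfree()/mempool_free() triple into a single lpfc_mbox_rsrc_cleanup() call, with MBOX_THD_LOCKED/MBOX_THD_UNLOCKED selecting the mbuf-free variant for callers that already hold the lock. A sketch of the shape such a helper plausibly has, inferred from the call sites above rather than from its authoritative definition:

/* Plausible shape of the consolidated mailbox cleanup (inferred from
 * call sites; not the authoritative definition).
 */
static void sketch_mbox_rsrc_cleanup(struct lpfc_hba *phba,
				     LPFC_MBOXQ_t *mbox, u32 locked)
{
	struct lpfc_dmabuf *mp = mbox->ctx_buf;

	if (mp) {
		if (locked == MBOX_THD_LOCKED)
			__lpfc_mbuf_free(phba, mp->virt, mp->phys);
		else
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mbox->ctx_buf = NULL;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}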
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 634f8fff7425..5c283936ff08 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -97,6 +97,18 @@ union CtCommandResponse {
#define FC4_FEATURE_INIT 0x2
#define FC4_FEATURE_NVME_DISC 0x4
+enum rft_word0 {
+ RFT_FCP_REG = (0x1 << 8),
+};
+
+enum rft_word1 {
+ RFT_NVME_REG = (0x1 << 8),
+};
+
+enum rft_word3 {
+ RFT_APP_SERV_REG = (0x1 << 0),
+};
+
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
union CtRevisionId RevisionId;
@@ -131,25 +143,13 @@ struct lpfc_sli_ct_request {
uint8_t Fc4Type;
} gid_ff;
struct rft {
- uint32_t PortId; /* For RFT_ID requests */
-
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd0:16;
- uint32_t rsvd1:7;
- uint32_t fcpReg:1; /* Type 8 */
- uint32_t rsvd2:2;
- uint32_t ipReg:1; /* Type 5 */
- uint32_t rsvd3:5;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t rsvd0:16;
- uint32_t fcpReg:1; /* Type 8 */
- uint32_t rsvd1:7;
- uint32_t rsvd3:5;
- uint32_t ipReg:1; /* Type 5 */
- uint32_t rsvd2:2;
-#endif
+ __be32 port_id; /* For RFT_ID requests */
- uint32_t rsvd[7];
+ __be32 fcp_reg; /* rsvd 31:9, fcp_reg 8, rsvd 7:0 */
+ __be32 nvme_reg; /* rsvd 31:9, nvme_reg 8, rsvd 7:0 */
+ __be32 word2;
+ __be32 app_serv_reg; /* rsvd 31:1, app_serv_reg 0 */
+ __be32 word[4];
} rft;
struct rnn {
uint32_t PortId; /* For RNN_ID requests */
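Replacing the endian-conditional bitfields with explicit __be32 words means RFT_ID registration bits are now set with cpu_to_be32() and the enum masks above instead of relying on compiler bitfield layout. An assumed call-site shape, for illustration:

/* Illustrative RFT_ID fill-in using the explicit-endian layout
 * (assumed call-site shape).
 */
static void sketch_fill_rft(struct lpfc_sli_ct_request *ctreq,
			    u32 my_did, bool nvme)
{
	ctreq->un.rft.port_id = cpu_to_be32(my_did);
	ctreq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
	if (nvme)
		ctreq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
}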
@@ -511,8 +511,6 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};
-#define FAPWWN_KEY_VENDOR 0x42524344 /*valid vendor version fawwpn key*/
-
struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn;
struct lpfc_name portName;
@@ -664,6 +662,7 @@ struct fc_vft_header {
struct ls_rjt { /* Structure is in Big Endian format */
union {
+ __be32 ls_rjt_error_be;
uint32_t lsRjtError;
struct {
uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
@@ -704,6 +703,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
} un;
@@ -1442,30 +1442,56 @@ struct lpfc_vmid_gallapp_ident_list {
/* Definitions for HBA / Port attribute entries */
-/* Attribute Entry */
-struct lpfc_fdmi_attr_entry {
- union {
- uint32_t AttrInt;
- uint8_t AttrTypes[32];
- uint8_t AttrString[256];
- struct lpfc_name AttrWWN;
- } un;
+/* Attribute Entry Structures */
+
+struct lpfc_fdmi_attr_u32 {
+ __be16 type;
+ __be16 len;
+ __be32 value_u32;
};
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- /* Marks start of Value (ATTRIBUTE_ENTRY) */
- struct lpfc_fdmi_attr_entry AttrValue;
-} __packed;
+struct lpfc_fdmi_attr_wwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 name[8];
+};
+
+struct lpfc_fdmi_attr_fullwwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 nname[8];
+ u8 pname[8];
+};
+
+struct lpfc_fdmi_attr_fc4types {
+ __be16 type;
+ __be16 len;
+ u8 value_types[32];
+};
+
+struct lpfc_fdmi_attr_string {
+ __be16 type;
+ __be16 len;
+ char value_string[256];
+};
+
+/* Maximum FDMI attribute length is Type+Len (4 bytes) + 256 byte string */
+#define FDMI_MAX_ATTRLEN sizeof(struct lpfc_fdmi_attr_string)
/*
* HBA Attribute Block
*/
struct lpfc_fdmi_attr_block {
uint32_t EntryCnt; /* Number of HBA attribute entries */
- struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+ /* Variable Length Attribute Entry TLV's follow */
};
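With per-type TLV structs replacing the single worst-case union, each FDMI attribute can be emitted at its natural length instead of always reserving the 256-byte string case. A sketch of emitting one u32 attribute into a buffer, using a hypothetical helper inferred from the struct shapes above:

/* Hypothetical TLV emitter for a 32-bit FDMI attribute; returns bytes
 * written so the caller can advance its cursor.
 */
static int sketch_fdmi_add_u32(void *attr, u16 attrtype, u32 val)
{
	struct lpfc_fdmi_attr_u32 *ae = attr;
	u16 size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	ae->value_u32 = cpu_to_be32(val);
	return size;
}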
/*
@@ -1729,7 +1755,6 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
-#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
@@ -1737,6 +1762,28 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_TOMCAT 0x0714
#define PCI_DEVICE_ID_SKYHAWK 0x0724
#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
+#define PCI_VENDOR_ID_ATTO 0x117c
+#define PCI_DEVICE_ID_CLRY_16XE 0x0064
+#define PCI_DEVICE_ID_CLRY_161E 0x0063
+#define PCI_DEVICE_ID_CLRY_162E 0x0064
+#define PCI_DEVICE_ID_CLRY_164E 0x0065
+#define PCI_DEVICE_ID_CLRY_16XP 0x0094
+#define PCI_DEVICE_ID_CLRY_161P 0x00a0
+#define PCI_DEVICE_ID_CLRY_162P 0x0094
+#define PCI_DEVICE_ID_CLRY_164P 0x00a1
+#define PCI_DEVICE_ID_CLRY_32XE 0x0094
+#define PCI_DEVICE_ID_CLRY_321E 0x00a2
+#define PCI_DEVICE_ID_CLRY_322E 0x00a3
+#define PCI_DEVICE_ID_CLRY_324E 0x00ac
+#define PCI_DEVICE_ID_CLRY_32XP 0x00bb
+#define PCI_DEVICE_ID_CLRY_321P 0x00bc
+#define PCI_DEVICE_ID_CLRY_322P 0x00bd
+#define PCI_DEVICE_ID_CLRY_324P 0x00be
+#define PCI_DEVICE_ID_TLFC_2 0x0064
+#define PCI_DEVICE_ID_TLFC_2XX2 0x4064
+#define PCI_DEVICE_ID_TLFC_3 0x0094
+#define PCI_DEVICE_ID_TLFC_3162 0x40a6
+#define PCI_DEVICE_ID_TLFC_3322 0x40a7
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1752,7 +1799,6 @@ struct lpfc_fdmi_reg_portattr {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
-#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -2649,19 +2695,26 @@ typedef struct {
} READ_SPARM_VAR;
/* Structure for MB Command READ_STATUS (14) */
+enum read_status_word1 {
+ RD_ST_CC = 0x01,
+ RD_ST_XKB = 0x80,
+};
+
+enum read_status_word17 {
+ RD_ST_XMIT_XKB_MASK = 0x3fffff,
+};
+
+enum read_status_word18 {
+ RD_ST_RCV_XKB_MASK = 0x3fffff,
+};
typedef struct {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1:31;
- uint32_t clrCounters:1;
- uint16_t activeXriCnt;
- uint16_t activeRpiCnt;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t clrCounters:1;
- uint32_t rsvd1:31;
- uint16_t activeRpiCnt;
- uint16_t activeXriCnt;
-#endif
+ u8 clear_counters; /* rsvd 7:1, cc 0 */
+ u8 rsvd5;
+ u8 rsvd6;
+ u8 xkb; /* xkb 7, rsvd 6:0 */
+
+ u32 rsvd8;
uint32_t xmitByteCnt;
uint32_t rcvByteCnt;
@@ -2673,6 +2726,14 @@ typedef struct {
uint32_t totalRespExchanges;
uint32_t rcvPbsyCnt;
uint32_t rcvFbsyCnt;
+
+ u32 drop_frame_no_rq;
+ u32 empty_rq;
+ u32 drop_frame_no_xri;
+ u32 empty_xri;
+
+ u32 xmit_xkb; /* rsvd 31:22, xmit_xkb 21:0 */
+ u32 rcv_xkb; /* rsvd 31:22, rcv_xkb 21:0 */
} READ_STATUS_VAR;
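A hedged sketch of consuming the reworked layout: when the adapter sets RD_ST_XKB in word 1, the transfer counters are reported in KB with 22 significant bits; otherwise the legacy byte counters apply (the function is hypothetical, for illustration only):

static void lpfc_example_read_status(READ_STATUS_VAR *rs,
				     u64 *xmit_kb, u64 *rcv_kb)
{
	if (rs->xkb & RD_ST_XKB) {
		/* extended counters, units of KB, 22 valid bits */
		*xmit_kb = rs->xmit_xkb & RD_ST_XMIT_XKB_MASK;
		*rcv_kb = rs->rcv_xkb & RD_ST_RCV_XKB_MASK;
	} else {
		/* legacy 32-bit byte counters */
		*xmit_kb = rs->xmitByteCnt / 1024;
		*rcv_kb = rs->rcvByteCnt / 1024;
	}
}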
/* Structure for MB Command READ_RPI (15) */
@@ -3038,7 +3099,6 @@ struct lpfc_mbx_read_top {
#define lpfc_mbx_read_top_topology_WORD word3
#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
-#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
/* store the LILP AL_PA position map into */
struct ulp_bde64 lilpBde64;
#define LPFC_ALPA_MAP_SIZE 128
@@ -3675,19 +3735,26 @@ union sli_var {
};
typedef struct {
+ struct_group_tagged(MAILBOX_word0, bits,
+ union {
+ struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint16_t mbxStatus;
- uint8_t mbxCommand;
- uint8_t mbxReserved:6;
- uint8_t mbxHc:1;
- uint8_t mbxOwner:1; /* Low order bit first word */
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint8_t mbxOwner:1; /* Low order bit first word */
- uint8_t mbxHc:1;
- uint8_t mbxReserved:6;
- uint8_t mbxCommand;
- uint16_t mbxStatus;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
#endif
+ };
+ u32 word0;
+ };
+ );
MAILVARIANTS un;
union sli_var us;
@@ -3746,7 +3813,7 @@ typedef struct {
#define IOERR_ILLEGAL_COMMAND 0x06
#define IOERR_XCHG_DROPPED 0x07
#define IOERR_ILLEGAL_FIELD 0x08
-#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_RPI_SUSPENDED 0x09
#define IOERR_TOO_MANY_BUFFERS 0x0A
#define IOERR_RCV_BUFFER_WAITING 0x0B
#define IOERR_NO_CONNECTION 0x0C
@@ -4369,23 +4436,15 @@ lpfc_is_LC_HBA(unsigned short device)
}
/*
- * Determine if an IOCB failed because of a link event or firmware reset.
+ * Determine if failed because of a link event or firmware reset.
*/
-
static inline int
-lpfc_error_lost_link(IOCB_t *iocbp)
+lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
{
- return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
- iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
- iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+ return (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 == IOERR_SLI_ABORTED ||
+ ulp_word4 == IOERR_LINK_DOWN ||
+ ulp_word4 == IOERR_SLI_DOWN));
}
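With the IOCB_t argument gone, a completion handler passes whatever status/word4 pair it already extracted, whether that came from an SLI-3 IOCB or an SLI-4 WCQE. A hypothetical caller, for illustration:

static void lpfc_example_cmpl(struct lpfc_hba *phba, u32 ulp_status,
			      u32 ulp_word4)
{
	/* Link loss and firmware resets are not retryable errors */
	if (lpfc_error_lost_link(ulp_status, ulp_word4))
		return;

	/* ... normal completion and retry handling ... */
}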
-#define MENLO_TRANSPORT_TYPE 0xfe
-#define MENLO_CONTEXT 0
-#define MENLO_PU 3
-#define MENLO_TIMEOUT 30
-#define SETVAR_MLOMNT 0x103107
-#define SETVAR_MLORST 0x103007
-
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6ec42991d2ab..5288fc69908a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,6 +60,14 @@
((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+#define get_wqe_reqtag(x) (((x)->wqe.words[9] >> 0) & 0xFFFF)
+#define get_wqe_tmo(x) (((x)->wqe.words[7] >> 24) & 0x00FF)
+
+#define get_job_ulpword(x, y) ((x)->iocb.un.ulpWord[y])
+
+#define set_job_ulpstatus(x, y) bf_set(lpfc_wcqe_c_status, &(x)->wcqe_cmpl, y)
+#define set_job_ulpword4(x, y) ((&(x)->wcqe_cmpl)->parameter = y)
+
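These accessors let completion code shared between SLI revs read fields straight out of the WQE and stage SLI-3-style status into the embedded WCQE. A hedged sketch, assuming struct lpfc_iocbq carries the wqe and wcqe_cmpl members the macros imply:

static u16 lpfc_example_fail_job(struct lpfc_iocbq *job)
{
	/* fake a local-reject completion, as an abort path might */
	set_job_ulpstatus(job, IOSTAT_LOCAL_REJECT);
	set_job_ulpword4(job, IOERR_SLI_ABORTED);

	/* request tag lives in WQE word 9, bits 15:0 */
	return get_wqe_reqtag(job);
}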
struct dma_address {
uint32_t addr_lo;
uint32_t addr_hi;
@@ -230,6 +238,34 @@ struct lpfc_sli_intf {
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
+enum ulp_bde64_word3 {
+ ULP_BDE64_SIZE_MASK = 0xffffff,
+
+ ULP_BDE64_TYPE_SHIFT = 24,
+ ULP_BDE64_TYPE_MASK = (0xff << ULP_BDE64_TYPE_SHIFT),
+
+ /* BDE (Host_resident) */
+ ULP_BDE64_TYPE_BDE_64 = (0x00 << ULP_BDE64_TYPE_SHIFT),
+ /* Immediate Data BDE */
+ ULP_BDE64_TYPE_BDE_IMMED = (0x01 << ULP_BDE64_TYPE_SHIFT),
+ /* BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64P = (0x02 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Host-resident) */
+ ULP_BDE64_TYPE_BDE_64I = (0x08 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64IP = (0x0A << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Host-resident) */
+ ULP_BDE64_TYPE_BLP_64 = (0x40 << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Port-resident) */
+ ULP_BDE64_TYPE_BLP_64P = (0x42 << ULP_BDE64_TYPE_SHIFT),
+};
+
+struct ulp_bde64_le {
+ __le32 type_size; /* type 31:24, size 23:0 */
+ __le32 addr_low;
+ __le32 addr_high;
+};
+
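The enum values above are pre-shifted so a type can be OR'd with a 24-bit size in a single store. A minimal sketch of filling the new little-endian BDE (hypothetical helper, assuming a caller-supplied DMA mapping):

static void lpfc_example_fill_bde_le(struct ulp_bde64_le *bde,
				     dma_addr_t pa, u32 len)
{
	/* word 3: type in bits 31:24, buffer size in bits 23:0 */
	bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 |
				     (len & ULP_BDE64_SIZE_MASK));
	bde->addr_low = cpu_to_le32(lower_32_bits(pa));
	bde->addr_high = cpu_to_le32(upper_32_bits(pa));
}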
struct ulp_bde64 {
union ULP_BDE_TUS {
uint32_t w;
@@ -702,6 +738,7 @@ struct lpfc_register {
#define lpfc_sliport_eqdelay_id_WORD word0
#define LPFC_SEC_TO_USEC 1000000
#define LPFC_SEC_TO_MSEC 1000
+#define LPFC_MSECS_TO_SECS(msecs) ((msecs) / 1000)
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -2857,6 +2894,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
+#define lpfc_mbx_rd_conf_fawwpn_SHIFT 30
+#define lpfc_mbx_rd_conf_fawwpn_MASK 0x00000001
+#define lpfc_mbx_rd_conf_fawwpn_WORD word1
#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */
#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001
#define lpfc_mbx_rd_conf_wcs_WORD word1
@@ -3444,9 +3484,10 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x12
-#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_DUAL_DUMP 0x1e
+#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_ENABLE_MI 0x21
+#define LPFC_SET_LD_SIGNAL 0x23
#define LPFC_SET_ENABLE_CMF 0x24
struct lpfc_mbx_set_feature {
struct mbox_header header;
@@ -3477,13 +3518,17 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_cmf_SHIFT 0
#define lpfc_mbx_set_feature_cmf_MASK 0x00000001
#define lpfc_mbx_set_feature_cmf_WORD word6
+#define lpfc_mbx_set_feature_lds_qry_SHIFT 0
+#define lpfc_mbx_set_feature_lds_qry_MASK 0x00000001
+#define lpfc_mbx_set_feature_lds_qry_WORD word6
+#define LPFC_QUERY_LDS_OP 1
#define lpfc_mbx_set_feature_mi_SHIFT 0
#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff
#define lpfc_mbx_set_feature_mi_WORD word6
#define lpfc_mbx_set_feature_milunq_SHIFT 16
#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff
#define lpfc_mbx_set_feature_milunq_WORD word6
- uint32_t word7;
+ u32 word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UERP_WORD word7
@@ -3497,6 +3542,8 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff
#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8
+ u32 word9;
+ u32 word10;
};
@@ -4274,7 +4321,7 @@ struct lpfc_acqe_cgn_signal {
struct lpfc_acqe_sli {
uint32_t event_data1;
uint32_t event_data2;
- uint32_t reserved;
+ uint32_t event_data3;
uint32_t trailer;
#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1
#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2
@@ -4287,6 +4334,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
+#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12
};
/*
@@ -4437,12 +4485,8 @@ struct wqe_common {
#define wqe_cmd_type_MASK 0x0000000f
#define wqe_cmd_type_WORD word11
#define wqe_els_id_SHIFT 4
-#define wqe_els_id_MASK 0x00000003
+#define wqe_els_id_MASK 0x00000007
#define wqe_els_id_WORD word11
-#define LPFC_ELS_ID_FLOGI 3
-#define LPFC_ELS_ID_FDISC 2
-#define LPFC_ELS_ID_LOGO 1
-#define LPFC_ELS_ID_DEFAULT 0
#define wqe_irsp_SHIFT 4
#define wqe_irsp_MASK 0x00000001
#define wqe_irsp_WORD word11
@@ -4452,6 +4496,9 @@ struct wqe_common {
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
+#define wqe_ffrq_SHIFT 6
+#define wqe_ffrq_MASK 0x00000001
+#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
@@ -4489,6 +4536,14 @@ struct lpfc_wqe_generic{
uint32_t payload[4];
};
+enum els_request64_wqe_word11 {
+ LPFC_ELS_ID_DEFAULT,
+ LPFC_ELS_ID_LOGO,
+ LPFC_ELS_ID_FDISC,
+ LPFC_ELS_ID_FLOGI,
+ LPFC_ELS_ID_PLOGI,
+};
+
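wqe_els_id_MASK widens from 0x3 to 0x7 to make room for the new PLOGI encoding, and the IDs move into the enum above in word-11 bit order. Programming the field is unchanged; a sketch for illustration (the els_req/wqe_com member names are assumed from elsewhere in the driver):

static void lpfc_example_set_els_id(union lpfc_wqe128 *wqe)
{
	/* tag an ELS_REQUEST64 WQE as a PLOGI via the widened field */
	bf_set(wqe_els_id, &wqe->els_req.wqe_com, LPFC_ELS_ID_PLOGI);
}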
struct els_request64_wqe {
struct ulp_bde64 bde;
uint32_t payload_len;
@@ -4690,7 +4745,6 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
-#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
@@ -4753,6 +4807,9 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
+#define cmf_sync_period_SHIFT 16
+#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
};
@@ -5001,22 +5058,6 @@ struct lpfc_grp_hdr {
{ FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \
}
-/* EDC supports two descriptors. When allocated, it is the
- * size of this structure plus each supported descriptor.
- */
-struct lpfc_els_edc_req {
- struct fc_els_edc edc; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
-/* Minimum structure defines for the EDC response.
- * Balance is in buffer.
- */
-struct lpfc_els_edc_rsp {
- struct fc_els_edc_resp edc_rsp; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
/* Used for logging FPIN messages */
#define LPFC_FPIN_WWPN_LINE_SZ 128
#define LPFC_FPIN_WWPN_LINE_CNT 6
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 6a90e6e53d09..0b1616e93cf4 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,8 +60,6 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
- PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
@@ -124,5 +122,35 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2XX2, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3162, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3322, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ba17a8f740a9..b535f1fd3010 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -324,8 +325,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
prog_id_word = pmboxq->u.mb.un.varWords[7];
/* Decode the Option rom version word to a readable string */
- if (prg->dist < 4)
- dist = dist_char[prg->dist];
+ dist = dist_char[prg->dist];
if ((prg->dist == 3) && (prg->num == 0))
snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
@@ -340,7 +340,6 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/**
* lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
- * cfg_soft_wwnn, cfg_soft_wwpn
* @vport: pointer to lpfc vport data structure.
*
*
@@ -350,22 +349,13 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
- uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
- u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
-
- /* If the soft name exists then update it using the service params */
- if (vport->phba->cfg_soft_wwnn)
- u64_to_wwn(vport->phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (vport->phba->cfg_soft_wwpn)
- u64_to_wwn(vport->phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
+ struct lpfc_hba *phba = vport->phba;
/*
* If the name is empty or there exists a soft name
* then copy the service params name, otherwise use the fc name
*/
- if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ if (vport->fc_nodename.u.wwn[0] == 0)
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
else
@@ -378,22 +368,35 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
*/
if (vport->fc_portname.u.wwn[0] != 0 &&
memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name)))
+ sizeof(struct lpfc_name))) {
vport->vport_flag |= FAWWPN_PARAM_CHG;
- if (vport->fc_portname.u.wwn[0] == 0 ||
- vport->phba->cfg_soft_wwpn ||
- (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
- vport->vport_flag & FAWWPN_SET) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
+ if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
+ phba->sli4_hba.fawwpn_flag &=
+ ~LPFC_FAWWPN_FABRIC;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_DISCOVERY | LOG_ELS,
+ "2701 FA-PWWN change WWPN from %llx to "
+ "%llx: vflag x%x fawwpn_flag x%x\n",
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64
+ (vport->fc_sparam.portName.u.wwn),
+ vport->vport_flag,
+ phba->sli4_hba.fawwpn_flag);
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ }
+ }
+
+ if (vport->fc_portname.u.wwn[0] == 0)
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- vport->vport_flag &= ~FAWWPN_SET;
- if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
- vport->vport_flag |= FAWWPN_SET;
- }
+ sizeof(struct lpfc_name));
else
memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
- sizeof(struct lpfc_name));
+ sizeof(struct lpfc_name));
}
/**
@@ -452,15 +455,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
"READ_SPARM mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
phba->link_state = LPFC_HBA_ERROR;
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return -EIO;
}
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
+ * longer needed. Prevent unintended ctx_buf access as the mbox is
+ * reused.
+ */
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -695,8 +699,14 @@ lpfc_sli4_refresh_params(struct lpfc_hba *phba)
return rc;
}
mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
- phba->sli4_hba.pc_sli4_params.mi_ver =
+
+ /* Are we forcing MI off via module parameter? */
+ if (phba->cfg_enable_mi)
+ phba->sli4_hba.pc_sli4_params.mi_ver =
bf_get(cfg_mi_ver, mbx_sli4_parameters);
+ else
+ phba->sli4_hba.pc_sli4_params.mi_ver = 0;
+
phba->sli4_hba.pc_sli4_params.cmf =
bf_get(cfg_cmf, mbx_sli4_parameters);
phba->sli4_hba.pc_sli4_params.pls =
@@ -1027,7 +1037,7 @@ lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
spin_lock_irq(&pring->ring_lock);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
list_splice_init(&pring->txcmplq, &completions);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&pring->ring_lock);
@@ -1652,7 +1662,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
if (phba->link_state == LPFC_HBA_ERROR &&
- phba->hba_flag & HBA_PCI_ERR) {
+ test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1995,6 +2005,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
+ lpfc_sli_flush_io_rings(phba);
return;
}
@@ -2104,7 +2115,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
@@ -2184,7 +2195,6 @@ lpfc_handle_latt(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
- struct lpfc_dmabuf *mp;
int rc = 0;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -2193,23 +2203,17 @@ lpfc_handle_latt(struct lpfc_hba *phba)
goto lpfc_handle_latt_err_exit;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
rc = 2;
- goto lpfc_handle_latt_free_pmb;
- }
-
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- rc = 3;
- goto lpfc_handle_latt_free_mp;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ goto lpfc_handle_latt_err_exit;
}
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
-
psli->slistat.link_event++;
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
@@ -2230,11 +2234,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
lpfc_handle_latt_free_mbuf:
phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-lpfc_handle_latt_free_mp:
- kfree(mp);
-lpfc_handle_latt_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
/* Enable Link attention interrupts */
spin_lock_irq(&phba->hbalock);
@@ -2257,6 +2257,101 @@ lpfc_handle_latt_err_exit:
return;
}
+static void
+lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
+{
+ int i, j;
+
+ while (length > 0) {
+ /* Look for Serial Number */
+ if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->SerialNumber[j++] = vpd[(*pindex)++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
+ phba->vpd_flag |= VPD_MODEL_DESC;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelDesc[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ModelDesc[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
+ phba->vpd_flag |= VPD_MODEL_NAME;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelName[j++] = vpd[(*pindex)++];
+ if (j == 79)
+ break;
+ }
+ phba->ModelName[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ProgramType[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ProgramType[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
+ phba->vpd_flag |= VPD_PORT;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3 + i);
+ while (i--) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_GET)) {
+ j++;
+ (*pindex)++;
+ } else
+ phba->Port[j++] = vpd[(*pindex)++];
+ if (j == 19)
+ break;
+ }
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_NON))
+ phba->Port[j] = 0;
+ continue;
+ } else {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ *pindex += i;
+ length -= (3 + i);
+ }
+ }
+}
+
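Each VPD record that lpfc_fill_vpd() walks is a two-character keyword, a one-byte length, then that many data bytes, which is why every branch consumes 3 + i bytes. An illustrative fragment (bytes invented for the example) that the walker would accept:

static const u8 vpd_example[] = {
	'S', 'N', 0x04, 'A', 'B', 'C', 'D',		/* SerialNumber "ABCD", 3 + 4 bytes */
	'V', '2', 0x05, 'L', 'P', 'e', '3', '5',	/* ModelName "LPe35", 3 + 5 bytes */
};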
/**
* lpfc_parse_vpd - Parse VPD (Vital Product Data)
* @phba: pointer to lpfc hba data structure.
@@ -2276,7 +2371,7 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
int Length;
- int i, j;
+ int i;
int finished = 0;
int index = 0;
@@ -2309,101 +2404,10 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
Length = ((((unsigned short)lenhi) << 8) + lenlo);
if (Length > len - index)
Length = len - index;
- while (Length > 0) {
- /* Look for Serial Number */
- if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->SerialNumber[j++] = vpd[index++];
- if (j == 31)
- break;
- }
- phba->SerialNumber[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
- phba->vpd_flag |= VPD_MODEL_DESC;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelDesc[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ModelDesc[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
- phba->vpd_flag |= VPD_MODEL_NAME;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelName[j++] = vpd[index++];
- if (j == 79)
- break;
- }
- phba->ModelName[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
- phba->vpd_flag |= VPD_PROGRAM_TYPE;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ProgramType[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ProgramType[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
- phba->vpd_flag |= VPD_PORT;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_GET)) {
- j++;
- index++;
- } else
- phba->Port[j++] = vpd[index++];
- if (j == 19)
- break;
- }
- if ((phba->sli_rev != LPFC_SLI_REV4) ||
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_NON))
- phba->Port[j] = 0;
- continue;
- }
- else {
- index += 2;
- i = vpd[index];
- index += 1;
- index += i;
- Length -= (3 + i);
- }
- }
- finished = 0;
- break;
+
+ lpfc_fill_vpd(phba, vpd, Length, &index);
+ finished = 0;
+ break;
case 0x78:
finished = 1;
break;
@@ -2417,6 +2421,90 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
}
/**
+ * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves the HBA's description based on its registered PCI
+ * device ID. The @descp passed into this function points to an array of 256
+ * chars. It is returned with the model name and a description of the adapter.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+ uint16_t sub_dev_id = phba->pcidev->subsystem_device;
+ char *model = "<Unknown>";
+ int tbolt = 0;
+
+ switch (sub_dev_id) {
+ case PCI_DEVICE_ID_CLRY_161E:
+ model = "161E";
+ break;
+ case PCI_DEVICE_ID_CLRY_162E:
+ model = "162E";
+ break;
+ case PCI_DEVICE_ID_CLRY_164E:
+ model = "164E";
+ break;
+ case PCI_DEVICE_ID_CLRY_161P:
+ model = "161P";
+ break;
+ case PCI_DEVICE_ID_CLRY_162P:
+ model = "162P";
+ break;
+ case PCI_DEVICE_ID_CLRY_164P:
+ model = "164P";
+ break;
+ case PCI_DEVICE_ID_CLRY_321E:
+ model = "321E";
+ break;
+ case PCI_DEVICE_ID_CLRY_322E:
+ model = "322E";
+ break;
+ case PCI_DEVICE_ID_CLRY_324E:
+ model = "324E";
+ break;
+ case PCI_DEVICE_ID_CLRY_321P:
+ model = "321P";
+ break;
+ case PCI_DEVICE_ID_CLRY_322P:
+ model = "322P";
+ break;
+ case PCI_DEVICE_ID_CLRY_324P:
+ model = "324P";
+ break;
+ case PCI_DEVICE_ID_TLFC_2XX2:
+ model = "2XX2";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3162:
+ model = "3162";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3322:
+ model = "3322";
+ tbolt = 1;
+ break;
+ default:
+ model = "Unknown";
+ break;
+ }
+
+ if (mdp && mdp[0] == '\0')
+ snprintf(mdp, 79, "%s", model);
+
+ if (descp && descp[0] == '\0')
+ snprintf(descp, 255,
+ "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
+ (tbolt) ? "ThunderLink FC " : "Celerity FC-",
+ model,
+ phba->Port);
+}
+
+/**
* lpfc_get_hba_model_desc - Retrieve HBA device model name and description
* @phba: pointer to lpfc hba data structure.
* @mdp: pointer to the data structure to hold the derived model name.
@@ -2446,6 +2534,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
+ if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
+ lpfc_get_atto_model_desc(phba, mdp, descp);
+ return;
+ }
+
if (phba->lmt & LMT_64Gb)
max_speed = 64;
else if (phba->lmt & LMT_32Gb)
@@ -2595,11 +2688,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
break;
- case PCI_DEVICE_ID_HORNET:
- m = (typeof(m)){"LP21000", "PCIe",
- "Obsolete, Unsupported FCoE Adapter"};
- GE = 1;
- break;
case PCI_DEVICE_ID_PROTEUS_VF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
"Obsolete, Unsupported Fibre Channel Adapter"};
@@ -2688,7 +2776,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
}
/**
- * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
+ * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a IOCB ring.
* @cnt: the number of IOCBs to be posted to the IOCB ring.
@@ -2700,7 +2788,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
* The number of IOCBs NOT able to be posted to the IOCB ring.
**/
int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
+lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
IOCB_t *icmd;
struct lpfc_iocbq *iocb;
@@ -2806,7 +2894,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
/* Ring 0, ELS / CT buffers */
- lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+ lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */
return 0;
@@ -2983,6 +3071,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
NLP_EVT_DEVICE_RM);
}
+ /* This is a special case flush to return all
+ * IOs before entering this loop. There are
+ * two points in the code where a flush is
+ * avoided if the FC_UNLOADING flag is set.
+	 * One is in the multipool destroy
+	 * (this prevents a crash) and the other is
+	 * in the nvme abort handler (which also
+	 * prevents a crash). Both of these exceptions are
+ * cases where the slot is still accessible.
+ * The flush here is only when the pci slot
+ * is offline.
+ */
+ if (vport->load_flag & FC_UNLOADING &&
+ pci_channel_offline(phba->pcidev))
+ lpfc_sli_flush_io_rings(vport->phba);
+
/* At this point, ALL ndlp's should be gone
* because of the previous NLP_EVT_DEVICE_RM.
* Lets wait for this to happen, if needed.
@@ -2995,7 +3099,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_TRACE_EVENT,
+ LOG_DISCOVERY,
"0282 did:x%x ndlp:x%px "
"refcnt:%d xflags x%x nflag x%x\n",
ndlp->nlp_DID, (void *)ndlp,
@@ -3692,7 +3796,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
- int offline = 0;
+ int offline;
+ bool hba_pci_err;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3702,6 +3807,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
offline = pci_channel_offline(phba->pcidev);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
@@ -3725,11 +3831,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
- if (offline) {
+ if (offline || hba_pci_err) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_UNREG_INP |
NLP_RPI_REGISTERED);
spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli_rpi_release(vports[i],
+ ndlp);
} else {
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -4260,8 +4369,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
qp = &phba->sli4_hba.hdwq[idx];
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
- lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
spin_lock(&qp->io_buf_list_put_lock);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
@@ -4305,9 +4413,10 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"6074 Current allocated XRI sgl count:%d, "
- "maximum XRI count:%d\n",
+ "maximum XRI count:%d els_xri_cnt:%d\n\n",
phba->sli4_hba.io_xri_cnt,
- phba->sli4_hba.io_xri_max);
+ phba->sli4_hba.io_xri_max,
+ els_xri_cnt);
cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
@@ -4446,12 +4555,11 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
pwqeq->sli4_lxritag = lxri;
pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- pwqeq->context1 = lpfc_ncmd;
/* Initialize local short-hand pointers. */
lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
- lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+ lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
spin_lock_init(&lpfc_ncmd->buf_lock);
/* add the nvme buffer to a post list */
@@ -4460,7 +4568,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6114 Allocate %d out of %d requested new NVME "
- "buffers\n", bcnt, num_to_alloc);
+ "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
+ sizeof(*lpfc_ncmd));
+
/* post the list of nvme buffer sgls to port if available */
if (!list_empty(&post_nblist))
@@ -4507,6 +4617,17 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
return rol64(wwn, 32);
}
+static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ return LPFC_MAX_SG_TABLESIZE;
+ else
+ return phba->cfg_scsi_seg_cnt;
+ else
+ return phba->cfg_sg_seg_cnt;
+}
+
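The helper consolidates the sg_tablesize choice that was previously duplicated across the physical-port and vport template paths. How the nested if/else resolves, summarized:

/*
 * SLI-4, cfg_xpsgl set, not NVMET  -> LPFC_MAX_SG_TABLESIZE
 * SLI-4 otherwise                  -> cfg_scsi_seg_cnt
 * SLI-3                            -> cfg_sg_seg_cnt
 */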
/**
* lpfc_vmid_res_alloc - Allocates resources for VMID
* @phba: pointer to lpfc hba data structure.
@@ -4609,42 +4730,26 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Seed template for SCSI host registration */
if (dev == &phba->pcidev->dev) {
- template = &phba->port_template;
-
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
/* Seed physical port template */
- memcpy(template, &lpfc_template, sizeof(*template));
+ template = &lpfc_template;
if (use_no_reset_hba)
/* template is for a no reset SCSI Host */
template->eh_host_reset_handler = NULL;
- /* Template for all vports this physical port creates */
- memcpy(&phba->vport_template, &lpfc_template,
- sizeof(*template));
- phba->vport_template.shost_groups = lpfc_vport_groups;
- phba->vport_template.eh_bus_reset_handler = NULL;
- phba->vport_template.eh_host_reset_handler = NULL;
- phba->vport_template.vendor_id = 0;
-
- /* Initialize the host templates with updated value */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- template->sg_tablesize = phba->cfg_scsi_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_scsi_seg_cnt;
- } else {
- template->sg_tablesize = phba->cfg_sg_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_sg_seg_cnt;
- }
-
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
} else {
/* NVMET is for physical port only */
- memcpy(template, &lpfc_template_nvme,
- sizeof(*template));
+ template = &lpfc_template_nvme;
}
} else {
- template = &phba->vport_template;
+ /* Seed vport template */
+ template = &lpfc_vport_template;
+
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
}
shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
@@ -4677,11 +4782,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-
- if (phba->cfg_xpsgl && !phba->nvmet_support)
- shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
- else
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@@ -4712,7 +4812,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
rc = lpfc_vmid_res_alloc(phba, vport);
if (rc)
- goto out;
+ goto out_put_shost;
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
@@ -4730,16 +4830,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
- goto out_put_shost;
+ goto out_free_vmid;
spin_lock_irq(&phba->port_list_lock);
list_add_tail(&vport->listentry, &phba->port_list);
spin_unlock_irq(&phba->port_list_lock);
return vport;
-out_put_shost:
+out_free_vmid:
kfree(vport->vmid);
bitmap_free(vport->vmid_priority_range);
+out_put_shost:
scsi_host_put(shost);
out:
return NULL;
@@ -5295,7 +5396,6 @@ static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
struct lpfc_acqe_link *acqe_link)
{
- struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
@@ -5312,18 +5412,13 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
"0395 The mboxq allocation failed\n");
return;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
+
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0396 The lpfc_dmabuf allocation failed\n");
+ "0396 mailbox allocation failed\n");
goto out_free_pmb;
}
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0397 The mbuf allocation failed\n");
- goto out_free_dmabuf;
- }
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
@@ -5335,7 +5430,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -5374,7 +5469,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
- goto out_free_dmabuf;
+ goto out_free_pmb;
return;
}
/*
@@ -5409,10 +5504,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
return;
-out_free_dmabuf:
- kfree(mp);
out_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
@@ -5470,38 +5563,12 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
- struct rxtable_entry *entry;
- int cnt = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (!phba->rxtable || head == tail) {
- lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
- "4411 Rxtable is empty\n");
- return;
- }
- last = tail;
- start = head;
-
- /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
- while (start != last) {
- if (start)
- start--;
- else
- start = LPFC_MAX_RXMONITOR_ENTRY - 1;
- entry = &phba->rxtable[start];
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
- "Lat %lld ASz %lld Info %02d BWUtil %d "
- "Int %d slot %d\n",
- cnt, entry->max_bytes_per_interval,
- entry->total_bytes, entry->rcv_bytes,
- entry->avg_io_latency, entry->avg_io_size,
- entry->cmf_info, entry->timer_utilization,
- entry->timer_interval, start);
- cnt++;
- if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
- return;
+ "4411 Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
+ LPFC_MAX_RXMONITOR_DUMP);
}
}
@@ -5519,7 +5586,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
struct tm broken;
struct timespec64 cur_time;
u32 cnt;
- u16 value;
+ u32 value;
/* Make sure we have a congestion info buffer */
if (!phba->cgn_i)
@@ -5852,21 +5919,8 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
/* Use the frequency found in the last rcv'ed FPIN */
value = phba->cgn_fpin_frequency;
- if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
- cp->cgn_warn_freq = cpu_to_le16(value);
- if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
- cp->cgn_alarm_freq = cpu_to_le16(value);
-
- /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
- * are received by the HBA
- */
- value = phba->cgn_sig_freq;
-
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
- phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
- cp->cgn_warn_freq = cpu_to_le16(value);
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
- cp->cgn_alarm_freq = cpu_to_le16(value);
+ cp->cgn_warn_freq = cpu_to_le16(value);
+ cp->cgn_alarm_freq = cpu_to_le16(value);
lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
LPFC_CGN_CRC32_SEED);
@@ -5921,11 +5975,10 @@ lpfc_cmf_timer(struct hrtimer *timer)
{
struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
cmf_timer);
- struct rxtable_entry *entry;
+ struct rx_info_entry entry;
uint32_t io_cnt;
- uint32_t head, tail;
uint32_t busy, max_read;
- uint64_t total, rcv, lat, mbpi, extra;
+ uint64_t total, rcv, lat, mbpi, extra, cnt;
int timer_interval = LPFC_CMF_INTERVAL;
uint32_t ms;
struct lpfc_cgn_stat *cgs;
@@ -5996,20 +6049,28 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Calculate any extra bytes needed to account for the
* timer accuracy. If we are less than LPFC_CMF_INTERVAL
- * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
- * add an extra 2%. The goal is to equalize total with a
- * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
+ * calculate the adjustment needed for total to reflect
+ * a full LPFC_CMF_INTERVAL.
*/
- if (ms == LPFC_CMF_INTERVAL)
- extra = div_u64(total, 50);
- else if (ms < LPFC_CMF_INTERVAL)
- extra = div_u64(total, 33);
+ if (ms && ms < LPFC_CMF_INTERVAL) {
+ cnt = div_u64(total, ms); /* bytes per ms */
+ cnt *= LPFC_CMF_INTERVAL; /* what total should be */
+
+ /* If the timeout is scheduled to be shorter,
+ * this value may skew the data, so cap it at mbpi.
+ */
+ if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
+ cnt = mbpi;
+
+ extra = cnt - total;
+ }
lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
} else {
/* For Monitor mode or link down we want mbpi
* to be the full link speed
*/
mbpi = phba->cmf_link_byte_count;
+ extra = 0;
}
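The fixed 2%/3% slop factors give way to a linear extrapolation: bytes-per-ms times the full interval, capped at mbpi when a short interval was deliberately scheduled (HBA_SHORT_CMF). Worked numbers for illustration, assuming the default LPFC_CMF_INTERVAL of 90 ms:

/*
 * Timer fired at ms = 75 with total = 3,000,000 bytes observed:
 *   cnt   = (3000000 / 75) * 90 = 3,600,000   bytes for a full interval
 *   extra = 3600000 - 3000000   =   600,000   bytes of adjustment
 * CMF_SYNC_WQE is then issued with total + extra = 3,600,000 bytes.
 */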
phba->cmf_timer_cnt++;
@@ -6035,39 +6096,30 @@ lpfc_cmf_timer(struct hrtimer *timer)
}
/* Save rxmonitor information for debug */
- if (phba->rxtable) {
- head = atomic_xchg(&phba->rxtable_idx_head,
- LPFC_RXMONITOR_TABLE_IN_USE);
- entry = &phba->rxtable[head];
- entry->total_bytes = total;
- entry->rcv_bytes = rcv;
- entry->cmf_busy = busy;
- entry->cmf_info = phba->cmf_active_info;
+ if (phba->rx_monitor) {
+ entry.total_bytes = total;
+ entry.cmf_bytes = total + extra;
+ entry.rcv_bytes = rcv;
+ entry.cmf_busy = busy;
+ entry.cmf_info = phba->cmf_active_info;
if (io_cnt) {
- entry->avg_io_latency = div_u64(lat, io_cnt);
- entry->avg_io_size = div_u64(rcv, io_cnt);
+ entry.avg_io_latency = div_u64(lat, io_cnt);
+ entry.avg_io_size = div_u64(rcv, io_cnt);
} else {
- entry->avg_io_latency = 0;
- entry->avg_io_size = 0;
+ entry.avg_io_latency = 0;
+ entry.avg_io_size = 0;
}
- entry->max_read_cnt = max_read;
- entry->io_cnt = io_cnt;
- entry->max_bytes_per_interval = mbpi;
+ entry.max_read_cnt = max_read;
+ entry.io_cnt = io_cnt;
+ entry.max_bytes_per_interval = mbpi;
if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
- entry->timer_utilization = phba->cmf_last_ts;
+ entry.timer_utilization = phba->cmf_last_ts;
else
- entry->timer_utilization = ms;
- entry->timer_interval = ms;
+ entry.timer_utilization = ms;
+ entry.timer_interval = ms;
phba->cmf_last_ts = 0;
- /* Increment rxtable index */
- head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (head == tail) {
- tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- atomic_set(&phba->rxtable_idx_tail, tail);
- }
- atomic_set(&phba->rxtable_idx_head, head);
+ lpfc_rx_monitor_record(phba->rx_monitor, &entry);
}
if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
@@ -6082,6 +6134,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Each minute save Fabric and Driver congestion information */
lpfc_cgn_save_evt_cnt(phba);
+ phba->hba_flag &= ~HBA_SHORT_CMF;
+
/* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
* minute, adjust our next timer interval, if needed, to ensure a
* 1 minute granularity when we get the next timer interrupt.
@@ -6092,6 +6146,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
jiffies);
if (timer_interval <= 0)
timer_interval = LPFC_CMF_INTERVAL;
+ else
+ phba->hba_flag |= HBA_SHORT_CMF;
/* If we adjust timer_interval, max_bytes_per_interval
* needs to be adjusted as well.
@@ -6133,6 +6189,7 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
{
uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
+ u8 cnt = 0;
phba->sli4_hba.link_state.speed =
lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -6151,26 +6208,36 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
phba->trunk_link.link1.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
phba->trunk_link.link2.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
phba->trunk_link.link3.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+ cnt++;
}
+ if (cnt)
+ phba->trunk_link.phy_lnk_speed =
+ phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
+ else
+ phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
+
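phy_lnk_speed is the per-lane rate: the aggregate logical speed (in Mbps) divided by the number of configured trunk links, with the /1000 converting to Gb. For example:

/*
 * A 2-lane trunk reporting logical_speed = 64000 Mbps gives
 * phy_lnk_speed = 64000 / (2 * 1000) = 32 Gb per physical link.
 */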
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2910 Async FC Trunking Event - Speed:%d\n"
"\tLogical speed:%d "
@@ -6210,7 +6277,6 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
- struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
@@ -6249,7 +6315,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
LPFC_FC_LA_TYPE_LINK_DOWN)
phba->sli4_hba.link_state.logical_speed = 0;
- else if (!phba->sli4_hba.conf_trunk)
+ else if (!phba->sli4_hba.conf_trunk)
phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
@@ -6270,18 +6336,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
"2897 The mboxq allocation failed\n");
return;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2898 The lpfc_dmabuf allocation failed\n");
+ "2898 The mboxq prep failed\n");
goto out_free_pmb;
}
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2899 The mbuf allocation failed\n");
- goto out_free_dmabuf;
- }
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
@@ -6293,7 +6353,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -6338,13 +6398,11 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
- goto out_free_dmabuf;
+ goto out_free_pmb;
return;
-out_free_dmabuf:
- kfree(mp);
out_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
@@ -6375,7 +6433,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"2901 Async SLI event - Type:%d, Event Data: x%08x "
"x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- acqe_sli->reserved, acqe_sli->trailer);
+ acqe_sli->event_data3, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -6404,7 +6462,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
temp_event_data.event_code = LPFC_NORMAL_TEMP;
temp_event_data.data = (uint32_t)acqe_sli->event_data1;
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
"3191 Normal Temperature:%d Celsius - Port Name %c\n",
acqe_sli->event_data1, port_name);
@@ -6536,12 +6594,15 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
/* Misconfigured WWN. Reports that the SLI Port is configured
* to use FA-WWN, but the attached device doesn’t support it.
- * No driver action is required.
* Event Data1 - N.A, Event Data2 - N.A
+ * This event only happens on the physical port.
*/
- lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
- "2699 Misconfigured FA-WWN - Attached device does "
- "not support FA-WWN\n");
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
+ "2699 Misconfigured FA-PWWN - Attached device "
+ "does not support FA-PWWN\n");
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
+ memset(phba->pport->fc_portname.u.wwn, 0,
+ sizeof(struct lpfc_name));
break;
case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
/* EEPROM failure. No driver action is required */
@@ -6566,9 +6627,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
/* Alarm overrides warning, so check that first */
if (cgn_signal->alarm_cnt) {
if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- /* Keep track of alarm cnt for cgn_info */
- atomic_add(cgn_signal->alarm_cnt,
- &phba->cgn_fabric_alarm_cnt);
/* Keep track of alarm cnt for CMF_SYNC_WQE */
atomic_add(cgn_signal->alarm_cnt,
&phba->cgn_sync_alarm_cnt);
@@ -6577,13 +6635,20 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
/* signal action needs to be taken */
if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- /* Keep track of warning cnt for cgn_info */
- atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
/* Keep track of warning cnt for CMF_SYNC_WQE */
atomic_add(cnt, &phba->cgn_sync_warn_cnt);
}
}
break;
+ case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
+ /* May be accompanied by a temperature event */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
+ "2902 Remote Degrade Signaling: x%08x x%08x "
+ "x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ acqe_sli->event_data3);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -6997,6 +7062,12 @@ lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
spin_unlock_irq(&phba->hbalock);
}
+static const char * const lpfc_cmf_mode_to_str[] = {
+ "OFF",
+ "MANAGED",
+ "MONITOR",
+};
+
/**
* lpfc_cgn_params_parse - Process a FW cong parm change event
* @phba: pointer to lpfc hba data structure.
@@ -7016,6 +7087,7 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
{
struct lpfc_cgn_info *cp;
uint32_t crc, oldmode;
+ char acr_string[4] = {0};
/* Make sure the FW has encoded the correct magic number to
* validate the congestion parameter in FW memory.
@@ -7092,9 +7164,6 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MONITOR:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4661 Switch from MANAGED to "
- "`MONITOR mode\n");
phba->cmf_max_bytes_per_interval =
phba->cmf_link_byte_count;
@@ -7113,14 +7182,26 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MANAGED:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4662 Switch from MONITOR to "
- "MANAGED mode\n");
lpfc_cmf_signal_init(phba);
break;
}
break;
}
+ if (oldmode != LPFC_CFG_OFF ||
+ oldmode != phba->cgn_p.cgn_param_mode) {
+ if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
+ scnprintf(acr_string, sizeof(acr_string), "%u",
+ phba->cgn_p.cgn_param_level0);
+ else
+ scnprintf(acr_string, sizeof(acr_string), "NA");
+
+ dev_info(&phba->pcidev->dev, "%d: "
+ "4663 CMF: Mode %s acr %s\n",
+ phba->brd_no,
+ lpfc_cmf_mode_to_str
+ [phba->cgn_p.cgn_param_mode],
+ acr_string);
+ }
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
"4669 FW cgn parm buf wrong magic 0x%x "
@@ -7602,7 +7683,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
/* Initialize the wait queue head for the kernel thread */
init_waitqueue_head(&phba->work_waitq);
@@ -7686,13 +7766,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- }
-
if (!phba->sli.sli3_ring)
phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
sizeof(struct lpfc_sli_ring),
@@ -7868,6 +7941,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ if (!phba->wq)
+ return -ENOMEM;
/*
* Initialize timers used by driver
@@ -7971,7 +8046,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -7998,6 +8073,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_read_config(phba);
if (unlikely(rc))
goto out_free_bsmbx;
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+		/* Right now the link is down; if FA-PWWN is configured, the
+		 * firmware will try FLOGI before the driver gets a link up.
+		 * If it fails, the driver should get a MISCONFIGURED async
+		 * event which will clear this flag. The driver is only
+		 * notified on failure; if the FLOGI succeeds, there is no
+		 * notification. Assume success.
+ */
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+ }
+
rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
if (unlikely(rc))
goto out_free_bsmbx;
@@ -8221,8 +8308,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
&phba->pcidev->dev,
phba->cfg_sg_dma_buf_size,
i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
+ if (!phba->lpfc_sg_dma_buf_pool) {
+ rc = -ENOMEM;
goto out_free_bsmbx;
+ }
phba->lpfc_cmd_rsp_buf_pool =
dma_pool_create("lpfc_cmd_rsp_buf_pool",
@@ -8230,8 +8319,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp),
i, 0);
- if (!phba->lpfc_cmd_rsp_buf_pool)
+ if (!phba->lpfc_cmd_rsp_buf_pool) {
+ rc = -ENOMEM;
goto out_free_sg_dma_buf;
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -8387,6 +8478,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
@@ -8529,7 +8623,6 @@ static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
if (phba->wq) {
- flush_workqueue(phba->wq);
destroy_workqueue(phba->wq);
phba->wq = NULL;
}
@@ -8973,6 +9066,36 @@ lpfc_hba_free(struct lpfc_hba *phba)
}
/**
+ * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine sets up the initial FDMI attribute masks for
+ * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
+ * to get these attributes first before falling back; the attribute
+ * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
+ **/
+void
+lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+
+ vport->load_flag |= FC_ALLOW_FDMI;
+ if (phba->cfg_enable_SmartSAN ||
+ phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
+ /* Setup appropriate attribute masks */
+ vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
+ if (phba->cfg_enable_SmartSAN)
+ vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
+ else
+ vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "6077 Setup FDMI mask: hba x%x port x%x\n",
+ vport->fdmi_hba_mask, vport->fdmi_port_mask);
+}
+
+/**
* lpfc_create_shost - Create hba physical port with associated scsi host.
* @phba: pointer to lpfc hba data structure.
*
@@ -9015,21 +9138,12 @@ lpfc_create_shost(struct lpfc_hba *phba)
/* Put reference to SCSI host to driver's device private data */
pci_set_drvdata(phba->pcidev, shost);
+ lpfc_setup_fdmi_mask(vport);
+
/*
* At this point we are fully registered with PSA. In addition,
* any initial discovery should be completed.
*/
- vport->load_flag |= FC_ALLOW_FDMI;
- if (phba->cfg_enable_SmartSAN ||
- (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
-
- /* Setup appropriate attribute masks */
- vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
- if (phba->cfg_enable_SmartSAN)
- vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
- else
- vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
- }
return 0;
}
@@ -9802,7 +9916,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
uint16_t forced_link_speed;
- uint32_t if_type, qmin;
+ uint32_t if_type, qmin, fawwpn;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -9844,10 +9958,24 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
}
+ fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
+
+ if (fawwpn) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_INIT | LOG_DISCOVERY,
+ "2702 READ_CONFIG: FA-PWWN is "
+ "configured on\n");
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
+ } else {
+ /* Clear FW configured flag, preserve driver flag */
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
+ }
+
phba->sli4_hba.conf_trunk =
bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
+
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
/* Reduce resource usage in kdump environment */
@@ -12053,7 +12181,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
rc = pci_enable_msi(phba->pcidev);
if (!rc)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0462 PCI enable MSI mode success.\n");
+ "0012 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0471 PCI enable MSI mode failed (%d)\n", rc);
@@ -12285,7 +12413,7 @@ lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_irq_chann; i++) {
eqhdl = lpfc_get_eq_hdl(i);
- eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->irq = LPFC_IRQ_EMPTY;
eqhdl->phba = phba;
}
}
@@ -12658,7 +12786,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
__lpfc_cpuhp_remove(phba);
@@ -12709,7 +12837,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
cpumask_clear(&eqhdl->aff_mask);
cpumask_set_cpu(cpu, &eqhdl->aff_mask);
irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+ irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}
/**
@@ -12922,9 +13050,17 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_DRIVER_HANDLER_NAME"%d", index);
eqhdl->idx = index;
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
- &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = pci_irq_vector(phba->pcidev, index);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0489 MSI-X fast-path (%d) "
+ "pci_irq_vec failed (%d)\n", index, rc);
+ goto cfg_fail_out;
+ }
+ eqhdl->irq = rc;
+
+ rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -12932,8 +13068,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
goto cfg_fail_out;
}
- eqhdl->irq = pci_irq_vector(phba->pcidev, index);
-
if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
@@ -12998,7 +13132,6 @@ cfg_fail_out:
for (--index; index >= 0; index--) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
@@ -13051,7 +13184,14 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
}
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ rc = pci_irq_vector(phba->pcidev, 0);
+ if (rc < 0) {
+ pci_free_irq_vectors(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0496 MSI pci_irq_vec failed (%d)\n", rc);
+ return rc;
+ }
+ eqhdl->irq = rc;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
@@ -13078,8 +13218,8 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
* MSI-X -> MSI -> IRQ.
*
* Return codes
- * 0 - successful
- * other values - error
+ * Interrupt mode (2, 1, 0) - successful
+ * LPFC_INTR_ERROR - error
**/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
@@ -13124,7 +13264,14 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
intr_mode = 0;
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ retval = pci_irq_vector(phba->pcidev, 0);
+ if (retval < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0502 INTR pci_irq_vec failed (%d)\n",
+ retval);
+ return LPFC_INTR_ERROR;
+ }
+ eqhdl->irq = retval;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
@@ -13159,7 +13306,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
for (index = 0; index < phba->cfg_irq_chann; index++) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
} else {
@@ -13351,8 +13497,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Abort all iocbs associated with the hba */
lpfc_sli_hba_iocb_abort(phba);
- /* Wait for completion of device XRI exchange busy */
- lpfc_sli4_xri_exchange_busy_wait(phba);
+ if (!pci_channel_offline(phba->pcidev))
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
if (phba->pport)
@@ -13371,15 +13518,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable FW logging to host memory */
lpfc_ras_stop_fwlog(phba);
- /* Unset the queues shared with the hardware then release all
- * allocated resources.
- */
- lpfc_sli4_queue_unset(phba);
- lpfc_sli4_queue_destroy(phba);
-
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ /* release all queue allocated resources. */
+ lpfc_sli4_queue_destroy(phba);
+
/* Free RAS DMA memory */
if (phba->ras_fwlog.ras_enabled)
lpfc_sli4_ras_dma_free(phba);
@@ -13466,7 +13610,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
phba->cgn_evt_minute = 0;
phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
- memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
+ memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
cp->cgn_info_version = LPFC_CGN_INFO_V3;
@@ -13525,7 +13669,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba)
return;
cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
- memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
+ memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
ktime_get_real_ts64(&cmpl_time);
time64_to_tm(cmpl_time.tv_sec, 0, &broken);
@@ -14259,6 +14403,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
"2711 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
+ lpfc_sli4_prep_dev_for_reset(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -14807,9 +14952,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
- /* Enable RAS FW log support */
- lpfc_sli4_ras_setup(phba);
-
timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
@@ -15054,24 +15196,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2826 PCI channel disable preparing for reset\n");
+ int offline = pci_channel_offline(phba->pcidev);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2826 PCI channel disable preparing for reset offline"
+ " %d\n", offline);
/* Block any management I/Os to the device */
lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
+ /* HBA_PCI_ERR was set in io_error_detected */
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
/* Flush all driver's outstanding I/Os as we are to reset */
lpfc_sli_flush_io_rings(phba);
+ lpfc_offline(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Disable interrupt and pci device */
lpfc_sli4_disable_intr(phba);
- lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
}
@@ -15120,6 +15266,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ bool hba_pci_err;
switch (state) {
case pci_channel_io_normal:
@@ -15127,17 +15274,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Fatal error, prepare for slot reset */
- lpfc_sli4_prep_dev_for_reset(phba);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2832 Already handling PCI error "
+ "state: x%x\n", state);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- phba->hba_flag |= HBA_PCI_ERR;
+ set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
/* Unknown state, prepare and request slot reset */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
@@ -15171,17 +15325,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
uint32_t intr_mode;
+ bool hba_pci_err;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
printk(KERN_ERR "lpfc: Cannot re-enable "
- "PCI device after reset.\n");
+ "PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_restore_state(pdev);
- phba->hba_flag &= ~HBA_PCI_ERR;
+ hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ dev_info(&pdev->dev,
+ "hba_pci_err was not set, recovering slot reset.\n");
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -15195,6 +15353,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -15236,8 +15396,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
*/
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
/* Perform device reset */
- lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Bring the device back online */
lpfc_online(phba);
@@ -15659,34 +15817,7 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
unsigned int temp_idx;
int i;
int j = 0;
- unsigned long rem_nsec, iflags;
- bool log_verbose = false;
- struct lpfc_vport *port_iterator;
-
- /* Don't dump messages if we explicitly set log_verbose for the
- * physical port or any vport.
- */
- if (phba->cfg_log_verbose)
- return;
-
- spin_lock_irqsave(&phba->port_list_lock, iflags);
- list_for_each_entry(port_iterator, &phba->port_list, listentry) {
- if (port_iterator->load_flag & FC_UNLOADING)
- continue;
- if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- if (port_iterator->cfg_log_verbose)
- log_verbose = true;
-
- scsi_host_put(lpfc_shost_from_vport(port_iterator));
-
- if (log_verbose) {
- spin_unlock_irqrestore(&phba->port_list_lock,
- iflags);
- return;
- }
- }
- }
- spin_unlock_irqrestore(&phba->port_list_lock, iflags);
+ unsigned long rem_nsec;
if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
return;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 7d480c798794..b39cefcd8703 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,7 +35,7 @@
#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
#define LOG_LIBDFC 0x00002000 /* Libdfc events */
#define LOG_VPORT 0x00004000 /* NPIV events */
-#define LOG_SECURITY 0x00008000 /* Security events */
+#define LOG_LDS_EVENT 0x00008000 /* Link Degrade Signaling events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
@@ -73,7 +73,7 @@ do { \
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
- if ((mask) & LOG_TRACE_EVENT) \
+ if ((mask) & LOG_TRACE_EVENT && !(vport)->cfg_log_verbose) \
lpfc_dmp_dbg((vport)->phba); \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \
@@ -89,11 +89,11 @@ do { \
(phba)->pport->cfg_log_verbose : \
(phba)->cfg_log_verbose; \
if (((mask) & log_verbose) || (level[1] <= '3')) { \
- if ((mask) & LOG_TRACE_EVENT) \
+ if ((mask) & LOG_TRACE_EVENT && !log_verbose) \
lpfc_dmp_dbg(phba); \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
fmt, phba->brd_no, ##arg); \
- } else if (!(phba)->cfg_log_verbose)\
+ } else if (!log_verbose)\
lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \
} \
} while (0)
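Taken together, the two macro changes mean the internal trace buffer is dumped for LOG_TRACE_EVENT messages only when verbose logging is off, and messages that never reach dmesg are captured in the trace ring instead. A condensed sketch of the decision (illustrative names, not the driver's macros):

/* Hedged sketch of the gating encoded by the macros above. */
static void example_log_decision(u32 mask, u32 log_verbose, char sev,
				 bool *to_dmesg, bool *dump_trace,
				 bool *to_trace_ring)
{
	/* Emit to dmesg when the mask is enabled or severity is
	 * KERN_ERR or more severe.
	 */
	*to_dmesg = (mask & log_verbose) || sev <= '3';
	/* Dump the trace buffer only when not already verbose, so
	 * the dump does not duplicate live output.
	 */
	*dump_trace = *to_dmesg && (mask & LOG_TRACE_EVENT) && !log_verbose;
	/* Quiet messages land in the internal trace ring instead. */
	*to_trace_ring = !*to_dmesg && !log_verbose;
}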
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6c754ee96bee..9858b1743769 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -44,6 +44,80 @@
#include "lpfc_compat.h"
/**
+ * lpfc_mbox_rsrc_prep - Prepare a mailbox with DMA buffer memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * one or more DMA buffers for the data transfer. This routine provides
+ * a standard framework for allocating the DMA buffer and assigning it to
+ * the @mbox. Callers should clean up the mbox with a call to
+ * lpfc_mbox_rsrc_cleanup.
+ *
+ * The lpfc_mbuf_alloc routine acquires the hbalock, so the caller is
+ * responsible for ensuring the hbalock is released. Also note that the
+ * driver design is a single dmabuf/mbuf per mbox in the ctx_buf.
+ *
+ **/
+int
+lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_dmabuf *mp;
+
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ kfree(mp);
+ return -ENOMEM;
+ }
+
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+
+ /* Initialization only. Driver does not use a list of dmabufs. */
+ INIT_LIST_HEAD(&mp->list);
+ mbox->ctx_buf = mp;
+ return 0;
+}
+
+/**
+ * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and virtual memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ * @locked: value that indicates if the hbalock is held (1) or not (0).
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * possibly a DMA buffer for the data transfer. This routine provides
+ * a standard framework for releasing any DMA buffers, freeing all
+ * associated memory resources, and returning the @mbox to the @phba pool.
+ * Callers should use this routine to clean up all mailboxes prepped with
+ * lpfc_mbox_rsrc_prep.
+ *
+ **/
+void
+lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+ enum lpfc_mbox_ctx locked)
+{
+ struct lpfc_dmabuf *mp;
+
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ mbox->ctx_buf = NULL;
+
+ /* Release the generic BPL buffer memory. */
+ if (mp) {
+ if (locked == MBOX_THD_LOCKED)
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ else
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
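As a usage illustration, a caller converted to the new helpers pairs them on every exit path. This is a hedged sketch; example_issue_dump and the command setup are illustrative, not part of this patch:

static int example_issue_dump(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	if (lpfc_mbox_rsrc_prep(phba, mbox)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	/* ... build the command, using mbox->ctx_buf for the DMA
	 * buffer, and set a completion handler ...
	 */

	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		/* Not on the mailbox thread, so hbalock is not held. */
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
		return -EIO;
	}
	return 0;	/* the completion handler performs the cleanup */
}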
+
+/**
* lpfc_dump_static_vport - Dump HBA's static vport information.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
@@ -61,6 +135,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
{
MAILBOX_t *mb;
struct lpfc_dmabuf *mp;
+ int rc;
mb = &pmb->u.mb;
@@ -79,22 +154,15 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
return 0;
}
- /* For SLI4 HBAs driver need to allocate memory */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "2605 lpfc_dump_static_vport: memory"
- " allocation failed\n");
+ "2605 %s: memory allocation failed\n",
+ __func__);
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
- /* save address for completion */
- pmb->ctx_buf = (uint8_t *)mp;
+
+ mp = pmb->ctx_buf;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -429,7 +497,7 @@ lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
/*
- * SLI-3, Message Signaled Interrupt Fearure.
+ * SLI-3, Message Signaled Interrupt Feature.
*/
/* Multi-message attention configuration */
@@ -606,26 +674,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
+ int rc;
- mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->mbxOwner = OWN_HOST;
-
/* Get a buffer to hold the HBAs Service Parameters */
-
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
- mb->mbxCommand = MBX_READ_SPARM64;
- /* READ_SPARAM: no buffers */
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0301 READ_SPARAM: no buffers\n");
- return (1);
+ return 1;
}
- INIT_LIST_HEAD(&mp->list);
+
+ mp = pmb->ctx_buf;
+ mb = &pmb->u.mb;
+ mb->mbxOwner = OWN_HOST;
mb->mbxCommand = MBX_READ_SPARM64;
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -633,9 +696,6 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
if (phba->sli_rev >= LPFC_SLI_REV3)
mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
- /* save address for completion */
- pmb->ctx_buf = mp;
-
return (0);
}
@@ -756,6 +816,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
MAILBOX_t *mb = &pmb->u.mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
+ int rc;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -766,12 +827,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
+
/* Get a buffer to hold NPorts Service Parameters */
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
@@ -779,15 +838,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
"rpi x%x\n", vpi, did, rpi);
return 1;
}
- INIT_LIST_HEAD(&mp->list);
- sparam = mp->virt;
/* Copy param's into a new buffer */
+ mp = pmb->ctx_buf;
+ sparam = mp->virt;
memcpy(sparam, param, sizeof (struct serv_parm));
- /* save address for completion */
- pmb->ctx_buf = (uint8_t *)mp;
-
+ /* Finish initializing the mailbox. */
mb->mbxCommand = MBX_REG_LOGIN64;
mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -1723,7 +1780,9 @@ lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
*
- * This routine frees SLI4 specific mailbox command for sending IOCTL command.
+ * This routine cleans up and releases an SLI4 mailbox command that was
+ * configured using lpfc_sli4_config. It accounts for the embedded and
+ * non-embedded config types.
**/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
@@ -2277,33 +2336,24 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
+ int rc;
memset(mbox, 0, sizeof(*mbox));
mb = &mbox->u.mb;
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
- if (!mp || !mp->virt) {
- kfree(mp);
- /* dump config region 23 failed to allocate memory */
+ rc = lpfc_mbox_rsrc_prep(phba, mbox);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "2569 lpfc dump config region 23: memory"
- " allocation failed\n");
+ "2569 %s: memory allocation failed\n",
+ __func__);
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
-
- /* save address for completion */
- mbox->ctx_buf = (uint8_t *)mp;
-
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.region_id = DMP_REGION_23;
mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
+ mp = mbox->ctx_buf;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
return 0;
@@ -2326,7 +2376,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
rc = SUCCESS;
mbx_failed:
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, rc);
}
@@ -2338,30 +2388,25 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
(struct lpfc_rdp_context *)(mbox->ctx_ndlp);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
- goto error_mbuf_free;
+ goto error_mbox_free;
lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
DMP_SFF_PAGE_A2_SIZE);
- /* We don't need dma buffer for link stat. */
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-
- memset(mbox, 0, sizeof(*mbox));
lpfc_read_lnk_stat(phba, mbox);
mbox->vport = rdp_context->ndlp->vport;
+
+ /* Save the dma buffer for cleanup in the final completion. */
+ mbox->ctx_buf = mp;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
- goto error_cmd_free;
+ goto error_mbox_free;
return;
-error_mbuf_free:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-error_cmd_free:
- lpfc_sli4_mbox_cmd_free(phba, mbox);
+error_mbox_free:
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, FAILURE);
}
@@ -2409,9 +2454,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
return;
error:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- lpfc_sli4_mbox_cmd_free(phba, mbox);
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, FAILURE);
}
@@ -2427,27 +2470,19 @@ error:
int
lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
+ int rc;
struct lpfc_dmabuf *mp = NULL;
memset(mbox, 0, sizeof(*mbox));
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, mbox);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"3569 dump type 3 page 0xA0 allocation failed\n");
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
-
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
- /* save address for completion */
- mbox->ctx_buf = mp;
-
bf_set(lpfc_mbx_memory_dump_type3_type,
&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
bf_set(lpfc_mbx_memory_dump_type3_link,
@@ -2456,6 +2491,8 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
+
+ mp = mbox->ctx_buf;
mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 870e53b8f81d..89cbeba06aea 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -344,9 +344,12 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
phba->cgn_i = NULL;
}
- /* Free RX table */
- kfree(phba->rxtable);
- phba->rxtable = NULL;
+ /* Free RX Monitor */
+ if (phba->rx_monitor) {
+ lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ }
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27263f02ab9f..b86ff9fcdf0c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -171,12 +171,11 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
void *ptr = NULL;
- IOCB_t *irsp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
- /* For lpfc_els_abort, context2 could be zero'ed to delay
+ /* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay
* freeing associated memory till after ABTS completes.
*/
if (pcmd) {
@@ -187,10 +186,16 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
}
} else {
- /* Force ulpStatus error since we are returning NULL ptr */
- if (!(irsp->ulpStatus)) {
- irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
- irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ /* Force ulp_status error since we are returning NULL ptr */
+ if (!(ulp_status)) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl,
+ IOSTAT_LOCAL_REJECT);
+ rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ } else {
+ rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ }
}
ptr = NULL;
}
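The get_job_ulpstatus() accessor used above hides the SLI3/SLI4 completion split. A hedged sketch of the pattern (illustrative, not the driver's exact helper):

/* SLI4 reports completion status in the WCQE, SLI3 in the IOCB,
 * so callers query one helper for both revisions.
 */
static inline u32 example_get_ulpstatus(struct lpfc_hba *phba,
					struct lpfc_iocbq *iocbq)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
	return iocbq->iocb.ulpStatus;
}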
@@ -324,6 +329,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_dmabuf *pcmd;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
+ union lpfc_wqe128 *wqe;
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
@@ -333,9 +339,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct ls_rjt stat;
uint32_t vid, flag;
int rc;
+ u32 remote_did;
memset(&stat, 0, sizeof (struct ls_rjt));
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (wwn_to_u64(sp->portName.u.wwn) == 0) {
@@ -366,7 +373,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
- icmd = &cmdiocb->iocb;
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ wqe = &cmdiocb->wqe;
+ else
+ icmd = &cmdiocb->iocb;
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -456,7 +467,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* rcv'ed PLOGI decides what our NPortId will be */
- vport->fc_myDID = icmd->un.rcvels.parmRo;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ vport->fc_myDID = bf_get(els_rsp64_sid,
+ &cmdiocb->wqe.xmit_els_rsp);
+ } else {
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
+ }
/* If there is an outstanding FLOGI, abort it now.
* The remote NPort is not going to ACC our FLOGI
@@ -497,6 +513,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_config_link(phba, link_mbox);
link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
link_mbox->vport = vport;
+
+ /* The default completion handling for CONFIG_LINK
+ * does not require the ndlp so no reference is needed.
+ */
link_mbox->ctx_ndlp = ndlp;
rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
@@ -537,7 +557,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
* always be deferring the ACC.
*/
- rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
+ else
+ remote_did = icmd->un.rcvels.remoteID;
+ rc = lpfc_reg_rpi(phba, vport->vpi, remote_did,
(uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
if (rc)
goto out;
@@ -571,7 +595,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* a default RPI.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- mempool_free(login_mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+ MBOX_THD_UNLOCKED);
login_mbox = NULL;
} else {
/* In order to preserve RPIs, we want to cleanup
@@ -588,9 +613,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, login_mbox);
- if (rc)
- mempool_free(login_mbox, phba->mbox_mem_pool);
+ ndlp, login_mbox);
+ if (rc && login_mbox)
+ lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+ MBOX_THD_UNLOCKED);
return 1;
}
@@ -611,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!login_mbox->ctx_ndlp)
+ goto out;
+
login_mbox->context3 = save_iocb; /* For PLOGI ACC */
spin_lock_irq(&ndlp->lock);
@@ -619,8 +648,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Start the ball rolling by issuing REG_LOGIN here */
rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_nlp_put(ndlp);
goto out;
+ }
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
return 1;
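The hunk above also tightens the node reference rule for the deferred PLOGI ACC: a reference is taken for the mailbox completion context and must be dropped on the synchronous failure path, because the completion handler that would normally release it never runs. A minimal sketch (illustrative names):

static int example_issue_reg_login(struct lpfc_hba *phba,
				   struct lpfc_nodelist *ndlp,
				   LPFC_MBOXQ_t *login_mbox)
{
	login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!login_mbox->ctx_ndlp)
		return -ENODEV;

	if (lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT) ==
	    MBX_NOT_FINISHED) {
		lpfc_nlp_put(ndlp);	/* undo the get; cmpl never fires */
		return -EIO;
	}
	return 0;	/* completion handler drops the reference */
}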
@@ -674,17 +705,17 @@ static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
struct lpfc_name *pnn, *ppn;
struct ls_rjt stat;
ADISC *ap;
- IOCB_t *icmd;
uint32_t *lp;
uint32_t cmd;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
cmd = *lp++;
@@ -698,8 +729,8 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ppn = (struct lpfc_name *) & sp->portName;
}
- icmd = &cmdiocb->iocb;
- if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+ if (get_job_ulpstatus(phba, cmdiocb) == 0 &&
+ lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
/*
* As soon as we send ACC, the remote NPort can
@@ -710,7 +741,6 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
GFP_KERNEL);
if (elsiocb) {
-
/* Save info from cmd IOCB used in rsp */
memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
sizeof(struct lpfc_iocbq));
@@ -804,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nvmet_invalidate_host(phba, ndlp);
if (ndlp->nlp_DID == Fabric_DID) {
- if (vport->port_state <= LPFC_FDISC)
+ if (vport->port_state <= LPFC_FDISC ||
+ vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);
@@ -893,7 +924,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
uint32_t *payload;
uint32_t cmd;
- payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ payload = cmdiocb->cmd_dmabuf->virt;
cmd = *payload;
if (vport->phba->nvmet_support) {
/* Must be a NVME PRLI */
@@ -930,9 +961,9 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct fc_rport *rport = ndlp->rport;
u32 roles;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+ pcmd = cmdiocb->cmd_dmabuf;
+ lp = (uint32_t *)pcmd->virt;
+ npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
@@ -1078,8 +1109,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_nlp_put(ndlp);
mempool_free(pmb, phba->mbox_mem_pool);
+ }
}
}
@@ -1193,7 +1226,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *lp = (uint32_t *) pcmd->virt;
struct serv_parm *sp = (struct serv_parm *) (lp + 1);
struct ls_rjt stat;
@@ -1303,29 +1336,30 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *mp;
+ struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
uint32_t vid, flag;
- IOCB_t *irsp;
struct serv_parm *sp;
uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
int rc;
+ u32 ulp_status;
+ u32 did;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
/* Recovery from PLOGI collision logic */
return ndlp->nlp_state;
}
- irsp = &rspiocb->iocb;
-
- if (irsp->ulpStatus)
+ if (ulp_status)
goto out;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -1434,7 +1468,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
goto out;
}
- if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (lpfc_reg_rpi(phba, vport->vpi, did,
(uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
@@ -1467,11 +1503,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
* command
*/
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(mbox, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0134 PLOGI: cannot issue reg_login "
"Data: x%x x%x x%x x%x\n",
@@ -1664,17 +1696,18 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
ADISC *ap;
int rc;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- irsp = &rspiocb->iocb;
- if ((irsp->ulpStatus) ||
+ if ((ulp_status) ||
(!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc,
@@ -1821,7 +1854,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1841,16 +1873,11 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
list_del(&mb->list);
phba->sli.mboxq_cnt--;
- mempool_free(mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
}
}
spin_unlock_irq(&phba->hbalock);
@@ -1955,8 +1982,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
@@ -2116,13 +2144,15 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *irsp;
PRLI *npr;
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
* format is different so NULL the two PRLI types so that the
@@ -2131,13 +2161,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr = NULL;
nvpr = NULL;
temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
+ if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
npr = (PRLI *) temp_ptr;
- else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
+ else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
nvpr = (struct lpfc_nvme_prli *) temp_ptr;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if ((vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
goto out;
@@ -2736,16 +2765,18 @@ static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
+ if (ulp_status)
return NLP_STE_FREED_NODE;
- }
+
return ndlp->nlp_state;
}
@@ -2753,14 +2784,16 @@ static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
@@ -2787,14 +2820,16 @@ static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 9601edd838e1..152245f7cacc 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+
+ if (!vport || vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
+
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
if (qhandle == NULL)
return -ENOMEM;
@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
return -EINVAL;
remoteport = lpfc_rport->remoteport;
- if (!vport->localport)
+ if (!vport->localport ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -EINVAL;
lport = vport->localport->private;
@@ -313,8 +319,10 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp;
uint32_t status;
- pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
- ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+ pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
+ ndlp = cmdwqe->ndlp;
+ buf_ptr = cmdwqe->bpl_dmabuf;
+
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -324,16 +332,16 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
- cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+ cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
+ ndlp);
lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
cmdwqe->sli4_xritag, status, wcqe->parameter);
- if (cmdwqe->context3) {
- buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
+ if (buf_ptr) {
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- cmdwqe->context3 = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
}
if (pnvme_lsreq->done)
pnvme_lsreq->done(pnvme_lsreq, status);
@@ -345,18 +353,19 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
cmdwqe->sli4_xritag, status);
if (ndlp) {
lpfc_nlp_put(ndlp);
- cmdwqe->context1 = NULL;
+ cmdwqe->ndlp = NULL;
}
lpfc_sli_release_iocbq(phba, cmdwqe);
}
static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
struct lpfc_nvme_lport *lport;
uint32_t status;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
@@ -380,7 +389,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *),
+ struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
@@ -400,19 +409,19 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));
- genwqe->context3 = (uint8_t *)bmp;
- genwqe->iocb_flag |= LPFC_IO_NVME_LS;
+ genwqe->bpl_dmabuf = bmp;
+ genwqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Save for completion so we can release these resources */
- genwqe->context1 = lpfc_nlp_get(ndlp);
- if (!genwqe->context1) {
+ genwqe->ndlp = lpfc_nlp_get(ndlp);
+ if (!genwqe->ndlp) {
dev_warn(&phba->pcidev->dev,
"Warning: Failed node ref, not sending LS_REQ\n");
lpfc_sli_release_iocbq(phba, genwqe);
return 1;
}
- genwqe->context2 = (uint8_t *)pnvme_lsreq;
+ genwqe->context_un.nvme_lsreq = pnvme_lsreq;
/* Fill in payload, bp points to frame payload */
if (!tmo)
@@ -432,7 +441,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
first_len = xmit_len;
}
- genwqe->rsvd2 = num_entry;
+ genwqe->num_bdes = num_entry;
genwqe->hba_wqidx = 0;
/* Words 0 - 2 */
@@ -483,8 +492,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
- genwqe->wqe_cmpl = cmpl;
- genwqe->iocb_cmpl = NULL;
+ genwqe->cmd_cmpl = cmpl;
genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
genwqe->vport = vport;
genwqe->retry = retry;
@@ -534,7 +542,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe))
+ struct lpfc_iocbq *rspwqe))
{
struct lpfc_dmabuf *bmp;
struct ulp_bde64 *bpl;
@@ -559,6 +567,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
+ if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
if (!vport->phba->sli4_hba.nvmels_wq)
return -ENOMEM;
@@ -662,7 +672,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
return -EINVAL;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
atomic_inc(&lport->fc4NvmeLsRequests);
@@ -721,8 +732,8 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(&phba->hbalock);
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
- if (wqe->context2 == pnvme_lsreq) {
- wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+ if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
+ wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
foundit = true;
break;
}
@@ -906,7 +917,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
/*
- * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
+ * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -917,11 +928,11 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
* TODO: What are the failure codes.
**/
static void
-lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_iocbq *pwqeOut)
{
- struct lpfc_io_buf *lpfc_ncmd =
- (struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
struct nvme_fc_ersp_iu *ep;
@@ -1054,25 +1065,37 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0;
+ /* Get the NVME cmd details for this unique error. */
+ cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+ ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
/* Check if this is really an ERSP */
if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
lpfc_ncmd->status = IOSTAT_SUCCESS;
lpfc_ncmd->result = 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6084 NVME Completion ERSP: "
- "xri %x placed x%x\n",
- lpfc_ncmd->cur_iocbq.sli4_xritag,
- wcqe->total_data_placed);
+ "6084 NVME FCP_ERR ERSP: "
+ "xri %x placed x%x opcode x%x cmd_id "
+ "x%x cqe_status x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x "
- "placed x%x\n",
+ "placed x%x opcode x%x cmd_id x%x, "
+ "cqe_status x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
- wcqe->total_data_placed);
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
case IOSTAT_LOCAL_REJECT:
/* Let fall through to set command final state. */
@@ -1184,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
- struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+ struct nvme_common_command *sqe;
+ struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
@@ -1241,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
cstat->control_requests++;
}
- if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+ sqe = &((struct nvme_fc_cmd_iu *)
+ nCmd->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_async_event)
+ bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+ }
+
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1268,6 +1298,19 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Words 13 14 15 are for PBDE support */
+ /* add the VMID tags as per switch response */
+ if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_priority_tagging) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
+ } else {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
+ }
+ }
+
pwqeq->vport = vport;
return 0;
}
@@ -1390,8 +1433,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
if ((nseg - 1) == i)
bf_set(lpfc_sli4_sge_last, sgl, 1);
- physaddr = data_sg->dma_address;
- dma_len = data_sg->length;
+ physaddr = sg_dma_address(data_sg);
+ dma_len = sg_dma_len(data_sg);
sgl->addr_lo = cpu_to_le32(
putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(
@@ -1493,6 +1536,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe;
uint64_t start = 0;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u8 *uuid = NULL;
+ int err;
+ enum dma_data_direction iodir;
+#endif
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1515,7 +1563,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (unlikely(vport->load_flag & FC_UNLOADING)) {
+ if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1650,6 +1699,33 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->qidx = lpfc_queue_info->qidx;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* is the I/O generated by a VM, get the associated virtual */
+ /* entity id */
+ uuid = nvme_fc_io_getuuid(pnvme_fcreq);
+
+ if (uuid) {
+ if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
+ iodir = DMA_TO_DEVICE;
+ else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
+ iodir = DMA_FROM_DEVICE;
+ else
+ iodir = DMA_NONE;
+
+ err = lpfc_vmid_get_appid(vport, uuid, iodir,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_ncmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
+ }
+ }
+#endif
+
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identifier was created in our hardware queue create callback
@@ -1730,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
- * @abts_cmpl: Pointer to wcqe complete object.
+ * @rspiocb: Pointer to response iocb object.
*
* This is the callback function for any NVME FCP IO that was aborted.
*
@@ -1739,15 +1815,16 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
**/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl)
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"req_tag x%x, status x%x, hwstatus x%x\n",
- cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag,
+ bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
+ get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
bf_get(lpfc_wcqe_c_status, abts_cmpl),
bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
@@ -1784,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
int ret_val;
+ struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1866,7 +1944,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
}
/* Don't abort IOs no longer on the pending queue. */
- if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
@@ -1880,7 +1958,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
/* Outstanding abort is in progress */
- if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq x%px, "
@@ -1907,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ /*
+ * Get Command Id from cmd to plug into response. This
+ * code is not needed in the next NVME Transport drop.
+ */
+ cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x\n",
- nvmereq_wqe->sli4_xritag);
+ "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
+ nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
+ cp->sqe.common.command_id);
return;
out_unlock:
@@ -1975,8 +2059,8 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- pwqeq->iocb_flag = LPFC_IO_NVME;
- pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
+ pwqeq->cmd_flag = LPFC_IO_NVME;
+ pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
@@ -2169,8 +2253,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
- if (!vport || !vport->localport ||
- !qp || !qp->io_wq)
+ if (!vport->localport || !qp || !qp->io_wq)
return;
pring = qp->io_wq->pring;
@@ -2180,8 +2263,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_scsi += qp->abts_scsi_io_bufs;
abts_nvme += qp->abts_nvme_io_bufs;
}
- if (!vport || !vport->localport ||
- vport->phba->hba_flag & HBA_PCI_ERR)
+ if (!vport->localport ||
+ test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+ vport->load_flag & FC_UNLOADING)
return;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2346,6 +2430,11 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
spin_lock_irq(&ndlp->lock);
+
+ /* If an oldrport exists, so does the ndlp reference. If not,
+ * a new reference is needed because either the node has never
+ * been registered or it has been unregistered and is being deleted.
+ */
oldrport = lpfc_ndlp_get_nrport(ndlp);
if (oldrport) {
prev_ndlp = oldrport->ndlp;
@@ -2456,12 +2545,12 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!nrport || !remoteport)
goto rescan_exit;
- /* Only rescan if we are an NVME target in the MAPPED state */
+ /* Rescan an NVME target in MAPPED state with DISCOVERY role set */
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
nvme_fc_rescan_remoteport(remoteport);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6172 NVME rescanned DID x%06x "
"port_state x%x\n",
ndlp->nlp_DID, remoteport->port_state);
@@ -2541,8 +2630,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING ||
- unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
+ if (vport->load_flag & FC_UNLOADING)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
@@ -2708,7 +2796,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_wcqe_complete wcqe;
struct lpfc_wcqe_complete *wcqep = &wcqe;
- lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+ lpfc_ncmd = pwqeIn->io_buf;
if (!lpfc_ncmd) {
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
@@ -2736,12 +2824,14 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqep->word0 = 0;
bf_set(lpfc_wcqe_c_status, wcqep, stat);
wcqep->parameter = param;
+ wcqep->total_data_placed = 0;
wcqep->word3 = 0; /* xb is 0 */
/* Call release with XB=1 to queue the IO into the abort list. */
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
bf_set(lpfc_wcqe_c_xb, wcqep, 1);
- (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
+ memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}
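
/*
 * Editor's sketch, not part of the patch: with cmd_cmpl() now taking an
 * iocbq for the response, a driver-synthesized completion is staged by
 * writing the fabricated WCQE into the command's own wcqe_cmpl and
 * passing the iocbq as both command and "response", exactly as the hunk
 * above does. The helper name is hypothetical.
 */
static void
lpfc_sketch_fake_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqe,
		      u32 stat, u32 param)
{
	struct lpfc_wcqe_complete wcqe;

	memset(&wcqe, 0, sizeof(wcqe));
	bf_set(lpfc_wcqe_c_status, &wcqe, stat);
	wcqe.parameter = param;

	memcpy(&pwqe->wcqe_cmpl, &wcqe, sizeof(wcqe));
	(pwqe->cmd_cmpl)(phba, pwqe, pwqe);	/* cmd doubles as rsp */
}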
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cc54ffb5c205..733c277948c0 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -234,7 +234,7 @@ int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe));
+ struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
@@ -248,6 +248,6 @@ int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
struct nvmefc_ls_rsp *ls_rsp,
void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe));
+ struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
+ struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe);
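
/*
 * Editor's sketch, not part of the patch: the prototype changes above are
 * the conversion this series applies to every completion handler. The
 * old callback received a raw struct lpfc_wcqe_complete; the new one
 * receives the response iocbq and derives the WCQE from its embedded
 * wcqe_cmpl, so IOCB (SLI-3) and WQE (SLI-4) paths share one cmd_cmpl
 * signature. lpfc_sketch_cmpl is a hypothetical name.
 */
static void
lpfc_sketch_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		 struct lpfc_iocbq *rspwqe)
{
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
	u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
	u32 result = wcqe->parameter;

	/* ...handler body consumes status/result exactly as before... */
	(void)phba; (void)cmdwqe; (void)status; (void)result;
}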
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 731802527b81..f7cfac0da9b6 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -285,7 +285,7 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba,
* transmission of an NVME LS response.
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. The function frees memory resources used for the command
@@ -293,9 +293,10 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba,
**/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
- struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
+ struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
uint32_t status, result;
@@ -316,9 +317,9 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
status, result, axchg->oxid);
- lpfc_nlp_put(cmdwqe->context1);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ lpfc_nlp_put(cmdwqe->ndlp);
+ cmdwqe->context_un.axchg = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
ls_rsp->done(ls_rsp);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -331,7 +332,7 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME LS commands
@@ -340,10 +341,11 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_nvmet_tgtport *tgtp;
uint32_t status, result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
if (!phba->targetport)
goto finish;
@@ -365,7 +367,7 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
finish:
- __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
+ __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
}
/**
@@ -707,7 +709,7 @@ out:
* lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME FCP commands
@@ -715,17 +717,18 @@ out:
**/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_async_xchg_ctx *ctxp;
- uint32_t status, result, op, start_clean, logerr;
+ uint32_t status, result, op, logerr;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
#endif
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
ctxp->flag &= ~LPFC_NVME_IO_INP;
rsp = &ctxp->hdlrctx.fcp_req;
@@ -817,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
- memset(((char *)cmdwqe) + start_clean, 0,
- (sizeof(struct lpfc_iocbq) - start_clean));
+ memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
@@ -862,7 +863,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
struct nvmefc_ls_rsp *ls_rsp,
void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe))
+ struct lpfc_iocbq *rspwqe))
{
struct lpfc_hba *phba = axchg->phba;
struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
@@ -898,9 +899,9 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
}
/* Save numBdes for bpl2sgl */
- nvmewqeq->rsvd2 = 1;
+ nvmewqeq->num_bdes = 1;
nvmewqeq->hba_wqidx = 0;
- nvmewqeq->context3 = &dmabuf;
+ nvmewqeq->bpl_dmabuf = &dmabuf;
dmabuf.virt = &bpl;
bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
@@ -913,9 +914,8 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
* be referenced after it returns back to this routine.
*/
- nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
- nvmewqeq->iocb_cmpl = NULL;
- nvmewqeq->context2 = axchg;
+ nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
+ nvmewqeq->context_un.axchg = axchg;
lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
@@ -923,7 +923,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
/* clear to be sure there's no reference */
- nvmewqeq->context3 = NULL;
+ nvmewqeq->bpl_dmabuf = NULL;
if (rc == WQE_SUCCESS) {
/*
@@ -940,7 +940,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
rc = -ENXIO;
- lpfc_nlp_put(nvmewqeq->context1);
+ lpfc_nlp_put(nvmewqeq->ndlp);
out_free_buf:
/* Give back resources */
@@ -1072,10 +1072,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
- nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
- nvmewqeq->iocb_cmpl = NULL;
- nvmewqeq->context2 = ctxp;
- nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
+ nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
+ nvmewqeq->context_un.axchg = ctxp;
+ nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
ctxp->wqeq->hba_wqidx = rsp->hwqid;
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
@@ -1118,8 +1117,8 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->oxid, rc);
ctxp->wqeq->hba_wqidx = 0;
- nvmewqeq->context2 = NULL;
- nvmewqeq->context3 = NULL;
+ nvmewqeq->context_un.axchg = NULL;
+ nvmewqeq->bpl_dmabuf = NULL;
rc = -EBUSY;
aerr:
return rc;
@@ -1275,7 +1274,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
* lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
* @phba: Pointer to HBA context object
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* This function is the completion handler for NVME LS requests.
* The function updates any states and statistics, then calls the
@@ -1283,8 +1282,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
**/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}
@@ -1581,14 +1581,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6406 Ran out of NVMET iocb/WQEs\n");
return -ENOMEM;
}
- ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+ ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
nvmewqe = ctx_buf->iocbq;
wqe = &nvmewqe->wqe;
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));
- ctx_buf->iocbq->context1 = NULL;
+ ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
@@ -2023,12 +2023,14 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
&wq->wqfull_list, list) {
if (ctxp) {
/* Checking for a specific IO to flush */
- if (nvmewqeq->context2 == ctxp) {
+ if (nvmewqeq->context_un.axchg == ctxp) {
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock,
iflags);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
+ sizeof(*wcqep));
lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
- wcqep);
+ nvmewqeq);
return;
}
continue;
@@ -2036,7 +2038,8 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
/* Flush all IOs */
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
}
}
@@ -2066,7 +2069,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
+ ctxp = nvmewqeq->context_un.axchg;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
@@ -2612,10 +2615,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
ctxp->wqeq = nvmewqe;
/* prevent preparing wqe with NULL ndlp reference */
- nvmewqe->context1 = lpfc_nlp_get(ndlp);
- if (nvmewqe->context1 == NULL)
+ nvmewqe->ndlp = lpfc_nlp_get(ndlp);
+ if (!nvmewqe->ndlp)
goto nvme_wqe_free_wqeq_exit;
- nvmewqe->context2 = ctxp;
+ nvmewqe->context_un.axchg = ctxp;
wqe = &nvmewqe->wqe;
memset(wqe, 0, sizeof(union lpfc_wqe));
@@ -2676,7 +2679,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
- nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
+ nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -2687,8 +2690,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
return nvmewqe;
nvme_wqe_free_wqeq_exit:
- nvmewqe->context2 = NULL;
- nvmewqe->context3 = NULL;
+ nvmewqe->context_un.axchg = NULL;
+ nvmewqe->ndlp = NULL;
+ nvmewqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, nvmewqe);
return NULL;
}
@@ -2990,7 +2994,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
- nvmewqe->context1 = ndlp;
+ nvmewqe->ndlp = ndlp;
for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
@@ -3031,7 +3035,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
* lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
@@ -3039,15 +3043,16 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
**/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
unsigned long flags;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3078,8 +3083,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -3100,7 +3105,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
@@ -3108,15 +3113,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
unsigned long flags;
uint32_t result;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
if (!ctxp) {
@@ -3162,8 +3168,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -3181,7 +3187,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for LS cmds
@@ -3189,13 +3195,14 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
if (phba->nvmet_support) {
@@ -3226,8 +3233,8 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
}
@@ -3314,10 +3321,10 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
OTHER_COMMAND);
abts_wqeq->vport = phba->pport;
- abts_wqeq->context1 = ndlp;
- abts_wqeq->context2 = ctxp;
- abts_wqeq->context3 = NULL;
- abts_wqeq->rsvd2 = 0;
+ abts_wqeq->ndlp = ndlp;
+ abts_wqeq->context_un.axchg = ctxp;
+ abts_wqeq->bpl_dmabuf = NULL;
+ abts_wqeq->num_bdes = 0;
/* hba_wqidx should already be setup from command we are aborting */
abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
abts_wqeq->iocb.ulpLe = 1;
@@ -3328,46 +3335,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
return 1;
}
-/**
- * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-static void
-lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the I/O associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_async_xchg_ctx *ctxp,
@@ -3377,7 +3344,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_iocbq *abts_wqeq;
struct lpfc_nodelist *ndlp;
unsigned long flags;
- u8 opt;
+ bool ia;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3417,7 +3384,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
abts_wqeq = ctxp->abort_wqeq;
ctxp->state = LPFC_NVME_STE_ABORT;
- opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
+ ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3446,7 +3413,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
/* Outstanding abort is in progress */
- if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -3461,16 +3428,17 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
/* Ready - mark outstanding as aborted by driver. */
- abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
+ abts_wqeq->iotag, CLASS3,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, true);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME;
- abts_wqeq->context2 = ctxp;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME;
+ abts_wqeq->context_un.axchg = ctxp;
abts_wqeq->vport = phba->pport;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
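
/*
 * Editor's note, not part of the patch: the open-coded abort WQE setup
 * deleted above collapses into the shared SLI helper. Reading the call
 * in the hunk, the argument mapping is roughly (the meaning of the
 * trailing bool is assumed to be "request a WQE completion"):
 *
 *   ia = !!(ctxp->flag & LPFC_NVME_ABTS_RCV);
 *   lpfc_sli_prep_abort_xri(phba, abts_wqeq,
 *                           ctxp->wqeq->sli4_xritag,   XRI to abort
 *                           abts_wqeq->iotag,          request tag
 *                           CLASS3, LPFC_WQE_CQ_ID_DEFAULT,
 *                           ia,                        inhibit ABTS on wire
 *                           true);                     WQE completion
 */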
@@ -3526,9 +3494,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq = ctxp->wqeq;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
@@ -3612,9 +3579,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
}
spin_lock_irqsave(&phba->hbalock, flags);
- abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
@@ -3625,8 +3591,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
out:
if (tgtp)
atomic_inc(&tgtp->xmt_abort_rsp_error);
- abts_wqeq->context2 = NULL;
- abts_wqeq->context3 = NULL;
+ abts_wqeq->rsp_dmabuf = NULL;
+ abts_wqeq->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6ccf573acdec..7a1563564df7 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -87,14 +87,6 @@ static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp);
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag);
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid);
/**
* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
@@ -120,62 +112,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
#define LPFC_INVALID_REFTAG ((u32)-1)
/**
- * lpfc_update_stats - Update statistical data for the command completion
- * @vport: The virtual port on which this call is executing.
- * @lpfc_cmd: lpfc scsi command object pointer.
- *
- * This function is called when there is a command completion and this
- * function updates the statistical data for the command completion.
- **/
-static void
-lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- unsigned long flags;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- unsigned long latency;
- int i;
-
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- (cmd->result))
- return;
-
- latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
- rdata = lpfc_cmd->rdata;
- pnode = rdata->pnode;
-
- spin_lock_irqsave(shost->host_lock, flags);
- if (!pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return;
- }
-
- if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
- i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
- phba->bucket_step;
- /* check array subscript bounds */
- if (i < 0)
- i = 0;
- else if (i >= LPFC_MAX_BUCKET_COUNT)
- i = LPFC_MAX_BUCKET_COUNT - 1;
- } else {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
- if (latency <= (phba->bucket_base +
- ((1<<i)*phba->bucket_step)))
- break;
- }
-
- pnode->lat_data[i].cmd_count++;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
-/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
*
@@ -362,7 +298,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
kfree(psb);
break;
}
- psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
psb->fcp_cmnd = psb->data;
psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
@@ -433,7 +369,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->ulpClass = CLASS3;
psb->status = IOSTAT_SUCCESS;
/* Put it back into the SCSI buffer list */
- psb->cur_iocbq.context1 = psb;
+ psb->cur_iocbq.io_buf = psb;
spin_lock_init(&psb->buf_lock);
lpfc_release_scsi_buf_s3(phba, psb);
@@ -468,7 +404,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode &&
@@ -524,7 +460,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -571,7 +507,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
* for command completion wake up the thread.
*/
spin_lock_irqsave(&psb->buf_lock, iflag);
- psb->cur_iocbq.iocb_flag &=
+ psb->cur_iocbq.cmd_flag &=
~LPFC_DRIVER_ABORTED;
if (psb->waitq)
wake_up(psb->waitq);
@@ -593,8 +529,8 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ (iocbq->cmd_flag & LPFC_IO_LIBDFC))
continue;
if (iocbq->sli4_xritag != xri)
continue;
@@ -695,7 +631,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
lpfc_cmd->prot_seg_cnt = 0;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->timeout = 0;
@@ -783,7 +719,7 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
psb->pCmd = NULL;
- psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
@@ -931,7 +867,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
physaddr = sg_dma_address(sgel);
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
+ !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -959,7 +895,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
+ !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
* The extended IOCB format can only fit 3 BDE or a BPL.
@@ -2942,154 +2878,58 @@ out:
* -1 - Internal error (bad profile, ...etc)
*/
static int
-lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_iocbq *pIocbOut)
{
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ struct sli3_bg_fields *bgf;
int ret = 0;
- u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
+ struct lpfc_wcqe_complete *wcqe;
+ u32 status;
u32 bghm = 0;
u32 bgstat = 0;
u64 failing_sector = 0;
- if (status == CQE_STATUS_DI_ERROR) {
- if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
- bgstat |= BGS_GUARD_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
- bgstat |= BGS_APPTAG_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
- bgstat |= BGS_REFTAG_ERR_MASK;
-
- /* Check to see if there was any good data before the error */
- if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
- bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
- bghm = wcqe->total_data_placed;
- }
-
- /*
- * Set ALL the error bits to indicate we don't know what
- * type of error it is.
- */
- if (!bgstat)
- bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
- BGS_GUARD_ERR_MASK);
- }
-
- if (lpfc_bgs_get_guard_err(bgstat)) {
- ret = 1;
-
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
- set_host_byte(cmd, DID_ABORT);
- phba->bg_guard_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9059 BLKGRD: Guard Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_reftag_err(bgstat)) {
- ret = 1;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wcqe = &pIocbOut->wcqe_cmpl;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
- set_host_byte(cmd, DID_ABORT);
+ if (status == CQE_STATUS_DI_ERROR) {
+ /* Guard Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
+ bgstat |= BGS_GUARD_ERR_MASK;
- phba->bg_reftag_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9060 BLKGRD: Ref Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_apptag_err(bgstat)) {
- ret = 1;
+ /* AppTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
+ bgstat |= BGS_APPTAG_ERR_MASK;
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
- set_host_byte(cmd, DID_ABORT);
+ /* RefTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
+ bgstat |= BGS_REFTAG_ERR_MASK;
- phba->bg_apptag_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9062 BLKGRD: App Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
- /*
- * setup sense data descriptor 0 per SPC-4 as an information
- * field, and put the failing LBA in it.
- * This code assumes there was also a guard/app/ref tag error
- * indication.
- */
- cmd->sense_buffer[7] = 0xc; /* Additional sense length */
- cmd->sense_buffer[8] = 0; /* Information descriptor type */
- cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
- cmd->sense_buffer[10] = 0x80; /* Validity bit */
+ /* Check to see if there was any good data before the
+ * error
+ */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
+ bghm = wcqe->total_data_placed;
+ }
- /* bghm is a "on the wire" FC frame based count */
- switch (scsi_get_prot_op(cmd)) {
- case SCSI_PROT_READ_INSERT:
- case SCSI_PROT_WRITE_STRIP:
- bghm /= cmd->device->sector_size;
- break;
- case SCSI_PROT_READ_STRIP:
- case SCSI_PROT_WRITE_INSERT:
- case SCSI_PROT_READ_PASS:
- case SCSI_PROT_WRITE_PASS:
- bghm /= (cmd->device->sector_size +
- sizeof(struct scsi_dif_tuple));
- break;
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!bgstat)
+ bgstat |= (BGS_REFTAG_ERR_MASK |
+ BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
}
- failing_sector = scsi_get_lba(cmd);
- failing_sector += bghm;
-
- /* Descriptor Information */
- put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
- }
-
- if (!ret) {
- /* No error was reported - problem in FW? */
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9068 BLKGRD: Unknown error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
-
- /* Calculate what type of error it was */
- lpfc_calc_bg_err(phba, lpfc_cmd);
+ } else {
+ bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ bghm = bgf->bghm;
+ bgstat = bgf->bgstat;
}
- return ret;
-}
-
-/*
- * This function checks for BlockGuard errors detected by
- * the HBA. In case of errors, the ASC/ASCQ fields in the
- * sense buffer will be set accordingly, paired with
- * ILLEGAL_REQUEST to signal to the kernel that the HBA
- * detected corruption.
- *
- * Returns:
- * 0 - No error found
- * 1 - BlockGuard error found
- * -1 - Internal error (bad profile, ...etc)
- */
-static int
-lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_iocbq *pIocbOut)
-{
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
- int ret = 0;
- uint32_t bghm = bgf->bghm;
- uint32_t bgstat = bgf->bgstat;
- uint64_t failing_sector = 0;
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
@@ -3117,7 +2957,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
@@ -3131,10 +2970,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
set_host_byte(cmd, DID_ABORT);
-
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9056 BLKGRD: Ref Tag error in cmd "
@@ -3146,10 +2983,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
set_host_byte(cmd, DID_ABORT);
-
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9061 BLKGRD: App Tag error in cmd "
@@ -3434,7 +3269,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled) {
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->priority;
@@ -3591,15 +3426,15 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
switch (scsi_get_prot_op(scsi_cmnd)) {
case SCSI_PROT_WRITE_STRIP:
case SCSI_PROT_READ_STRIP:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
break;
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_INSERT:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
break;
case SCSI_PROT_WRITE_PASS:
case SCSI_PROT_READ_PASS:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
break;
}
@@ -3630,7 +3465,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled) {
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
/* Word 10 */
bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
@@ -3640,14 +3475,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
}
/* Word 7. DIF Flags */
- if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
+ if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
- else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
- else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
return 0;
@@ -3936,7 +3771,7 @@ lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
else
time = div_u64(time + 500, 1000); /* round it */
- cgs = this_cpu_ptr(phba->cmf_stat);
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->rcv_bytes);
atomic64_add(time, &cgs->rx_latency);
atomic_inc(&cgs->rx_io_cnt);
@@ -3980,7 +3815,7 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
atomic_set(&phba->rx_max_read_cnt, size);
}
- cgs = this_cpu_ptr(phba->cmf_stat);
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->total_bytes);
return 0;
}
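
/*
 * Editor's sketch, not part of the patch: both hunks above replace
 * this_cpu_ptr() with per_cpu_ptr(..., raw_smp_processor_id()). The
 * congestion counters are approximate, so a racy CPU id is acceptable,
 * and the explicit form avoids this_cpu_ptr()'s preemption debug checks
 * when called from preemptible context. The struct tag lpfc_cgn_stat is
 * assumed from the driver headers.
 */
static inline void
lpfc_sketch_cmf_add_bytes(struct lpfc_hba *phba, uint32_t size)
{
	struct lpfc_cgn_stat *cgs;

	cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
	atomic64_add(size, &cgs->total_bytes);
}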
@@ -4173,7 +4008,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
* lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
* @phba: The hba for which this call is being executed.
* @pwqeIn: The command WQE for the scsi cmnd.
- * @wcqe: Pointer to driver response CQE object.
+ * @pwqeOut: Pointer to driver response WQE object.
*
* This routine assigns scsi command result by looking into response WQE
* status field appropriately. This routine handles QUEUE FULL condition as
@@ -4181,10 +4016,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
**/
static void
lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *pwqeOut)
{
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
@@ -4194,7 +4029,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct Scsi_Host *shost;
u32 logit = LOG_FCP;
u32 status, idx;
- unsigned long iflags = 0;
u32 lat;
u8 wait_xb_clr = 0;
@@ -4209,30 +4043,16 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
rdata = lpfc_cmd->rdata;
ndlp = rdata->pnode;
- if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- /* TOREMOVE - currently this flag is checked during
- * the release of lpfc_iocbq. Remove once we move
- * to lpfc_wqe_job construct.
- *
- * This needs to be done outside buf_lock
- */
- spin_lock_irqsave(&phba->hbalock, iflags);
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
- /* Guard against abort handler being called at same time */
- spin_lock(&lpfc_cmd->buf_lock);
-
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
if (!cmd) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"9042 I/O completion: Not an active IO\n");
- spin_unlock(&lpfc_cmd->buf_lock);
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
idx = lpfc_cmd->cur_iocbq.hba_wqidx;
if (phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
@@ -4391,10 +4211,12 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
break;
}
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+ lpfc_cmd->result == IOERR_LINK_DOWN ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4406,12 +4228,14 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* This is a response for a BG enabled
* cmd. Parse BG error
*/
- lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
- wcqe);
+ lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
break;
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_BG,
+ "9040 non-zero BGSTAT "
+ "on unprotected cmd\n");
}
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9040 non-zero BGSTAT on unprotected cmd\n");
}
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"9036 Local Reject FCP cmd x%x failed"
@@ -4448,14 +4272,13 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"9039 Iodone <%d/%llu> cmd x%px, error "
- "x%x SNS x%x x%x Data: x%x x%x\n",
+ "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
- cmd->result, *lp, *(lp + 3), cmd->retries,
- scsi_get_resid(cmd));
+ cmd->result, *lp, *(lp + 3),
+ (u64)scsi_get_lba(cmd),
+ cmd->retries, scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
-
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4508,7 +4331,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4531,7 +4354,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
{
struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *) pIocbIn->context1;
+ (struct lpfc_io_buf *) pIocbIn->io_buf;
struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
@@ -4568,7 +4391,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
- if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -4681,7 +4504,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4736,7 +4559,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4777,7 +4599,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4854,9 +4676,9 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
piocbq->iocb.ulpFCP2Rcvy = 0;
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
- piocbq->context1 = lpfc_cmd;
- if (!piocbq->iocb_cmpl)
- piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ piocbq->io_buf = lpfc_cmd;
+ if (!piocbq->cmd_cmpl)
+ piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
piocbq->iocb.ulpTimeout = tmo;
piocbq->vport = vport;
return 0;
@@ -4966,10 +4788,9 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
pwqeq->vport = vport;
- pwqeq->vport = vport;
- pwqeq->context1 = lpfc_cmd;
+ pwqeq->io_buf = lpfc_cmd;
pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
- pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
+ pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
return 0;
}
@@ -5016,7 +4837,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_io_buf data structure.
* @lun: Logical unit number.
@@ -5030,10 +4851,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
* 1 - Success
**/
static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
- struct lpfc_io_buf *lpfc_cmd,
- uint64_t lun,
- uint8_t task_mgmt_cmd)
+lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
{
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
@@ -5054,15 +4874,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
- if (vport->phba->sli_rev == 3 &&
- !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
- if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- piocb->ulpContext =
- vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- }
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
piocb->ulpPU = 0;
@@ -5078,8 +4893,79 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
} else
piocb->ulpTimeout = lpfc_cmd->timeout;
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
+ return 1;
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
+{
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+ struct fcp_cmnd *fcp_cmnd;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+
+ if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ return 0;
+
+ pwqeq->vport = vport;
+ /* Initialize 64 bytes only */
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* From the icmnd template, initialize words 4 - 11 */
+ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
+ sizeof(uint32_t) * 8);
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
+ fcp_cmnd->fcpCntl3 = 0;
+ fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
+ ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
+ bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
+ (ndlp->nlp_fcp_info & 0x0f));
+
+ /* ulpTimeout is only one byte */
+ if (lpfc_cmd->timeout > 0xff) {
+ /*
+ * Do not timeout the command at the firmware level.
+ * The driver will provide the timeout mechanism.
+ */
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
+ } else {
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
+ }
+
+ lpfc_prep_embed_io(vport->phba, lpfc_cmd);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
return 1;
}
@@ -5106,6 +4992,8 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
@@ -5113,6 +5001,8 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -5139,8 +5029,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *) cmdiocbq->context1;
+ struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
if (lpfc_cmd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
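
/*
 * Editor's note, not part of the patch: with the s3/s4 task-management
 * prep split out above, callers go through the per-SLI ops table set up
 * in lpfc_scsi_api_table_setup() instead of branching on sli_rev,
 * roughly (FCP_LUN_RESET stands for whichever task-management code the
 * caller passes):
 *
 *   rc = vport->phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
 *                                                  lun, FCP_LUN_RESET);
 *   if (rc != 1)
 *           return FAILED;    0 means the prep failed
 */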
@@ -5315,253 +5204,6 @@ void lpfc_poll_timeout(struct timer_list *t)
}
/*
- * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash: calculated hash value
- * @buf: uuid associated with the VE
- * Return the VMID entry associated with the UUID
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
- u32 hash, u8 *buf)
-{
- struct lpfc_vmid *vmp;
-
- hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
- if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
- return vmp;
- }
- return NULL;
-}
-
-/*
- * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash - calculated hash value
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- *
- * This routine will insert the newly acquired VMID entity in the hash table.
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp)
-{
- hash_add(vport->hash_table, &vmp->hnode, hash);
-}
-
-/*
- * lpfc_vmid_hash_fn - create a hash value of the UUID
- * @vmid: uuid associated with the VE
- * @len: length of the VMID string
- * Returns the calculated hash value
- */
-int lpfc_vmid_hash_fn(const char *vmid, int len)
-{
- int c;
- int hash = 0;
-
- if (len == 0)
- return 0;
- while (len--) {
- c = *vmid++;
- if (c >= 'A' && c <= 'Z')
- c += 'a' - 'A';
-
- hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
- (c >> LPFC_VMID_HASH_SHIFT)) * 19;
- }
-
- return hash & LPFC_VMID_HASH_MASK;
-}
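
The hash removed here (this series relocates the VMID helpers to lpfc_vmid.c) is a case-folding multiplicative hash over the UUID string. A self-contained sketch of the same computation; the shift and mask values are assumed stand-ins for the driver's LPFC_VMID_HASH_SHIFT/LPFC_VMID_HASH_MASK constants:

#include <stdio.h>

#define VMID_HASH_SHIFT	5	/* assumed stand-in */
#define VMID_HASH_MASK	0xff	/* assumed stand-in */

static int vmid_hash(const char *vmid, int len)
{
	int c, hash = 0;

	while (len--) {
		c = *vmid++;
		if (c >= 'A' && c <= 'Z')	/* case-fold */
			c += 'a' - 'A';
		hash = (hash + (c << VMID_HASH_SHIFT) +
			(c >> VMID_HASH_SHIFT)) * 19;
	}
	return hash & VMID_HASH_MASK;
}

int main(void)
{
	/* Differently cased copies of a UUID land in the same bucket. */
	printf("%d %d\n", vmid_hash("6ba7b810-9dad", 13),
	       vmid_hash("6BA7B810-9DAD", 13));
	return 0;
}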
-
-/*
- * lpfc_vmid_update_entry - update the vmid entry in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @cmd: address of scsi cmd descriptor
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- * @tag: VMID tag
- */
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag)
-{
- u64 *lta;
-
- if (vport->vmid_priority_tagging)
- tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
- else
- tag->app_id = vmp->un.app_id;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- vmp->io_wr_cnt++;
- else
- vmp->io_rd_cnt++;
-
- /* update the last access timestamp in the table */
- lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
- *lta = jiffies;
-}
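
Note the per-CPU last-access stamp at the end of the routine above: each CPU writes only its own slot, so the I/O fast path needs no lock and no shared cacheline, and the inactivity reaper can later scan all slots. A userspace analogue of the idea; the slot-count bound and time source are assumptions, not the driver's:

#define _GNU_SOURCE
#include <sched.h>
#include <time.h>

#define NR_SLOTS 256			/* assumed CPU count bound */

static unsigned long last_io_time[NR_SLOTS];

static void touch_last_io_time(void)
{
	int cpu = sched_getcpu();	/* analogue of raw_smp_processor_id() */

	if (cpu >= 0 && cpu < NR_SLOTS)
		last_io_time[cpu] = (unsigned long)time(NULL);	/* ~jiffies */
}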
-
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid)
-{
- u32 hash;
- struct lpfc_vmid *pvmid;
-
- if (vport->port_type == LPFC_PHYSICAL_PORT) {
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- } else {
- hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
- pvmid =
- lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
- vmid->host_vmid);
- if (pvmid)
- vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
- else
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- }
-}
-
-/*
- * lpfc_vmid_get_appid - get the VMID associated with the UUID
- * @vport: The virtual port for which this call is being executed.
- * @uuid: UUID associated with the VE
- * @cmd: address of scsi_cmd descriptor
- * @tag: VMID tag
- * Returns status of the function
- */
-static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
- scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
-{
- struct lpfc_vmid *vmp = NULL;
- int hash, len, rc, i;
-
- /* check if QFPA is complete */
- if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
- LPFC_VMID_QFPA_CMPL)) {
- vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
- return -EAGAIN;
- }
-
- /* search if the UUID has already been mapped to the VMID */
- len = strlen(uuid);
- hash = lpfc_vmid_hash_fn(uuid, len);
-
- /* search for the VMID in the table */
- read_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* if found, check if its already registered */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- read_unlock(&vport->vmid_lock);
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- rc = 0;
- } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
- vmp->flag & LPFC_VMID_DE_REGISTER)) {
- /* else if register or dereg request has already been sent */
- /* Hence VMID tag will not be added for this I/O */
- read_unlock(&vport->vmid_lock);
- rc = -EBUSY;
- } else {
- /* The VMID was not found in the hashtable. At this point, */
- /* drop the read lock first before proceeding further */
- read_unlock(&vport->vmid_lock);
- /* start the process to obtain one as per the */
- /* type of the VMID indicated */
- write_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* while the read lock was released, in case the entry was */
- /* added by other context or is in process of being added */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- write_unlock(&vport->vmid_lock);
- return 0;
- } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
- write_unlock(&vport->vmid_lock);
- return -EBUSY;
- }
-
- /* else search and allocate a free slot in the hash table */
- if (vport->cur_vmid_cnt < vport->max_vmid) {
- for (i = 0; i < vport->max_vmid; i++) {
- vmp = vport->vmid + i;
- if (vmp->flag == LPFC_VMID_SLOT_FREE)
- break;
- }
- if (i == vport->max_vmid)
- vmp = NULL;
- } else {
- vmp = NULL;
- }
-
- if (!vmp) {
- write_unlock(&vport->vmid_lock);
- return -ENOMEM;
- }
-
- /* Add the vmid and register */
- lpfc_put_vmid_in_hashtable(vport, hash, vmp);
- vmp->vmid_len = len;
- memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
- vmp->io_rd_cnt = 0;
- vmp->io_wr_cnt = 0;
- vmp->flag = LPFC_VMID_SLOT_USED;
-
- vmp->delete_inactive =
- vport->vmid_inactivity_timeout ? 1 : 0;
-
- /* if type priority tag, get next available VMID */
- if (lpfc_vmid_is_type_priority_tag(vport))
- lpfc_vmid_assign_cs_ctl(vport, vmp);
-
- /* allocate the per cpu variable for holding */
- /* the last access time stamp only if VMID is enabled */
- if (!vmp->last_io_time)
- vmp->last_io_time = __alloc_percpu(sizeof(u64),
- __alignof__(struct
- lpfc_vmid));
- if (!vmp->last_io_time) {
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- write_unlock(&vport->vmid_lock);
-
- /* complete transaction with switch */
- if (lpfc_vmid_is_type_priority_tag(vport))
- rc = lpfc_vmid_uvem(vport, vmp, true);
- else
- rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
- if (!rc) {
- write_lock(&vport->vmid_lock);
- vport->cur_vmid_cnt++;
- vmp->flag |= LPFC_VMID_REQ_REGISTER;
- write_unlock(&vport->vmid_lock);
- } else {
- write_lock(&vport->vmid_lock);
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- free_percpu(vmp->last_io_time);
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- /* finally, enable the idle timer once */
- if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
- mod_timer(&vport->phba->inactive_vmid_poll,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
- vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
- }
- }
- return rc;
-}
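
The locking shape of the routine above (which also moves to lpfc_vmid.c) is worth calling out: a read-locked probe, then, on a miss, a write-locked re-probe before inserting, because another context may have registered the same UUID while no lock was held. A self-contained pthread-rwlock sketch of that double-checked pattern; the table and types are illustrative stand-ins, not the driver's:

#include <pthread.h>
#include <string.h>

struct entry { char key[32]; int used; };

static struct entry tbl[64];
static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct entry *tbl_find(const char *key)
{
	for (int i = 0; i < 64; i++)
		if (tbl[i].used && !strcmp(tbl[i].key, key))
			return &tbl[i];
	return NULL;
}

static struct entry *lookup_or_create(const char *key)
{
	struct entry *e;

	pthread_rwlock_rdlock(&tbl_lock);
	e = tbl_find(key);
	pthread_rwlock_unlock(&tbl_lock);
	if (e)
		return e;

	pthread_rwlock_wrlock(&tbl_lock);
	e = tbl_find(key);		/* recheck: lost a race to insert? */
	for (int i = 0; !e && i < 64; i++) {
		if (!tbl[i].used) {
			tbl[i].used = 1;
			strncpy(tbl[i].key, key, sizeof(tbl[i].key) - 1);
			e = &tbl[i];
		}
	}
	pthread_rwlock_unlock(&tbl_lock);
	return e;
}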
-
-/*
* lpfc_is_command_vm_io - get the UUID from blk cgroup
* @cmd: Pointer to scsi_cmnd data structure
* Returns UUID if present, otherwise NULL
@@ -5570,7 +5212,9 @@ static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
{
struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
- return bio ? blkcg_get_fc_appid(bio) : NULL;
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
+ return NULL;
+ return blkcg_get_fc_appid(bio);
}
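
The guard added above works because IS_ENABLED() folds to a 0/1 constant at compile time, so when CONFIG_BLK_CGROUP_FC_APPID is off the blkcg_get_fc_appid() call sits in a provably dead branch and is compiled out. A reduced, compilable model of the idiom; the plain macro and the lookup function here only stand in for the Kconfig machinery:

#include <stddef.h>

#define FEATURE_FC_APPID 0	/* stand-in for IS_ENABLED(CONFIG_...) */

static const char *appid_from_bio(const char *bio)	/* stand-in lookup */
{
	return bio;
}

static const char *get_appid(const char *bio)
{
	if (!FEATURE_FC_APPID || !bio)
		return NULL;
	return appid_from_bio(bio);	/* dead code when the feature is 0 */
}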
/**
@@ -5591,6 +5235,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cur_iocbq = NULL;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct lpfc_io_buf *lpfc_cmd;
@@ -5684,6 +5329,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
lpfc_cmd->rx_cmd_start = start;
+ cur_iocbq = &lpfc_cmd->cur_iocbq;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
@@ -5691,7 +5337,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
- lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ cur_iocbq->cmd_cmpl = NULL;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
@@ -5733,7 +5379,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
goto out_host_busy_free_buf;
}
-
/* check the necessary and sufficient condition to support VMID */
if (lpfc_is_vmid_enabled(phba) &&
(ndlp->vmid_support ||
@@ -5744,22 +5389,21 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
uuid = lpfc_is_command_vm_io(cmnd);
if (uuid) {
- err = lpfc_vmid_get_appid(vport, uuid, cmnd,
- (union lpfc_vmid_io_tag *)
- &lpfc_cmd->cur_iocbq.vmid_tag);
+ err = lpfc_vmid_get_appid(vport, uuid,
+ cmnd->sc_data_direction,
+ (union lpfc_vmid_io_tag *)
+ &cur_iocbq->vmid_tag);
if (!err)
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
+ cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
}
- atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
/* Issue I/O to adapter */
- err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
- &lpfc_cmd->cur_iocbq,
+ err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
@@ -5772,25 +5416,25 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "3376 FCP could not issue IOCB err %x "
- "FCP cmd x%x <%d/%llu> "
- "sid: x%x did: x%x oxid: x%x "
- "Data: x%x x%x x%x x%x\n",
- err, cmnd->cmnd[0],
- cmnd->device ? cmnd->device->id : 0xffff,
- cmnd->device ? cmnd->device->lun : (u64)-1,
- vport->fc_myDID, ndlp->nlp_DID,
- phba->sli_rev == LPFC_SLI_REV4 ?
- lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
- phba->sli_rev == LPFC_SLI_REV4 ?
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
- lpfc_cmd->cur_iocbq.iocb.ulpContext,
- lpfc_cmd->cur_iocbq.iotag,
- phba->sli_rev == LPFC_SLI_REV4 ?
- bf_get(wqe_tmo,
- &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
- lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
+ "3376 FCP could not issue iocb err %x "
+ "FCP cmd x%x <%d/%llu> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : (u64)-1,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ cur_iocbq->sli4_xritag : 0xffff,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
+ cur_iocbq->iocb.ulpContext,
+ cur_iocbq->iotag,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ bf_get(wqe_tmo,
+ &cur_iocbq->wqe.generic.wqe_com) :
+ cur_iocbq->iocb.ulpTimeout,
+ (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
goto out_host_busy_free_buf;
}
@@ -5886,6 +5530,7 @@ static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocb;
@@ -5897,7 +5542,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
@@ -5905,25 +5550,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (!lpfc_cmd)
return ret;
- spin_lock_irqsave(&phba->hbalock, flags);
+ /* Guard against IO completion being called at the same time */
+ spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
+
+ spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
ret = FAILED;
- goto out_unlock;
+ goto out_unlock_hba;
}
- /* Guard against IO completion being called at same time */
- spin_lock(&lpfc_cmd->buf_lock);
-
if (!lpfc_cmd->pCmd) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %llu\n",
SUCCESS, cmnd->device->id, cmnd->device->lun);
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
iocb = &lpfc_cmd->cur_iocbq;
@@ -5931,12 +5576,12 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
spin_lock(&pring_s4->ring_lock);
}
/* the command is in process of being cancelled */
- if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
"cancelled by LLD.\n");
@@ -5956,16 +5601,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock_ring;
}
- BUG_ON(iocb->context1 != lpfc_cmd);
+ WARN_ON(iocb->io_buf != lpfc_cmd);
/* abort issued in recovery is still in progress */
- if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
goto wait_for_cmpl;
}
@@ -5973,7 +5618,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (phba->sli_rev == LPFC_SLI_REV4) {
spin_unlock(&pring_s4->ring_lock);
ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
- lpfc_sli4_abort_fcp_cmpl);
+ lpfc_sli_abort_fcp_cmpl);
} else {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
@@ -5986,15 +5631,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
lpfc_cmd->waitq = NULL;
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
ret = FAILED;
- goto out;
+ goto out_unlock_hba;
}
/* no longer need the lock after this point */
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
@@ -6002,7 +5645,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
wait_for_cmpl:
/*
- * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
+ * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
* for abort to complete.
*/
wait_event_timeout(waitq,
@@ -6029,10 +5672,9 @@ wait_for_cmpl:
out_unlock_ring:
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
-out_unlock_buf:
- spin_unlock(&lpfc_cmd->buf_lock);
-out_unlock:
- spin_unlock_irqrestore(&phba->hbalock, flags);
+out_unlock_hba:
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "
@@ -6139,7 +5781,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * @rport: Pointer to remote port.
* @tgt_id: Target ID of remote device.
* @lun_id: Lun number for the TMF
* @task_mgmt_cmd: type of TMF to send
@@ -6152,7 +5794,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
unsigned int tgt_id, uint64_t lun_id,
uint8_t task_mgmt_cmd)
{
@@ -6165,21 +5807,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
int ret;
int status;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode)
return FAILED;
pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
- lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->pCmd = NULL;
lpfc_cmd->ndlp = pnode;
- status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
- task_mgmt_cmd);
+ status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+ task_mgmt_cmd);
if (!status) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
@@ -6191,38 +5833,41 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->vport = vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %llu "
"rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
- iocbq->iocb_flag);
+ iocbq->cmd_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if ((status != IOCB_SUCCESS) ||
- (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
+ (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
if (status != IOCB_SUCCESS ||
- iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
+ get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0727 TMF %s to TGT %d LUN %llu "
- "failed (%d, %d) iocb_flag x%x\n",
+ "failed (%d, %d) cmd_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id,
- iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4],
- iocbq->iocb_flag);
+ get_job_ulpstatus(phba, iocbqrsp),
+ get_job_word4(phba, iocbqrsp),
+ iocbq->cmd_flag);
/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
if (status == IOCB_SUCCESS) {
- if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ if (get_job_ulpstatus(phba, iocbqrsp) ==
+ IOSTAT_FCP_RSP_ERROR)
/* Something in the FCP_RSP was invalid.
* Check conditions */
ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
else
ret = FAILED;
- } else if (status == IOCB_TIMEDOUT) {
+ } else if ((status == IOCB_TIMEDOUT) ||
+ (status == IOCB_ABORTED)) {
ret = TIMEOUT_ERROR;
} else {
ret = FAILED;
@@ -6232,7 +5877,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_sli_release_iocbq(phba, iocbqrsp);
- if (ret != TIMEOUT_ERROR)
+ if (status != IOCB_TIMEDOUT)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return ret;
@@ -6241,7 +5886,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
/**
* lpfc_chk_tgt_mapped -
* @vport: The virtual port to check on
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * @rport: Pointer to fc_rport data structure.
*
* This routine delays until the scsi target (aka rport) for the
* command exists (is present and logged in) or we declare it non-existent.
@@ -6251,19 +5896,20 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
* 0x2002 - Success
**/
static int
-lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
+ struct lpfc_nodelist *pnode = NULL;
unsigned long later;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0797 Tgt Map rport failure: rdata x%px\n", rdata);
return FAILED;
}
pnode = rdata->pnode;
+
/*
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
@@ -6275,7 +5921,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata)
return FAILED;
pnode = rdata->pnode;
@@ -6346,6 +5992,7 @@ static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
@@ -6355,7 +6002,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int status;
u32 logit = LOG_FCP;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rport)
+ return FAILED;
+
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0798 Device Reset rdata failure: rdata x%px\n",
@@ -6363,11 +6013,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0721 Device Reset rport failure: rdata x%px\n", rdata);
@@ -6383,7 +6033,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_LUN_RESET);
if (status != SUCCESS)
logit = LOG_TRACE_EVENT;
@@ -6420,6 +6070,7 @@ static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
@@ -6432,7 +6083,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rport)
+ return FAILED;
+
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0799 Target Reset rdata failure: rdata x%px\n",
@@ -6440,11 +6094,11 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
@@ -6468,7 +6122,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_TARGET_RESET);
if (status != SUCCESS) {
logit = LOG_TRACE_EVENT;
@@ -6527,95 +6181,6 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
}
/**
- * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
- * @cmnd: Pointer to scsi_cmnd data structure.
- *
- * This routine does target reset to all targets on @cmnd->device->host.
- * This emulates Parallel SCSI Bus Reset Semantics.
- *
- * Return code :
- * 0x2003 - Error
- * 0x2002 - Success
- **/
-static int
-lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
-{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_scsi_event_header scsi_event;
- int match;
- int ret = SUCCESS, status, i;
- u32 logit = LOG_FCP;
-
- scsi_event.event_type = FC_REG_SCSI_EVENT;
- scsi_event.subcategory = LPFC_EVENT_BUSRESET;
- scsi_event.lun = 0;
- memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
- memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
-
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
-
- status = fc_block_scsi_eh(cmnd);
- if (status != 0 && status != SUCCESS)
- return status;
-
- /*
- * Since the driver manages a single bus device, reset all
- * targets known to the driver. Should any target reset
- * fail, this routine returns failure to the midlayer.
- */
- for (i = 0; i < LPFC_MAX_TARGET; i++) {
- /* Search for mapped node by target ID */
- match = 0;
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
-
- if (vport->phba->cfg_fcp2_no_tgt_reset &&
- (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
- continue;
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
- ndlp->nlp_sid == i &&
- ndlp->rport &&
- ndlp->nlp_type & NLP_FCP_TARGET) {
- match = 1;
- break;
- }
- }
- spin_unlock_irq(shost->host_lock);
- if (!match)
- continue;
-
- status = lpfc_send_taskmgmt(vport, cmnd,
- i, 0, FCP_TARGET_RESET);
-
- if (status != SUCCESS) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0700 Bus Reset on target %d failed\n",
- i);
- ret = FAILED;
- }
- }
- /*
- * We have to clean up i/o as : they may be orphaned by the TMFs
- * above; or if any of the TMFs failed, they may be in an
- * indeterminate state.
- * We will report success if all the i/o aborts successfully.
- */
-
- status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
- if (status != SUCCESS)
- ret = FAILED;
- if (ret == FAILED)
- logit = LOG_TRACE_EVENT;
-
- lpfc_printf_vlog(vport, KERN_ERR, logit,
- "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
- return ret;
-}
-
-/**
* lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
* @cmnd: Pointer to scsi_cmnd data structure.
*
@@ -7181,12 +6746,6 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
static int
-lpfc_no_handler(struct scsi_cmnd *cmnd)
-{
- return FAILED;
-}
-
-static int
lpfc_no_slave(struct scsi_device *sdev)
{
return -ENODEV;
@@ -7198,11 +6757,6 @@ struct scsi_host_template lpfc_template_nvme = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_no_command,
- .eh_abort_handler = lpfc_no_handler,
- .eh_device_reset_handler = lpfc_no_handler,
- .eh_target_reset_handler = lpfc_no_handler,
- .eh_bus_reset_handler = lpfc_no_handler,
- .eh_host_reset_handler = lpfc_no_handler,
.slave_alloc = lpfc_no_slave,
.slave_configure = lpfc_no_slave,
.scan_finished = lpfc_scan_finished,
@@ -7226,7 +6780,6 @@ struct scsi_host_template lpfc_template = {
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
.eh_host_reset_handler = lpfc_host_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
@@ -7241,3 +6794,30 @@ struct scsi_host_template lpfc_template = {
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
+
+struct scsi_host_template lpfc_vport_template = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = NULL,
+ .eh_host_reset_handler = NULL,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .shost_groups = lpfc_vport_groups,
+ .max_sectors = 0xFFFFFFFF,
+ .vendor_id = 0,
+ .change_queue_depth = scsi_change_queue_depth,
+ .track_queue_depth = 1,
+};
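
The new lpfc_vport_template mirrors lpfc_template except that the bus and host reset entry points are NULL and the sysfs attributes come from lpfc_vport_groups, so NPIV vports no longer expose physical-port reset semantics. Where the driver chooses between the two templates is outside this hunk; a hypothetical selection site, shown only to make the intent of the second template concrete:

/* Hypothetical: pick the template when allocating a port's Scsi_Host. */
static struct Scsi_Host *alloc_port_shost(bool physical, int priv_sz)
{
	return scsi_host_alloc(physical ? &lpfc_template
					: &lpfc_vport_template, priv_sz);
}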
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 3836d7f6a575..eae56944f31b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -126,10 +126,6 @@ struct fcp_cmnd {
};
-struct lpfc_scsicmd_bkt {
- uint32_t cmd_count;
-};
-
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 5dedb3de271d..99d06dc7ddf6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -70,8 +70,9 @@ static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint8_t *, uint32_t *);
-static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
- struct lpfc_iocbq *);
+static struct lpfc_iocbq *
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
@@ -89,17 +90,14 @@ static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
struct lpfc_queue *cq,
struct lpfc_cqe *cqe);
+static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pwqeq,
+ struct lpfc_sglq *sglq);
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
-static IOCB_t *
-lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
-{
- return &iocbq->iocb;
-}
-
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
@@ -1251,29 +1249,24 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct lpfc_sglq *start_sglq = NULL;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
- struct lpfc_sli_ring *pring = NULL;
int found = 0;
+ u8 cmnd;
- if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
- pring = phba->sli4_hba.nvmels_wq->pring;
- else
- pring = lpfc_phba_elsring(phba);
+ cmnd = get_job_cmnd(phba, piocbq);
- lockdep_assert_held(&pring->ring_lock);
-
- if (piocbq->iocb_flag & LPFC_IO_FCP) {
- lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
+ if (piocbq->cmd_flag & LPFC_IO_FCP) {
+ lpfc_cmd = piocbq->io_buf;
ndlp = lpfc_cmd->rdata->pnode;
- } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
- !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
- ndlp = piocbq->context_un.ndlp;
- } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
- if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+ } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
+ !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
+ ndlp = piocbq->ndlp;
+ } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
+ if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
ndlp = NULL;
else
- ndlp = piocbq->context_un.ndlp;
+ ndlp = piocbq->ndlp;
} else {
- ndlp = piocbq->context1;
+ ndlp = piocbq->ndlp;
}
spin_lock(&phba->sli4_hba.sgl_list_lock);
@@ -1380,7 +1373,7 @@ static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
struct lpfc_sglq *sglq;
- size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
unsigned long iflag = 0;
struct lpfc_sli_ring *pring;
@@ -1391,7 +1384,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (sglq) {
- if (iocbq->iocb_flag & LPFC_IO_NVMET) {
+ if (iocbq->cmd_flag & LPFC_IO_NVMET) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
sglq->state = SGL_FREED;
@@ -1403,7 +1396,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
goto out;
}
- if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+ if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
(!(unlikely(pci_channel_offline(phba->pcidev)))) &&
sglq->state != SGL_XRI_ABORTED) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
@@ -1440,7 +1433,7 @@ out:
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
- iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
+ iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
LPFC_IO_NVME_LS);
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -1530,17 +1523,21 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (piocb->wqe_cmpl) {
- if (piocb->iocb_flag & LPFC_IO_NVME)
+ if (piocb->cmd_cmpl) {
+ if (piocb->cmd_flag & LPFC_IO_NVME) {
lpfc_nvme_cancel_iocb(phba, piocb,
ulpstatus, ulpWord4);
- else
- lpfc_sli_release_iocbq(phba, piocb);
-
- } else if (piocb->iocb_cmpl) {
- piocb->iocb.ulpStatus = ulpstatus;
- piocb->iocb.un.ulpWord[4] = ulpWord4;
- (piocb->iocb_cmpl) (phba, piocb, piocb);
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status,
+ &piocb->wcqe_cmpl, ulpstatus);
+ piocb->wcqe_cmpl.parameter = ulpWord4;
+ } else {
+ piocb->iocb.ulpStatus = ulpstatus;
+ piocb->iocb.un.ulpWord[4] = ulpWord4;
+ }
+ (piocb->cmd_cmpl) (phba, piocb, piocb);
+ }
} else {
lpfc_sli_release_iocbq(phba, piocb);
}
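
The cancel path above now records the failure status where each SLI revision expects it: in the embedded WCQE for SLI-4 and in the classic IOCB words for SLI-3. The set_job_ulpstatus()/get_job_ulpstatus() accessors used elsewhere in this patch encapsulate the same dispatch; a reduced model with stand-in fields:

struct job_model {
	int sli_rev;			/* 3 or 4 */
	unsigned int iocb_status;	/* SLI-3 home: iocb.ulpStatus */
	unsigned int wcqe_status;	/* SLI-4 home: wcqe_cmpl status */
};

static void set_job_status(struct job_model *j, unsigned int status)
{
	if (j->sli_rev == 4)
		j->wcqe_status = status;
	else
		j->iocb_status = status;
}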
@@ -1724,20 +1721,18 @@ static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- if (phba->sli_rev == LPFC_SLI_REV4)
- lockdep_assert_held(&pring->ring_lock);
- else
- lockdep_assert_held(&phba->hbalock);
+ u32 ulp_command = 0;
BUG_ON(!piocb);
+ ulp_command = get_job_cmnd(phba, piocb);
list_add_tail(&piocb->list, &pring->txcmplq);
- piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt++;
-
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
- (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (ulp_command != CMD_ABORT_XRI_WQE) &&
+ (ulp_command != CMD_ABORT_XRI_CN) &&
+ (ulp_command != CMD_CLOSE_XRI_CN)) {
BUG_ON(!piocb->vport);
if (!(piocb->vport->load_flag & FC_UNLOADING))
mod_timer(&piocb->vport->els_tmofunc,
@@ -1773,7 +1768,7 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to driver command iocb object.
- * @cmf_cmpl: Pointer to completed WCQE.
+ * @rspiocb: Pointer to driver response iocb object.
*
* This routine will inform the driver of any BW adjustments we need
* to make. These changes will be picked up during the next CMF
@@ -1782,10 +1777,11 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
**/
static void
lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *cmf_cmpl)
+ struct lpfc_iocbq *rspiocb)
{
union lpfc_wqe128 *wqe;
uint32_t status, info;
+ struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
uint64_t bw, bwdif, slop;
uint64_t pcent, bwpcent;
int asig, afpin, sigcnt, fpincnt;
@@ -1793,22 +1789,22 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
char *s;
/* First check for error */
- status = bf_get(lpfc_wcqe_c_status, cmf_cmpl);
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
if (status) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6211 CMF_SYNC_WQE Error "
"req_tag x%x status x%x hwstatus x%x "
"tdatap x%x parm x%x\n",
- bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl),
- bf_get(lpfc_wcqe_c_status, cmf_cmpl),
- bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl),
- cmf_cmpl->total_data_placed,
- cmf_cmpl->parameter);
+ bf_get(lpfc_wcqe_c_request_tag, wcqe),
+ bf_get(lpfc_wcqe_c_status, wcqe),
+ bf_get(lpfc_wcqe_c_hw_status, wcqe),
+ wcqe->total_data_placed,
+ wcqe->parameter);
goto out;
}
/* Gather congestion information on a successful cmpl */
- info = cmf_cmpl->parameter;
+ info = wcqe->parameter;
phba->cmf_active_info = info;
/* See if firmware info count is valid or has changed */
@@ -1817,15 +1813,15 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
phba->cmf_info_per_interval = info;
- tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl);
- cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl);
+ tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
+ cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
/* Get BW requirement from firmware */
bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
if (!bw) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6212 CMF_SYNC_WQE x%x: NULL bw\n",
- bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl));
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
goto out;
}
@@ -1920,6 +1916,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
+ u16 warn_sync_period = 0;
/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -1934,7 +1931,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
sync_buf = __lpfc_sli_get_iocbq(phba);
if (!sync_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
- "6213 No available WQEs for CMF_SYNC_WQE\n");
+ "6244 No available WQEs for CMF_SYNC_WQE\n");
ret_val = ENOMEM;
goto out_unlock;
}
@@ -1974,10 +1971,14 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
lpfc_acqe_cgn_frequency;
bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
+ warn_sync_period = lpfc_acqe_cgn_frequency;
} else {
/* We hit a FPIN warning condition */
bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
+ if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
+ warn_sync_period =
+ LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
}
}
@@ -1993,25 +1994,27 @@ initpath:
bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
sync_buf->vport = phba->pport;
- sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl;
- sync_buf->iocb_cmpl = NULL;
- sync_buf->context1 = NULL;
- sync_buf->context2 = NULL;
- sync_buf->context3 = NULL;
+ sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
+ sync_buf->cmd_dmabuf = NULL;
+ sync_buf->rsp_dmabuf = NULL;
+ sync_buf->bpl_dmabuf = NULL;
sync_buf->sli4_xritag = NO_XRI;
- sync_buf->iocb_flag |= LPFC_IO_CMF;
+ sync_buf->cmd_flag |= LPFC_IO_CMF;
ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
- if (ret_val)
+ if (ret_val) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
ret_val);
+ __lpfc_sli_release_iocbq(phba, sync_buf);
+ }
out_unlock:
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret_val;
@@ -2173,7 +2176,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* Set up an iotag
*/
- nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+ nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
if (pring->ringno == LPFC_ELS_RING) {
@@ -2194,9 +2197,9 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* If there is no completion routine to call, we can release the
* IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
- * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+ * that have no rsp ring completion, cmd_cmpl MUST be NULL.
*/
- if (nextiocb->iocb_cmpl)
+ if (nextiocb->cmd_cmpl)
lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
else
__lpfc_sli_release_iocbq(phba, nextiocb);
@@ -2833,6 +2836,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ __lpfc_sli_rpi_release(vport, ndlp);
+}
+
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
@@ -2853,13 +2862,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t rpi, vpi;
int rc;
- mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
-
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
-
/*
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
@@ -2867,6 +2869,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!(phba->pport->load_flag & FC_UNLOADING) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ if (mp) {
+ pmb->ctx_buf = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2892,8 +2900,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
- pmb->ctx_buf = NULL;
- pmb->ctx_ndlp = NULL;
}
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
@@ -2944,7 +2950,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
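
With this hunk the default mailbox completion frees the REG_LOGIN buffer only on the success path and otherwise hands the mailbox to lpfc_mbox_rsrc_cleanup(), centralizing teardown of whatever the command pinned. A sketch of what such a helper plausibly covers, inferred from the frees it replaces here rather than copied from its actual definition:

static void mbox_rsrc_cleanup_model(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp = pmb->ctx_buf;

	if (mp) {
		pmb->ctx_buf = NULL;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	if (pmb->ctx_ndlp)
		lpfc_nlp_put((struct lpfc_nodelist *)pmb->ctx_ndlp);
	mempool_free(pmb, phba->mbox_mem_pool);
}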
/**
* lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
@@ -3196,7 +3202,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
uint32_t oxid, sid, did, fctl, size;
int ret = 1;
- d_buf = piocb->context2;
+ d_buf = piocb->cmd_dmabuf;
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
fc_hdr = nvmebuf->hbuf.virt;
@@ -3359,6 +3365,56 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+static void
+lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
+ struct lpfc_iocbq *saveq)
+{
+ IOCB_t *irsp;
+ union lpfc_wqe128 *wqe;
+ u16 i = 0;
+
+ irsp = &saveq->iocb;
+ wqe = &saveq->wqe;
+
+ /* Fill wcqe with the IOCB status fields */
+ bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
+ saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
+ saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
+ saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
+
+ /* Source ID */
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
+
+ /* rx-id of the response frame */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
+
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ irsp->unsli3.rcvsli3.ox_id);
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+ irsp->un.rcvels.remoteID);
+
+ /* unsol data len */
+ for (i = 0; i < irsp->ulpBdeCount; i++) {
+ struct lpfc_hbq_entry *hbqe = NULL;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (i == 0) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->un.ulpWord[0];
+ saveq->wqe.gen_req.bde.tus.f.bdeSize =
+ hbqe->bde.tus.f.bdeSize;
+ } else if (i == 1) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->unsli3.sli3Words[4];
+ saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
+ }
+ }
+ }
+}
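
The field stores in the new routine above all go through lpfc's bf_set() accessors, which are shift-and-mask writes driven by per-field _SHIFT/_MASK constants. A generic, runnable reduction of that mechanism; the macro names here are illustrative, not the driver's:

#include <stdint.h>

#define BF_SET(word, shift, mask, val) \
	((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
		  (((uint32_t)(val) & (mask)) << (shift)))
#define BF_GET(word, shift, mask) (((word) >> (shift)) & (mask))

int main(void)
{
	uint32_t w = 0;

	BF_SET(w, 16, 0xffff, 0xab12);	/* e.g. an ox-id style field */
	return BF_GET(w, 16, 0xffff) == 0xab12 ? 0 : 1;
}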
+
/**
* lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
* @phba: Pointer to HBA context object.
@@ -3379,11 +3435,13 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
IOCB_t * irsp;
WORD5 * w5p;
+ dma_addr_t paddr;
uint32_t Rctl, Type;
struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
- irsp = &(saveq->iocb);
+ irsp = &saveq->iocb;
+ saveq->vport = phba->pport;
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
@@ -3401,22 +3459,22 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
- (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+ (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
if (irsp->ulpBdeCount > 0) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->un.ulpWord[3]);
+ irsp->un.ulpWord[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 1) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[3]);
+ irsp->unsli3.sli3Words[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 2) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[7]);
+ irsp->unsli3.sli3Words[7]);
lpfc_in_buf_free(phba, dmzbuf);
}
@@ -3425,9 +3483,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0) {
- saveq->context2 = lpfc_sli_get_buff(phba, pring,
+ saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
- if (!saveq->context2)
+ if (!saveq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3437,9 +3495,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
- saveq->context3 = lpfc_sli_get_buff(phba, pring,
+ saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
- if (!saveq->context3)
+ if (!saveq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3449,11 +3507,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->unsli3.sli3Words[7]);
}
list_for_each_entry(iocbq, &saveq->list, list) {
- irsp = &(iocbq->iocb);
+ irsp = &iocbq->iocb;
if (irsp->ulpBdeCount != 0) {
- iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+ iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
+ pring,
irsp->un.ulpWord[3]);
- if (!iocbq->context2)
+ if (!iocbq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3463,9 +3522,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
- iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+ iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
+ pring,
irsp->unsli3.sli3Words[7]);
- if (!iocbq->context3)
+ if (!iocbq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3476,7 +3536,20 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->unsli3.sli3Words[7]);
}
}
+ } else {
+ paddr = getPaddr(irsp->un.cont64[0].addrHigh,
+ irsp->un.cont64[0].addrLow);
+ saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ if (irsp->ulpBdeCount == 2) {
+ paddr = getPaddr(irsp->un.cont64[1].addrHigh,
+ irsp->un.cont64[1].addrLow);
+ saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
+
if (irsp->ulpBdeCount != 0 &&
(irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
@@ -3494,12 +3567,14 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!found)
list_add_tail(&saveq->clist,
&pring->iocb_continue_saveq);
+
if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
list_del_init(&iocbq->clist);
saveq = iocbq;
- irsp = &(saveq->iocb);
- } else
+ irsp = &saveq->iocb;
+ } else {
return 0;
+ }
}
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
(irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
@@ -3522,6 +3597,19 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+ irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (irsp->unsli3.rcvsli3.vpi == 0xffff)
+ saveq->vport = phba->pport;
+ else
+ saveq->vport = lpfc_find_vport_by_vpid(phba,
+ irsp->unsli3.rcvsli3.vpi);
+ }
+
+ /* Prepare WQE with Unsol frame */
+ lpfc_sli_prep_unsol_wqe(phba, saveq);
+
if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0313 Ring %d handler: unexpected Rctl x%x "
@@ -3550,36 +3638,28 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
struct lpfc_iocbq *prspiocb)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- uint16_t iotag;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
+ u16 iotag;
if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
+ iotag = get_wqe_reqtag(prspiocb);
else
- temp_lock = &phba->hbalock;
-
- spin_lock_irqsave(temp_lock, iflag);
- iotag = prspiocb->iocb.ulpIoTag;
+ iotag = prspiocb->iocb.ulpIoTag;
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0317 iotag x%x is out of "
- "range: max iotag x%x wd0 x%x\n",
- iotag, phba->sli.last_iotag,
- *(((uint32_t *) &prspiocb->iocb) + 7));
+ "range: max iotag x%x\n",
+ iotag, phba->sli.last_iotag);
return NULL;
}
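
lpfc_sli_iocbq_lookup() no longer takes the ring or HBA lock itself; its callers do, as the updated lpfc_sli_process_sol_iocb() below in this file shows. One conventional way to make such a caller-holds-lock contract self-documenting, and runtime-checked under CONFIG_PROVE_LOCKING, is a lockdep assertion in the callee. A sketch of that option, not something this patch adds:

static struct lpfc_iocbq *
lookup_locked(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, u16 iotag)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	if (iotag != 0 && iotag <= phba->sli.last_iotag)
		return phba->sli.iocbq_lookup[iotag];
	return NULL;
}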
@@ -3600,33 +3680,23 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint16_t iotag)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
- if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
- else
- temp_lock = &phba->hbalock;
-
- spin_lock_irqsave(temp_lock, iflag);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0372 iotag x%x lookup error: max iotag (x%x) "
- "iocb_flag x%x\n",
+ "cmd_flag x%x\n",
iotag, phba->sli.last_iotag,
- cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
+ cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
return NULL;
}
@@ -3652,20 +3722,38 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
struct lpfc_iocbq *cmdiocbp;
- int rc = 1;
unsigned long iflag;
+ u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ ulp_command = get_job_cmnd(phba, saveq);
+ ulp_status = get_job_ulpstatus(phba, saveq);
+ ulp_word4 = get_job_word4(phba, saveq);
+ ulp_context = get_job_ulpcontext(phba, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ iotag = get_wqe_reqtag(saveq);
+ else
+ iotag = saveq->iocb.ulpIoTag;
+
if (cmdiocbp) {
- if (cmdiocbp->iocb_cmpl) {
+ ulp_command = get_job_cmnd(phba, cmdiocbp);
+ if (cmdiocbp->cmd_cmpl) {
/*
* If an ELS command failed send an event to mgmt
* application.
*/
- if (saveq->iocb.ulpStatus &&
+ if (ulp_status &&
(pring->ringno == LPFC_ELS_RING) &&
- (cmdiocbp->iocb.ulpCommand ==
- CMD_ELS_REQUEST64_CR))
+ (ulp_command == CMD_ELS_REQUEST64_CR))
lpfc_send_els_failure_event(phba,
cmdiocbp, saveq);
@@ -3675,11 +3763,11 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
if (pring->ringno == LPFC_ELS_RING) {
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- (cmdiocbp->iocb_flag &
+ (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED)) {
spin_lock_irqsave(&phba->hbalock,
iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
@@ -3694,12 +3782,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(&phba->hbalock,
iflag);
- saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (saveq->iocb_flag &
+ if (saveq->cmd_flag &
LPFC_EXCHANGE_BUSY) {
/* Set cmdiocb flag for the
* exchange busy so sgl (xri)
@@ -3709,12 +3797,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag |=
+ cmdiocbp->cmd_flag |=
LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
- if (cmdiocbp->iocb_flag &
+ if (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED) {
/*
* Clear LPFC_DRIVER_ABORTED
@@ -3723,34 +3811,34 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
- cmdiocbp->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- cmdiocbp->iocb.un.ulpWord[4] =
- IOERR_ABORT_REQUESTED;
+ set_job_ulpstatus(cmdiocbp,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(cmdiocbp,
+ IOERR_ABORT_REQUESTED);
/*
- * For SLI4, irsiocb contains
+ * For SLI4, irspiocb contains
* NO_XRI in sli_xritag, it
* shall not affect releasing
* sgl (xri) process.
*/
- saveq->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- saveq->iocb.un.ulpWord[4] =
- IOERR_SLI_ABORTED;
+ set_job_ulpstatus(saveq,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(saveq,
+ IOERR_SLI_ABORTED);
spin_lock_irqsave(
&phba->hbalock, iflag);
- saveq->iocb_flag |=
+ saveq->cmd_flag |=
LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
}
}
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+ cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
@@ -3768,16 +3856,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
- pring->ringno,
- saveq->iocb.ulpIoTag,
- saveq->iocb.ulpStatus,
- saveq->iocb.un.ulpWord[4],
- saveq->iocb.ulpCommand,
- saveq->iocb.ulpContext);
+ pring->ringno, iotag, ulp_status,
+ ulp_word4, ulp_command, ulp_context);
}
}
- return rc;
+ return 1;
}
/**
@@ -3986,18 +4070,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
break;
}
- spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- if (cmdiocbq->iocb_cmpl) {
+ if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
spin_lock_irqsave(&phba->hbalock, iflag);
}
break;
@@ -4088,155 +4169,159 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *rspiocbp)
{
struct lpfc_iocbq *saveq;
- struct lpfc_iocbq *cmdiocbp;
+ struct lpfc_iocbq *cmdiocb;
struct lpfc_iocbq *next_iocb;
- IOCB_t *irsp = NULL;
+ IOCB_t *irsp;
uint32_t free_saveq;
- uint8_t iocb_cmd_type;
+ u8 cmd_type;
lpfc_iocb_type type;
unsigned long iflag;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ u32 ulp_word4 = get_job_word4(phba, rspiocbp);
+ u32 ulp_command = get_job_cmnd(phba, rspiocbp);
int rc;
spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
- list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+ list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
pring->iocb_continueq_cnt++;
- /* Now, determine whether the list is completed for processing */
- irsp = &rspiocbp->iocb;
- if (irsp->ulpLe) {
- /*
- * By default, the driver expects to free all resources
- * associated with this iocb completion.
- */
- free_saveq = 1;
- saveq = list_get_first(&pring->iocb_continueq,
- struct lpfc_iocbq, list);
- irsp = &(saveq->iocb);
- list_del_init(&pring->iocb_continueq);
- pring->iocb_continueq_cnt = 0;
+ /*
+ * By default, the driver expects to free all resources
+ * associated with this iocb completion.
+ */
+ free_saveq = 1;
+ saveq = list_get_first(&pring->iocb_continueq,
+ struct lpfc_iocbq, list);
+ list_del_init(&pring->iocb_continueq);
+ pring->iocb_continueq_cnt = 0;
- pring->stats.iocb_rsp++;
+ pring->stats.iocb_rsp++;
- /*
- * If resource errors reported from HBA, reduce
- * queuedepths of the SCSI device.
- */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_NO_RESOURCES)) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- phba->lpfc_rampdown_queue_depth(phba);
- spin_lock_irqsave(&phba->hbalock, iflag);
- }
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
- if (irsp->ulpStatus) {
- /* Rsp ring <ringno> error: IOCB */
+ if (ulp_status) {
+ /* Rsp ring <ringno> error: IOCB */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ irsp = &rspiocbp->iocb;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0328 Rsp Ring %d error: "
+ "0328 Rsp Ring %d error: ulp_status x%x "
+ "IOCB Data: "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x\n",
+ pring->ringno, ulp_status,
+ get_job_ulpword(rspiocbp, 0),
+ get_job_ulpword(rspiocbp, 1),
+ get_job_ulpword(rspiocbp, 2),
+ get_job_ulpword(rspiocbp, 3),
+ get_job_ulpword(rspiocbp, 4),
+ get_job_ulpword(rspiocbp, 5),
+ *(((uint32_t *)irsp) + 6),
+ *(((uint32_t *)irsp) + 7),
+ *(((uint32_t *)irsp) + 8),
+ *(((uint32_t *)irsp) + 9),
+ *(((uint32_t *)irsp) + 10),
+ *(((uint32_t *)irsp) + 11),
+ *(((uint32_t *)irsp) + 12),
+ *(((uint32_t *)irsp) + 13),
+ *(((uint32_t *)irsp) + 14),
+ *(((uint32_t *)irsp) + 15));
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0321 Rsp Ring %d error: "
"IOCB Data: "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
"x%x x%x x%x x%x\n",
pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7),
- *(((uint32_t *) irsp) + 8),
- *(((uint32_t *) irsp) + 9),
- *(((uint32_t *) irsp) + 10),
- *(((uint32_t *) irsp) + 11),
- *(((uint32_t *) irsp) + 12),
- *(((uint32_t *) irsp) + 13),
- *(((uint32_t *) irsp) + 14),
- *(((uint32_t *) irsp) + 15));
+ rspiocbp->wcqe_cmpl.word0,
+ rspiocbp->wcqe_cmpl.total_data_placed,
+ rspiocbp->wcqe_cmpl.parameter,
+ rspiocbp->wcqe_cmpl.word3);
}
+ }
- /*
- * Fetch the IOCB command type and call the correct completion
- * routine. Solicited and Unsolicited IOCBs on the ELS ring
- * get freed back to the lpfc_iocb_list by the discovery
- * kernel thread.
- */
- iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
- type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
- switch (type) {
- case LPFC_SOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- break;
-
- case LPFC_UNSOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (!rc)
- free_saveq = 0;
- break;
- case LPFC_ABORT_IOCB:
- cmdiocbp = NULL;
- if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
+ /*
+ * Fetch the iocb command type and call the correct completion
+ * routine. Solicited and Unsolicited IOCBs on the ELS ring
+ * get freed back to the lpfc_iocb_list by the discovery
+ * kernel thread.
+ */
+ cmd_type = ulp_command & CMD_IOCB_MASK;
+ type = lpfc_sli_iocb_cmd_type(cmd_type);
+ switch (type) {
+ case LPFC_SOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ break;
+ case LPFC_UNSOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (!rc)
+ free_saveq = 0;
+ break;
+ case LPFC_ABORT_IOCB:
+ cmdiocb = NULL;
+ if (ulp_command != CMD_XRI_ABORTED_CX)
+ cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
+ saveq);
+ if (cmdiocb) {
+ /* Call the specified completion routine */
+ if (cmdiocb->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
- saveq);
+ cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
- }
- if (cmdiocbp) {
- /* Call the specified completion routine */
- if (cmdiocbp->iocb_cmpl) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
- saveq);
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- } else
- __lpfc_sli_release_iocbq(phba,
- cmdiocbp);
- }
- break;
-
- case LPFC_UNKNOWN_IOCB:
- if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
- char adaptermsg[LPFC_MAX_ADPTMSG];
- memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
- memcpy(&adaptermsg[0], (uint8_t *)irsp,
- MAX_MSG_DATA);
- dev_warn(&((phba->pcidev)->dev),
- "lpfc%d: %s\n",
- phba->brd_no, adaptermsg);
} else {
- /* Unknown IOCB command */
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0335 Unknown IOCB "
- "command Data: x%x "
- "x%x x%x x%x\n",
- irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
+ __lpfc_sli_release_iocbq(phba, cmdiocb);
}
- break;
}
+ break;
+ case LPFC_UNKNOWN_IOCB:
+ if (ulp_command == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s\n",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ ulp_command,
+ ulp_status,
+ get_wqe_reqtag(rspiocbp),
+ get_job_ulpcontext(phba, rspiocbp));
+ }
+ break;
+ }
- if (free_saveq) {
- list_for_each_entry_safe(rspiocbp, next_iocb,
- &saveq->list, list) {
- list_del_init(&rspiocbp->list);
- __lpfc_sli_release_iocbq(phba, rspiocbp);
- }
- __lpfc_sli_release_iocbq(phba, saveq);
+ if (free_saveq) {
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del_init(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba, rspiocbp);
}
- rspiocbp = NULL;
+ __lpfc_sli_release_iocbq(phba, saveq);
}
+ rspiocbp = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rspiocbp;
}
@@ -4429,8 +4514,8 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
irspiocbq = container_of(cq_event, struct lpfc_iocbq,
cq_event);
/* Translate ELS WCQE to response IOCBQ */
- irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
- irspiocbq);
+ irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
+ irspiocbq);
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
@@ -4466,42 +4551,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- LIST_HEAD(completions);
+ LIST_HEAD(tx_completions);
+ LIST_HEAD(txcmplq_completions);
struct lpfc_iocbq *iocb, *next_iocb;
+ int offline;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
+ offline = pci_channel_offline(phba->pcidev);
/* Error everything on txq and txcmplq
* First do the txq.
*/
if (phba->sli_rev >= LPFC_SLI_REV4) {
spin_lock_irq(&pring->ring_lock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
- spin_lock_irq(&phba->hbalock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
- spin_unlock_irq(&phba->hbalock);
+ if (offline) {
+ list_splice_init(&pring->txcmplq,
+ &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
+ spin_unlock_irq(&pring->ring_lock);
} else {
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+ if (offline) {
+ list_splice_init(&pring->txcmplq, &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
spin_unlock_irq(&phba->hbalock);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (offline) {
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ } else {
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
@@ -4554,11 +4659,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_IOQ_FLUSH ||
- !phba->sli4_hba.hdwq) {
- spin_unlock_irq(&phba->hbalock);
- return;
- }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -4573,7 +4673,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
@@ -4599,7 +4699,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
@@ -4749,7 +4849,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
- volatile uint32_t mbox;
+ volatile struct MAILBOX_word0 mbox;
uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@@ -4783,13 +4883,13 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
phba->pport->stopped = 1;
}
- mbox = 0;
- ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+ mbox.word0 = 0;
+ mbox.mbxCommand = MBX_KILL_BOARD;
+ mbox.mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
mbox_buf = phba->MBslimaddr;
- writel(mbox, mbox_buf);
+ writel(mbox.word0, mbox_buf);
for (i = 0; i < 50; i++) {
if (lpfc_readl((resp_buf + 1), &resp_data))
@@ -4810,12 +4910,12 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
goto clear_errat;
}
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+ mbox.mbxOwner = OWN_HOST;
resp_data = 0;
for (i = 0; i < 500; i++) {
if (lpfc_readl(resp_buf, &resp_data))
return;
- if (resp_data != mbox)
+ if (resp_data != mbox.word0)
mdelay(1);
else
break;
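
The switch to struct MAILBOX_word0 replaces the old *(uint32_t *)&mbox casts with named bitfields, so mbxCommand/mbxOwner are set by name and word0 is written as one aligned 32-bit store. A sketch of the pattern, assuming illustrative bit widths (the real SLI-3 layout may differ, and bitfield ordering is implementation-defined):

#include <stdint.h>

/* Illustrative typed mailbox word 0; not the lpfc layout. */
union demo_mailbox_word0 {
	struct {
		uint32_t mbxCommand : 8;
		uint32_t reserved   : 16;
		uint32_t mbxOwner   : 8;
	};
	uint32_t word0;	/* raw value written to the SLIM mailbox */
};

static uint32_t demo_kill_board_word0(void)
{
	union demo_mailbox_word0 mbox = { .word0 = 0 };

	mbox.mbxCommand = 0x24;	/* stand-in for MBX_KILL_BOARD */
	mbox.mbxOwner = 1;	/* stand-in for OWN_CHIP */
	return mbox.word0;	/* single aligned 32-bit write */
}
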
@@ -5046,12 +5146,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
- /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
- if (phba->hba_flag & HBA_FW_DUMP_OP) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return rc;
- }
-
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -5091,9 +5185,8 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
- MAILBOX_t *mb;
+ volatile struct MAILBOX_word0 mb;
struct lpfc_sli *psli;
- volatile uint32_t word0;
void __iomem *to_slim;
uint32_t hba_aer_enabled;
@@ -5110,24 +5203,23 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
(phba->pport) ? phba->pport->port_state : 0,
psli->sli_flag);
- word0 = 0;
- mb = (MAILBOX_t *) &word0;
- mb->mbxCommand = MBX_RESTART;
- mb->mbxHc = 1;
+ mb.word0 = 0;
+ mb.mbxCommand = MBX_RESTART;
+ mb.mbxHc = 1;
lpfc_reset_barrier(phba);
to_slim = phba->MBslimaddr;
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport && phba->pport->port_state)
- word0 = 1; /* This is really setting up word1 */
+ mb.word0 = 1; /* This is really setting up word1 */
else
- word0 = 0; /* This is really setting up word1 */
+ mb.word0 = 0; /* This is really setting up word1 */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
@@ -5186,6 +5278,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
+ /* Preserve FA-PWWN expectation */
+ phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -5762,26 +5856,20 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
mboxq->mcqe.trailer);
if (rc) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = 0;
out_free_mboxq:
- mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return rc;
}
@@ -5980,6 +6068,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
/* obtain link type and link number via READ_CONFIG */
phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
lpfc_sli4_read_config(phba);
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
goto retrieve_ppname;
@@ -6123,6 +6215,9 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
LPFC_MBOXQ_t *mbox;
+ *extnt_count = 0;
+ *extnt_size = 0;
+
mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -6738,8 +6833,13 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
phba->sli4_hba.pc_sli4_params.mi_ver);
break;
+ case LPFC_SET_LD_SIGNAL:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
+ mbox->u.mqe.un.set_feature.param_len = 16;
+ bf_set(lpfc_mbx_set_feature_lds_qry,
+ &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
+ break;
case LPFC_SET_ENABLE_CMF:
- bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
mbox->u.mqe.un.set_feature.param_len = 4;
bf_set(lpfc_mbx_set_feature_cmf,
@@ -7735,6 +7835,62 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
static void
+lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ union lpfc_sli4_cfg_shdr *shdr;
+ u32 shdr_status, shdr_add_status;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
+ "4622 SET_FEATURE (x%x) mbox failed, "
+ "status x%x add_status x%x, mbx status x%x\n",
+ LPFC_SET_LD_SIGNAL, shdr_status,
+ shdr_add_status, pmb->u.mb.mbxStatus);
+ phba->degrade_activate_threshold = 0;
+ phba->degrade_deactivate_threshold = 0;
+ phba->fec_degrade_interval = 0;
+ goto out;
+ }
+
+ phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
+ phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
+ phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
+ "4624 Success: da x%x dd x%x interval x%x\n",
+ phba->degrade_activate_threshold,
+ phba->degrade_deactivate_threshold,
+ phba->fec_degrade_interval);
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+int
+lpfc_read_lds_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
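
A hypothetical call site for the new helper; the error handling here is an assumption about how a setup path might consume the return codes, not code from this patch, and it compiles only against the driver's own headers:

/* Hypothetical consumer of lpfc_read_lds_params() */
static void demo_setup_lds(struct lpfc_hba *phba)
{
	int rc = lpfc_read_lds_params(phba);

	if (rc)		/* -ENOMEM or -EIO: thresholds keep prior values */
		return;

	/* On success the degrade thresholds are filled in later, when
	 * lpfc_mbx_cmpl_read_lds_params() runs on mailbox completion.
	 */
}
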
+
+static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -7881,6 +8037,172 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
}
/**
+ * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entries: Number of rx_info_entry objects to allocate in ring
+ *
+ * Return:
+ * 0 - Success
+ * -ENOMEM - Failure to allocate the ring memory
+ **/
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries)
+{
+ rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
+ GFP_KERNEL);
+ if (!rx_monitor->ring)
+ return -ENOMEM;
+
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_lock_init(&rx_monitor->lock);
+ rx_monitor->entries = entries;
+
+ return 0;
+}
+
+/**
+ * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ **/
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
+{
+ spin_lock(&rx_monitor->lock);
+ kfree(rx_monitor->ring);
+ rx_monitor->ring = NULL;
+ rx_monitor->entries = 0;
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_unlock(&rx_monitor->lock);
+}
+
+/**
+ * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entry: Pointer to rx_info_entry
+ *
+ * Used to insert an rx_info_entry into rx_monitor's ring. Note that this
+ * performs a deep copy of the rx_info_entry, not a shallow copy of its ptr.
+ *
+ * This is called from lpfc_cmf_timer, which is in timer/softirq context.
+ *
+ * If the ring overflows, the oldest entry is overwritten, preserving FIFO
+ * order on a best-effort basis.
+ **/
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+
+ spin_lock(ring_lock);
+ memcpy(&ring[*tail_idx], entry, sizeof(*entry));
+ *tail_idx = (*tail_idx + 1) % ring_size;
+
+	/* Ring full: overwrite the oldest entry (best-effort FIFO) */
+ if (*tail_idx == *head_idx)
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ spin_unlock(ring_lock);
+}
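
The overwrite-oldest policy above can be demonstrated standalone; this sketch mirrors the head/tail arithmetic with a four-slot ring:

#include <stdio.h>

#define RING_SIZE 4

int main(void)
{
	int ring[RING_SIZE];
	unsigned int head = 0, tail = 0;

	for (int v = 1; v <= 6; v++) {	/* insert 6 entries into 4 slots */
		ring[tail] = v;
		tail = (tail + 1) % RING_SIZE;
		if (tail == head)	/* full: drop the oldest entry */
			head = (head + 1) % RING_SIZE;
	}

	/* Prints "4 5 6" - the newest RING_SIZE-1 entries survive */
	while (head != tail) {
		printf("%d ", ring[head]);
		head = (head + 1) % RING_SIZE;
	}
	printf("\n");
	return 0;
}
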
+
+/**
+ * lpfc_rx_monitor_report - Read out rx_monitor's ring
+ * @phba: Pointer to lpfc_hba object
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @buf: Pointer to char buffer that will contain rx monitor info data
+ * @buf_len: Length of buf, including the terminating NUL
+ * @max_read_entries: Maximum number of entries to read out of ring
+ *
+ * Used to dump/read what's in rx_monitor's ring buffer.
+ *
+ * If buf is NULL or buf_len is 0, the information is logged to kmsg
+ * instead of being written to buf.
+ *
+ * Return:
+ * Number of entries read out of the ring
+ **/
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ struct rx_info_entry *entry;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+ u32 cnt = 0;
+ char tmp[DBG_LOG_STR_SZ] = {0};
+	bool log_to_kmsg = !buf || !buf_len;
+
+ if (!log_to_kmsg) {
+ /* clear the buffer to be sure */
+ memset(buf, 0, buf_len);
+
+ scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
+ "%-8s%-8s%-8s%-16s\n",
+ "MaxBPI", "Tot_Data_CMF",
+ "Tot_Data_Cmd", "Tot_Data_Cmpl",
+ "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
+ "IO_cnt", "Info", "BWutil(ms)");
+ }
+
+	/* Needs to be _bh because lpfc_rx_monitor_record() is called
+	 * from timer (softirq) context
+ */
+ spin_lock_bh(ring_lock);
+ while (*head_idx != *tail_idx) {
+ entry = &ring[*head_idx];
+
+ /* Read out this entry's data. */
+ if (!log_to_kmsg) {
+ /* If !log_to_kmsg, then store to buf. */
+ scnprintf(tmp, sizeof(tmp),
+ "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
+ "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
+ *head_idx, entry->max_bytes_per_interval,
+ entry->cmf_bytes, entry->total_bytes,
+ entry->rcv_bytes, entry->avg_io_latency,
+ entry->avg_io_size, entry->max_read_cnt,
+ entry->cmf_busy, entry->io_cnt,
+ entry->cmf_info, entry->timer_utilization,
+ entry->timer_interval);
+
+ /* Check for buffer overflow */
+ if ((strlen(buf) + strlen(tmp)) >= buf_len)
+ break;
+
+ /* Append entry's data to buffer */
+ strlcat(buf, tmp, buf_len);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4410 %02u: MBPI %llu Xmit %llu "
+ "Cmpl %llu Lat %llu ASz %llu Info %02u "
+ "BWUtil %u Int %u slot %u\n",
+ cnt, entry->max_bytes_per_interval,
+ entry->total_bytes, entry->rcv_bytes,
+ entry->avg_io_latency,
+ entry->avg_io_size, entry->cmf_info,
+ entry->timer_utilization,
+ entry->timer_interval, *head_idx);
+ }
+
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ /* Don't feed more than max_read_entries */
+ cnt++;
+ if (cnt >= max_read_entries)
+ break;
+ }
+ spin_unlock_bh(ring_lock);
+
+ return cnt;
+}
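
A hypothetical caller exercising both output modes of lpfc_rx_monitor_report(); the buffer size and entry cap are arbitrary choices, and this compiles only against the driver's own headers:

/* Hypothetical consumer of lpfc_rx_monitor_report() */
static void demo_dump_rx_monitor(struct lpfc_hba *phba)
{
	char buf[2048];
	u32 cnt;

	/* Mode 1: format up to 32 entries into buf (NUL-terminated) */
	cnt = lpfc_rx_monitor_report(phba, phba->rx_monitor, buf,
				     sizeof(buf), 32);

	/* Mode 2: a NULL buf logs each entry to kmsg instead */
	if (!cnt)
		lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0, 32);
}
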
+
+/**
* lpfc_cmf_setup - Initialize idle_stat tracking
* @phba: Pointer to HBA context object.
*
@@ -7905,10 +8227,6 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
sli4_params = &phba->sli4_hba.pc_sli4_params;
- /* Are we forcing MI off via module parameter? */
- if (!phba->cfg_enable_mi)
- sli4_params->mi_ver = 0;
-
/* Always try to enable MI feature if we can */
if (sli4_params->mi_ver) {
lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
@@ -8058,19 +8376,29 @@ no_cmf:
phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
/* Allocate RX Monitor Buffer */
- if (!phba->rxtable) {
- phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
- sizeof(struct rxtable_entry),
- GFP_KERNEL);
- if (!phba->rxtable) {
+ if (!phba->rx_monitor) {
+ phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
+ GFP_KERNEL);
+
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2644 Failed to alloc memory "
"for RX Monitor Buffer\n");
return -ENOMEM;
}
+
+ /* Instruct the rx_monitor object to instantiate its ring */
+ if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
+ LPFC_MAX_RXMONITOR_ENTRY)) {
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2645 Failed to alloc memory "
+ "for RX Monitor's Ring\n");
+ return -ENOMEM;
+ }
}
- atomic_set(&phba->rxtable_idx_head, 0);
- atomic_set(&phba->rxtable_idx_tail, 0);
+
return 0;
}
@@ -8454,8 +8782,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
/*
- * This memory was allocated by the lpfc_read_sparam routine. Release
- * it to the mbuf pool.
+ * This memory was allocated by the lpfc_read_sparam routine but is
+ * no longer needed. It is released and ctx_buf NULLed to prevent
+ * unintended pointer access as the mbox is reused.
*/
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -8775,6 +9104,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
mempool_free(mboxq, phba->mbox_mem_pool);
+ /* Enable RAS FW log support */
+ lpfc_sli4_ras_setup(phba);
+
phba->hba_flag |= HBA_SETUP;
return rc;
@@ -10106,7 +10438,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
lockdep_assert_held(&phba->hbalock);
- if (piocb->iocb_cmpl && (!piocb->vport) &&
+ if (piocb->cmd_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -10144,24 +10476,14 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
- case CMD_GEN_REQUEST64_CR:
- case CMD_GEN_REQUEST64_CX:
- if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
- FC_RCTL_DD_UNSOL_CMD) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Type !=
- MENLO_TRANSPORT_TYPE))
-
- goto iocb_busy;
- break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
* For IOCBs, like QUE_RING_BUF, that have no rsp ring
- * completion, iocb_cmpl MUST be 0.
+ * completion, cmd_cmpl MUST be 0.
*/
- if (piocb->iocb_cmpl)
- piocb->iocb_cmpl = NULL;
+ if (piocb->cmd_cmpl)
+ piocb->cmd_cmpl = NULL;
fallthrough;
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
@@ -10208,715 +10530,6 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
}
/**
- * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
- * @phba: Pointer to HBA context object.
- * @piocbq: Pointer to command iocb.
- * @sglq: Pointer to the scatter gather queue object.
- *
- * This routine converts the bpl or bde that is in the IOCB
- * to a sgl list for the sli4 hardware. The physical address
- * of the bpl/bde is converted back to a virtual address.
- * If the IOCB contains a BPL then the list of BDE's is
- * converted to sli4_sge's. If the IOCB contains a single
- * BDE then it is converted to a single sli_sge.
- * The IOCB is still in cpu endianess so the contents of
- * the bpl can be used without byte swapping.
- *
- * Returns valid XRI = Success, NO_XRI = Failure.
-**/
-static uint16_t
-lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
- struct lpfc_sglq *sglq)
-{
- uint16_t xritag = NO_XRI;
- struct ulp_bde64 *bpl = NULL;
- struct ulp_bde64 bde;
- struct sli4_sge *sgl = NULL;
- struct lpfc_dmabuf *dmabuf;
- IOCB_t *icmd;
- int numBdes = 0;
- int i = 0;
- uint32_t offset = 0; /* accumulated offset in the sg request list */
- int inbound = 0; /* number of sg reply entries inbound from firmware */
-
- if (!piocbq || !sglq)
- return xritag;
-
- sgl = (struct sli4_sge *)sglq->sgl;
- icmd = &piocbq->iocb;
- if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
- return sglq->sli4_xritag;
- if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = icmd->un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- /* The addrHigh and addrLow fields within the IOCB
- * have not been byteswapped yet so there is no
- * need to swap them back.
- */
- if (piocbq->context3)
- dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
- else
- return xritag;
-
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- if (!bpl)
- return xritag;
-
- for (i = 0; i < numBdes; i++) {
- /* Should already be byte swapped. */
- sgl->addr_hi = bpl->addrHigh;
- sgl->addr_lo = bpl->addrLow;
-
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((i+1) == numBdes)
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- bde.tus.w = le32_to_cpu(bpl->tus.w);
- sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
- /* The offsets in the sgl need to be accumulated
- * separately for the request and reply lists.
- * The request is always first, the reply follows.
- */
- if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
- /* add up the reply sg entries */
- if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
- inbound++;
- /* first inbound? reset the offset */
- if (inbound == 1)
- offset = 0;
- bf_set(lpfc_sli4_sge_offset, sgl, offset);
- bf_set(lpfc_sli4_sge_type, sgl,
- LPFC_SGE_TYPE_DATA);
- offset += bde.tus.f.bdeSize;
- }
- sgl->word2 = cpu_to_le32(sgl->word2);
- bpl++;
- sgl++;
- }
- } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
- /* The addrHigh and addrLow fields of the BDE have not
- * been byteswapped yet so they need to be swapped
- * before putting them in the sgl.
- */
- sgl->addr_hi =
- cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
- sgl->addr_lo =
- cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len =
- cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
- }
- return sglq->sli4_xritag;
-}
-
-/**
- * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
- * @phba: Pointer to HBA context object.
- * @iocbq: Pointer to command iocb.
- * @wqe: Pointer to the work queue entry.
- *
- * This routine converts the iocb command to its Work Queue Entry
- * equivalent. The wqe pointer should not have any fields set when
- * this routine is called because it will memcpy over them.
- * This routine does not set the CQ_ID or the WQEC bits in the
- * wqe.
- *
- * Returns: 0 = Success, IOCB_ERROR = Failure.
- **/
-static int
-lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
- union lpfc_wqe128 *wqe)
-{
- uint32_t xmit_len = 0, total_len = 0;
- uint8_t ct = 0;
- uint32_t fip;
- uint32_t abort_tag;
- uint8_t command_type = ELS_COMMAND_NON_FIP;
- uint8_t cmnd;
- uint16_t xritag;
- uint16_t abrt_iotag;
- struct lpfc_iocbq *abrtiocbq;
- struct ulp_bde64 *bpl = NULL;
- uint32_t els_id = LPFC_ELS_ID_DEFAULT;
- int numBdes, i;
- struct ulp_bde64 bde;
- struct lpfc_nodelist *ndlp;
- uint32_t *pcmd;
- uint32_t if_type;
-
- fip = phba->hba_flag & HBA_FIP_SUPPORT;
- /* The fcp commands will set command type */
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- command_type = FCP_COMMAND;
- else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
- command_type = ELS_COMMAND_FIP;
- else
- command_type = ELS_COMMAND_NON_FIP;
-
- if (phba->fcp_embed_io)
- memset(wqe, 0, sizeof(union lpfc_wqe128));
- /* Some of the fields are in the right position already */
- memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- /* The ct field has moved so reset */
- wqe->generic.wqe_com.word7 = 0;
- wqe->generic.wqe_com.word10 = 0;
-
- abort_tag = (uint32_t) iocbq->iotag;
- xritag = iocbq->sli4_xritag;
- /* words0-2 bpl convert bde */
- if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- bpl = (struct ulp_bde64 *)
- ((struct lpfc_dmabuf *)iocbq->context3)->virt;
- if (!bpl)
- return IOCB_ERROR;
-
- /* Should already be byte swapped. */
- wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
- wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
- xmit_len = wqe->generic.bde.tus.f.bdeSize;
- total_len = 0;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- total_len += bde.tus.f.bdeSize;
- }
- } else
- xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
-
- iocbq->iocb.ulpIoTag = iocbq->iotag;
- cmnd = iocbq->iocb.ulpCommand;
-
- switch (iocbq->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
- ndlp = iocbq->context_un.ndlp;
- else
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- if (!iocbq->iocb.ulpLe) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2007 Only Limited Edition cmd Format"
- " supported 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
-
- wqe->els_req.payload_len = xmit_len;
- /* Els_reguest64 has a TMO */
- bf_set(wqe_tmo, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpTimeout);
- /* Need a VF for word 4 set the vf bit*/
- bf_set(els_req64_vf, &wqe->els_req, 0);
- /* And a VFID for word 12 */
- bf_set(els_req64_vfid, &wqe->els_req, 0);
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
- bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
- /* CCP CCPE PV PRI in word10 were set in the memcpy */
- if (command_type == ELS_COMMAND_FIP)
- els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
- >> LPFC_FIP_ELS_ID_SHIFT);
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- iocbq->context2)->virt);
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
- *pcmd == ELS_CMD_SCR ||
- *pcmd == ELS_CMD_RDF ||
- *pcmd == ELS_CMD_EDC ||
- *pcmd == ELS_CMD_RSCN_XMT ||
- *pcmd == ELS_CMD_FDISC ||
- *pcmd == ELS_CMD_LOGO ||
- *pcmd == ELS_CMD_QFPA ||
- *pcmd == ELS_CMD_UVEM ||
- *pcmd == ELS_CMD_PLOGI)) {
- bf_set(els_req64_sp, &wqe->els_req, 1);
- bf_set(els_req64_sid, &wqe->els_req,
- iocbq->vport->fc_myDID);
- if ((*pcmd == ELS_CMD_FLOGI) &&
- !(phba->fc_topology ==
- LPFC_TOPOLOGY_LOOP))
- bf_set(els_req64_sid, &wqe->els_req, 0);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- } else if (pcmd && iocbq->context1) {
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- }
- }
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
- bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
- wqe->els_req.max_response_payload_len = total_len - xmit_len;
- break;
- case CMD_XMIT_SEQUENCE64_CX:
- bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.un.ulpWord[3]);
- bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- /* The entire sequence is transmitted for this IOCB */
- xmit_len = total_len;
- cmnd = CMD_XMIT_SEQUENCE64_CR;
- if (phba->link_flag & LS_LOOPBACK_MODE)
- bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
- fallthrough;
- case CMD_XMIT_SEQUENCE64_CR:
- /* word3 iocb=io_tag32 wqe=reserved */
- wqe->xmit_sequence.rsvd3 = 0;
- /* word4 relative_offset memcpy */
- /* word5 r_ctl/df_ctl memcpy */
- bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
- bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_LENLOC_WORD12);
- bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
- wqe->xmit_sequence.xmit_len = xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_BCAST64_CN:
- /* word3 iocb=iotag32 wqe=seq_payload_len */
- wqe->xmit_bcast64.seq_payload_len = xmit_len;
- /* word4 iocb=rsvd wqe=rsvd */
- /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
- /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
- bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
- break;
- case CMD_FCP_IWRITE64_CR:
- command_type = FCP_COMMAND_DATA_OUT;
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iwrite,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iwrite,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_IREAD64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iread,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iread,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_ICMND64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_icmd,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_icmd,
- 0);
- /* word3 iocb=IO_TAG wqe=reserved */
- bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
- /* Always open the exchange */
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_GEN_REQUEST64_CR:
- /* For this command calculate the xmit length of the
- * request bde.
- */
- xmit_len = 0;
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- break;
- xmit_len += bde.tus.f.bdeSize;
- }
- /* word3 iocb=IO_TAG wqe=request_payload_len */
- wqe->gen_req.request_payload_len = xmit_len;
- /* word4 iocb=parameter wqe=relative_offset memcpy */
- /* word5 [rctl, type, df_ctl, la] copied in memcpy */
- /* word6 context tag copied in memcpy */
- if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2015 Invalid CT %x command 0x%x\n",
- ct, iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
- bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
- bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
- wqe->gen_req.max_response_payload_len = total_len - xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_ELS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* words0-2 BDE memcpy */
- /* word3 iocb=iotag32 wqe=response_payload_len */
- wqe->xmit_els_rsp.response_payload_len = xmit_len;
- /* word4 */
- wqe->xmit_els_rsp.word4 = 0;
- /* word5 iocb=rsvd wge=did */
- bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
- iocbq->iocb.un.xseq64.xmit_els_remoteID);
-
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (iocbq->vport->fc_flag & FC_PT2PT) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- if (iocbq->vport->fc_myDID == Fabric_DID) {
- bf_set(wqe_els_did,
- &wqe->xmit_els_rsp.wqe_dest, 0);
- }
- }
- }
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
- }
- command_type = OTHER_COMMAND;
- break;
- case CMD_CLOSE_XRI_CN:
- case CMD_ABORT_XRI_CN:
- case CMD_ABORT_XRI_CX:
- /* words 0-2 memcpy should be 0 rserved */
- /* port will send abts */
- abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
- if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
- abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
- fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
- } else
- fip = 0;
-
- if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
- /*
- * The link is down, or the command was ELS_FIP
- * so the fw does not need to send abts
- * on the wire.
- */
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- else
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
- wqe->abort_cmd.rsrvd5 = 0;
- bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- /*
- * The abort handler will send us CMD_ABORT_XRI_CN or
- * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
- */
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- cmnd = CMD_ABORT_XRI_CX;
- command_type = OTHER_COMMAND;
- xritag = 0;
- break;
- case CMD_XMIT_BLS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* As BLS ABTS RSP WQE is very different from other WQEs,
- * we re-construct this WQE here based on information in
- * iocbq from scratch.
- */
- memset(wqe, 0, sizeof(*wqe));
- /* OX_ID is invariable to who sent ABTS to CT exchange */
- bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
- if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
- LPFC_ABTS_UNSOL_INT) {
- /* ABTS sent by initiator to CT exchange, the
- * RX_ID field will be filled with the newly
- * allocated responder XRI.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- iocbq->sli4_xritag);
- } else {
- /* ABTS sent by responder to CT exchange, the
- * RX_ID field will be filled with the responder
- * RX_ID from ABTS.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
- }
- bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
- bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
-
- /* Use CT=VPI */
- bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
- ndlp->nlp_DID);
- bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
- bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- /* Overwrite the pre-set comnd type with OTHER_COMMAND */
- command_type = OTHER_COMMAND;
- if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
- bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
- bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
- }
-
- break;
- case CMD_SEND_FRAME:
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
- bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
- bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
- bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- return 0;
- case CMD_XRI_ABORTED_CX:
- case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
- case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
- case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
- case CMD_FCP_TRSP64_CX: /* Target mode rcv */
- case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2014 Invalid command 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
-
- if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
- iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
- LPFC_IO_DIF_INSERT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- wqe->generic.wqe_com.abort_tag = abort_tag;
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
- bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- return 0;
-}
-
-/**
* __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
* @phba: Pointer to HBA context object.
* @ring_number: SLI ring number to issue wqe on.
@@ -10924,7 +10537,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* @flag: Flag indicating if this command can be put into txq.
*
* __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
- * send an iocb command to an HBA with SLI-4 interface spec.
+ * send an iocb command to an HBA with SLI-3 interface spec.
*
* This function takes the hbalock before invoking the lockless version.
* The function will return success after it successfully submit the wqe to
@@ -10962,10 +10575,17 @@ static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
- int rc;
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *)piocb->context1;
- union lpfc_wqe128 *wqe = &piocb->wqe;
+ struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
+
+ lpfc_prep_embed_io(phba, lpfc_cmd);
+ return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
+}
+
+void
+lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+{
+ struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
struct sli4_sge *sgl;
/* 128 byte wqe support here */
@@ -11003,19 +10623,17 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
/* add the VMID tags as per switch response */
- if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
- if (phba->pport->vmid_priority_tagging) {
+ if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
(piocb->vmid_tag.cs_ctl_vmid));
- } else {
+ } else if (phba->cfg_vmid_app_header) {
bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
wqe->words[31] = piocb->vmid_tag.app_id;
}
}
- rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
- return rc;
}
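
lpfc_prep_embed_io() now selects between CS_CTL priority tagging and application-header tagging for VMID. A decision sketch with simplified stand-ins for vmid_flag and cfg_vmid_app_header:

#include <stdbool.h>

/* VMID tagging mode sketch; flags are simplified stand-ins. */
enum demo_vmid_mode { DEMO_VMID_NONE, DEMO_VMID_PRIO, DEMO_VMID_APP };

static enum demo_vmid_mode demo_pick_vmid_mode(bool prio_tagging,
					       bool app_header)
{
	if (prio_tagging)
		return DEMO_VMID_PRIO;	/* tag via CS_CTL priority bits */
	if (app_header)
		return DEMO_VMID_APP;	/* tag via application header word */
	return DEMO_VMID_NONE;		/* frame goes out untagged */
}
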
/**
@@ -11037,13 +10655,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
+ union lpfc_wqe128 *wqe;
struct lpfc_queue *wq;
struct lpfc_sli_ring *pring;
+ u32 ulp_command = get_job_cmnd(phba, piocb);
/* Get the WQ */
- if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if ((piocb->cmd_flag & LPFC_IO_FCP) ||
+ (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
} else {
wq = phba->sli4_hba.els_wq;
@@ -11057,34 +10676,24 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
*/
lockdep_assert_held(&pring->ring_lock);
-
+ wqe = &piocb->wqe;
if (piocb->sli4_xritag == NO_XRI) {
- if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if (ulp_command == CMD_ABORT_XRI_CX)
sglq = NULL;
else {
- if (!list_empty(&pring->txq)) {
+ sglq = __lpfc_sli_get_els_sglq(phba, piocb);
+ if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
- pring, piocb);
+ pring,
+ piocb);
return IOCB_SUCCESS;
} else {
return IOCB_BUSY;
}
- } else {
- sglq = __lpfc_sli_get_els_sglq(phba, piocb);
- if (!sglq) {
- if (!(flag & SLI_IOCB_RET_IOCB)) {
- __lpfc_sli_ringtx_put(phba,
- pring,
- piocb);
- return IOCB_SUCCESS;
- } else
- return IOCB_BUSY;
- }
}
}
- } else if (piocb->iocb_flag & LPFC_IO_FCP) {
+ } else if (piocb->cmd_flag & LPFC_IO_FCP) {
/* These IO's already have an XRI and a mapped sgl. */
sglq = NULL;
}
@@ -11101,15 +10710,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (sglq) {
piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
+
+ /* ABTS sent by initiator to CT exchange, the
+ * RX_ID field will be filled with the newly
+ * allocated responder XRI.
+ */
+ if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
+ piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ piocb->sli4_xritag);
+
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
+ piocb->sli4_xritag);
+
+ if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
return IOCB_ERROR;
}
- if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ if (lpfc_sli4_wq_put(wq, wqe))
return IOCB_ERROR;
- if (lpfc_sli4_wq_put(wq, &wqe))
- return IOCB_ERROR;
lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
return 0;
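
When no ELS sglq (XRI) is available, the request is parked on txq unless the caller passed SLI_IOCB_RET_IOCB. A sketch of that decision with simplified return codes and a placeholder queueing helper (not lpfc definitions):

#include <stdbool.h>

enum { DEMO_IOCB_SUCCESS, DEMO_IOCB_BUSY };

static void demo_ringtx_put(void) { /* park request on txq */ }

static int demo_no_sglq_path(bool ret_iocb_flag)
{
	if (ret_iocb_flag)
		return DEMO_IOCB_BUSY;	/* caller handles the retry */

	demo_ringtx_put();		/* re-driven when an XRI frees up */
	return DEMO_IOCB_SUCCESS;	/* queued counts as success */
}
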
@@ -11152,6 +10772,411 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
+static void
+__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+
+ if (expect_rsp) {
+ cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ cmd->un.elsreq64.remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ cmd->ulpTimeout = tmo;
+ } else {
+ cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ cmd->ulpPU = PARM_NPIV_DID;
+ }
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (expect_rsp) {
+ cmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ cmd->ulpContext = phba->vpi_ids[vport->vpi];
+ }
+
+ cmd->ulpCt_h = 0;
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ cmd->ulpCt_l = 0; /* context = invalid RPI */
+ else
+ cmd->ulpCt_l = 1; /* context = VPI */
+ }
+}
+
+static void
+__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64_le *bde;
+ u8 els_id;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 0 - 2 BDE */
+ bde = (struct ulp_bde64_le *)&wqe->generic.bde;
+ bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
+ bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->type_size = cpu_to_le32(cmd_size);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ if (expect_rsp) {
+ bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
+
+ /* Transfer length */
+ wqe->els_req.payload_len = cmd_size;
+ wqe->els_req.max_response_payload_len = FCELSSIZE;
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
+
+ /* Word 11 - ELS_ID */
+ switch (elscmd) {
+ case ELS_CMD_PLOGI:
+ els_id = LPFC_ELS_ID_PLOGI;
+ break;
+ case ELS_CMD_FLOGI:
+ els_id = LPFC_ELS_ID_FLOGI;
+ break;
+ case ELS_CMD_LOGO:
+ els_id = LPFC_ELS_ID_LOGO;
+ break;
+ case ELS_CMD_FDISC:
+ if (!vport->fc_myDID) {
+ els_id = LPFC_ELS_ID_FDISC;
+ break;
+ }
+ fallthrough;
+ default:
+ els_id = LPFC_ELS_ID_DEFAULT;
+ break;
+ }
+
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ } else {
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
+
+ /* Transfer length */
+ wqe->xmit_els_rsp.response_payload_len = cmd_size;
+
+ bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
+ CMD_XMIT_ELS_RSP64_WQE);
+ }
+
+ bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
+ bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI.
+ * For SLI4, since the driver controls VPIs we also want to include
+ * all ELS pt2pt protocol traffic as well.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+ (vport->fc_flag & FC_PT2PT)) {
+ if (expect_rsp) {
+ bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
+
+ /* For ELS_REQUEST64_WQE, use the VPI by default */
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[vport->vpi]);
+ }
+
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
+ else
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
+ }
+}
+
+void
+lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
+ u8 expect_rsp)
+{
+ phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
+ elscmd, tmo, expect_rsp);
+}
+
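
The expect_rsp argument is what separates an outbound ELS request (command BDE plus response BDE, CMD_ELS_REQUEST64) from a one-way ELS response (payload BDE only, CMD_XMIT_ELS_RSP64). A minimal standalone sketch of that selection, with stand-in types and illustrative values rather than the driver's real definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only; the real CMD_* codes live in lpfc_hw.h. */
    #define CMD_ELS_REQUEST64_CR  0x8A
    #define CMD_XMIT_ELS_RSP64_CX 0x95
    #define BDE64_SIZE            12    /* one ulp_bde64 descriptor */

    struct bdl { uint32_t bde_size; };

    /* Mirror of the expect_rsp branch above: a request carries a command
     * BDE plus a response BDE, a response carries the payload only. */
    static uint8_t pick_els_command(int expect_rsp, struct bdl *bdl)
    {
        if (expect_rsp) {
            bdl->bde_size = 2 * BDE64_SIZE;
            return CMD_ELS_REQUEST64_CR;
        }
        bdl->bde_size = BDE64_SIZE;
        return CMD_XMIT_ELS_RSP64_CX;
    }

    int main(void)
    {
        struct bdl b;
        uint8_t cmd = pick_els_command(1, &b);

        printf("cmd=0x%x bdeSize=%u\n", cmd, b.bde_size);
        return 0;
    }
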
+static void
+__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
+
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+
+ cmd->ulpContext = rpi;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpOwner = OWN_CHIP;
+ cmd->ulpTimeout = tmo;
+}
+
+static void
+__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ union lpfc_wqe128 *cmdwqe;
+ struct ulp_bde64_le *bde, *bpl;
+ u32 xmit_len = 0, total_len = 0, size, type, i;
+
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
+
+ /* Calculate total_len and xmit_len */
+ bpl = (struct ulp_bde64_le *)bmp->virt;
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ total_len += size;
+ }
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
+ if (type != ULP_BDE64_TYPE_BDE_64)
+ break;
+ xmit_len += size;
+ }
+
+ /* Words 0 - 2 */
+ bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
+ bde->addr_low = bpl->addr_low;
+ bde->addr_high = bpl->addr_high;
+ bde->type_size = cpu_to_le32(xmit_len);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ /* Word 3 */
+ cmdwqe->gen_req.request_payload_len = xmit_len;
+
+ /* Word 5 */
+ bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
+ bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
+ bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
+ bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
+ bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
+ bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
+
+ /* Word 12 */
+ cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
+}
+
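
The two loops in the s4 variant split the buffer list: total_len sums every BDE, while xmit_len stops at the first descriptor that is not a plain 64-bit data BDE, so the remainder sizes the response. A standalone sketch of the same walk, with stand-in masks in place of the ULP_BDE64_* definitions and host-endian words for simplicity:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for ULP_BDE64_SIZE_MASK / ULP_BDE64_TYPE_MASK: the BDE
     * type sits in the top byte, the byte count in the low 24 bits. */
    #define BDE64_SIZE_MASK   0x00ffffffu
    #define BDE64_TYPE_MASK   0xff000000u
    #define BDE64_TYPE_BDE_64 0x00000000u   /* plain 64-bit data BDE */

    struct bde_le { uint32_t addr_low, addr_high, type_size; };

    /* Sum payload bytes until the first non-data BDE, as the s4 gen_req
     * prep does to split xmit_len from the response portion. */
    static uint32_t xmit_len(const struct bde_le *bpl, unsigned int n)
    {
        uint32_t len = 0;
        unsigned int i;

        for (i = 0; i < n; i++) {
            uint32_t ts = bpl[i].type_size;   /* host order assumed here */

            if ((ts & BDE64_TYPE_MASK) != BDE64_TYPE_BDE_64)
                break;
            len += ts & BDE64_SIZE_MASK;
        }
        return len;
    }

    int main(void)
    {
        struct bde_le bpl[2] = { { 0, 0, 64 }, { 0, 0, 0x40000000u | 128 } };

        printf("xmit_len=%u\n", xmit_len(bpl, 2)); /* 64: stops at BDE 2 */
        return 0;
    }
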
+void
+lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
+{
+ phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
+}
+
+static void
+__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ IOCB_t *icmd;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ icmd->un.xseq64.w5.hcsw.Fctl = LA;
+ if (last_seq)
+ icmd->un.xseq64.w5.hcsw.Fctl |= LS;
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = rctl;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ switch (cr_cx_cmd) {
+ case CMD_XMIT_SEQUENCE64_CR:
+ icmd->ulpContext = rpi;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+ break;
+ case CMD_XMIT_SEQUENCE64_CX:
+ icmd->ulpContext = ox_id;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64 *bpl;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Words 0 - 2 */
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
+ wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
+ wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
+
+ /* Word 5 */
+ bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
+ bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
+ bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
+ bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
+
+ bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
+ CMD_XMIT_SEQUENCE64_WQE);
+
+ /* Word 7 */
+ bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
+
+ /* Word 9 */
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
+
+ /* Word 12 */
+ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
+ wqe->xmit_sequence.xmit_len = full_size;
+ else
+ wqe->xmit_sequence.xmit_len =
+ wqe->xmit_sequence.bde.tus.f.bdeSize;
+}
+
+void
+lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
+ rctl, last_seq, cr_cx_cmd);
+}
+
+static void
+__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
+{
+ IOCB_t *icmd = NULL;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ /* Word 5 */
+ icmd->un.acxri.abortContextTag = ulp_context;
+ icmd->un.acxri.abortIoTag = iotag;
+
+ if (ia) {
+ /* Word 7 */
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ } else {
+ /* Word 3 */
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+
+ /* Word 7 */
+ icmd->ulpClass = ulp_class;
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ }
+
+ /* Word 7 */
+ icmd->ulpLe = 1;
+}
+
+static void
+__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
+{
+ union lpfc_wqe128 *wqe;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 3 */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+ if (ia)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
+
+ /* Word 8 */
+ wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
+
+ /* Word 10 */
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+
+ /* Word 11 */
+ if (wqec)
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+}
+
+void
+lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia, bool wqec)
+{
+ phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
+ cqid, ia, wqec);
+}
+
/**
* lpfc_sli_api_table_setup - Set up sli api function jump table
* @phba: The hba struct for which this call is being executed.
@@ -11170,11 +11195,19 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
break;
case LPFC_PCI_DEV_OC:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11182,7 +11215,6 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
dev_grp);
return -ENODEV;
}
- phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
return 0;
}
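
lpfc_sli_api_table_setup() binds each __lpfc_sli_* entry point exactly once, keyed by PCI device group, so the prep and issue paths dispatch through phba without per-I/O SLI-revision branches. A compact userspace sketch of the jump-table pattern (all names and values below are stand-ins, not driver definitions):

    #include <stdio.h>

    struct iocbq;                           /* opaque request object */
    typedef void (*prep_fn)(struct iocbq *q);

    struct hba {
        int dev_grp;                        /* 0 = SLI-3 class, 1 = SLI-4 class */
        prep_fn prep_gen_req;               /* bound once in table_setup() */
    };

    static void prep_gen_req_s3(struct iocbq *q) { printf("s3 prep\n"); }
    static void prep_gen_req_s4(struct iocbq *q) { printf("s4 prep\n"); }

    static int table_setup(struct hba *h)
    {
        switch (h->dev_grp) {
        case 0: h->prep_gen_req = prep_gen_req_s3; break;
        case 1: h->prep_gen_req = prep_gen_req_s4; break;
        default: return -1;                 /* unknown device group */
        }
        return 0;
    }

    int main(void)
    {
        struct hba h = { .dev_grp = 1 };

        if (!table_setup(&h))
            h.prep_gen_req(NULL);           /* callers stay revision-agnostic */
        return 0;
    }
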
@@ -11201,15 +11233,15 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
struct lpfc_io_buf *lpfc_cmd;
- if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
if (unlikely(!phba->sli4_hba.hdwq))
return NULL;
/*
* for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
+ if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
+ lpfc_cmd = piocb->io_buf;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
@@ -11243,7 +11275,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
unsigned long iflags;
int rc;
+ /* If the PCI channel is in offline state, do not post iocbs. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IOCB_ERROR;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
pring = lpfc_sli4_calc_ring(phba, piocb);
@@ -11380,8 +11418,8 @@ lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3095 Event Context not found, no "
"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
- iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
- vpi, rpi);
+ vpi, rpi, iocbq->iocb.ulpStatus,
+ iocbq->iocb.ulpContext);
}
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
@@ -12229,47 +12267,33 @@ static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
- uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb = NULL;
-
- if (irsp->ulpStatus) {
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 cmnd = get_job_cmnd(phba, cmdiocb);
+ if (ulp_status) {
/*
* Assume that the port already completed and returned, or
* will return the iocb. Just Log the message.
*/
- abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
- abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
-
- spin_lock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
- if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
- irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
- spin_unlock_irq(&phba->hbalock);
+ if (cmnd == CMD_ABORT_XRI_CX &&
+ ulp_status == IOSTAT_LOCAL_REJECT &&
+ ulp_word4 == IOERR_ABORT_REQUESTED) {
goto release_iocb;
}
- if (abort_iotag != 0 &&
- abort_iotag <= phba->sli.last_iotag)
- abort_iocb =
- phba->sli.iocbq_lookup[abort_iotag];
- } else
- /* For sli4 the abort_tag is the XRI,
- * so the abort routine puts the iotag of the iocb
- * being aborted in the context field of the abort
- * IOCB.
- */
- abort_iocb = phba->sli.iocbq_lookup[abort_context];
+ }
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb x%px "
- "with tag %x context %x, abort status %x, "
- "abort code %x\n",
- abort_iocb, abort_iotag, abort_context,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ "with io cmd xri %x abort tag : x%x, "
+ "abort status %x abort code %x\n",
+ cmdiocb, get_job_abtsiotag(phba, cmdiocb),
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ get_wqe_reqtag(cmdiocb) :
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ ulp_status, ulp_word4);
- spin_unlock_irq(&phba->hbalock);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -12291,26 +12315,46 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = NULL;
- IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ IOCB_t *irsp;
+ LPFC_MBOXQ_t *mbox;
+ u32 ulp_command, ulp_status, ulp_word4, iotag;
+
+ ulp_command = get_job_cmnd(phba, cmdiocb);
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+
+ /* It is possible for a PLOGI_RJT on NPIV ports to get aborted.
+ * The MBX_REG_LOGIN64 mbox command is freed back to the
+ * mbox_mem_pool here.
+ */
+ if (cmdiocb->context_un.mbox) {
+ mbox = cmdiocb->context_un.mbox;
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ cmdiocb->context_un.mbox = NULL;
+ }
+ }
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0139 Ignoring ELS cmd code x%x completion Data: "
- "x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ "x%x x%x x%x x%px\n",
+ ulp_command, ulp_status, ulp_word4, iotag,
+ cmdiocb->ndlp);
/*
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy.
*/
- if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
- ndlp = cmdiocb->context_un.ndlp;
+ if (ulp_command == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
- } else {
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ else
lpfc_els_free_iocb(phba, cmdiocb);
- }
lpfc_nlp_put(ndlp);
}
@@ -12336,28 +12380,29 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
- IOCB_t *icmd = NULL;
- IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
unsigned long iflags;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
+ u32 ulp_command = get_job_cmnd(phba, cmdiocb);
+ u16 ulp_context, iotag;
+ bool ia;
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
* being aborted.
*/
- icmd = &cmdiocb->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (ulp_command == CMD_ABORT_XRI_WQE ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
return IOCB_ABORTING;
if (!pring) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
return retval;
}
@@ -12367,10 +12412,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
if ((vport->load_flag & FC_UNLOADING) &&
pring->ringno == LPFC_ELS_RING) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
return retval;
}
@@ -12382,43 +12427,47 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* This signals the response to set the correct status
* before calling the completion handler
*/
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
- iabt = &abtsiocbp->iocb;
- iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
- iabt->un.acxri.abortContextTag = icmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4) {
- iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
- if (pring->ringno == LPFC_ELS_RING)
- iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ ulp_context = cmdiocb->sli4_xritag;
+ iotag = abtsiocbp->iotag;
} else {
- iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ iotag = cmdiocb->iocb.ulpIoTag;
if (pring->ringno == LPFC_ELS_RING) {
- ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
- iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
+ ndlp = cmdiocb->ndlp;
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = cmdiocb->iocb.ulpContext;
}
}
- iabt->ulpLe = 1;
- iabt->ulpClass = icmd->ulpClass;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
- abtsiocbp->iocb_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
- if (cmdiocb->iocb_flag & LPFC_IO_FOF)
- abtsiocbp->iocb_flag |= LPFC_IO_FOF;
if (phba->link_state < LPFC_LINK_UP ||
(phba->sli_rev == LPFC_SLI_REV4 &&
- phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
- iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+ phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
+ (phba->link_flag & LS_EXTERNAL_LOOPBACK))
+ ia = true;
else
- iabt->ulpCommand = CMD_ABORT_XRI_CN;
+ ia = false;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
+ cmdiocb->iocb.ulpClass,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, false);
+
+ abtsiocbp->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
+
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocbp->cmd_flag |= LPFC_IO_FOF;
if (cmpl)
- abtsiocbp->iocb_cmpl = cmpl;
+ abtsiocbp->cmd_cmpl = cmpl;
else
- abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
abtsiocbp->vport = vport;
if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -12438,14 +12487,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abort_iotag_exit:
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0339 Abort xri x%x, original iotag x%x, "
- "abort cmd iotag x%x retval x%x\n",
- iabt->un.acxri.abortIoTag,
- iabt->un.acxri.abortContextTag,
- abtsiocbp->iotag, retval);
-
+ "0339 Abort IO XRI x%x, Original iotag x%x, "
+ "abort tag x%x Cmdjob : x%px Abortjob : x%px "
+ "retval x%x\n",
+ ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
+ retval);
if (retval) {
- cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocbp);
}
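
The ia flag above decides between a real ABTS on the wire and a local close of the exchange: with the link down, an SLI4 link-down status, or external loopback, there is no point transmitting the ABTS. A trivial sketch of that predicate, with hypothetical state names:

    #include <stdbool.h>
    #include <stdio.h>

    enum link_state { LINK_DOWN, LINK_UP };

    /* true => close the XRI locally instead of sending ABTS */
    static bool immediate_abort(enum link_state ls, bool external_loopback)
    {
        return ls < LINK_UP || external_loopback;
    }

    int main(void)
    {
        printf("ia=%d\n", immediate_abort(LINK_UP, true)); /* 1: loopback */
        return 0;
    }
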
@@ -12503,7 +12552,7 @@ static int
lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
struct lpfc_vport *vport)
{
- IOCB_t *icmd = NULL;
+ u8 ulp_command;
/* No null ptr vports */
if (!iocbq || iocbq->vport != vport)
@@ -12512,12 +12561,13 @@ lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
/* iocb must be for FCP IO, already exists on the TX cmpl queue,
* can't be premarked as driver aborted, nor be an ABORT iocb itself
*/
- icmd = &iocbq->iocb;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
- (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
- (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN))
+ ulp_command = get_job_cmnd(vport->phba, iocbq);
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
+ (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE))
return -EINVAL;
return 0;
@@ -12609,9 +12659,9 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
- IOCB_t *icmd = NULL;
int sum, i;
unsigned long iflags;
+ u8 ulp_command;
spin_lock_irqsave(&phba->hbalock, iflags);
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
@@ -12619,14 +12669,15 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
if (!iocbq || iocbq->vport != vport)
continue;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
continue;
/* Include counting outstanding aborts */
- icmd = &iocbq->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
+ ulp_command = get_job_cmnd(phba, iocbq);
+ if (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE) {
sum++;
continue;
}
@@ -12641,33 +12692,6 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
}
/**
- * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
- * @phba: Pointer to HBA context object
- * @cmdiocb: Pointer to command iocb object.
- * @wcqe: pointer to the complete wcqe
- *
- * This function is called when an aborted FCP iocb completes. This
- * function is called by the ring event handler with no lock held.
- * This function frees the iocb. It is called for sli-4 adapters.
- **/
-void
-lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *wcqe)
-{
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3017 ABORT_XRI_CN completing on rpi x%x "
- "original iotag x%x, abort cmd iotag x%x "
- "status 0x%x, reason 0x%x\n",
- cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag,
- (bf_get(lpfc_wcqe_c_status, wcqe)
- & LPFC_IOCB_STATUS_MASK),
- wcqe->parameter);
- lpfc_sli_release_iocbq(phba, cmdiocb);
-}
-
-/**
* lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
@@ -12682,13 +12706,15 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3096 ABORT_XRI_CN completing on rpi x%x "
+ "3096 ABORT_XRI_CX completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"status 0x%x, reason 0x%x\n",
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->sli4_xritag :
cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4]);
+ get_job_abtsiotag(phba, cmdiocb),
+ cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
+ get_job_word4(phba, rspiocb));
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -12729,7 +12755,6 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
int errcnt = 0, ret_val = 0;
unsigned long iflags;
int i;
- void *fcp_cmpl = NULL;
/* all I/Os are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH)
@@ -12748,13 +12773,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV3) {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
- fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
} else if (phba->sli_rev == LPFC_SLI_REV4) {
pring = lpfc_sli4_calc_ring(phba, iocbq);
- fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
}
ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
- fcp_cmpl);
+ lpfc_sli_abort_fcp_cmpl);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ret_val != IOCB_SUCCESS)
errcnt++;
@@ -12796,12 +12819,13 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
struct lpfc_hba *phba = vport->phba;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_iocbq *abtsiocbq;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
struct lpfc_iocbq *iocbq;
- IOCB_t *icmd;
int sum, i, ret_val;
unsigned long iflags;
struct lpfc_sli_ring *pring_s4 = NULL;
+ u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
+ bool ia;
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12846,8 +12870,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
* If the iocbq is already being aborted, don't take a second
* action, but do count it.
*/
- if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -12863,41 +12887,50 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
}
- icmd = &iocbq->iocb;
- abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
- abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- abtsiocbq->iocb.un.acxri.abortIoTag =
- iocbq->sli4_xritag;
- else
- abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
- abtsiocbq->iocb.ulpLe = 1;
- abtsiocbq->iocb.ulpClass = icmd->ulpClass;
- abtsiocbq->vport = vport;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocbq->iocb_flag & LPFC_IO_FOF)
- abtsiocbq->iocb_flag |= LPFC_IO_FOF;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = abtsiocbq->iotag;
+ ulp_context = iocbq->sli4_xritag;
+ cqid = lpfc_cmd->hdwq->io_cq_map;
+ } else {
+ iotag = iocbq->iocb.ulpIoTag;
+ if (pring->ringno == LPFC_ELS_RING) {
+ ndlp = iocbq->ndlp;
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = iocbq->iocb.ulpContext;
+ }
+ }
ndlp = lpfc_cmd->rdata->pnode;
if (lpfc_is_link_up(phba) &&
- (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
- abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
+ !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
+ ia = false;
else
- abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+ ia = true;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
+ iocbq->iocb.ulpClass, cqid,
+ ia, false);
+
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
+ if (iocbq->cmd_flag & LPFC_IO_FCP)
+ abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->cmd_flag & LPFC_IO_FOF)
+ abtsiocbq->cmd_flag |= LPFC_IO_FOF;
/* Setup callback routine and issue the command. */
- abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
/*
* Indicate the IO is being aborted by the driver and set
* the caller's flag into the aborted IO.
*/
- iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
@@ -12944,9 +12977,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
wait_queue_head_t *pdone_q;
unsigned long iflags;
struct lpfc_io_buf *lpfc_cmd;
+ size_t offset = offsetof(struct lpfc_iocbq, wqe);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
/*
* A time out has occurred for the iocb. If a time out
@@ -12955,26 +12989,27 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
*/
spin_unlock_irqrestore(&phba->hbalock, iflags);
- cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
- cmdiocbq->wait_iocb_cmpl = NULL;
- if (cmdiocbq->iocb_cmpl)
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
+ cmdiocbq->wait_cmd_cmpl = NULL;
+ if (cmdiocbq->cmd_cmpl)
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
else
lpfc_sli_release_iocbq(phba, cmdiocbq);
return;
}
- cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
- if (cmdiocbq->context2 && rspiocbq)
- memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
- &rspiocbq->iocb, sizeof(IOCB_t));
+ /* Copy the contents of the local rspiocb into the caller's buffer. */
+ cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
+ if (cmdiocbq->rsp_iocb && rspiocbq)
+ memcpy((char *)cmdiocbq->rsp_iocb + offset,
+ (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
/* Set the exchange busy flag for task management commands */
- if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
- !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
- cur_iocbq);
- if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ cur_iocbq);
+ if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
else
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
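
The memcpy above intentionally starts at offsetof(struct lpfc_iocbq, wqe), so the list linkage, tags, and wait-queue bookkeeping at the head of the caller's iocbq are preserved while the WQE/WCQE payload is copied. A self-contained sketch of the idiom over a hypothetical struct:

    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    struct job {
        struct job *next;       /* must not be clobbered by the copy */
        int tag;
        char wqe[16];           /* copy starts here */
        int status;
    };

    static void copy_response(struct job *dst, const struct job *src)
    {
        size_t off = offsetof(struct job, wqe);

        memcpy((char *)dst + off, (const char *)src + off,
               sizeof(*src) - off);
    }

    int main(void)
    {
        struct job rsp = { .status = 7 }, out = { .tag = 42 };

        copy_response(&out, &rsp);
        printf("tag=%d status=%d\n", out.tag, out.status); /* tag survives */
        return 0;
    }
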
@@ -12993,7 +13028,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
* @piocbq: Pointer to command iocb.
* @flag: Flag to test.
*
- * This routine grabs the hbalock and then test the iocb_flag to
+ * This routine grabs the hbalock and then tests the cmd_flag to
* see if the passed in flag is set.
* Returns:
* 1 if flag is set.
@@ -13007,7 +13042,7 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
int ret;
spin_lock_irqsave(&phba->hbalock, iflags);
- ret = piocbq->iocb_flag & flag;
+ ret = piocbq->cmd_flag & flag;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
@@ -13022,14 +13057,14 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. The iocb_cmpl field of the shall be used
+ * iocb to complete. The cmd_cmpl field of the iocb shall be used
* to handle iocbs which time out. If the field is NULL, the
* function shall free the iocbq structure. If more clean up is
* needed, the caller is expected to provide a completion function
* that will provide the needed clean up. If the iocb command is
* not completed within timeout seconds, the function will either
- * free the iocbq structure (if iocb_cmpl == NULL) or execute the
- * completion function set in the iocb_cmpl field and then return
+ * free the iocbq structure (if cmd_cmpl == NULL) or execute the
+ * completion function set in the cmd_cmpl field and then return
* a status of IOCB_TIMEDOUT. The caller should not free the iocb
* resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using an
@@ -13041,7 +13076,7 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* This function assumes that the iocb completions occur while
* this function sleep. So, this function cannot be called from
* the thread which process iocb completion for this ring.
- * This function clears the iocb_flag of the iocb object before
+ * This function clears the cmd_flag of the iocb object before
* issuing the iocb and the iocb completion handler sets this
* flag and wakes this thread when the iocb completes.
* The contents of the response iocb will be copied to prspiocbq
@@ -13067,24 +13102,26 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
unsigned long iflags;
bool iocb_completed = true;
- if (phba->sli_rev >= LPFC_SLI_REV4)
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
pring = lpfc_sli4_calc_ring(phba, piocb);
- else
+ } else
pring = &phba->sli.sli3_ring[ring_number];
/*
- * If the caller has provided a response iocbq buffer, then context2
+ * If the caller has provided a response iocbq buffer, then rsp_iocb
* is NULL or its an error.
*/
if (prspiocbq) {
- if (piocb->context2)
+ if (piocb->rsp_iocb)
return IOCB_ERROR;
- piocb->context2 = prspiocbq;
+ piocb->rsp_iocb = prspiocbq;
}
- piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
- piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+ piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
+ piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
+ piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -13102,7 +13139,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+ if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
/*
* IOCB timed out. Inform the wake iocb wait
@@ -13110,7 +13147,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
*/
iocb_completed = false;
- piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb_completed) {
@@ -13162,10 +13199,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
}
if (prspiocbq)
- piocb->context2 = NULL;
+ piocb->rsp_iocb = NULL;
piocb->context_un.wait_queue = NULL;
- piocb->iocb_cmpl = NULL;
+ piocb->cmd_cmpl = NULL;
return retval;
}
@@ -13371,6 +13408,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t if_type, portsmphr;
struct lpfc_register portstat_reg;
+ u32 logmask;
/*
* For now, use the SLI4 device internal unrecoverable error
@@ -13421,7 +13459,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ logmask = LOG_TRACE_EVENT;
+ if (phba->work_status[0] ==
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+ logmask = LOG_SLI;
+ lpfc_printf_log(phba, KERN_ERR, logmask,
"2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
@@ -14100,135 +14143,19 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
- * @phba: pointer to lpfc hba data structure
- * @pIocbIn: pointer to the rspiocbq
- * @pIocbOut: pointer to the cmdiocbq
- * @wcqe: pointer to the complete wcqe
- *
- * This routine transfers the fields of a command iocbq to a response iocbq
- * by copying all the IOCB fields from command iocbq and transferring the
- * completion status information from the complete wcqe.
- **/
-static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
- struct lpfc_iocbq *pIocbIn,
- struct lpfc_iocbq *pIocbOut,
- struct lpfc_wcqe_complete *wcqe)
-{
- int numBdes, i;
- unsigned long iflags;
- uint32_t status, max_response;
- struct lpfc_dmabuf *dmabuf;
- struct ulp_bde64 *bpl, bde;
- size_t offset = offsetof(struct lpfc_iocbq, iocb);
-
- memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
- sizeof(struct lpfc_iocbq) - offset);
- /* Map WCQE parameters into irspiocb parameters */
- status = bf_get(lpfc_wcqe_c_status, wcqe);
- pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
- if (pIocbOut->iocb_flag & LPFC_IO_FCP)
- if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
- pIocbIn->iocb.un.fcpi.fcpi_parm =
- pIocbOut->iocb.un.fcpi.fcpi_parm -
- wcqe->total_data_placed;
- else
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- else {
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- switch (pIocbOut->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- bde.tus.w = le32_to_cpu(bpl[1].tus.w);
- max_response = bde.tus.f.bdeSize;
- break;
- case CMD_GEN_REQUEST64_CR:
- max_response = 0;
- if (!pIocbOut->context3)
- break;
- numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
- sizeof(struct ulp_bde64);
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- max_response += bde.tus.f.bdeSize;
- }
- break;
- default:
- max_response = wcqe->total_data_placed;
- break;
- }
- if (max_response < wcqe->total_data_placed)
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
- else
- pIocbIn->iocb.un.genreq64.bdl.bdeSize =
- wcqe->total_data_placed;
- }
-
- /* Convert BG errors for completion status */
- if (status == CQE_STATUS_DI_ERROR) {
- pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
-
- if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
- pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
- else
- pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
-
- pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
- if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_GUARD_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_APPTAG_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_REFTAG_ERR_MASK;
-
- /* Check to see if there was any good data before the error */
- if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_HI_WATER_MARK_PRESENT_MASK;
- pIocbIn->iocb.unsli3.sli3_bg.bghm =
- wcqe->total_data_placed;
- }
-
- /*
- * Set ALL the error bits to indicate we don't know what
- * type of error it is.
- */
- if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
- BGS_GUARD_ERR_MASK);
- }
-
- /* Pick up HBA exchange busy condition */
- if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-}
-
-/**
- * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
* @phba: Pointer to HBA context object.
* @irspiocbq: Pointer to work-queue completion queue entry.
*
* This routine handles an ELS work-queue completion event and construct
- * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
* discovery engine to handle.
*
* Return: Pointer to the receive IOCBQ, NULL otherwise.
**/
static struct lpfc_iocbq *
-lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
- struct lpfc_iocbq *irspiocbq)
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *irspiocbq)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
@@ -14240,11 +14167,13 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
if (unlikely(!cmdiocbq)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
@@ -14254,13 +14183,18 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;
}
- spin_lock_irqsave(&pring->ring_lock, iflags);
+ memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
+ memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
+
/* Put the iocb back on the txcmplq */
lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- /* Fake the irspiocbq and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
return irspiocbq;
}
@@ -15061,7 +14995,6 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
{
struct lpfc_sli_ring *pring = cq->pring;
struct lpfc_iocbq *cmdiocbq;
- struct lpfc_iocbq irspiocbq;
unsigned long iflags;
/* Check for response status */
@@ -15087,9 +15020,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Look up the FCP command IOCB and create pseudo response IOCB */
spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0374 FCP complete with no corresponding "
@@ -15100,39 +15033,31 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
- if (cmdiocbq->iocb_cmpl == NULL) {
- if (cmdiocbq->wqe_cmpl) {
- /* For FCP the flag is cleared in wqe_cmpl */
- if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
- cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
- /* Pass the cmd_iocb and the wcqe to the upper layer */
- (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
- return;
+ if (cmdiocbq->cmd_cmpl) {
+ /* For FCP the flag is cleared in cmd_cmpl */
+ if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
+
+ /* Pass the cmd_iocb and the wcqe to the upper layer */
+ memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
+ sizeof(struct lpfc_wcqe_complete));
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
+ } else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function "
"iotag: (%d)\n",
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- return;
- }
-
- /* Only SLI4 non-IO commands stil use IOCB */
- /* Fake the irspiocb and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
-
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
}
-
- /* Pass the cmd_iocb and the rsp state to the upper layer */
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
/**
@@ -16068,7 +15993,6 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
@@ -17990,8 +17914,8 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
* the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
- phba->sli4_hba.max_cfg_param.max_xri, 0);
+ xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri);
if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
spin_unlock_irq(&phba->hbalock);
return NO_XRI;
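
find_first_zero_bit() is a drop-in here because the old call searched from offset 0 anyway; the allocator is just a bitmap scan plus a set under hbalock. A userspace sketch of the shape, open-coding the helpers that the kernel provides in <linux/bitmap.h>:

    #include <stdio.h>

    #define MAX_XRI 64
    static unsigned long long xri_bmask;      /* bit set => XRI in use */

    static int alloc_xri(void)
    {
        int xri;

        for (xri = 0; xri < MAX_XRI; xri++)   /* find first zero bit */
            if (!(xri_bmask & (1ULL << xri)))
                break;
        if (xri >= MAX_XRI)
            return -1;                        /* NO_XRI */
        xri_bmask |= 1ULL << xri;             /* mark allocated */
        return xri;
    }

    int main(void)
    {
        printf("xri0=%d xri1=%d\n", alloc_xri(), alloc_xri());
        return 0;
    }
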
@@ -18443,7 +18367,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
case FC_RCTL_ELS_REP: /* extended link services reply */
case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
- case FC_RCTL_BA_NOP: /* basic link service NOP */
case FC_RCTL_BA_ABTS: /* basic link service abort */
case FC_RCTL_BA_RMC: /* remove connection */
case FC_RCTL_BA_ACC: /* basic accept */
@@ -18464,6 +18387,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
return lpfc_fc_frame_check(phba, fc_hdr);
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
default:
goto drop;
}
@@ -18848,11 +18772,8 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
- struct lpfc_nodelist *ndlp;
-
if (cmd_iocbq) {
- ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
- lpfc_nlp_put(ndlp);
+ lpfc_nlp_put(cmd_iocbq->ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
}
@@ -18860,8 +18781,8 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
- rsp_iocbq->iocb.ulpStatus,
- rsp_iocbq->iocb.un.ulpWord[4]);
+ get_job_ulpstatus(phba, rsp_iocbq),
+ get_job_word4(phba, rsp_iocbq));
}
/**
@@ -18903,7 +18824,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
uint32_t sid, fctl;
- IOCB_t *icmd;
+ union lpfc_wqe128 *icmd;
int rc;
if (!lpfc_is_link_up(phba))
@@ -18931,32 +18852,22 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
if (!ctiocb)
return;
+ icmd = &ctiocb->wqe;
+
/* Extract the F_CTL field from FC_HDR */
fctl = sli4_fctl_from_fc_hdr(fc_hdr);
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.bdeSize = 0;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
-
- /* Fill in the rest of iocb fields */
- icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
- icmd->ulpBdeCount = 0;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- ctiocb->context1 = lpfc_nlp_get(ndlp);
- if (!ctiocb->context1) {
+ ctiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!ctiocb->ndlp) {
lpfc_sli_release_iocbq(phba, ctiocb);
return;
}
ctiocb->vport = phba->pport;
- ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
+ ctiocb->abort_rctl = FC_RCTL_BA_ACC;
if (fctl & FC_FC_EX_CTX)
/* Exchange responder sent the abort so we
@@ -18976,10 +18887,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
*/
if ((fctl & FC_FC_EX_CTX) &&
(lxri > lpfc_sli4_get_iocb_cnt(phba))) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
/* If BA_ABTS failed to abort a partially assembled receive sequence,
@@ -18987,10 +18900,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* the IOCB for a BA_RJT.
*/
if (aborted == false) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
if (fctl & FC_FC_EX_CTX) {
@@ -18998,31 +18913,41 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* of BA_ACC will use OX_ID from ABTS for the XRI_TAG
* field and RX_ID from ABTS for RX_ID field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
* XRI_TAG field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
}
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
- bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
+
+ /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
+ bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
+
+ /* Use CT=VPI */
+ bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
+ ndlp->nlp_DID);
+ bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
/* Xmit CT abts response on exchange <xid> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ ctiocb->abort_rctl, oxid, phba->link_state);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2925 Failed to issue CT ABTS RSP x%x on "
"xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ ctiocb->abort_rctl, oxid,
phba->link_state);
lpfc_nlp_put(ndlp);
- ctiocb->context1 = NULL;
+ ctiocb->ndlp = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
}
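
The BA_ACC/BA_RJT construction is built entirely from bf_set() calls: each WQE field is described by a word index, shift, and mask, and bf_set() read-modify-writes only that field. A standalone sketch of the idiom with hypothetical field definitions:

    #include <stdint.h>
    #include <stdio.h>

    struct wqe { uint32_t word[16]; };

    /* Read-modify-write one bitfield, like the driver's bf_set(). */
    #define FIELD_SET(w, ix, shift, mask, val)                              \
        ((w)->word[ix] = ((w)->word[ix] & ~((uint32_t)(mask) << (shift))) | \
                         (((uint32_t)(val) & (mask)) << (shift)))

    /* Hypothetical BA_RJT reason fields, 8 bits each in word 9. */
    #define RJT_RSNC_SHIFT 16
    #define RJT_EXPC_SHIFT 8
    #define RJT_MASK 0xff

    int main(void)
    {
        struct wqe w = { { 0 } };

        FIELD_SET(&w, 9, RJT_RSNC_SHIFT, RJT_MASK, 0x09); /* unable */
        FIELD_SET(&w, 9, RJT_EXPC_SHIFT, RJT_MASK, 0x03); /* invalid XID */
        printf("word9=0x%08x\n", (unsigned)w.word[9]);
        return 0;
    }
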
@@ -19142,7 +19067,6 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
struct fc_frame_header *fc_hdr;
uint32_t sid;
uint32_t len, tot_len;
- struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
@@ -19155,40 +19079,40 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
- first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+ first_iocbq->wcqe_cmpl.total_data_placed = 0;
+ bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
+ IOSTAT_SUCCESS);
first_iocbq->vport = vport;
/* Check FC Header to see what TYPE of frame we are rcv'ing */
if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
- first_iocbq->iocb.un.rcvels.parmRo =
- sli4_did_from_fc_hdr(fc_hdr);
- first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
- } else
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
- first_iocbq->iocb.ulpContext = NO_XRI;
- first_iocbq->iocb.unsli3.rcvsli3.ox_id =
- be16_to_cpu(fc_hdr->fh_ox_id);
- /* iocbq is prepped for internal consumption. Physical vpi. */
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->phba->vpi_ids[vport->vpi];
- /* put the first buffer into the first IOCBq */
+ bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
+ sli4_did_from_fc_hdr(fc_hdr));
+ }
+
+ bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ NO_XRI);
+ bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ be16_to_cpu(fc_hdr->fh_ox_id));
+
+ /* put the first buffer into the first iocb */
tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
+ first_iocbq->bpl_dmabuf = NULL;
+ /* Keep track of the BDE count */
+ first_iocbq->wcqe_cmpl.word3 = 1;
- first_iocbq->context2 = &seq_dmabuf->dbuf;
- first_iocbq->context3 = NULL;
- first_iocbq->iocb.ulpBdeCount = 1;
if (tot_len > LPFC_DATA_BUF_SIZE)
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
-
- first_iocbq->iocb.un.rcvels.remoteID = sid;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+ first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
}
iocbq = first_iocbq;
/*
@@ -19200,30 +19124,25 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
- if (!iocbq->context3) {
- iocbq->context3 = d_buf;
- iocbq->iocb.ulpBdeCount++;
+ if (!iocbq->bpl_dmabuf) {
+ iocbq->bpl_dmabuf = d_buf;
+ iocbq->wcqe_cmpl.word3++;
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- if (len > LPFC_DATA_BUF_SIZE)
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
- else
- pbde->tus.f.bdeSize = len;
-
- iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+ iocbq->unsol_rcv_len = len;
+ iocbq->wcqe_cmpl.total_data_placed += len;
tot_len += len;
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
if (first_iocbq) {
- first_iocbq->iocb.ulpStatus =
- IOSTAT_FCP_RSP_ERROR;
- first_iocbq->iocb.un.ulpWord[4] =
- IOERR_NO_RESOURCES;
+ bf_set(lpfc_wcqe_c_status,
+ &first_iocbq->wcqe_cmpl,
+ IOSTAT_FCP_RSP_ERROR);
+ first_iocbq->wcqe_cmpl.parameter =
+ IOERR_NO_RESOURCES;
}
lpfc_in_buf_free(vport->phba, d_buf);
continue;
@@ -19232,19 +19151,21 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
- iocbq->context2 = d_buf;
- iocbq->context3 = NULL;
- iocbq->iocb.ulpBdeCount = 1;
+ iocbq->cmd_dmabuf = d_buf;
+ iocbq->bpl_dmabuf = NULL;
+ iocbq->wcqe_cmpl.word3 = 1;
+
if (len > LPFC_DATA_BUF_SIZE)
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ len;
tot_len += len;
- iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
-
- iocbq->iocb.un.rcvels.remoteID = sid;
+ iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
@@ -19276,16 +19197,18 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
if (!lpfc_complete_unsol_iocb(phba,
phba->sli4_hba.els_wq->pring,
iocbq, fc_hdr->fh_r_ctl,
- fc_hdr->fh_type))
+ fc_hdr->fh_type)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
+ }
/* Free iocb created in lpfc_prep_seq */
list_for_each_entry_safe(curr_iocb, next_iocb,
- &iocbq->list, list) {
+ &iocbq->list, list) {
list_del_init(&curr_iocb->list);
lpfc_sli_release_iocbq(phba, curr_iocb);
}
@@ -19296,7 +19219,7 @@ static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_dmabuf *pcmd = cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
if (pcmd && pcmd->virt)
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
@@ -19312,7 +19235,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq = NULL;
- union lpfc_wqe *wqe;
+ union lpfc_wqe128 *pwqe;
struct lpfc_dmabuf *pcmd = NULL;
uint32_t frame_len;
int rc;
@@ -19347,34 +19270,46 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
/* copyin the payload */
memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
- /* fill in BDE's for command */
- iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
-
- iocbq->context2 = pcmd;
+ iocbq->cmd_dmabuf = pcmd;
iocbq->vport = vport;
- iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
- iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
- /*
- * Setup rest of the iocb as though it were a WQE
- * Build the SEND_FRAME WQE
- */
- wqe = (union lpfc_wqe *)&iocbq->iocb;
-
- wqe->send_frame.frame_len = frame_len;
- wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
- wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
- wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
- wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
- wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
- wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
-
- iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
- iocbq->iocb.ulpLe = 1;
- iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
+ iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
+ iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ iocbq->num_bdes = 0;
+
+ pwqe = &iocbq->wqe;
+ /* fill in BDE's for command */
+ pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
+ pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
+ pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
+ pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+
+ pwqe->send_frame.frame_len = frame_len;
+ pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
+ pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
+ pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
+ pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
+ pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
+ pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
+
+ pwqe->generic.wqe_com.word7 = 0;
+ pwqe->generic.wqe_com.word10 = 0;
+
+ bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
+ bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
+ bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
+ bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
+ bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
+ bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
+ bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
+ pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
+
+ iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
if (rc == IOCB_ERROR)
goto exit;
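
The six fc_hdr_wd0..wd5 assignments above copy the 24-byte FC frame header into the SEND_FRAME WQE as CPU-endian words. The same packing as a user-space sketch, with ntohl() standing in for be32_to_cpu():

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Pack a 24-byte (6-word) big-endian FC header into CPU-endian words. */
    static void pack_fc_hdr(const uint8_t hdr[24], uint32_t words[6])
    {
        uint32_t be;
        int i;

        for (i = 0; i < 6; i++) {
            memcpy(&be, hdr + 4 * i, sizeof(be)); /* avoid alignment traps */
            words[i] = ntohl(be);
        }
    }

    int main(void)
    {
        uint8_t hdr[24] = { 0x22, 0xff, 0xff, 0xfd }; /* sample R_CTL/D_ID */
        uint32_t w[6];

        pack_fc_hdr(hdr, w);
        return w[0] == 0x22fffffd ? 0 : 1;
    }
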
@@ -19668,7 +19603,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
@@ -20311,8 +20246,8 @@ next_priority:
* have been tested so that we can detect when we should
* change the priority level.
*/
- next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
- LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
}
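
Both hunks above replace find_next_{zero_,}bit(map, limit, 0) with the equivalent find_first_{zero_,}bit(map, limit); behavior is unchanged, only the more direct helper is used. The scan the zero-bit variant performs, as a plain-C sketch with the same return-limit-when-full convention:

    #include <limits.h>

    #define BITS_PER_ULONG (CHAR_BIT * sizeof(unsigned long))

    /* Return the index of the first clear bit, or limit if all are set. */
    static unsigned long first_zero_bit(const unsigned long *map,
                                        unsigned long limit)
    {
        unsigned long bit;

        for (bit = 0; bit < limit; bit++)
            if (!(map[bit / BITS_PER_ULONG] &
                  (1UL << (bit % BITS_PER_ULONG))))
                return bit;
        return limit;   /* caller maps this to LPFC_RPI_ALLOC_ERROR */
    }

    int main(void)
    {
        unsigned long map = 0x7;   /* bits 0..2 taken */
        return first_zero_bit(&map, BITS_PER_ULONG) == 3 ? 0 : 1;
    }
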
@@ -20654,11 +20589,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
}
lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return data_length;
}
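
lpfc_mbox_rsrc_cleanup() folds the repeated free-the-attached-buffer-then-free-the-mailbox sequence into one call. Roughly, as a sketch of the shape only (not the exact lpfc_mbox.c body, and not buildable outside the driver):

    /* Sketch: release an attached DMA buffer, then the mailbox itself.
     * The MBOX_THD_LOCKED variant uses the __lpfc_mbuf_free() form. */
    static void mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
                                  enum lpfc_mbox_ctx locked)
    {
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;

        if (mp) {
            if (locked == MBOX_THD_LOCKED)
                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
            else
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
            mbox->ctx_buf = NULL;
        }
        mempool_free(mbox, phba->mbox_mem_pool);
    }
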
@@ -20973,7 +20904,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct lpfc_nodelist *act_mbx_ndlp = NULL;
LIST_HEAD(mbox_cmd_list);
@@ -20999,8 +20929,12 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
- /* Put reference count for delayed processing */
+
+ /* This reference is local to this routine. The
+ * reference is removed at routine exit.
+ */
act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
}
@@ -21043,12 +20977,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mb->ctx_buf = NULL;
ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
@@ -21058,7 +20986,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
lpfc_nlp_put(ndlp);
}
}
- mempool_free(mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
}
/* Release the ndlp with the cleaned-up active mailbox command */
@@ -21089,10 +21017,9 @@ lpfc_drain_txq(struct lpfc_hba *phba)
struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
- struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
uint32_t txq_cnt = 0;
struct lpfc_queue *wq;
+ int ret = 0;
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
@@ -21131,44 +21058,33 @@ lpfc_drain_txq(struct lpfc_hba *phba)
txq_cnt);
break;
}
- sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
- if (!sglq) {
- __lpfc_sli_ringtx_put(phba, pring, piocbq);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
- break;
- }
txq_cnt--;
- /* The xri and iocb resources secured,
- * attempt to issue request
- */
- piocbq->sli4_lxritag = sglq->sli4_lxritag;
- piocbq->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
- fail_msg = "to convert bpl to sgl";
- else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
- fail_msg = "to convert iocb to wqe";
- else if (lpfc_sli4_wq_put(wq, &wqe))
- fail_msg = " - Wq is full";
- else
- lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+ ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
+ if (ret && ret != IOCB_BUSY) {
+ fail_msg = " - Cannot send IO ";
+ piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ }
if (fail_msg) {
+ piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
/* Failed means we can't issue and need to cancel */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2822 IOCB failed %s iotag 0x%x "
- "xri 0x%x\n",
- fail_msg,
- piocbq->iotag, piocbq->sli4_xritag);
+ "xri 0x%x %d flg x%x\n",
+ fail_msg, piocbq->iotag,
+ piocbq->sli4_xritag, ret,
+ piocbq->cmd_flag);
list_add_tail(&piocbq->list, &completions);
fail_msg = NULL;
}
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ if (txq_cnt == 0 || ret == IOCB_BUSY)
+ break;
}
-
/* Cancel all the IOCBs that cannot be issued */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ IOERR_SLI_ABORTED);
return txq_cnt;
}
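
The rewritten loop above replaces the open-coded sglq lookup, bpl-to-sgl conversion, and wq_put with a single __lpfc_sli_issue_iocb() call; IOCB_BUSY stops the drain so the remaining entries stay queued, and only hard failures are cancelled. The control flow, as a stand-alone sketch with a stubbed issue function and illustrative return codes:

    #include <stdio.h>

    enum { IOCB_SUCCESS, IOCB_BUSY, IOCB_ERROR };  /* illustrative values */

    /* Stub: the hypothetical work queue fills after three submissions. */
    static int issue_stub(int n) { return n < 3 ? IOCB_SUCCESS : IOCB_BUSY; }

    int main(void)
    {
        int txq_cnt = 5, sent = 0, ret;

        while (txq_cnt) {
            ret = issue_stub(sent);
            if (ret == IOCB_BUSY)
                break;             /* queue full: retry on the next drain */
            if (ret != IOCB_SUCCESS) {
                txq_cnt--;         /* hard failure: cancel this entry */
                continue;
            }
            sent++;
            txq_cnt--;
        }
        printf("sent %d, %d left queued\n", sent, txq_cnt);
        return 0;
    }
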
@@ -21216,14 +21132,14 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
if (cmd == CMD_XMIT_BLS_RSP64_WQE)
return sglq->sli4_xritag;
- numBdes = pwqeq->rsvd2;
+ numBdes = pwqeq->num_bdes;
if (numBdes) {
/* The addrHigh and addrLow fields within the WQE
* have not been byteswapped yet so there is no
* need to swap them back.
*/
- if (pwqeq->context3)
- dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
+ if (pwqeq->bpl_dmabuf)
+ dmabuf = pwqeq->bpl_dmabuf;
else
return xritag;
@@ -21317,7 +21233,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
uint32_t ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
- if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
+ if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
pring = phba->sli4_hba.nvmels_wq->pring;
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -21348,7 +21264,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVME_FCREQ and NVME_ABTS requests */
- if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
+ if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
@@ -21370,12 +21286,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVMET requests */
- if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+ if (pwqe->cmd_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
- ctxp = pwqe->context2;
+ ctxp = pwqe->context_un.axchg;
sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) {
pwqe->sli4_lxritag = sglq->sli4_lxritag;
@@ -21436,12 +21352,12 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return WQE_NORESOURCE;
/* Indicate the IO is being aborted by the driver. */
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
abtswqe = &abtsiocb->wqe;
memset(abtswqe, 0, sizeof(*abtswqe));
- if (!lpfc_is_link_up(phba))
+ if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
abtswqe->abort_cmd.rsrvd5 = 0;
@@ -21455,15 +21371,15 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
- abtsiocb->iocb_flag |= LPFC_IO_FCP;
- if (cmdiocb->iocb_flag & LPFC_IO_NVME)
- abtsiocb->iocb_flag |= LPFC_IO_NVME;
- if (cmdiocb->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
+ abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocb->cmd_flag |= LPFC_IO_FCP;
+ if (cmdiocb->cmd_flag & LPFC_IO_NVME)
+ abtsiocb->cmd_flag |= LPFC_IO_NVME;
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocb->cmd_flag |= LPFC_IO_FOF;
abtsiocb->vport = vport;
- abtsiocb->wqe_cmpl = cmpl;
+ abtsiocb->cmd_cmpl = cmpl;
lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
@@ -21474,7 +21390,7 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
if (retval) {
- cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocb);
}
@@ -21836,8 +21752,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
/* MUST zero fields if buffer is reused by another protocol */
lpfc_ncmd->nvmeCmd = NULL;
- lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
if (phba->cfg_xpsgl && !phba->nvmet_support &&
!list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
@@ -22218,7 +22133,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -22255,9 +22169,12 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
}
exit:
+ /* This is an embedded SLI4 mailbox with an external buffer allocated.
+ * Free the pcmd and then clean up with the correct routine.
+ */
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
kfree(pcmd);
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
return byte_cnt;
}
@@ -22449,7 +22366,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
return NULL;
}
- tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+ tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
GFP_ATOMIC,
&tmp->fcp_cmd_rsp_dma_handle);
@@ -22551,3 +22468,180 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
+
+/**
+ * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
+ * @phba: phba object
+ * @job: job entry of the command to be posted.
+ *
+ * Fill in the common fields of the WQE for the given command.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
+{
+ u8 cmnd;
+ u32 *pcmd;
+ u32 if_type = 0;
+ u32 fip, abort_tag;
+ struct lpfc_nodelist *ndlp = NULL;
+ union lpfc_wqe128 *wqe = &job->wqe;
+ u8 command_type = ELS_COMMAND_NON_FIP;
+
+ fip = phba->hba_flag & HBA_FIP_SUPPORT;
+ /* The fcp commands will set command type */
+ if (job->cmd_flag & LPFC_IO_FCP)
+ command_type = FCP_COMMAND;
+ else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
+ command_type = ELS_COMMAND_FIP;
+ else
+ command_type = ELS_COMMAND_NON_FIP;
+
+ abort_tag = job->iotag;
+ cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
+
+ switch (cmnd) {
+ case CMD_ELS_REQUEST64_WQE:
+ ndlp = job->ndlp;
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ pcmd = (u32 *)job->cmd_dmabuf->virt;
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_RDF ||
+ *pcmd == ELS_CMD_EDC ||
+ *pcmd == ELS_CMD_RSCN_XMT ||
+ *pcmd == ELS_CMD_FDISC ||
+ *pcmd == ELS_CMD_LOGO ||
+ *pcmd == ELS_CMD_QFPA ||
+ *pcmd == ELS_CMD_UVEM ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ job->vport->fc_myDID);
+
+ if ((*pcmd == ELS_CMD_FLOGI) &&
+ !(phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP))
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ } else if (pcmd) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
+ }
+
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ break;
+ case CMD_XMIT_ELS_RSP64_WQE:
+ ndlp = job->ndlp;
+
+ /* word4 */
+ wqe->xmit_els_rsp.word4 = 0;
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ if (job->vport->fc_flag & FC_PT2PT) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ if (job->vport->fc_myDID == Fabric_DID) {
+ bf_set(wqe_els_did,
+ &wqe->xmit_els_rsp.wqe_dest, 0);
+ }
+ }
+ }
+
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ }
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_GEN_REQUEST64_WQE:
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_SEQUENCE64_WQE:
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+
+ wqe->xmit_sequence.rsvd3 = 0;
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_BLS_RSP64_WQE:
+ bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+ bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+ bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ /* Overwrite the pre-set command type with OTHER_COMMAND */
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_FCP_ICMND64_WQE: /* task mgmt commands */
+ case CMD_ABORT_XRI_WQE: /* abort iotag */
+ case CMD_SEND_FRAME: /* mds loopback */
+ /* cases already formatted for sli4 wqe - no chgs necessary */
+ return;
+ default:
+ dump_stack();
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6207 Invalid command 0x%x\n",
+ cmnd);
+ break;
+ }
+
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
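
lpfc_sli_prep_wqe() stamps the common WQE words almost entirely through bf_set(). For readers new to the driver, bf_set()/bf_get() are shift-and-mask accessors over 32-bit words; a minimal stand-alone version of the idiom (field placement chosen purely for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define EX_FIELD_SHIFT 8      /* illustrative field: bits 8..15 */
    #define EX_FIELD_MASK  0xffu

    static void ex_set(uint32_t *word, uint32_t val)
    {
        *word = (*word & ~(EX_FIELD_MASK << EX_FIELD_SHIFT)) |
                ((val & EX_FIELD_MASK) << EX_FIELD_SHIFT);
    }

    static uint32_t ex_get(uint32_t word)
    {
        return (word >> EX_FIELD_SHIFT) & EX_FIELD_MASK;
    }

    int main(void)
    {
        uint32_t w = 0;

        ex_set(&w, 0x2E);         /* cf. the wqe_sof SOF byte above */
        printf("word=0x%08x field=0x%02x\n", w, ex_get(w));
        return 0;
    }
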
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 5161ccacea3e..cd33dfec758c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,7 +35,13 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
-union lpfc_vmid_iocb_tag {
+/* Enumeration to describe the thread lock context. */
+enum lpfc_mbox_ctx {
+ MBOX_THD_UNLOCKED,
+ MBOX_THD_LOCKED
+};
+
+union lpfc_vmid_tag {
uint32_t app_id;
uint8_t cs_ctl_vmid;
struct lpfc_vmid_context *vmid_context; /* UVEM context information */
@@ -69,16 +75,25 @@ struct lpfc_iocbq {
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
uint16_t hba_wqidx; /* index to HBA work queue */
struct lpfc_cq_event cq_event;
- struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
uint64_t isr_timestamp;
union lpfc_wqe128 wqe; /* SLI-4 */
IOCB_t iocb; /* SLI-3 */
+ struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
- uint8_t rsvd2;
- uint8_t priority; /* OAS priority */
- uint8_t retry; /* retry counter for IOCB cmd - if needed */
- uint32_t iocb_flag;
+ u32 unsol_rcv_len; /* Receive len in unsol path */
+
+ /* Pack the u8s together and pad them to a multiple of 4. */
+ u8 num_bdes; /* Number of BDEs */
+ u8 abort_bls; /* ABTS by initiator or responder */
+ u8 abort_rctl; /* ACC or RJT flag */
+ u8 priority; /* OAS priority */
+ u8 retry; /* retry counter for IOCB cmd - if needed */
+ u8 rsvd1; /* Pad for u32 */
+ u8 rsvd2; /* Pad for u32 */
+ u8 rsvd3; /* Pad for u32 */
+
+ u32 cmd_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -111,27 +126,29 @@ struct lpfc_iocbq {
uint32_t drvrTimeout; /* driver timeout in seconds */
struct lpfc_vport *vport;/* virtual port pointer */
- void *context1; /* caller context information */
- void *context2; /* caller context information */
- void *context3; /* caller context information */
+ struct lpfc_dmabuf *cmd_dmabuf;
+ struct lpfc_dmabuf *rsp_dmabuf;
+ struct lpfc_dmabuf *bpl_dmabuf;
uint32_t event_tag; /* LA Event tag */
union {
wait_queue_head_t *wait_queue;
- struct lpfc_iocbq *rsp_iocb;
struct lpfcMboxq *mbox;
- struct lpfc_nodelist *ndlp;
struct lpfc_node_rrq *rrq;
+ struct nvmefc_ls_req *nvme_lsreq;
+ struct lpfc_async_xchg_ctx *axchg;
+ struct bsg_job_data *dd_data;
} context_un;
- union lpfc_vmid_iocb_tag vmid_tag;
- void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *);
+ struct lpfc_io_buf *io_buf;
+ struct lpfc_iocbq *rsp_iocb;
+ struct lpfc_nodelist *ndlp;
+ union lpfc_vmid_tag vmid_tag;
+ void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
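
The struct rework above retires the untyped context1/2/3 pointers in favor of named, typed members (cmd_dmabuf, rsp_dmabuf, bpl_dmabuf), and collapses the four completion callbacks into cmd_cmpl plus fabric_cmd_cmpl/wait_cmd_cmpl. The before/after shape, with local stand-in types rather than the lpfc definitions:

    struct dmabuf { void *virt; unsigned long long phys; };

    /* Before: three anonymous pointers, cast at every use site. */
    struct iocb_old { void *context1, *context2, *context3; };

    /* After: typed, self-describing members (names mirror the new struct). */
    struct iocb_new {
        struct dmabuf *cmd_dmabuf;    /* command payload */
        struct dmabuf *rsp_dmabuf;    /* response payload */
        struct dmabuf *bpl_dmabuf;    /* buffer pointer list */
    };

    int main(void)
    {
        /* The compiler now rejects mixed-up assignments that the old
         * void * members silently allowed. */
        struct dmabuf buf = { 0 };
        struct iocb_new io = { .cmd_dmabuf = &buf };

        return io.cmd_dmabuf == &buf ? 0 : 1;
    }
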
@@ -338,7 +355,6 @@ struct lpfc_sli {
#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5962cf508842..cbb1aa1cf025 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -489,7 +489,7 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
- uint16_t irq;
+ int irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
@@ -611,6 +611,8 @@ struct lpfc_vector_map_info {
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
+#define LPFC_IRQ_EMPTY 0xffffffff
+
/* Multi-XRI pool */
#define XRI_BATCH 8
@@ -981,6 +983,9 @@ struct lpfc_sli4_hba {
#define lpfc_conf_trunk_port3_nd_MASK 0x1
uint8_t flash_id;
uint8_t asic_rev;
+ uint16_t fawwpn_flag; /* FA-WWPN support state */
+#define LPFC_FAWWPN_CONFIG 0x1 /* FA-PWWN is configured */
+#define LPFC_FAWWPN_FABRIC 0x2 /* FA-PWWN success with Fabric */
};
enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5a4d3b24fbce..192d5630a44d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.0.0.3"
+#define LPFC_DRIVER_VERSION "14.2.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2021 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2022 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
new file mode 100644
index 000000000000..ed1d7f7b88a3
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -0,0 +1,286 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/dma-direction.h>
+
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Returns the VMID entry associated with the UUID.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ u32 hash, u8 *buf)
+{
+ struct lpfc_vmid *vmp;
+
+ hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+ if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+ return vmp;
+ }
+ return NULL;
+}
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp)
+{
+ hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+ int c;
+ int hash = 0;
+
+ if (len == 0)
+ return 0;
+ while (len--) {
+ c = *vmid++;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+
+ hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+ (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+ }
+
+ return hash & LPFC_VMID_HASH_MASK;
+}
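
The hash above case-folds each byte and mixes it in with a multiply-by-19; a stand-alone port (the SHIFT/MASK values are assumptions for illustration, and unsigned arithmetic is used so the intentional wraparound stays well-defined):

    #include <stdio.h>

    #define HASH_SHIFT 8       /* assumed; see LPFC_VMID_HASH_SHIFT */
    #define HASH_MASK  0xffu   /* assumed; see LPFC_VMID_HASH_MASK */

    static int vmid_hash(const char *vmid, int len)
    {
        unsigned int c, hash = 0;

        while (len--) {
            c = (unsigned char)*vmid++;
            if (c >= 'A' && c <= 'Z')
                c += 'a' - 'A';   /* case-fold so UUID case never matters */
            hash = (hash + (c << HASH_SHIFT) + (c >> HASH_SHIFT)) * 19;
        }
        return hash & HASH_MASK;
    }

    int main(void)
    {
        printf("%d\n", vmid_hash("0000-aabb", 9));
        return 0;
    }
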
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @iodir: io direction
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport,
+ enum dma_data_direction iodir,
+ struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag)
+{
+ u64 *lta;
+
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+ else if (vport->phba->cfg_vmid_app_header)
+ tag->app_id = vmp->un.app_id;
+
+ if (iodir == DMA_TO_DEVICE)
+ vmp->io_wr_cnt++;
+ else if (iodir == DMA_FROM_DEVICE)
+ vmp->io_rd_cnt++;
+
+ /* update the last access timestamp in the table */
+ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+ *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid)
+{
+ u32 hash;
+ struct lpfc_vmid *pvmid;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ } else {
+ hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+ pvmid =
+ lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+ vmid->host_vmid);
+ if (pvmid)
+ vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+ else
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @cmd: address of scsi_cmd descriptor
+ * @iodir: io direction
+ * @tag: VMID tag
+ * Returns status of the function
+ */
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc = -EPERM, i;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) &&
+ !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
+ (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
+ vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ return -EAGAIN;
+ }
+
+ /* search if the UUID has already been mapped to the VMID */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* if found, check if it's already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ read_unlock(&vport->vmid_lock);
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* a register or dereg request has already been sent */
+ /* hence the VMID tag will not be added for this I/O */
+ read_unlock(&vport->vmid_lock);
+ rc = -EBUSY;
+ } else {
+ /* The VMID was not found in the hashtable. Drop the read */
+ /* lock before proceeding further. */
+ read_unlock(&vport->vmid_lock);
+ /* Start the process to obtain a VMID of the indicated type. */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* re-check in case the entry was added by another context */
+ /* while the read lock was dropped */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return -EBUSY;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ if (i == vport->max_vmid)
+ vmp = NULL;
+ } else {
+ vmp = NULL;
+ }
+
+ if (!vmp) {
+ write_unlock(&vport->vmid_lock);
+ return -ENOMEM;
+ }
+
+ /* Add the vmid and register */
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+ vmp->vmid_len = len;
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available VMID */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per-cpu variable for holding the last */
+ /* access timestamp, if not already allocated */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = alloc_percpu_gfp(u64, GFP_ATOMIC);
+ if (!vmp->last_io_time) {
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ write_unlock(&vport->vmid_lock);
+
+ /* complete transaction with switch */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else if (vport->phba->cfg_vmid_app_header)
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ } else {
+ write_lock(&vport->vmid_lock);
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(vmp->last_io_time);
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
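
Because lpfc_vmid_get_appid() drops the read lock before taking the write lock, it re-checks the table once the write lock is held; another context may have registered the UUID in the window. The same lookup-then-insert discipline in a stand-alone pthread sketch (a toy fixed-size table stands in for the driver's hashtable):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static char table[4][16];

    static int lookup(const char *key)
    {
        for (int i = 0; i < 4; i++)
            if (!strcmp(table[i], key))
                return i;
        return -1;
    }

    static int lookup_or_insert(const char *key)
    {
        int i;

        pthread_rwlock_rdlock(&lock);      /* fast path: readers only */
        i = lookup(key);
        pthread_rwlock_unlock(&lock);
        if (i >= 0)
            return i;

        pthread_rwlock_wrlock(&lock);
        i = lookup(key);                   /* re-check: lost the race? */
        if (i < 0) {
            for (i = 0; i < 4 && table[i][0]; i++)
                ;
            if (i < 4)
                snprintf(table[i], sizeof(table[0]), "%s", key);
        }
        pthread_rwlock_unlock(&lock);
        return i < 4 ? i : -1;
    }

    int main(void)
    {
        printf("%d %d\n", lookup_or_insert("uuid-a"),
               lookup_or_insert("uuid-a"));   /* same slot both times */
        return 0;
    }
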
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index da9a1f72d938..4d171f5c213f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -135,12 +135,14 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
/*
- * Grab buffer pointer and clear context1 so we can use
- * lpfc_sli_issue_box_wait
+ * Wait for the read_sparams mailbox to complete. Driver needs
+ * this per vport to start the FDISC. If the mailbox fails,
+ * just clean up and return an error unless the failure is a
+ * mailbox timeout. For MBX_TIMEOUT, allow the default
+ * mbox completion handler to take care of the cleanup. This
+ * is safe as the mailbox command isn't one that triggers
+ * another mailbox.
*/
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- pmb->ctx_buf = NULL;
-
pmb->vport = vport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
@@ -148,34 +150,29 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1830 Signal aborted mbxCmd x%x\n",
mb->mbxCommand);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
if (rc != MBX_TIMEOUT)
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb,
+ MBOX_THD_UNLOCKED);
return -EINTR;
} else {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1818 VPort failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x, rc = x%x\n",
mb->mbxCommand, mb->mbxStatus, rc);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
if (rc != MBX_TIMEOUT)
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb,
+ MBOX_THD_UNLOCKED);
return -EIO;
}
}
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
-
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return 0;
}
@@ -486,22 +483,67 @@ error_out:
}
static int
+lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ int rc;
+ struct lpfc_hba *phba = vport->phba;
+
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ !ndlp->logo_waitq) {
+ ndlp->logo_waitq = &waitq;
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ }
+ spin_unlock_irq(&ndlp->lock);
+ rc = lpfc_issue_els_npiv_logo(vport, ndlp);
+ if (!rc) {
+ wait_event_timeout(waitq,
+ (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(phba->fc_ratov * 2000));
+
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ goto logo_cmpl;
+ /* LOGO wait failed. Correct status. */
+ rc = -EINTR;
+ } else {
+ rc = -EIO;
+ }
+
+ /* Error - clean up node flags. */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+
+ logo_cmpl:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+ "1824 Issue LOGO completes with status %d\n",
+ rc);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->logo_waitq = NULL;
+ spin_unlock_irq(&ndlp->lock);
+ return rc;
+}
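
lpfc_send_npiv_logo() parks the caller on an on-stack waitqueue instead of the old schedule_timeout() polling, which closes the missed-wakeup window and yields a real completion status. The completion side is expected to clear the flag under the node lock and then wake the waiter; roughly (a kernel-idiom sketch, assuming the LOGO completion handler holds the ndlp):

    /* Sketch of the completion side: clear the wait flag under the node
     * lock, then wake whoever is parked on logo_waitq. */
    spin_lock_irq(&ndlp->lock);
    ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
    spin_unlock_irq(&ndlp->lock);
    if (ndlp->logo_waitq)
        wake_up(ndlp->logo_waitq);
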
+
+static int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
- long timeout;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* Can't disable during an outstanding delete. */
+ if (vport->load_flag & FC_UNLOADING)
+ return 0;
+
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && phba->link_state >= LPFC_LINK_UP) {
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
- }
+ if (ndlp && phba->link_state >= LPFC_LINK_UP)
+ (void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
@@ -600,7 +642,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- long timeout;
+ int rc;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -665,15 +707,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
/* Send DA_ID and wait for a completion. */
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
- while (vport->ct_flags && timeout)
- timeout = schedule_timeout(timeout);
- else
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
+ if (rc) {
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
- "delete objects on fabric\n");
+ "delete objects on fabric, "
+ "rc %d\n", rc);
+ }
}
/*
@@ -688,11 +729,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
goto skip_logo;
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
+
+ rc = lpfc_send_npiv_logo(vport, ndlp);
+ if (rc)
+ goto skip_logo;
}
if (!(phba->pport->load_flag & FC_UNLOADING))
@@ -769,74 +809,3 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
kfree(vports);
}
-
-/**
- * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
- * @vport: Pointer to vport object.
- *
- * This function resets the statistical data for the vport. This function
- * is called with the host_lock held
- **/
-void
-lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->lat_data)
- memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
- sizeof(struct lpfc_scsicmd_bkt));
- }
-}
-
-
-/**
- * lpfc_alloc_bucket - Allocate data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * This function allocates data buffer required for all the FC
- * nodes of the vport to collect statistical data.
- **/
-void
-lpfc_alloc_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
-
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_ATOMIC);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0287 lpfc_alloc_bucket failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
- }
-}
-
-/**
- * lpfc_free_bucket - Free data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * Th function frees statistical data buffer of all the FC
- * nodes of the vport.
- **/
-void
-lpfc_free_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
- }
-}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index f4b8528dd2e7..fa60c146c169 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -115,8 +115,4 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
-void lpfc_vport_reset_stat_data(struct lpfc_vport *);
-void lpfc_alloc_bucket(struct lpfc_vport *);
-void lpfc_free_bucket(struct lpfc_vport *);
-
#endif /* H_LPFC_VPORT */