Diffstat (limited to 'drivers/scsi/bfa/bfa_svc.c')
-rw-r--r--   drivers/scsi/bfa/bfa_svc.c   827
 1 file changed, 785 insertions(+), 42 deletions(-)
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 2e856e6710f7..299c1c889b33 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -67,6 +67,9 @@ enum bfa_fcport_sm_event {
BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
+ BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
+ BFA_FCPORT_SM_DPORTDISABLE = 11, /* disable dport */
+ BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguration */
};
/*
@@ -197,6 +200,10 @@ static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
@@ -226,6 +233,8 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
+ {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
+ {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
@@ -440,9 +449,11 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
- INIT_LIST_HEAD(&mod->fcxp_free_q);
+ INIT_LIST_HEAD(&mod->fcxp_req_free_q);
+ INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
INIT_LIST_HEAD(&mod->fcxp_active_q);
- INIT_LIST_HEAD(&mod->fcxp_unused_q);
+ INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
+ INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
mod->fcxp_list = fcxp;
@@ -450,7 +461,14 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
fcxp->fcxp_mod = mod;
fcxp->fcxp_tag = i;
- list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+ if (i < (mod->num_fcxps / 2)) {
+ list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+ fcxp->req_rsp = BFA_TRUE;
+ } else {
+ list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+ fcxp->req_rsp = BFA_FALSE;
+ }
+
bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
fcxp->reqq_waiting = BFA_FALSE;
@@ -514,7 +532,8 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
if (!cfg->drvcfg.min_cfg)
mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
- INIT_LIST_HEAD(&mod->wait_q);
+ INIT_LIST_HEAD(&mod->req_wait_q);
+ INIT_LIST_HEAD(&mod->rsp_wait_q);
claim_fcxps_mem(mod);
}
@@ -542,7 +561,8 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
struct list_head *qe, *qen;
/* Enqueue unused fcxp resources to free_q */
- list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
+ list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
+ list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
fcxp = (struct bfa_fcxp_s *) qe;
@@ -559,11 +579,14 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
}
static struct bfa_fcxp_s *
-bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
{
struct bfa_fcxp_s *fcxp;
- bfa_q_deq(&fm->fcxp_free_q, &fcxp);
+ if (req)
+ bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
+ else
+ bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
if (fcxp)
list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
@@ -642,7 +665,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
struct bfa_fcxp_wqe_s *wqe;
- bfa_q_deq(&mod->wait_q, &wqe);
+ if (fcxp->req_rsp)
+ bfa_q_deq(&mod->req_wait_q, &wqe);
+ else
+ bfa_q_deq(&mod->rsp_wait_q, &wqe);
+
if (wqe) {
bfa_trc(mod->bfa, fcxp->fcxp_tag);
@@ -657,7 +684,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
list_del(&fcxp->qe);
- list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+
+ if (fcxp->req_rsp)
+ list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+ else
+ list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
static void
@@ -900,21 +931,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
* Address (given the sge index).
* @param[in] get_rsp_sglen function ptr to be called to get a response SG
* len (given the sge index).
+ * @param[in] req Whether the allocated FCXP is used to send a request
+ * or a response: request - BFA_TRUE, response - BFA_FALSE
*
* @return FCXP instance. NULL on failure.
*/
struct bfa_fcxp_s *
-bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
- int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
- bfa_fcxp_get_sglen_t req_sglen_cbfn,
- bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
- bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+ int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+ bfa_fcxp_get_sglen_t req_sglen_cbfn,
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
struct bfa_fcxp_s *fcxp = NULL;
WARN_ON(bfa == NULL);
- fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
+ fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
if (fcxp == NULL)
return NULL;
@@ -1071,17 +1104,20 @@ bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
}
void
-bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
void *caller, int nreq_sgles,
int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
bfa_fcxp_get_sglen_t req_sglen_cbfn,
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
- bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
- WARN_ON(!list_empty(&mod->fcxp_free_q));
+ if (req)
+ WARN_ON(!list_empty(&mod->fcxp_req_free_q));
+ else
+ WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
wqe->alloc_cbfn = alloc_cbfn;
wqe->alloc_cbarg = alloc_cbarg;
@@ -1094,7 +1130,10 @@ bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
wqe->rsp_sga_cbfn = rsp_sga_cbfn;
wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
- list_add_tail(&wqe->qe, &mod->wait_q);
+ if (req)
+ list_add_tail(&wqe->qe, &mod->req_wait_q);
+ else
+ list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}
void
@@ -1102,7 +1141,8 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
- WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
+ WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
+ !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
list_del(&wqe->qe);
}
@@ -1153,8 +1193,13 @@ bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
int i;
for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
- bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
- list_add_tail(qe, &mod->fcxp_unused_q);
+ if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
+ bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
+ list_add_tail(qe, &mod->fcxp_req_unused_q);
+ } else {
+ bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
+ list_add_tail(qe, &mod->fcxp_rsp_unused_q);
+ }
}
}
@@ -1208,6 +1253,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
* Just ignore
*/
break;
+ case BFA_LPS_SM_SET_N2N_PID:
+ /*
+ * When topology is set to loop, bfa_lps_set_n2n_pid() sends
+ * this event. Ignore this event.
+ */
+ break;
default:
bfa_sm_fault(lps->bfa, event);
@@ -1225,6 +1276,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
switch (event) {
case BFA_LPS_SM_FWRSP:
+ case BFA_LPS_SM_OFFLINE:
if (lps->status == BFA_STATUS_OK) {
bfa_sm_set_state(lps, bfa_lps_sm_online);
if (lps->fdisc)
@@ -1253,7 +1305,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
bfa_lps_login_comp(lps);
break;
- case BFA_LPS_SM_OFFLINE:
case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
@@ -1404,11 +1455,11 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
switch (event) {
case BFA_LPS_SM_FWRSP:
+ case BFA_LPS_SM_OFFLINE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_lps_logout_comp(lps);
break;
- case BFA_LPS_SM_OFFLINE:
case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
@@ -1786,6 +1837,8 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
if (lps->fdisc)
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+ else
+ bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
/*
@@ -2131,6 +2184,12 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
default:
bfa_sm_fault(fcport->bfa, event);
}
@@ -2187,6 +2246,12 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
default:
bfa_sm_fault(fcport->bfa, event);
}
@@ -2212,11 +2277,11 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
bfa_trc(fcport->bfa,
- pevent->link_state.vc_fcf.fcf.fipenabled);
+ pevent->link_state.attr.vc_fcf.fcf.fipenabled);
bfa_trc(fcport->bfa,
- pevent->link_state.vc_fcf.fcf.fipfailed);
+ pevent->link_state.attr.vc_fcf.fcf.fipfailed);
- if (pevent->link_state.vc_fcf.fcf.fipfailed)
+ if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_FIP_FCF_DISC, 0,
"FIP FCF Discovery Failed");
@@ -2273,6 +2338,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
default:
bfa_sm_fault(fcport->bfa, event);
}
@@ -2366,6 +2437,12 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
}
break;
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
default:
bfa_sm_fault(fcport->bfa, event);
}
@@ -2411,6 +2488,12 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
bfa_reqq_wcancel(&fcport->reqq_wait);
break;
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
default:
bfa_sm_fault(fcport->bfa, event);
}
@@ -2562,6 +2645,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
break;
+ case BFA_FCPORT_SM_DPORTENABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
+ break;
+
default:
bfa_sm_fault(fcport->bfa, event);
}
@@ -2642,6 +2729,81 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
}
}
+static void
+bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_DPORTENABLE:
+ case BFA_FCPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_START:
+ /*
+ * Ignore event for a port that is dport
+ */
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ break;
+
+ case BFA_FCPORT_SM_DPORTDISABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_DPORTENABLE:
+ case BFA_FCPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_START:
+ /*
+ * Ignore event for a port as there is FAA misconfig
+ */
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ else
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
+
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
/*
* Link state is down
*/
@@ -2905,6 +3067,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
*/
do_gettimeofday(&tv);
fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_dma_ready = BFA_FALSE;
/*
* initialize and set default configuration
@@ -2915,6 +3078,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
port_cfg->maxfrsize = 0;
port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
+ port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
+ port_cfg->qos_bw.med = BFA_QOS_BW_MED;
+ port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
INIT_LIST_HEAD(&fcport->stats_pending_q);
INIT_LIST_HEAD(&fcport->statsclr_pending_q);
@@ -2958,6 +3124,21 @@ bfa_fcport_iocdisable(struct bfa_s *bfa)
bfa_trunk_iocdisable(bfa);
}
+/*
+ * Update loop info in fcport for SCN online
+ */
+static void
+bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
+ struct bfa_fcport_loop_info_s *loop_info)
+{
+ fcport->myalpa = loop_info->myalpa;
+ fcport->alpabm_valid =
+ loop_info->alpabm_val;
+ memcpy(fcport->alpabm.alpa_bm,
+ loop_info->alpabm.alpa_bm,
+ sizeof(struct fc_alpabm_s));
+}
+
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
@@ -2967,12 +3148,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
fcport->speed = pevent->link_state.speed;
fcport->topology = pevent->link_state.topology;
- if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
- fcport->myalpa = 0;
+ if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
+ bfa_fcport_update_loop_info(fcport,
+ &pevent->link_state.attr.loop_info);
+ return;
+ }
/* QoS Details */
fcport->qos_attr = pevent->link_state.qos_attr;
- fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
+ fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
/*
* update trunk state if applicable
@@ -2981,7 +3165,8 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
trunk->attr.state = BFA_TRUNK_DISABLED;
/* update FCoE specific */
- fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
+ fcport->fcoe_vlan =
+ be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
bfa_trc(fcport->bfa, fcport->speed);
bfa_trc(fcport->bfa, fcport->topology);
@@ -3415,6 +3600,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
case BFI_FCPORT_I2H_ENABLE_RSP:
if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
+ fcport->stats_dma_ready = BFA_TRUE;
if (fcport->use_flash_cfg) {
fcport->cfg = i2hmsg.penable_rsp->port_cfg;
fcport->cfg.maxfrsize =
@@ -3430,6 +3616,8 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
else
fcport->trunk.attr.state =
BFA_TRUNK_DISABLED;
+ fcport->qos_attr.qos_bw =
+ i2hmsg.penable_rsp->port_cfg.qos_bw;
fcport->use_flash_cfg = BFA_FALSE;
}
@@ -3438,6 +3626,9 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
else
fcport->qos_attr.state = BFA_QOS_DISABLED;
+ fcport->qos_attr.qos_bw_op =
+ i2hmsg.penable_rsp->port_cfg.qos_bw;
+
bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
}
break;
@@ -3450,8 +3641,17 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
case BFI_FCPORT_I2H_EVENT:
if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
- else
- bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
+ else {
+ if (i2hmsg.event->link_state.linkstate_rsn ==
+ BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
+ bfa_sm_send_event(fcport,
+ BFA_FCPORT_SM_FAA_MISCONFIG);
+ else
+ bfa_sm_send_event(fcport,
+ BFA_FCPORT_SM_LINKDOWN);
+ }
+ fcport->qos_attr.qos_bw_op =
+ i2hmsg.event->link_state.qos_attr.qos_bw_op;
break;
case BFI_FCPORT_I2H_TRUNK_SCN:
@@ -3571,6 +3771,9 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
if (fcport->cfg.trunked == BFA_TRUE)
return BFA_STATUS_TRUNK_ENABLED;
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (speed == BFA_PORT_SPEED_16GBPS))
+ return BFA_STATUS_UNSUPP_SPEED;
if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
bfa_trc(bfa, fcport->speed_sup);
return BFA_STATUS_UNSUPP_SPEED;
@@ -3625,7 +3828,26 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
switch (topology) {
case BFA_PORT_TOPOLOGY_P2P:
+ break;
+
case BFA_PORT_TOPOLOGY_LOOP:
+ if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
+ (fcport->qos_attr.state != BFA_QOS_DISABLED))
+ return BFA_STATUS_ERROR_QOS_ENABLED;
+ if (fcport->cfg.ratelimit != BFA_FALSE)
+ return BFA_STATUS_ERROR_TRL_ENABLED;
+ if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
+ (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
+ return BFA_STATUS_ERROR_TRUNK_ENABLED;
+ if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
+ (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
+ return BFA_STATUS_UNSUPP_SPEED;
+ if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
+ return BFA_STATUS_LOOP_UNSUPP_MEZZ;
+ if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
+ return BFA_STATUS_DPORT_ERR;
+ break;
+
case BFA_PORT_TOPOLOGY_AUTO:
break;
@@ -3648,6 +3870,17 @@ bfa_fcport_get_topology(struct bfa_s *bfa)
return fcport->topology;
}
+/**
+ * Get config topology.
+ */
+enum bfa_port_topology
+bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->cfg.topology;
+}
+
bfa_status_t
bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
@@ -3723,9 +3956,11 @@ bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
u8
bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
{
- struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
+ return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
- return fcport->cfg.rx_bbcredit;
+ else
+ return 0;
}
void
@@ -3812,8 +4047,9 @@ bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (bfa_ioc_is_disabled(&bfa->ioc))
- return BFA_STATUS_IOC_DISABLED;
+ if (!bfa_iocfc_is_operational(bfa) ||
+ !fcport->stats_dma_ready)
+ return BFA_STATUS_IOC_NON_OP;
if (!list_empty(&fcport->statsclr_pending_q))
return BFA_STATUS_DEVBUSY;
@@ -3838,6 +4074,10 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ if (!bfa_iocfc_is_operational(bfa) ||
+ !fcport->stats_dma_ready)
+ return BFA_STATUS_IOC_NON_OP;
+
if (!list_empty(&fcport->stats_pending_q))
return BFA_STATUS_DEVBUSY;
@@ -3867,6 +4107,40 @@ bfa_fcport_is_disabled(struct bfa_s *bfa)
}
bfa_boolean_t
+bfa_fcport_is_dport(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ BFA_PORT_ST_DPORT);
+}
+
+bfa_status_t
+bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
+
+ bfa_trc(bfa, ioc_type);
+
+ if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
+ return BFA_STATUS_QOS_BW_INVALID;
+
+ if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
+ return BFA_STATUS_QOS_BW_INVALID;
+
+ if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
+ (qos_bw->low > qos_bw->high))
+ return BFA_STATUS_QOS_BW_INVALID;
+
+ if ((ioc_type == BFA_IOC_TYPE_FC) &&
+ (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
+ fcport->cfg.qos_bw = *qos_bw;
+
+ return BFA_STATUS_OK;
+}
+
+bfa_boolean_t
bfa_fcport_is_ratelim(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
@@ -3943,6 +4217,26 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
return fcport->cfg.trunked;
}
+void
+bfa_fcport_dportenable(struct bfa_s *bfa)
+{
+ /*
+ * Assume the caller has checked that the port is in disabled state
+ */
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
+ bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
+}
+
+void
+bfa_fcport_dportdisable(struct bfa_s *bfa)
+{
+ /*
+ * Assume the caller has checked that the port is in disabled state
+ */
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
+ bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
+}
+
/*
* Rport State machine functions
*/
@@ -4237,6 +4531,10 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
+ case BFA_RPORT_SM_OFFLINE:
+ bfa_rport_offline_cb(rp);
+ break;
+
default:
bfa_stats(rp, sm_off_unexp);
bfa_sm_fault(rp->bfa, event);
@@ -4353,6 +4651,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_offp_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ bfa_rport_offline_cb(rp);
break;
default:
@@ -4664,6 +4963,21 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
break;
+ case BFI_RPORT_I2H_LIP_SCN_ONLINE:
+ bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
+ &msg.lip_scn->loop_info);
+ bfa_cb_rport_scn_online(bfa);
+ break;
+
+ case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
+ bfa_cb_rport_scn_offline(bfa);
+ break;
+
+ case BFI_RPORT_I2H_NO_DEV:
+ rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
+ bfa_cb_rport_scn_no_dev(rp->rport_drv);
+ break;
+
default:
bfa_trc(bfa, m->mhdr.msg_id);
WARN_ON(1);
@@ -4731,8 +5045,10 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
WARN_ON(speed == 0);
WARN_ON(speed == BFA_PORT_SPEED_AUTO);
- rport->rport_info.speed = speed;
- bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+ if (rport) {
+ rport->rport_info.speed = speed;
+ bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+ }
}
/* Set Rport LUN Mask */
@@ -5303,6 +5619,37 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
}
/*
+ * Dport forward declaration
+ */
+
+/*
+ * BFA DPORT state machine events
+ */
+enum bfa_dport_sm_event {
+ BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
+ BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
+ BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
+ BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
+ BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
+};
+
+static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_qresume(void *cbarg);
+static void bfa_dport_req_comp(struct bfa_dport_s *dport,
+ bfi_diag_dport_rsp_t *msg);
+
+/*
* BFA fcdiag module
*/
#define BFA_DIAG_QTEST_TOV 1000 /* msec */
@@ -5332,15 +5679,24 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
fcdiag->bfa = bfa;
fcdiag->trcmod = bfa->trcmod;
/* The common DIAG attach bfa_diag_attach() will do all memory claim */
+ dport->bfa = bfa;
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
+ dport->cbfn = NULL;
+ dport->cbarg = NULL;
}
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
bfa_trc(fcdiag, fcdiag->lb.lock);
if (fcdiag->lb.lock) {
fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
@@ -5348,6 +5704,8 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa)
fcdiag->lb.lock = 0;
bfa_fcdiag_set_busy_status(fcdiag);
}
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
static void
@@ -5532,6 +5890,9 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
case BFI_DIAG_I2H_QTEST:
bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
break;
+ case BFI_DIAG_I2H_DPORT:
+ bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
+ break;
default:
bfa_trc(fcdiag, msg->mhdr.msg_id);
WARN_ON(1);
@@ -5601,12 +5962,18 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
}
}
+ /*
+ * For CT2, 1G is not supported
+ */
+ if ((speed == BFA_PORT_SPEED_1GBPS) &&
+ (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+
/* For Mezz card, port speed entered needs to be checked */
if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
- if ((speed == BFA_PORT_SPEED_1GBPS) &&
- (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
- return BFA_STATUS_UNSUPP_SPEED;
if (!(speed == BFA_PORT_SPEED_1GBPS ||
speed == BFA_PORT_SPEED_2GBPS ||
speed == BFA_PORT_SPEED_4GBPS ||
@@ -5719,3 +6086,379 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
}
+
+/*
+ * D-port
+ */
+static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
+ enum bfi_dport_req req);
+static void
+bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
+{
+ if (dport->cbfn != NULL) {
+ dport->cbfn(dport->cbarg, bfa_status);
+ dport->cbfn = NULL;
+ dport->cbarg = NULL;
+ }
+}
+
+static void
+bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_ENABLE:
+ bfa_fcport_dportenable(dport->bfa);
+ if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
+ bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+ else
+ bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
+ break;
+
+ case BFA_DPORT_SM_DISABLE:
+ /* Already disabled */
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ /* ignore */
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_QRESUME:
+ bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+ bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_reqq_wcancel(&dport->reqq_wait);
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_FWRSP:
+ bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_ENABLE:
+ /* Already enabled */
+ break;
+
+ case BFA_DPORT_SM_DISABLE:
+ bfa_fcport_dportdisable(dport->bfa);
+ if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
+ bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+ else
+ bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_QRESUME:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+ bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_reqq_wcancel(&dport->reqq_wait);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_FWRSP:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+
+static bfa_boolean_t
+bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
+{
+ struct bfi_diag_dport_req_s *m;
+
+ /*
+ * Increment message tag before queue check, so that responses to old
+ * requests are discarded.
+ */
+ dport->msgtag++;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
+ if (!m) {
+ bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
+ bfa_fn_lpu(dport->bfa));
+ m->req = req;
+ m->msgtag = dport->msgtag;
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
+
+ return BFA_TRUE;
+}
+
+static void
+bfa_dport_qresume(void *cbarg)
+{
+ struct bfa_dport_s *dport = cbarg;
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
+}
+
+static void
+bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
+{
+ bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
+ bfa_cb_fcdiag_dport(dport, msg->status);
+}
+
+/*
+ * Dport enable
+ *
+ * @param[in] *bfa - bfa data struct
+ */
+bfa_status_t
+bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ /*
+ * Dport is not supported on MEZZ cards
+ */
+ if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
+ bfa_trc(dport->bfa, BFA_STATUS_PBC);
+ return BFA_STATUS_CMD_NOTSUPP_MEZZ;
+ }
+
+ /*
+ * Check to see if IOC is down
+ */
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* if port is PBC disabled, return error */
+ if (bfa_fcport_is_pbcdisabled(bfa)) {
+ bfa_trc(dport->bfa, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ /*
+ * Check if port mode is FC port
+ */
+ if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
+ bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
+ return BFA_STATUS_CMD_NOTSUPP_CNA;
+ }
+
+ /*
+ * Check if port is in LOOP mode
+ */
+ if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
+ (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_TOPOLOGY_LOOP;
+ }
+
+ /*
+ * Check if port is in TRUNK mode
+ */
+ if (bfa_fcport_is_trunk_enabled(bfa)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_ERROR_TRUNK_ENABLED;
+ }
+
+ /*
+ * Check to see if port is disabled or in dport state
+ */
+ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+ (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_PORT_NOT_DISABLED;
+ }
+
+ /*
+ * Check if dport is busy
+ */
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ /*
+ * Check if dport is already enabled
+ */
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_DPORT_ENABLED;
+ }
+
+ dport->cbfn = cbfn;
+ dport->cbarg = cbarg;
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Dport disable
+ *
+ * @param[in] *bfa - bfa data struct
+ */
+bfa_status_t
+bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ if (bfa_ioc_is_disabled(&bfa->ioc))
+ return BFA_STATUS_IOC_DISABLED;
+
+ /* if port is PBC disabled, return error */
+ if (bfa_fcport_is_pbcdisabled(bfa)) {
+ bfa_trc(dport->bfa, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ /*
+ * Check to see if port is disabled or in dport state
+ */
+ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+ (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_PORT_NOT_DISABLED;
+ }
+
+ /*
+ * Check if dport is busy
+ */
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
+ return BFA_STATUS_DEVBUSY;
+
+ /*
+ * Check if dport is already disabled
+ */
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_DPORT_DISABLED;
+ }
+
+ dport->cbfn = cbfn;
+ dport->cbarg = cbarg;
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Get D-port state
+ *
+ * @param[in] *bfa - bfa data struct
+ */
+
+bfa_status_t
+bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
+ *state = BFA_DPORT_ST_ENABLED;
+ else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
+ *state = BFA_DPORT_ST_ENABLING;
+ else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
+ *state = BFA_DPORT_ST_DISABLED;
+ else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
+ *state = BFA_DPORT_ST_DISABLING;
+ else {
+ bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
+ return BFA_STATUS_EINVAL;
+ }
+ return BFA_STATUS_OK;
+}
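
Reviewer note (not part of the patch): a minimal caller sketch for the new D-port API introduced above. Only bfa_dport_enable(), bfa_dport_disable() and bfa_dport_get_state() come from this change; the completion plumbing, the dport_example_* names and the calling context are assumptions for illustration only.

/*
 * Hypothetical usage sketch -- drives bfa_dport_enable() and waits for the
 * firmware response delivered through the bfa_cb_diag_t callback wired up
 * in this patch. Assumes process context and <linux/completion.h>.
 */
struct dport_example_comp {
	struct completion comp;		/* assumed helper, not from this patch */
	bfa_status_t status;
};

static void
dport_example_cbfn(void *cbarg, bfa_status_t status)
{
	struct dport_example_comp *dc = cbarg;

	/* invoked from bfa_cb_fcdiag_dport() once BFA_DPORT_SM_FWRSP arrives */
	dc->status = status;
	complete(&dc->comp);
}

static bfa_status_t
dport_example_enable(struct bfa_s *bfa)
{
	struct dport_example_comp dc;
	enum bfa_dport_state state;
	bfa_status_t rc;

	init_completion(&dc.comp);

	/* bfa_dport_enable() rejects mezz cards, CNA mode, loop topology,
	 * trunking and ports that are not disabled -- see the checks above.
	 */
	rc = bfa_dport_enable(bfa, dport_example_cbfn, &dc);
	if (rc != BFA_STATUS_OK)
		return rc;

	wait_for_completion(&dc.comp);

	if (bfa_dport_get_state(bfa, &state) == BFA_STATUS_OK &&
	    state == BFA_DPORT_ST_ENABLED)
		return dc.status;

	return BFA_STATUS_FAILED;
}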