Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_nvmet.c | 1251
1 file changed, 712 insertions(+), 539 deletions(-)
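Most of the churn below is mechanical: struct lpfc_nvmet_rcv_ctx becomes the role-agnostic struct lpfc_async_xchg_ctx, its ctx.ls_req/ctx.fcp_req members become ls_rsp and hdlrctx.fcp_req, and the LPFC_NVMET_* state/flag constants become LPFC_NVME_* so host and target code can share them. A minimal sketch of the shape the rename is driving at (all names here are illustrative stand-ins, not the lpfc definitions):

/* Sketch: one role-agnostic exchange context. Generic fields are
 * shared by the LS and FCP paths; role-specific handler state lives
 * in the hdlrctx union so one allocation serves either role. */
struct demo_ls_rsp {
	void (*done)(struct demo_ls_rsp *rsp);
};

struct demo_fcp_req {
	int op;
};

struct demo_async_xchg_ctx {
	unsigned short oxid;		/* originator exchange id */
	unsigned int sid;		/* source N_Port id */
	int state;			/* LPFC_NVME_STE_*-style value */
	unsigned int flag;		/* LPFC_NVME_*-style bits */
	struct demo_ls_rsp ls_rsp;	/* generic LS response hook */
	union {
		struct demo_fcp_req fcp_req;	/* nvmet FCP handler state */
	} hdlrctx;
};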
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 9dc9afe1c255..f7cfac0da9b6 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * - * Fibre Channsel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -36,10 +36,6 @@ #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> -#include <linux/nvme.h> -#include <linux/nvme-fc-driver.h> -#include <linux/nvme-fc.h> - #include "lpfc_version.h" #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -50,29 +46,25 @@ #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" -#include "lpfc_nvmet.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, - struct lpfc_nvmet_rcv_ctx *, + struct lpfc_async_xchg_ctx *, dma_addr_t rspbuf, uint16_t rspsize); static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *, - struct lpfc_nvmet_rcv_ctx *); + struct lpfc_async_xchg_ctx *); static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *, - struct lpfc_nvmet_rcv_ctx *, + struct lpfc_async_xchg_ctx *, uint32_t, uint16_t); static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *, - struct lpfc_nvmet_rcv_ctx *, + struct lpfc_async_xchg_ctx *, uint32_t, uint16_t); -static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *, - struct lpfc_nvmet_rcv_ctx *, - uint32_t, uint16_t); static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_nvmet_rcv_ctx *); + struct lpfc_async_xchg_ctx *); static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *); static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf); @@ -113,7 +105,7 @@ lpfc_nvmet_cmd_template(void) /* Word 9 - reqtag, rcvoxid is variable */ /* Word 10 - wqes, xc is variable */ - bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); @@ -161,7 +153,7 @@ lpfc_nvmet_cmd_template(void) /* Word 10 - xc is variable */ bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); - bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); + bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12); bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); @@ -203,7 +195,7 @@ lpfc_nvmet_cmd_template(void) /* Word 10 wqes, xc is variable */ bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1); - bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE); @@ -221,10 +213,10 @@ lpfc_nvmet_cmd_template(void) } #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) -static struct lpfc_nvmet_rcv_ctx * +static struct 
lpfc_async_xchg_ctx * lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri) { - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *ctxp; unsigned long iflag; bool found = false; @@ -243,10 +235,10 @@ lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri) return NULL; } -static struct lpfc_nvmet_rcv_ctx * +static struct lpfc_async_xchg_ctx * lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid) { - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *ctxp; unsigned long iflag; bool found = false; @@ -267,7 +259,8 @@ lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid) #endif static void -lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) +lpfc_nvmet_defer_release(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp) { lockdep_assert_held(&ctxp->ctxlock); @@ -275,10 +268,10 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) "6313 NVMET Defer ctx release oxid x%x flg x%x\n", ctxp->oxid, ctxp->flag); - if (ctxp->flag & LPFC_NVMET_CTX_RLS) + if (ctxp->flag & LPFC_NVME_CTX_RLS) return; - ctxp->flag |= LPFC_NVMET_CTX_RLS; + ctxp->flag |= LPFC_NVME_CTX_RLS; spin_lock(&phba->sli4_hba.t_active_list_lock); list_del(&ctxp->list); spin_unlock(&phba->sli4_hba.t_active_list_lock); @@ -288,40 +281,79 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) } /** - * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response + * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the + * transmission of an NVME LS response. * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no - * lock held. This function is the completion handler for NVME LS commands - * The function frees memory resources used for the NVME commands. + * lock held. The function frees memory resources used for the command + * used to send the NVME LS RSP. 
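The kernel-doc just above captures the other recurring move in this patch: what used to be a single target-only completion handler is split into a thin lpfc_nvmet_* wrapper that only updates target statistics, plus a shared __lpfc_nvme_* routine that does the done() callback and the teardown for both host and target LS paths. A compilable sketch of that split, with illustrative names:

#include <stdlib.h>

struct demo_ls_rsp {
	void (*done)(struct demo_ls_rsp *rsp);
};

struct demo_xchg {
	struct demo_ls_rsp ls_rsp;
};

static int demo_xmt_ls_rsp_error, demo_xmt_ls_rsp_cmpl;

/* Shared path: notify the transport, then free, in exactly one place. */
static void demo_generic_ls_rsp_cmp(struct demo_xchg *axchg)
{
	axchg->ls_rsp.done(&axchg->ls_rsp);
	free(axchg);
}

/* Target wrapper: role-specific bookkeeping only, then delegate. */
static void demo_target_ls_rsp_cmp(struct demo_xchg *axchg, int status)
{
	if (status)
		demo_xmt_ls_rsp_error++;
	else
		demo_xmt_ls_rsp_cmpl++;
	demo_generic_ls_rsp_cmp(axchg);
}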
**/ -static void -lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) +void +__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) { - struct lpfc_nvmet_tgtport *tgtp; - struct nvmefc_tgt_ls_req *rsp; - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp; uint32_t status, result; - status = bf_get(lpfc_wcqe_c_status, wcqe); + status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; result = wcqe->parameter; - ctxp = cmdwqe->context2; - if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6410 NVMET LS cmpl state mismatch IO x%x: " + if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6410 NVMEx LS cmpl state mismatch IO x%x: " "%d %d\n", - ctxp->oxid, ctxp->state, ctxp->entry_cnt); + axchg->oxid, axchg->state, axchg->entry_cnt); } + lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n", + axchg->oxid, status, result); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n", + status, result, axchg->oxid); + + lpfc_nlp_put(cmdwqe->ndlp); + cmdwqe->context_un.axchg = NULL; + cmdwqe->bpl_dmabuf = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + ls_rsp->done(ls_rsp); + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n", + status, axchg->oxid); + kfree(axchg); +} + +/** + * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME LS commands + * The function updates any states and statistics, then calls the + * generic completion handler to free resources. 
+ **/ +static void +lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + if (!phba->targetport) - goto out; + goto finish; - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + result = wcqe->parameter; + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (tgtp) { if (status) { atomic_inc(&tgtp->xmt_ls_rsp_error); @@ -334,29 +366,14 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } } -out: - rsp = &ctxp->ctx.ls_req; - - lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n", - ctxp->oxid, status, result); - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, - "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n", - status, result, ctxp->oxid); - - lpfc_nlp_put(cmdwqe->context1); - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; - lpfc_sli_release_iocbq(phba, cmdwqe); - rsp->done(rsp); - kfree(ctxp); +finish: + __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe); } /** * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context * @phba: HBA buffer is associated with - * @ctxp: context to clean up - * @mp: Buffer to free + * @ctx_buf: ctx buffer context * * Description: Frees the given DMA buffer in the appropriate way given by * reposting it to its associated RQ so it can be reused. @@ -369,7 +386,7 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; + struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct rqb_dmabuf *nvmebuf; @@ -378,8 +395,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) int cpu; unsigned long iflag; - if (ctxp->state == LPFC_NVMET_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + if (ctxp->state == LPFC_NVME_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6411 NVMET free, already free IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } @@ -390,8 +407,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) /* check if freed in another path whilst acquiring lock */ if (nvmebuf) { ctxp->rqb_buffer = NULL; - if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) { - ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ; + if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { + ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); @@ -404,7 +421,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } } - ctxp->state = LPFC_NVMET_STE_FREE; + ctxp->state = LPFC_NVME_STE_FREE; spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); if (phba->sli4_hba.nvmet_io_wait_cnt) { @@ -421,14 +438,14 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) size = nvmebuf->bytes_recv; sid = sli4_sid_from_fc_hdr(fc_hdr); - ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; + ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; ctxp->wqeq = NULL; ctxp->offset = 0; ctxp->phba = phba; ctxp->size = size; ctxp->oxid = oxid; ctxp->sid = sid; - ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->state = LPFC_NVME_STE_RCV; 
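The "6411 ... already free" message above is typical of how this code polices its exchange lifecycle: ctxp->state transitions (LPFC_NVME_STE_FREE, _RCV, _DATA, _ABORT, _DONE) are sanity-checked and logged rather than enforced, with entry_cnt counting the steps taken. A small model of that style of check, using stand-in names:

#include <stdio.h>

enum demo_ste { STE_FREE, STE_LS_RCV, STE_LS_RSP, STE_RCV,
		STE_DATA, STE_ABORT, STE_DONE };

struct demo_ctx {
	unsigned short oxid;
	enum demo_ste state;
	int entry_cnt;
};

/* Log suspicious transitions but proceed anyway, as the driver does. */
static void demo_set_state(struct demo_ctx *c, enum demo_ste next)
{
	if (next == STE_FREE && c->state == STE_FREE)
		fprintf(stderr, "ctx x%x: already free (%d %d)\n",
			c->oxid, c->state, c->entry_cnt);
	c->state = next;
	c->entry_cnt++;
}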
ctxp->entry_cnt = 1; ctxp->flag = 0; ctxp->ctxbuf = ctx_buf; @@ -453,12 +470,12 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) /* Indicate that a replacement buffer has been posted */ spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ; + ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6181 Unable to queue deferred work " "for oxid x%x. " "FCP Drop IO [x%x x%x x%x]\n", @@ -495,7 +512,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS static void lpfc_nvmet_ktime(struct lpfc_hba *phba, - struct lpfc_nvmet_rcv_ctx *ctxp) + struct lpfc_async_xchg_ctx *ctxp) { uint64_t seg1, seg2, seg3, seg4, seg5; uint64_t seg6, seg7, seg8, seg9, seg10; @@ -692,7 +709,7 @@ out: * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME FCP commands @@ -700,20 +717,21 @@ out: **/ static void lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { struct lpfc_nvmet_tgtport *tgtp; struct nvmefc_tgt_fcp_req *rsp; - struct lpfc_nvmet_rcv_ctx *ctxp; - uint32_t status, result, op, start_clean, logerr; + struct lpfc_async_xchg_ctx *ctxp; + uint32_t status, result, op, logerr; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - uint32_t id; + int id; #endif - ctxp = cmdwqe->context2; - ctxp->flag &= ~LPFC_NVMET_IO_INP; + ctxp = cmdwqe->context_un.axchg; + ctxp->flag &= ~LPFC_NVME_IO_INP; - rsp = &ctxp->ctx.fcp_req; + rsp = &ctxp->hdlrctx.fcp_req; op = rsp->op; status = bf_get(lpfc_wcqe_c_status, wcqe); @@ -740,13 +758,13 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, /* pick up SLI4 exhange busy condition */ if (bf_get(lpfc_wcqe_c_xb, wcqe)) { - ctxp->flag |= LPFC_NVMET_XBUSY; + ctxp->flag |= LPFC_NVME_XBUSY; logerr |= LOG_NVME_ABTS; if (tgtp) atomic_inc(&tgtp->xmt_fcp_rsp_xb_set); } else { - ctxp->flag &= ~LPFC_NVMET_XBUSY; + ctxp->flag &= ~LPFC_NVME_XBUSY; } lpfc_printf_log(phba, KERN_INFO, logerr, @@ -768,7 +786,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, if ((op == NVMET_FCOP_READDATA_RSP) || (op == NVMET_FCOP_RSP)) { /* Sanity check */ - ctxp->state = LPFC_NVMET_STE_DONE; + ctxp->state = LPFC_NVME_STE_DONE; ctxp->entry_cnt++; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -802,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ } else { ctxp->entry_cnt++; - start_clean = offsetof(struct lpfc_iocbq, iocb_flag); - memset(((char *)cmdwqe) + start_clean, 0, - (sizeof(struct lpfc_iocbq) - start_clean)); + memset_startat(cmdwqe, 0, cmd_flag); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) { ctxp->ts_isr_data = cmdwqe->isr_timestamp; @@ -814,31 +830,44 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, rsp->done(rsp); } #ifdef 
CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { id = raw_smp_processor_id(); - if (id < LPFC_CHECK_CPU_CNT) { - if (ctxp->cpu != id) - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, - "6704 CPU Check cmdcmpl: " - "cpu %d expect %d\n", - id, ctxp->cpu); - phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++; - } + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); + if (ctxp->cpu != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6704 CPU Check cmdcmpl: " + "cpu %d expect %d\n", + id, ctxp->cpu); } #endif } -static int -lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, - struct nvmefc_tgt_ls_req *rsp) +/** + * __lpfc_nvme_xmt_ls_rsp - Generic service routine to issue transmit + * an NVME LS rsp for a prior NVME LS request that was received. + * @axchg: pointer to exchange context for the NVME LS request the response + * is for. + * @ls_rsp: pointer to the transport LS RSP that is to be sent + * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done + * + * This routine is used to format and send a WQE to transmit a NVME LS + * Response. The response is for a prior NVME LS request that was + * received and posted to the transport. + * + * Returns: + * 0 : if response successfully transmit + * non-zero : if response failed to transmit, of the form -Exxx. + **/ +int +__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, + struct nvmefc_ls_rsp *ls_rsp, + void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe)) { - struct lpfc_nvmet_rcv_ctx *ctxp = - container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req); - struct lpfc_hba *phba = ctxp->phba; - struct hbq_dmabuf *nvmebuf = - (struct hbq_dmabuf *)ctxp->rqb_buffer; + struct lpfc_hba *phba = axchg->phba; + struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer; struct lpfc_iocbq *nvmewqeq; - struct lpfc_nvmet_tgtport *nvmep = tgtport->private; struct lpfc_dmabuf dmabuf; struct ulp_bde64 bpl; int rc; @@ -846,76 +875,134 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, if (phba->pport->load_flag & FC_UNLOADING) return -ENODEV; - if (phba->pport->load_flag & FC_UNLOADING) - return -ENODEV; - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, - "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid); + "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid); - if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) || - (ctxp->entry_cnt != 1)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6412 NVMET LS rsp state mismatch " + if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6412 NVMEx LS rsp state mismatch " "oxid x%x: %d %d\n", - ctxp->oxid, ctxp->state, ctxp->entry_cnt); + axchg->oxid, axchg->state, axchg->entry_cnt); + return -EALREADY; } - ctxp->state = LPFC_NVMET_STE_LS_RSP; - ctxp->entry_cnt++; + axchg->state = LPFC_NVME_STE_LS_RSP; + axchg->entry_cnt++; - nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma, - rsp->rsplen); + nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma, + ls_rsp->rsplen); if (nvmewqeq == NULL) { - atomic_inc(&nvmep->xmt_ls_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6150 LS Drop IO x%x: Prep\n", - ctxp->oxid); - lpfc_in_buf_free(phba, &nvmebuf->dbuf); - atomic_inc(&nvmep->xmt_ls_abort); - lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, - ctxp->sid, ctxp->oxid); - return -ENOMEM; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6150 NVMEx LS Drop Rsp x%x: Prep\n", 
+ axchg->oxid); + rc = -ENOMEM; + goto out_free_buf; } /* Save numBdes for bpl2sgl */ - nvmewqeq->rsvd2 = 1; + nvmewqeq->num_bdes = 1; nvmewqeq->hba_wqidx = 0; - nvmewqeq->context3 = &dmabuf; + nvmewqeq->bpl_dmabuf = &dmabuf; dmabuf.virt = &bpl; bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; - bpl.tus.f.bdeSize = rsp->rsplen; + bpl.tus.f.bdeSize = ls_rsp->rsplen; bpl.tus.f.bdeFlags = 0; bpl.tus.w = le32_to_cpu(bpl.tus.w); + /* + * Note: although we're using stack space for the dmabuf, the + * call to lpfc_sli4_issue_wqe is synchronous, so it will not + * be referenced after it returns back to this routine. + */ - nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp; - nvmewqeq->iocb_cmpl = NULL; - nvmewqeq->context2 = ctxp; + nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp; + nvmewqeq->context_un.axchg = axchg; - lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", - ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); + lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n", + axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen); + + rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq); + + /* clear to be sure there's no reference */ + nvmewqeq->bpl_dmabuf = NULL; - rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); if (rc == WQE_SUCCESS) { /* * Okay to repost buffer here, but wait till cmpl * before freeing ctxp and iocbq. */ lpfc_in_buf_free(phba, &nvmebuf->dbuf); - atomic_inc(&nvmep->xmt_ls_rsp); return 0; } - /* Give back resources */ - atomic_inc(&nvmep->xmt_ls_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6151 LS Drop IO x%x: Issue %d\n", - ctxp->oxid, rc); - lpfc_nlp_put(nvmewqeq->context1); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6151 NVMEx LS RSP x%x: failed to transmit %d\n", + axchg->oxid, rc); + + rc = -ENXIO; + + lpfc_nlp_put(nvmewqeq->ndlp); +out_free_buf: + /* Give back resources */ lpfc_in_buf_free(phba, &nvmebuf->dbuf); - atomic_inc(&nvmep->xmt_ls_abort); - lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); - return -ENXIO; + + /* + * As transport doesn't track completions of responses, if the rsp + * fails to send, the transport will effectively ignore the rsp + * and consider the LS done. However, the driver has an active + * exchange open for the LS - so be sure to abort the exchange + * if the response isn't sent. + */ + lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid); + return rc; +} + +/** + * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response + * @tgtport: pointer to target port that NVME LS is to be transmit from. + * @ls_rsp: pointer to the transport LS RSP that is to be sent + * + * Driver registers this routine to transmit responses for received NVME + * LS requests. + * + * This routine is used to format and send a WQE to transmit a NVME LS + * Response. The ls_rsp is used to reverse-map the LS to the original + * NVME LS request sequence, which provides addressing information for + * the remote port the LS to be sent to, as well as the exchange id + * that is the LS is bound to. + * + * Returns: + * 0 : if response successfully transmit + * non-zero : if response failed to transmit, of the form -Exxx. 
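The new comment earlier in this hunk is worth calling out: the WQE borrows a stack-resident dmabuf only because lpfc_sli4_issue_wqe() is synchronous, and bpl_dmabuf is cleared right after the call so nothing can ever reference the dead stack frame. A minimal sketch of that borrow-then-clear idiom, with simplified stand-in types:

struct demo_dmabuf {
	void *virt;
};

struct demo_wqe {
	struct demo_dmabuf *bpl_dmabuf;
};

/* Stub submit: consumes the WQE synchronously, keeps no reference. */
static int demo_issue_sync(struct demo_wqe *w)
{
	(void)w;
	return 0;
}

static int demo_xmit_with_stack_bpl(struct demo_wqe *w)
{
	struct demo_dmabuf dmabuf = { 0 };	/* valid only in this frame */
	int rc;

	w->bpl_dmabuf = &dmabuf;	/* borrowed for the submit only */
	rc = demo_issue_sync(w);	/* synchronous: no later reference */
	w->bpl_dmabuf = NULL;		/* never leave a dangling pointer */
	return rc;
}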
+ **/ +static int +lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, + struct nvmefc_ls_rsp *ls_rsp) +{ + struct lpfc_async_xchg_ctx *axchg = + container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); + struct lpfc_nvmet_tgtport *nvmep = tgtport->private; + int rc; + + if (axchg->phba->pport->load_flag & FC_UNLOADING) + return -ENODEV; + + rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp); + + if (rc) { + atomic_inc(&nvmep->xmt_ls_drop); + /* + * unless the failure is due to having already sent + * the response, an abort will be generated for the + * exchange if the rsp can't be sent. + */ + if (rc != -EALREADY) + atomic_inc(&nvmep->xmt_ls_abort); + return rc; + } + + atomic_inc(&nvmep->xmt_ls_rsp); + return 0; } static int @@ -923,19 +1010,17 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *rsp) { struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; - struct lpfc_nvmet_rcv_ctx *ctxp = - container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); + struct lpfc_async_xchg_ctx *ctxp = + container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct lpfc_hba *phba = ctxp->phba; struct lpfc_queue *wq; struct lpfc_iocbq *nvmewqeq; struct lpfc_sli_ring *pring; unsigned long iflags; int rc; - - if (phba->pport->load_flag & FC_UNLOADING) { - rc = -ENODEV; - goto aerr; - } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int id; +#endif if (phba->pport->load_flag & FC_UNLOADING) { rc = -ENODEV; @@ -954,25 +1039,23 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { - int id = raw_smp_processor_id(); - if (id < LPFC_CHECK_CPU_CNT) { - if (rsp->hwqid != id) - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, - "6705 CPU Check OP: " - "cpu %d expect %d\n", - id, rsp->hwqid); - phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++; - } + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { + id = raw_smp_processor_id(); + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); + if (rsp->hwqid != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6705 CPU Check OP: " + "cpu %d expect %d\n", + id, rsp->hwqid); ctxp->cpu = id; /* Setup cpu for cmpl check */ } #endif /* Sanity check */ - if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) || - (ctxp->state == LPFC_NVMET_STE_ABORT)) { + if ((ctxp->flag & LPFC_NVME_ABTS_RCV) || + (ctxp->state == LPFC_NVME_STE_ABORT)) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6102 IO oxid x%x aborted\n", ctxp->oxid); rc = -ENXIO; @@ -982,23 +1065,22 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); if (nvmewqeq == NULL) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6152 FCP Drop IO x%x: Prep\n", ctxp->oxid); rc = -ENXIO; goto aerr; } - nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; - nvmewqeq->iocb_cmpl = NULL; - nvmewqeq->context2 = ctxp; - nvmewqeq->iocb_flag |= LPFC_IO_NVMET; + nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; + nvmewqeq->context_un.axchg = ctxp; + nvmewqeq->cmd_flag |= LPFC_IO_NVMET; ctxp->wqeq->hba_wqidx = rsp->hwqid; lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", ctxp->oxid, rsp->op, rsp->rsplen); - ctxp->flag |= LPFC_NVMET_IO_INP; + ctxp->flag |= LPFC_NVME_IO_INP; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, 
nvmewqeq); if (rc == WQE_SUCCESS) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -1017,7 +1099,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, * WQ was full, so queue nvmewqeq to be sent after * WQE release CQE */ - ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; + ctxp->flag |= LPFC_NVME_DEFER_WQFULL; wq = ctxp->hdwq->io_wq; pring = wq->pring; spin_lock_irqsave(&pring->ring_lock, iflags); @@ -1030,13 +1112,13 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, /* Give back resources */ atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6153 FCP Drop IO x%x: Issue: %d\n", ctxp->oxid, rc); ctxp->wqeq->hba_wqidx = 0; - nvmewqeq->context2 = NULL; - nvmewqeq->context3 = NULL; + nvmewqeq->context_un.axchg = NULL; + nvmewqeq->bpl_dmabuf = NULL; rc = -EBUSY; aerr: return rc; @@ -1057,8 +1139,8 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *req) { struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; - struct lpfc_nvmet_rcv_ctx *ctxp = - container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); + struct lpfc_async_xchg_ctx *ctxp = + container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct lpfc_hba *phba = ctxp->phba; struct lpfc_queue *wq; unsigned long flags; @@ -1066,9 +1148,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, if (phba->pport->load_flag & FC_UNLOADING) return; - if (phba->pport->load_flag & FC_UNLOADING) - return; - if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[0]; @@ -1086,13 +1165,13 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, /* Since iaab/iaar are NOT set, we need to check * if the firmware is in process of aborting IO */ - if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) { + if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) { spin_unlock_irqrestore(&ctxp->ctxlock, flags); return; } - ctxp->flag |= LPFC_NVMET_ABORT_OP; + ctxp->flag |= LPFC_NVME_ABORT_OP; - if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) { + if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) { spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); @@ -1102,11 +1181,11 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, } spin_unlock_irqrestore(&ctxp->ctxlock, flags); - /* An state of LPFC_NVMET_STE_RCV means we have just received + /* A state of LPFC_NVME_STE_RCV means we have just received * the NVME command and have not started processing it. 
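When lpfc_sli4_issue_wqe() reports the queue full above, the prepared WQE is not dropped: LPFC_NVME_DEFER_WQFULL is set and the WQE is parked on the ring's wqfull_list under the ring lock, to be replayed by lpfc_nvmet_wqfull_process() once a WQE-release CQE frees up space. A userspace model of that defer-on-busy pattern (a pthread mutex stands in for the ring lock; the stub submit always reports busy):

#include <errno.h>
#include <pthread.h>

struct demo_wqe_node {
	struct demo_wqe_node *next;
};

static struct demo_wqe_node *demo_wqfull_head;
static pthread_mutex_t demo_ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_hw_submit(struct demo_wqe_node *w)
{
	(void)w;
	return -EBUSY;		/* stub: pretend the WQ is full */
}

static int demo_submit_or_defer(struct demo_wqe_node *w)
{
	int rc = demo_hw_submit(w);

	if (rc != -EBUSY)
		return rc;
	pthread_mutex_lock(&demo_ring_lock);
	w->next = demo_wqfull_head;	/* park until the release event */
	demo_wqfull_head = w;
	pthread_mutex_unlock(&demo_ring_lock);
	return 0;			/* deferred counts as accepted */
}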
* (by issuing any IO WQEs on this exchange yet) */ - if (ctxp->state == LPFC_NVMET_STE_RCV) + if (ctxp->state == LPFC_NVME_STE_RCV) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); else @@ -1119,26 +1198,26 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *rsp) { struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; - struct lpfc_nvmet_rcv_ctx *ctxp = - container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); + struct lpfc_async_xchg_ctx *ctxp = + container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct lpfc_hba *phba = ctxp->phba; unsigned long flags; bool aborting = false; spin_lock_irqsave(&ctxp->ctxlock, flags); - if (ctxp->flag & LPFC_NVMET_XBUSY) + if (ctxp->flag & LPFC_NVME_XBUSY) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6027 NVMET release with XBUSY flag x%x" " oxid x%x\n", ctxp->flag, ctxp->oxid); - else if (ctxp->state != LPFC_NVMET_STE_DONE && - ctxp->state != LPFC_NVMET_STE_ABORT) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + else if (ctxp->state != LPFC_NVME_STE_DONE && + ctxp->state != LPFC_NVME_STE_ABORT) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6413 NVMET release bad state %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); - if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || - (ctxp->flag & LPFC_NVMET_XBUSY)) { + if ((ctxp->flag & LPFC_NVME_ABORT_OP) || + (ctxp->flag & LPFC_NVME_XBUSY)) { aborting = true; /* let the abort path do the real release */ lpfc_nvmet_defer_release(phba, ctxp); @@ -1149,7 +1228,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, ctxp->state, aborting); atomic_inc(&lpfc_nvmep->xmt_fcp_release); - ctxp->flag &= ~LPFC_NVMET_TNOTIFY; + ctxp->flag &= ~LPFC_NVME_TNOTIFY; if (aborting) return; @@ -1162,8 +1241,8 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *rsp) { struct lpfc_nvmet_tgtport *tgtp; - struct lpfc_nvmet_rcv_ctx *ctxp = - container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); + struct lpfc_async_xchg_ctx *ctxp = + container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; struct lpfc_hba *phba = ctxp->phba; unsigned long iflag; @@ -1191,6 +1270,122 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } +/** + * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request + * @phba: Pointer to HBA context object + * @cmdwqe: Pointer to driver command WQE object. + * @rspwqe: Pointer to driver response WQE object. + * + * This function is the completion handler for NVME LS requests. + * The function updates any states and statistics, then calls the + * generic completion handler to finish completion of the request. + **/ +static void +lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_iocbq *rspwqe) +{ + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; + __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe); +} + +/** + * lpfc_nvmet_ls_req - Issue an Link Service request + * @targetport: pointer to target instance registered with nvmet transport. + * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. + * Driver sets this value to the ndlp pointer. + * @pnvme_lsreq: the transport nvme_ls_req structure for the LS + * + * Driver registers this routine to handle any link service request + * from the nvme_fc transport to a remote nvme-aware port. 
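The container_of() calls that now appear in every handler, such as container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req) and its ls_rsp equivalent, are how the driver recovers its context from the transport-owned request pointer without any lookup table. A portable model of that reverse mapping, defining the macro locally:

#include <stddef.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_fcp_req {
	int op;
};

struct demo_axchg {
	unsigned short oxid;
	struct demo_fcp_req fcp_req;	/* embedded, handed to transport */
};

static struct demo_axchg *demo_ctx_from_req(struct demo_fcp_req *rsp)
{
	/* subtract the member offset: O(1), no global table needed */
	return demo_container_of(rsp, struct demo_axchg, fcp_req);
}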
+ * + * Return value : + * 0 - Success + * non-zero: various error codes, in form of -Exxx + **/ +static int +lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport, + void *hosthandle, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + int ret; + u32 hstate; + + if (!lpfc_nvmet) + return -EINVAL; + + phba = lpfc_nvmet->phba; + if (phba->pport->load_flag & FC_UNLOADING) + return -EINVAL; + + hstate = atomic_read(&lpfc_nvmet->state); + if (hstate == LPFC_NVMET_INV_HOST_ACTIVE) + return -EACCES; + + ndlp = (struct lpfc_nodelist *)hosthandle; + + ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq, + lpfc_nvmet_ls_req_cmp); + + return ret; +} + +/** + * lpfc_nvmet_ls_abort - Abort a prior NVME LS request + * @targetport: Transport targetport, that LS was issued from. + * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. + * Driver sets this value to the ndlp pointer. + * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted + * + * Driver registers this routine to abort an NVME LS request that is + * in progress (from the transports perspective). + **/ +static void +lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport, + void *hosthandle, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + int ret; + + phba = lpfc_nvmet->phba; + if (phba->pport->load_flag & FC_UNLOADING) + return; + + ndlp = (struct lpfc_nodelist *)hosthandle; + + ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq); + if (!ret) + atomic_inc(&lpfc_nvmet->xmt_ls_abort); +} + +static void +lpfc_nvmet_host_release(void *hosthandle) +{ + struct lpfc_nodelist *ndlp = hosthandle; + struct lpfc_hba *phba = ndlp->phba; + struct lpfc_nvmet_tgtport *tgtp; + + if (!phba->targetport || !phba->targetport->private) + return; + + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6202 NVMET XPT releasing hosthandle x%px " + "DID x%x xflags x%x refcnt %d\n", + hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags, + kref_read(&ndlp->kref)); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + spin_lock_irq(&ndlp->lock); + ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); + atomic_set(&tgtp->state, 0); +} + static void lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport) { @@ -1202,7 +1397,7 @@ lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport) phba = tgtp->phba; rc = lpfc_issue_els_rscn(phba->pport, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6420 NVMET subsystem change: Notification %s\n", (rc) ? 
"Failed" : "Sent"); } @@ -1215,6 +1410,9 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .fcp_req_release = lpfc_nvmet_xmt_fcp_release, .defer_rcv = lpfc_nvmet_defer_rcv, .discovery_event = lpfc_nvmet_discovery_event, + .ls_req = lpfc_nvmet_ls_req, + .ls_abort = lpfc_nvmet_ls_abort, + .host_release = lpfc_nvmet_host_release, .max_hw_queues = 1, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, @@ -1225,6 +1423,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .target_features = 0, /* sizes of additional private data for data structures */ .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), + .lsrqst_priv_sz = 0, }; static void @@ -1241,7 +1440,10 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, list_del_init(&ctx_buf->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_lock(&phba->hbalock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); + spin_unlock(&phba->hbalock); + ctx_buf->sglq->state = SGL_FREED; ctx_buf->sglq->ndlp = NULL; @@ -1296,7 +1498,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); if (!phba->sli4_hba.nvmet_ctx_info) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6419 Failed allocate memory for " "nvmet context lists\n"); return -ENOMEM; @@ -1354,7 +1556,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); if (!ctx_buf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6404 Ran out of memory for NVMET\n"); return -ENOMEM; } @@ -1363,30 +1565,30 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) GFP_KERNEL); if (!ctx_buf->context) { kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6405 Ran out of NVMET " "context memory\n"); return -ENOMEM; } ctx_buf->context->ctxbuf = ctx_buf; - ctx_buf->context->state = LPFC_NVMET_STE_FREE; + ctx_buf->context->state = LPFC_NVME_STE_FREE; ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); if (!ctx_buf->iocbq) { kfree(ctx_buf->context); kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6406 Ran out of NVMET iocb/WQEs\n"); return -ENOMEM; } - ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET; + ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET; nvmewqe = ctx_buf->iocbq; wqe = &nvmewqe->wqe; /* Initialize WQE */ memset(wqe, 0, sizeof(union lpfc_wqe)); - ctx_buf->iocbq->context1 = NULL; + ctx_buf->iocbq->cmd_dmabuf = NULL; spin_lock(&phba->sli4_hba.sgl_list_lock); ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); spin_unlock(&phba->sli4_hba.sgl_list_lock); @@ -1394,7 +1596,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } @@ -1473,7 +1675,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) error = -ENOENT; #endif if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6025 Cannot register NVME targetport x%x: " "portnm %llx nodenm %llx segs %d qs %d\n", error, @@ -1569,7 +1771,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, #if 
(IS_ENABLED(CONFIG_NVME_TARGET_FC)) uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); - struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; + struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; struct lpfc_nvmet_tgtport *tgtp; struct nvmefc_tgt_fcp_req *req = NULL; struct lpfc_nodelist *ndlp; @@ -1588,31 +1790,33 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe); } - spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; - spin_lock(&ctxp->ctxlock); + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); /* Check if we already received a free context call * and we have completed processing an abort situation. */ - if (ctxp->flag & LPFC_NVMET_CTX_RLS && - !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { + if (ctxp->flag & LPFC_NVME_CTX_RLS && + !(ctxp->flag & LPFC_NVME_ABORT_OP)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } - ctxp->flag &= ~LPFC_NVMET_XBUSY; - spin_unlock(&ctxp->ctxlock); - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + ctxp->flag &= ~LPFC_NVME_XBUSY; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); - spin_unlock_irqrestore(&phba->hbalock, iflag); ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); - if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + if (ndlp && (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { lpfc_set_rrq_active(phba, ndlp, @@ -1631,9 +1835,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, lpfc_worker_wake_up(phba); return; } - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); if (ctxp) { /* @@ -1647,15 +1849,15 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, rxid); spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->flag |= LPFC_NVMET_ABTS_RCV; - ctxp->state = LPFC_NVMET_STE_ABORT; + ctxp->flag |= LPFC_NVME_ABTS_RCV; + ctxp->state = LPFC_NVME_STE_ABORT; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", xri, raw_smp_processor_id(), 0); - req = &ctxp->ctx.fcp_req; + req = &ctxp->hdlrctx.fcp_req; if (req) nvmet_fc_rcv_fcp_abort(phba->targetport, req); } @@ -1668,7 +1870,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_hba *phba = vport->phba; - struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; + struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; struct nvmefc_tgt_fcp_req *rsp; uint32_t sid; uint16_t oxid, xri; @@ -1677,8 +1879,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, sid = sli4_sid_from_fc_hdr(fc_hdr); oxid = be16_to_cpu(fc_hdr->fh_ox_id); - spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { @@ -1687,11 +1888,10 @@ lpfc_nvmet_rcv_unsol_abort(struct 
lpfc_vport *vport, xri = ctxp->ctxbuf->sglq->sli4_xritag; - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->flag |= LPFC_NVMET_ABTS_RCV; + ctxp->flag |= LPFC_NVME_ABTS_RCV; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmeio_data(phba, @@ -1701,16 +1901,14 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); - rsp = &ctxp->ctx.fcp_req; + rsp = &ctxp->hdlrctx.fcp_req; nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); /* Respond with BA_ACC accordingly */ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; } - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); /* check the wait list */ if (phba->sli4_hba.nvmet_io_wait_cnt) { struct rqb_dmabuf *nvmebuf; @@ -1760,7 +1958,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, xri = ctxp->ctxbuf->sglq->sli4_xritag; spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP); + ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP); spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmeio_data(phba, @@ -1772,10 +1970,10 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, "flag x%x state x%x\n", ctxp->oxid, xri, ctxp->flag, ctxp->state); - if (ctxp->flag & LPFC_NVMET_TNOTIFY) { + if (ctxp->flag & LPFC_NVME_TNOTIFY) { /* Notify the transport */ nvmet_fc_rcv_fcp_abort(phba->targetport, - &ctxp->ctx.fcp_req); + &ctxp->hdlrctx.fcp_req); } else { cancel_work_sync(&ctxp->ctxbuf->defer_work); spin_lock_irqsave(&ctxp->ctxlock, iflag); @@ -1803,7 +2001,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, - struct lpfc_nvmet_rcv_ctx *ctxp) + struct lpfc_async_xchg_ctx *ctxp) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *nvmewqeq; @@ -1825,12 +2023,14 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, &wq->wqfull_list, list) { if (ctxp) { /* Checking for a specific IO to flush */ - if (nvmewqeq->context2 == ctxp) { + if (nvmewqeq->context_un.axchg == ctxp) { list_del(&nvmewqeq->list); spin_unlock_irqrestore(&pring->ring_lock, iflags); + memcpy(&nvmewqeq->wcqe_cmpl, wcqep, + sizeof(*wcqep)); lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, - wcqep); + nvmewqeq); return; } continue; @@ -1838,7 +2038,8 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, /* Flush all IOs */ list_del(&nvmewqeq->list); spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep); + memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep)); + lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); } } @@ -1854,7 +2055,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_sli_ring *pring; struct lpfc_iocbq *nvmewqeq; - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *ctxp; unsigned long iflags; int rc; @@ -1868,7 +2069,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, list); spin_unlock_irqrestore(&pring->ring_lock, iflags); - ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2; + ctxp = 
nvmewqeq->context_un.axchg; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); if (rc == -EBUSY) { @@ -1880,7 +2081,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, if (rc == WQE_SUCCESS) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) { - if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP) + if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP) ctxp->ts_status_wqput = ktime_get_ns(); else ctxp->ts_data_wqput = ktime_get_ns(); @@ -1915,9 +2116,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6179 Unreg targetport x%px timeout " "reached.\n", phba->targetport); lpfc_nvmet_cleanup_io_context(phba); @@ -1927,114 +2128,61 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } /** - * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer + * lpfc_nvmet_handle_lsreq - Process an NVME LS request * @phba: pointer to lpfc hba data structure. - * @pring: pointer to a SLI ring. - * @nvmebuf: pointer to lpfc nvme command HBQ data structure. + * @axchg: pointer to exchange context for the NVME LS request * - * This routine is used for processing the WQE associated with a unsolicited - * event. It first determines whether there is an existing ndlp that matches - * the DID from the unsolicited WQE. If not, it will create a new one with - * the DID from the unsolicited WQE. The ELS command from the unsolicited - * WQE is then used to invoke the proper routine and to set up proper state - * of the discovery state machine. - **/ -static void -lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - struct hbq_dmabuf *nvmebuf) + * This routine is used for processing an asychronously received NVME LS + * request. Any remaining validation is done and the LS is then forwarded + * to the nvmet-fc transport via nvmet_fc_rcv_ls_req(). + * + * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing) + * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done. + * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. 
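One small but real fix sits in lpfc_nvmet_destroy_targetport() above: wait_for_completion_timeout() is now handed &tport_unreg_cmp, the on-stack completion object itself, instead of going through the pointer stashed in tgtp. A kernel-style sketch of that on-stack completion handshake (the 5000 ms timeout is illustrative, not the lpfc constant):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static struct completion *demo_unreg_cmp;	/* seen by the async path */

/* Called from the asynchronous unregister-done path. */
static void demo_teardown_done(void)
{
	if (demo_unreg_cmp)
		complete(demo_unreg_cmp);
}

static void demo_teardown_and_wait(void)
{
	DECLARE_COMPLETION_ONSTACK(unreg_done);

	demo_unreg_cmp = &unreg_done;
	/* ... kick off the asynchronous unregister here ... */
	if (!wait_for_completion_timeout(&unreg_done,
					 msecs_to_jiffies(5000)))
		pr_err("demo: unregister timeout reached\n");
	demo_unreg_cmp = NULL;
}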
+ * + * Returns 0 if LS was handled and delivered to the transport + * Returns 1 if LS failed to be handled and should be dropped + */ +int +lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *axchg) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - struct lpfc_nvmet_tgtport *tgtp; - struct fc_frame_header *fc_hdr; - struct lpfc_nvmet_rcv_ctx *ctxp; - uint32_t *payload; - uint32_t size, oxid, sid, rc; - - - if (!nvmebuf || !phba->targetport) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6154 LS Drop IO\n"); - oxid = 0; - size = 0; - sid = 0; - ctxp = NULL; - goto dropit; - } - - fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); - oxid = be16_to_cpu(fc_hdr->fh_ox_id); - - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - payload = (uint32_t *)(nvmebuf->dbuf.virt); - size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); - sid = sli4_sid_from_fc_hdr(fc_hdr); + struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private; + uint32_t *payload = axchg->payload; + int rc; - ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); - if (ctxp == NULL) { - atomic_inc(&tgtp->rcv_ls_req_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6155 LS Drop IO x%x: Alloc\n", - oxid); -dropit: - lpfc_nvmeio_data(phba, "NVMET LS DROP: " - "xri x%x sz %d from %06x\n", - oxid, size, sid); - lpfc_in_buf_free(phba, &nvmebuf->dbuf); - return; - } - ctxp->phba = phba; - ctxp->size = size; - ctxp->oxid = oxid; - ctxp->sid = sid; - ctxp->wqeq = NULL; - ctxp->state = LPFC_NVMET_STE_LS_RCV; - ctxp->entry_cnt = 1; - ctxp->rqb_buffer = (void *)nvmebuf; - ctxp->hdwq = &phba->sli4_hba.hdwq[0]; + atomic_inc(&tgtp->rcv_ls_req_in); - lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", - oxid, size, sid); /* - * The calling sequence should be: - * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done - * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. + * Driver passes the ndlp as the hosthandle argument allowing + * the transport to generate LS requests for any associateions + * that are created. 
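The hosthandle plumbing described in this comment is a plain opaque-pointer contract: in the nvmet_fc_rcv_ls_req() call just below, the driver passes its ndlp as hosthandle, and the transport only ever hands the value back in ls_req/ls_abort/host_release, never dereferencing it. A tiny model of that contract, with illustrative names:

#include <errno.h>

struct demo_nodelist {
	unsigned int nlp_DID;
};

/* Transport-side callback: hosthandle is opaque to the transport. */
static int demo_ls_req(void *hosthandle)
{
	struct demo_nodelist *ndlp = hosthandle; /* cast back what we stored */

	if (!ndlp)
		return -EINVAL;
	/* ... address the new LS using ndlp->nlp_DID ... */
	return 0;
}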
*/ - atomic_inc(&tgtp->rcv_ls_req_in); - rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, - payload, size); + rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp, + axchg->payload, axchg->size); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " - "%08x %08x %08x\n", size, rc, + "%08x %08x %08x\n", axchg->size, rc, *payload, *(payload+1), *(payload+2), *(payload+3), *(payload+4), *(payload+5)); - if (rc == 0) { + if (!rc) { atomic_inc(&tgtp->rcv_ls_req_out); - return; + return 0; } - lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", - oxid, size, sid); - atomic_inc(&tgtp->rcv_ls_req_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", - ctxp->oxid, rc); - - /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ - lpfc_in_buf_free(phba, &nvmebuf->dbuf); - - atomic_inc(&tgtp->xmt_ls_abort); - lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); #endif + return 1; } static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; + struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; struct lpfc_hba *phba = ctxp->phba; struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; struct lpfc_nvmet_tgtport *tgtp; @@ -2043,7 +2191,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) unsigned long iflags; if (!nvmebuf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6159 process_rcv_fcp_req, nvmebuf is NULL, " "oxid: x%x flg: x%x state: x%x\n", ctxp->oxid, ctxp->flag, ctxp->state); @@ -2055,8 +2203,8 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) return; } - if (ctxp->flag & LPFC_NVMET_ABTS_RCV) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + if (ctxp->flag & LPFC_NVME_ABTS_RCV) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6324 IO oxid x%x aborted\n", ctxp->oxid); return; @@ -2064,7 +2212,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) payload = (uint32_t *)(nvmebuf->dbuf.virt); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - ctxp->flag |= LPFC_NVMET_TNOTIFY; + ctxp->flag |= LPFC_NVME_TNOTIFY; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_isr_cmd) ctxp->ts_cmd_nvme = ktime_get_ns(); @@ -2078,13 +2226,13 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) * A buffer has already been reposted for this IO, so just free * the nvmebuf. 
*/ - rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req, payload, ctxp->size); /* Process FCP command */ if (rc == 0) { atomic_inc(&tgtp->rcv_fcp_cmd_out); spin_lock_irqsave(&ctxp->ctxlock, iflags); - if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) || + if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) || (nvmebuf != ctxp->rqb_buffer)) { spin_unlock_irqrestore(&ctxp->ctxlock, iflags); return; @@ -2103,7 +2251,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) atomic_inc(&tgtp->rcv_fcp_cmd_out); atomic_inc(&tgtp->defer_fod); spin_lock_irqsave(&ctxp->ctxlock, iflags); - if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) { + if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { spin_unlock_irqrestore(&ctxp->ctxlock, iflags); return; } @@ -2118,9 +2266,9 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); return; } - ctxp->flag &= ~LPFC_NVMET_TNOTIFY; + ctxp->flag &= ~LPFC_NVME_TNOTIFY; atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", ctxp->oxid, rc, atomic_read(&tgtp->rcv_fcp_cmd_in), @@ -2225,7 +2373,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, uint64_t isr_timestamp, uint8_t cqflag) { - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct lpfc_nvmet_ctxbuf *ctx_buf; @@ -2239,7 +2387,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctx_buf = NULL; if (!nvmebuf || !phba->targetport) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6157 NVMET FCP Drop IO\n"); if (nvmebuf) lpfc_rq_buf_free(phba, &nvmebuf->hbuf); @@ -2270,15 +2418,13 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, size = nvmebuf->bytes_recv; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { - if (current_cpu < LPFC_CHECK_CPU_CNT) { - if (idx != current_cpu) - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, - "6703 CPU Check rcv: " - "cpu %d expect %d\n", - current_cpu, idx); - phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++; - } + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { + this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); + if (idx != current_cpu) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6703 CPU Check rcv: " + "cpu %d expect %d\n", + current_cpu, idx); } #endif @@ -2309,12 +2455,12 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, sid = sli4_sid_from_fc_hdr(fc_hdr); - ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; + ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); - if (ctxp->state != LPFC_NVMET_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + if (ctxp->state != LPFC_NVME_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6414 NVMET Context corrupt %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); } @@ -2325,7 +2471,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctxp->oxid = oxid; ctxp->sid = sid; ctxp->idx = idx; - ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->state = LPFC_NVME_STE_RCV; ctxp->entry_cnt = 1; ctxp->flag = 0; ctxp->ctxbuf = ctx_buf; @@ -2356,7 +2502,7 @@ 
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6325 Unable to queue work for oxid x%x. " "FCP Drop IO [x%x x%x x%x]\n", ctxp->oxid, @@ -2372,40 +2518,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, } /** - * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport - * @phba: pointer to lpfc hba data structure. - * @pring: pointer to a SLI ring. - * @nvmebuf: pointer to received nvme data structure. - * - * This routine is used to process an unsolicited event received from a SLI - * (Service Level Interface) ring. The actual processing of the data buffer - * associated with the unsolicited event is done by invoking the routine - * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the - * SLI RQ on which the unsolicited event was received. - **/ -void -lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - struct lpfc_iocbq *piocb) -{ - struct lpfc_dmabuf *d_buf; - struct hbq_dmabuf *nvmebuf; - - d_buf = piocb->context2; - nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); - - if (!nvmebuf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "3015 LS Drop IO\n"); - return; - } - if (phba->nvmet_support == 0) { - lpfc_in_buf_free(phba, &nvmebuf->dbuf); - return; - } - lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf); -} - -/** * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport * @phba: pointer to lpfc hba data structure. * @idx: relative index of MRQ vector @@ -2427,7 +2539,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint8_t cqflag) { if (!nvmebuf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3167 NVMET FCP Drop IO\n"); return; } @@ -2465,7 +2577,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, **/ static struct lpfc_iocbq * lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, - struct lpfc_nvmet_rcv_ctx *ctxp, + struct lpfc_async_xchg_ctx *ctxp, dma_addr_t rspbuf, uint16_t rspsize) { struct lpfc_nodelist *ndlp; @@ -2473,7 +2585,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, union lpfc_wqe128 *wqe; if (!lpfc_is_link_up(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6104 NVMET prep LS wqe: link err: " "NPORT x%x oxid:x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2483,7 +2595,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, /* Allocate buffer for command wqe */ nvmewqe = lpfc_sli_get_iocbq(phba); if (nvmewqe == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6105 NVMET prep LS wqe: No WQE: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2491,10 +2603,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, } ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6106 NVMET prep LS wqe: No ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2503,10 +2615,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, ctxp->wqeq = nvmewqe; /* prevent preparing wqe with NULL ndlp reference */ - nvmewqe->context1 = 
lpfc_nlp_get(ndlp); - if (nvmewqe->context1 == NULL) + nvmewqe->ndlp = lpfc_nlp_get(ndlp); + if (!nvmewqe->ndlp) goto nvme_wqe_free_wqeq_exit; - nvmewqe->context2 = ctxp; + nvmewqe->context_un.axchg = ctxp; wqe = &nvmewqe->wqe; memset(wqe, 0, sizeof(union lpfc_wqe)); @@ -2567,7 +2679,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, nvmewqe->retry = 1; nvmewqe->vport = phba->pport; nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; - nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; + nvmewqe->cmd_flag |= LPFC_IO_NVME_LS; /* Xmit NVMET response to remote NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, @@ -2578,8 +2690,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, return nvmewqe; nvme_wqe_free_wqeq_exit: - nvmewqe->context2 = NULL; - nvmewqe->context3 = NULL; + nvmewqe->context_un.axchg = NULL; + nvmewqe->ndlp = NULL; + nvmewqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, nvmewqe); return NULL; } @@ -2587,9 +2700,9 @@ nvme_wqe_free_wqeq_exit: static struct lpfc_iocbq * lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, - struct lpfc_nvmet_rcv_ctx *ctxp) + struct lpfc_async_xchg_ctx *ctxp) { - struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req; + struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req; struct lpfc_nvmet_tgtport *tgtp; struct sli4_sge *sgl; struct lpfc_nodelist *ndlp; @@ -2598,12 +2711,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, union lpfc_wqe128 *wqe; struct ulp_bde64 *bde; dma_addr_t physaddr; - int i, cnt; - int do_pbde; + int i, cnt, nsegs; + bool use_pbde = false; int xc = 1; if (!lpfc_is_link_up(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6107 NVMET prep FCP wqe: link err:" "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2611,10 +2724,10 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, } ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6108 NVMET prep FCP wqe: no ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2622,13 +2735,14 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, } if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6109 NVMET prep FCP wqe: seg cnt err: " "NPORT x%x oxid x%x ste %d cnt %d\n", ctxp->sid, ctxp->oxid, ctxp->state, phba->cfg_nvme_seg_cnt); return NULL; } + nsegs = rsp->sg_cnt; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; nvmewqe = ctxp->wqeq; @@ -2636,7 +2750,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* Allocate buffer for command wqe */ nvmewqe = ctxp->ctxbuf->iocbq; if (nvmewqe == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6110 NVMET prep FCP wqe: No " "WQE: NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2649,12 +2763,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, } /* Sanity check */ - if (((ctxp->state == LPFC_NVMET_STE_RCV) && + if (((ctxp->state == LPFC_NVME_STE_RCV) && (ctxp->entry_cnt == 1)) || - (ctxp->state == LPFC_NVMET_STE_DATA)) { + (ctxp->state == LPFC_NVME_STE_DATA)) { wqe = &nvmewqe->wqe; } else { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 
"6111 Wrong state NVMET FCP: %d cnt %d\n", ctxp->state, ctxp->entry_cnt); return NULL; @@ -2706,9 +2820,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, if (!xc) bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0); - /* Word 11 - set sup, irsp, irsplen later */ - do_pbde = 0; - /* Word 12 */ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; @@ -2786,12 +2897,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, if (!xc) bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); - /* Word 11 - set pbde later */ - if (phba->cfg_enable_pbde) { - do_pbde = 1; + /* Word 11 - check for pbde */ + if (nsegs == 1 && phba->cfg_enable_pbde) { + use_pbde = true; + /* Word 11 - PBDE bit already preset by template */ } else { + /* Overwrite default template setting */ bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); - do_pbde = 0; } /* Word 12 */ @@ -2862,13 +2974,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, ((rsp->rsplen >> 2) - 1)); memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); } - do_pbde = 0; /* Word 12 */ wqe->fcp_trsp.rsvd_12_15[0] = 0; /* Use rspbuf, NOT sg list */ - rsp->sg_cnt = 0; + nsegs = 0; sgl->word2 = 0; atomic_inc(&tgtp->xmt_fcp_rsp); break; @@ -2883,9 +2994,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe->retry = 1; nvmewqe->vport = phba->pport; nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; - nvmewqe->context1 = ndlp; + nvmewqe->ndlp = ndlp; - for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) { + for_each_sg(rsp->sg, sgel, nsegs, i) { physaddr = sg_dma_address(sgel); cnt = sg_dma_len(sgel); sgl->addr_hi = putPaddrHigh(physaddr); @@ -2897,24 +3008,25 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(cnt); - if (i == 0) { - bde = (struct ulp_bde64 *)&wqe->words[13]; - if (do_pbde) { - /* Words 13-15 (PBDE) */ - bde->addrLow = sgl->addr_lo; - bde->addrHigh = sgl->addr_hi; - bde->tus.f.bdeSize = - le32_to_cpu(sgl->sge_len); - bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bde->tus.w = cpu_to_le32(bde->tus.w); - } else { - memset(bde, 0, sizeof(struct ulp_bde64)); - } - } sgl++; ctxp->offset += cnt; } - ctxp->state = LPFC_NVMET_STE_DATA; + + bde = (struct ulp_bde64 *)&wqe->words[13]; + if (use_pbde) { + /* decrement sgl ptr backwards once to first data sge */ + sgl--; + + /* Words 13-15 (PBDE) */ + bde->addrLow = sgl->addr_lo; + bde->addrHigh = sgl->addr_hi; + bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + } else { + memset(bde, 0, sizeof(struct ulp_bde64)); + } + ctxp->state = LPFC_NVME_STE_DATA; ctxp->entry_cnt++; return nvmewqe; } @@ -2923,7 +3035,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. 
This function is the completion handler for NVME ABTS for FCP cmds @@ -2931,35 +3043,36 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, **/ static void lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; uint32_t result; unsigned long flags; bool released = false; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - ctxp = cmdwqe->context2; + ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - if (ctxp->flag & LPFC_NVMET_ABORT_OP) + if (ctxp->flag & LPFC_NVME_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); spin_lock_irqsave(&ctxp->ctxlock, flags); - ctxp->state = LPFC_NVMET_STE_DONE; + ctxp->state = LPFC_NVME_STE_DONE; /* Check if we already received a free context call * and we have completed processing an abort situation. */ - if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && - !(ctxp->flag & LPFC_NVMET_XBUSY)) { + if ((ctxp->flag & LPFC_NVME_CTX_RLS) && + !(ctxp->flag & LPFC_NVME_XBUSY)) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp); @@ -2970,8 +3083,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. @@ -2983,7 +3096,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, lpfc_sli_release_iocbq(phba, cmdwqe); /* Since iaab/iaar are NOT set, there is no work left. - * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted + * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted * should have been called already. */ } @@ -2992,7 +3105,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. 
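The abort completion above frees the exchange context only when two conditions line up under ctxp->ctxlock: the upper layer already released it (CTX_RLS) and the exchange is no longer busy on the wire (XBUSY clear); note the nested lock order, context lock first, then the abts buffer list lock. A sketch of that decision, with flag and struct names as stand-ins:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_CTX_RLS  0x1
#define MY_XBUSY    0x2
#define MY_ABORT_OP 0x4

struct my_xchg {
	spinlock_t lock;
	u32 flags;
	struct list_head list;
};

static bool my_abort_done(struct my_xchg *ctxp, spinlock_t *buf_list_lock)
{
	unsigned long flags;
	bool released = false;

	spin_lock_irqsave(&ctxp->lock, flags);
	if ((ctxp->flags & MY_CTX_RLS) && !(ctxp->flags & MY_XBUSY)) {
		/* Nested order: context lock first, then list lock. */
		spin_lock(buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(buf_list_lock);
		released = true;
	}
	ctxp->flags &= ~MY_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->lock, flags);
	return released;	/* caller reuses or recycles accordingly */
}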
This function is the completion handler for NVME ABTS for FCP cmds @@ -3000,15 +3113,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, **/ static void lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; unsigned long flags; uint32_t result; bool released = false; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - ctxp = cmdwqe->context2; + ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; if (!ctxp) { @@ -3022,12 +3136,12 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; spin_lock_irqsave(&ctxp->ctxlock, flags); - if (ctxp->flag & LPFC_NVMET_ABORT_OP) + if (ctxp->flag & LPFC_NVME_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); /* Sanity check */ - if (ctxp->state != LPFC_NVMET_STE_ABORT) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + if (ctxp->state != LPFC_NVME_STE_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6112 ABTS Wrong state:%d oxid x%x\n", ctxp->state, ctxp->oxid); } @@ -3035,15 +3149,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, /* Check if we already received a free context call * and we have completed processing an abort situation. */ - ctxp->state = LPFC_NVMET_STE_DONE; - if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && - !(ctxp->flag & LPFC_NVMET_XBUSY)) { + ctxp->state = LPFC_NVME_STE_DONE; + if ((ctxp->flag & LPFC_NVME_CTX_RLS) && + !(ctxp->flag & LPFC_NVME_XBUSY)) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp); @@ -3054,8 +3168,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. @@ -3064,7 +3178,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); /* Since iaab/iaar are NOT set, there is no work left. - * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted + * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted * should have been called already. */ } @@ -3073,7 +3187,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. 
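These completion handlers now take the response WQE object itself and derive the CQE via &rspwqe->wcqe_cmpl, instead of being passed a bare struct lpfc_wcqe_complete pointer, so one cmd_cmpl signature serves every path. A stripped-down sketch of the convention, with types as stand-ins:

#include <linux/types.h>

struct my_wcqe  { u32 word0, total_data_placed, parameter, word3; };
struct my_iocbq { struct my_wcqe wcqe_cmpl; };

static void my_abort_cmpl(struct my_iocbq *cmdwqe, struct my_iocbq *rspwqe)
{
	/* The CQE now rides inside the response WQE object. */
	struct my_wcqe *wcqe = &rspwqe->wcqe_cmpl;
	u32 result = wcqe->parameter;

	(void)cmdwqe;
	(void)result;	/* ...status handling keyed off these fields... */
}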
This function is the completion handler for NVME ABTS for LS cmds @@ -3081,17 +3195,20 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, **/ static void lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { - struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; uint32_t result; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - ctxp = cmdwqe->context2; + ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - atomic_inc(&tgtp->xmt_ls_abort_cmpl); + if (phba->nvmet_support) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_ls_abort_cmpl); + } lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n", @@ -3099,7 +3216,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, result, wcqe->word3); if (!ctxp) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6415 NVMET LS Abort No ctx: WCQE: " "%08x %08x %08x %08x\n", wcqe->word0, wcqe->total_data_placed, @@ -3109,25 +3226,25 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, return; } - if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + if (ctxp->state != LPFC_NVME_STE_LS_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6416 NVMET LS abort cmpl state mismatch: " "oxid x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, cmdwqe); kfree(ctxp); } static int lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, - struct lpfc_nvmet_rcv_ctx *ctxp, + struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, uint16_t xri) { - struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_nvmet_tgtport *tgtp = NULL; struct lpfc_iocbq *abts_wqeq; union lpfc_wqe128 *wqe_abts; struct lpfc_nodelist *ndlp; @@ -3136,14 +3253,16 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, "6067 ABTS: sid %x xri x%x/x%x\n", sid, xri, ctxp->wqeq->sli4_xritag); - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (phba->nvmet_support && phba->targetport) + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; ndlp = lpfc_findnode_did(phba->pport, sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { - atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + if (tgtp) + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6134 Drop ABTS - wrong NDLP state x%x.\n", (ndlp) ? 
ndlp->nlp_state : NLP_STE_MAX_STATE); @@ -3189,7 +3308,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); /* Word 10 */ - bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_LENLOC_WORD12); @@ -3203,10 +3321,10 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, OTHER_COMMAND); abts_wqeq->vport = phba->pport; - abts_wqeq->context1 = ndlp; - abts_wqeq->context2 = ctxp; - abts_wqeq->context3 = NULL; - abts_wqeq->rsvd2 = 0; + abts_wqeq->ndlp = ndlp; + abts_wqeq->context_un.axchg = ctxp; + abts_wqeq->bpl_dmabuf = NULL; + abts_wqeq->num_bdes = 0; /* hba_wqidx should already be setup from command we are aborting */ abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; abts_wqeq->iocb.ulpLe = 1; @@ -3219,14 +3337,14 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, - struct lpfc_nvmet_rcv_ctx *ctxp, + struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, uint16_t xri) { struct lpfc_nvmet_tgtport *tgtp; struct lpfc_iocbq *abts_wqeq; struct lpfc_nodelist *ndlp; unsigned long flags; - u8 opt; + bool ia; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -3236,17 +3354,17 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, } ndlp = lpfc_findnode_did(phba->pport, sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6160 Drop ABORT - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); /* No failure to an ABTS request. */ spin_lock_irqsave(&ctxp->ctxlock, flags); - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } @@ -3256,17 +3374,17 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, spin_lock_irqsave(&ctxp->ctxlock, flags); if (!ctxp->abort_wqeq) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6161 ABORT failed: No wqeqs: " "xri: x%x\n", ctxp->oxid); /* No failure to an ABTS request. */ - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } abts_wqeq = ctxp->abort_wqeq; - ctxp->state = LPFC_NVMET_STE_ABORT; - opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0; + ctxp->state = LPFC_NVME_STE_ABORT; + ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false; spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* Announce entry to new IO submit field. */ @@ -3283,43 +3401,44 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6163 Driver in reset cleanup - flushing " "NVME Req now. 
hba_flag x%x oxid x%x\n", phba->hba_flag, ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); spin_lock_irqsave(&ctxp->ctxlock, flags); - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } /* Outstanding abort is in progress */ - if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { + if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6164 Outstanding NVME I/O Abort Request " "still pending on oxid x%x\n", ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); spin_lock_irqsave(&ctxp->ctxlock, flags); - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } /* Ready - mark outstanding as aborted by driver. */ - abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; + abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED; - lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt); + lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag, + abts_wqeq->iotag, CLASS3, + LPFC_WQE_CQ_ID_DEFAULT, ia, true); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; - abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; - abts_wqeq->iocb_cmpl = NULL; - abts_wqeq->iocb_flag |= LPFC_IO_NVME; - abts_wqeq->context2 = ctxp; + abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVME; + abts_wqeq->context_un.axchg = ctxp; abts_wqeq->vport = phba->pport; if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; @@ -3333,10 +3452,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, atomic_inc(&tgtp->xmt_abort_rsp_error); spin_lock_irqsave(&ctxp->ctxlock, flags); - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_sli_release_iocbq(phba, abts_wqeq); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6166 Failed ABORT issue_wqe with status x%x " "for oxid x%x.\n", rc, ctxp->oxid); @@ -3345,7 +3464,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, - struct lpfc_nvmet_rcv_ctx *ctxp, + struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, uint16_t xri) { struct lpfc_nvmet_tgtport *tgtp; @@ -3360,14 +3479,14 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, ctxp->wqeq->hba_wqidx = 0; } - if (ctxp->state == LPFC_NVMET_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + if (ctxp->state == LPFC_NVME_STE_FREE) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); rc = WQE_BUSY; goto aerr; } - ctxp->state = LPFC_NVMET_STE_ABORT; + ctxp->state = LPFC_NVME_STE_ABORT; ctxp->entry_cnt++; rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); if (rc == 0) @@ -3375,9 +3494,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, spin_lock_irqsave(&phba->hbalock, flags); abts_wqeq = ctxp->wqeq; - abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; - abts_wqeq->iocb_cmpl = NULL; - abts_wqeq->iocb_flag |= LPFC_IO_NVMET; + abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVMET; if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; 
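The path above guards against issuing a second ABTS for the same exchange: the driver-aborted bit is tested under the HBA lock, and only if no abort is outstanding is the bit set and the abort WQE prepared and rung while the lock is still held. A minimal sketch of that guard; the names (my_iocb, MY_DRIVER_ABORTED) are stand-ins:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_DRIVER_ABORTED 0x1

struct my_iocb { u32 cmd_flag; };

static int my_issue_abort(spinlock_t *hbalock, struct my_iocb *abts)
{
	unsigned long flags;

	spin_lock_irqsave(hbalock, flags);
	if (abts->cmd_flag & MY_DRIVER_ABORTED) {
		spin_unlock_irqrestore(hbalock, flags);
		return -EBUSY;	/* an abort is already in flight */
	}
	abts->cmd_flag |= MY_DRIVER_ABORTED;
	/* ...prep and post the ABTS WQE while still holding the lock... */
	spin_unlock_irqrestore(hbalock, flags);
	return 0;
}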
@@ -3389,17 +3507,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, aerr: spin_lock_irqsave(&ctxp->ctxlock, flags); - if (ctxp->flag & LPFC_NVMET_CTX_RLS) { + if (ctxp->flag & LPFC_NVME_CTX_RLS) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } - ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS); + ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS); spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6135 Failed to Issue ABTS for oxid x%x. Status x%x " "(%x)\n", ctxp->oxid, rc, released); @@ -3408,34 +3526,44 @@ aerr: return 1; } -static int -lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, - struct lpfc_nvmet_rcv_ctx *ctxp, +/** + * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received + * via async frame receive where the frame is not handled. + * @phba: pointer to adapter structure + * @ctxp: pointer to the asynchronously received sequence + * @sid: address of the remote port to send the ABTS to + * @xri: oxid value for the ABTS (other side's exchange id). + **/ +int +lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, + struct lpfc_async_xchg_ctx *ctxp, + uint32_t sid, uint16_t xri) { - struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_nvmet_tgtport *tgtp = NULL; struct lpfc_iocbq *abts_wqeq; unsigned long flags; int rc; - if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) || - (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) { - ctxp->state = LPFC_NVMET_STE_LS_ABORT; + if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) || + (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) { + ctxp->state = LPFC_NVME_STE_LS_ABORT; ctxp->entry_cnt++; } else { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6418 NVMET LS abort state mismatch " "IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); - ctxp->state = LPFC_NVMET_STE_LS_ABORT; + ctxp->state = LPFC_NVME_STE_LS_ABORT; } - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (phba->nvmet_support && phba->targetport) + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { /* Issue ABTS for this WQE based on iotag */ ctxp->wqeq = lpfc_sli_get_iocbq(phba); if (!ctxp->wqeq) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6068 Abort failed: No wqeqs: " "xri: x%x\n", xri); /* No failure to an ABTS request.
*/ @@ -3451,22 +3579,67 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, } spin_lock_irqsave(&phba->hbalock, flags); - abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; - abts_wqeq->iocb_cmpl = NULL; - abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; + abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { - atomic_inc(&tgtp->xmt_abort_unsol); + if (tgtp) + atomic_inc(&tgtp->xmt_abort_unsol); return 0; } out: - atomic_inc(&tgtp->xmt_abort_rsp_error); - abts_wqeq->context2 = NULL; - abts_wqeq->context3 = NULL; + if (tgtp) + atomic_inc(&tgtp->xmt_abort_rsp_error); + abts_wqeq->rsp_dmabuf = NULL; + abts_wqeq->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, abts_wqeq); - kfree(ctxp); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6056 Failed to Issue ABTS. Status x%x\n", rc); - return 0; + return 1; +} + +/** + * lpfc_nvmet_invalidate_host + * + * @phba: pointer to the driver instance bound to an adapter port. + * @ndlp: pointer to an lpfc_nodelist type + * + * This routine upcalls the nvmet transport to invalidate an NVME + * host to which this target instance had active connections. + */ +void +lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +{ + u32 ndlp_has_hh; + struct lpfc_nvmet_tgtport *tgtp; + + lpfc_printf_log(phba, KERN_INFO, + LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, + "6203 Invalidating hosthandle x%px\n", + ndlp); + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE); + + spin_lock_irq(&ndlp->lock); + ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH; + spin_unlock_irq(&ndlp->lock); + + /* Do not invalidate any nodes that do not have a hosthandle. + * The host_release callback will cause a node reference + * count imbalance and a crash. + */ + if (!ndlp_has_hh) { + lpfc_printf_log(phba, KERN_INFO, + LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, + "6204 Skip invalidate on node x%px DID x%x\n", + ndlp, ndlp->nlp_DID); + return; + } + +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + /* Need to get the nvmet_fc_target_port pointer here. */ + nvmet_fc_invalidate_host(phba->targetport, ndlp); +#endif }
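lpfc_nvmet_invalidate_host() above snapshots the hosthandle bit once under ndlp->lock, then decides on that stable value whether the nvmet_fc_invalidate_host() upcall is safe; nodes without a hosthandle are skipped so the later host_release callback cannot unbalance the node reference count. A minimal sketch of the snapshot-then-decide pattern, with simplified stand-in names:

#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_HAS_HH 0x1

struct my_node {
	spinlock_t lock;
	u32 xpt_flags;
};

static void my_invalidate(struct my_node *ndlp,
			  void (*upcall)(struct my_node *))
{
	u32 has_hh;

	/* Read the flag once under the node lock for a stable view. */
	spin_lock_irq(&ndlp->lock);
	has_hh = ndlp->xpt_flags & MY_HAS_HH;
	spin_unlock_irq(&ndlp->lock);

	/* No hosthandle: invalidating would unbalance the refcount. */
	if (!has_hh)
		return;

	upcall(ndlp);
}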