Diffstat (limited to 'drivers/infiniband/hw/ocrdma/ocrdma_hw.c')
 drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 299 ++++++++++++++++++++++------
 1 file changed, 240 insertions(+), 59 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 1664d648cbfc..3bbf2010a821 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -32,7 +32,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_addr.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
@@ -243,6 +242,23 @@ static int ocrdma_get_mbx_errno(u32 status)
return err_num;
}
+char *port_speed_string(struct ocrdma_dev *dev)
+{
+ char *str = "";
+ u16 speeds_supported;
+
+ speeds_supported = dev->phy.fixed_speeds_supported |
+ dev->phy.auto_speeds_supported;
+ if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
+ str = "40Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
+ str = "10Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
+ str = "1Gbps ";
+
+ return str;
+}
+
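A side note on the helper above: it collapses the union of the fixed and auto-negotiated speed masks into the single fastest supported speed, checking 40G before 10G before 1G, and each returned string carries a trailing space so it can be concatenated straight into a log line. A hypothetical caller (not part of these hunks) might use it like:

	/* Illustrative only -- not taken from this patch. A PHY that
	 * advertises both 40Gbps and 10Gbps is reported as "40Gbps ". */
	pr_info("%s: %sport is up\n", dev->ibdev.name, port_speed_string(dev));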
static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
int err_num = -EINVAL;
@@ -332,6 +348,11 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
return mqe;
}
+static void *ocrdma_alloc_mqe(void)
+{
+ return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
+}
+
static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
@@ -364,8 +385,8 @@ static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
}
}
-static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
- int queue_type)
+static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
+ struct ocrdma_queue_info *q, int queue_type)
{
u8 opcode = 0;
int status;
@@ -444,7 +465,7 @@ mbx_err:
return status;
}
-static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
int irq;
@@ -574,6 +595,7 @@ static int ocrdma_create_mq(struct ocrdma_dev *dev)
if (status)
goto alloc_err;
+ dev->eq_tbl[0].cq_cnt++;
status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
if (status)
goto mbx_cq_free;
@@ -639,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
{
struct ocrdma_qp *qp = NULL;
struct ocrdma_cq *cq = NULL;
- struct ib_event ib_evt;
+ struct ib_event ib_evt = { 0 };
int cq_event = 0;
int qp_event = 1;
int srq_event = 0;
@@ -664,6 +686,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
case OCRDMA_CQ_OVERRUN_ERROR:
ib_evt.element.cq = &cq->ibcq;
ib_evt.event = IB_EVENT_CQ_ERR;
+ cq_event = 1;
+ qp_event = 0;
break;
case OCRDMA_CQ_QPCAT_ERROR:
ib_evt.element.qp = &qp->ibqp;
@@ -725,6 +749,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.
srq_context);
} else if (dev_event) {
+ pr_err("%s: Fatal event received\n", dev->ibdev.name);
ib_dispatch_event(&ib_evt);
}
@@ -752,7 +777,6 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
}
}
-
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
@@ -799,8 +823,6 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
ocrdma_process_acqe(dev, cqe);
else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
ocrdma_process_mcqe(dev, cqe);
- else
- pr_err("%s() cqe->compl is not set.\n", __func__);
memset(cqe, 0, sizeof(struct ocrdma_mcqe));
ocrdma_mcq_inc_tail(dev);
}
@@ -858,16 +880,8 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
BUG();
cq = dev->cq_tbl[cq_idx];
- if (cq == NULL) {
- pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
+ if (cq == NULL)
return;
- }
- spin_lock_irqsave(&cq->cq_lock, flags);
- cq->armed = false;
- cq->solicited = false;
- spin_unlock_irqrestore(&cq->cq_lock, flags);
-
- ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
if (cq->ibcq.comp_handler) {
spin_lock_irqsave(&cq->comp_handler_lock, flags);
@@ -892,27 +906,35 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
struct ocrdma_dev *dev = eq->dev;
struct ocrdma_eqe eqe;
struct ocrdma_eqe *ptr;
- u16 eqe_popped = 0;
u16 cq_id;
- while (1) {
+ int budget = eq->cq_cnt;
+
+ do {
ptr = ocrdma_get_eqe(eq);
eqe = *ptr;
ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
break;
- eqe_popped += 1;
+
ptr->id_valid = 0;
+ /* ring eq doorbell as soon as its consumed. */
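+	/* (i.e. ack the consumed EQE right away; interrupt rearm is
+	 * deferred to the zero-count doorbell after the loop.) */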
+ ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
/* check whether its CQE or not. */
if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
ocrdma_cq_handler(dev, cq_id);
}
ocrdma_eq_inc_tail(eq);
- }
- ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
- /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
- if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
- ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+
+ /* There can be a stale EQE after the last bound CQ is
+ * destroyed. EQE valid and budget == 0 implies this.
+ */
+ if (budget)
+ budget--;
+
+ } while (budget);
+
+ ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
return IRQ_HANDLED;
}
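The rework above replaces an unbounded while (1) drain with a budget equal to the number of CQs bound to this EQ, acknowledges each EQE with a doorbell as soon as it is consumed, and finishes with a zero-count doorbell to re-enable the interrupt (the stale-EQE comment covers the window after the last bound CQ is destroyed). A minimal user-space sketch of the same budgeted-consume shape, with a stub doorbell and a toy ring standing in for the hardware:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8
#define VALID 0x1

static unsigned int ring[RING_SIZE] = { VALID, VALID, VALID }; /* 3 pending */
static int head;

static void doorbell(int consumed, bool rearm)
{
	printf("db: consumed=%d rearm=%d\n", consumed, rearm);
}

/* Consume at most 'budget' valid EQEs, acking each as it is consumed,
 * then rearm the interrupt with a zero-count doorbell -- the same shape
 * as the reworked ocrdma_irq_handler() above. */
static void poll_eq(int budget)
{
	do {
		if (!(ring[head] & VALID))	/* valid bit clear: ring empty */
			break;
		ring[head] = 0;			/* mark the slot consumed */
		doorbell(1, false);		/* ack immediately, no rearm */
		head = (head + 1) % RING_SIZE;
		if (budget)			/* tolerate a stale EQE at budget 0 */
			budget--;
	} while (budget);
	doorbell(0, true);			/* num_popped == 0: rearm the IRQ */
}

int main(void)
{
	poll_eq(3);				/* budget = number of bound CQs */
	return 0;
}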
@@ -949,7 +971,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
int status = 0;
u16 cqe_status, ext_status;
- struct ocrdma_mqe *rsp;
+ struct ocrdma_mqe *rsp_mqe;
+ struct ocrdma_mbx_rsp *rsp = NULL;
mutex_lock(&dev->mqe_ctx.lock);
ocrdma_post_mqe(dev, mqe);
@@ -958,23 +981,61 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
goto mbx_err;
cqe_status = dev->mqe_ctx.cqe_status;
ext_status = dev->mqe_ctx.ext_status;
- rsp = ocrdma_get_mqe_rsp(dev);
- ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
+ rsp_mqe = ocrdma_get_mqe_rsp(dev);
+ ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ rsp = &mqe->u.rsp;
+
if (cqe_status || ext_status) {
- pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
- __func__,
- (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
- OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
+ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+ __func__, cqe_status, ext_status);
+ if (rsp) {
+ /* This is for embedded cmds. */
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ }
status = ocrdma_get_mbx_cqe_errno(cqe_status);
goto mbx_err;
}
- if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
+ /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
+ if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
mutex_unlock(&dev->mqe_ctx.lock);
return status;
}
+static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
+ void *payload_va)
+{
+ int status = 0;
+ struct ocrdma_mbx_rsp *rsp = payload_va;
+
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ BUG();
+
+ status = ocrdma_mbx_cmd(dev, mqe);
+ if (!status)
+ /* For non embedded, only CQE failures are handled in
+ * ocrdma_mbx_cmd. We need to check for RSP errors.
+ */
+ if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
+ status = ocrdma_get_mbx_errno(rsp->status);
+
+ if (status)
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ return status;
+}
+
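Two mailbox flavours are being separated here. Embedded commands carry their response payload inside the MQE itself, so ocrdma_mbx_cmd() can check mqe->u.rsp.status directly; non-embedded commands point the firmware at a caller-supplied DMA buffer via an SGE, so the generic path only sees the CQE-level status and ocrdma_nonemb_mbx_cmd() re-checks the response status word in that buffer afterwards. The EMB bit in the MQE header is the discriminator. A condensed view of the caller-side recipe for a non-embedded command, following the two users added further down (field names as used in this patch; 'mem' is a placeholder for a va/pa/size DMA descriptor):

	/* Sketch only: issue a non-embedded command whose response lands
	 * in the DMA buffer described by 'mem'. */
	memset(mqe, 0, sizeof(*mqe));
	mqe->hdr.pyld_len = mem->size;
	mqe->hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
				     OCRDMA_MQE_HDR_SGE_CNT_MASK; /* one SGE, EMB clear */
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (mem->pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(mem->pa);
	mqe->u.nonemb_req.sge[0].len = mem->size;
	status = ocrdma_nonemb_mbx_cmd(dev, mqe, mem->va);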
static void ocrdma_get_attr(struct ocrdma_dev *dev,
struct ocrdma_dev_attr *attr,
struct ocrdma_mbx_query_config *rsp)
@@ -985,6 +1046,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_qp =
(rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
+ attr->max_srq =
+ (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_send_sge = ((rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
@@ -1000,9 +1064,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
- attr->max_srq =
- (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
@@ -1015,6 +1076,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
+ attr->max_mw = rsp->max_mw;
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ~0ull;
attr->max_fmr = 0;
@@ -1036,7 +1098,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_inline_data =
attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
sizeof(struct ocrdma_sge));
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
attr->ird = 1;
attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
@@ -1110,6 +1172,96 @@ mbx_err:
return status;
}
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
+{
+ struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
+ struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
+ struct ocrdma_rdma_stats_resp *old_stats = NULL;
+ int status;
+
+ old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
+ if (old_stats == NULL)
+ return -ENOMEM;
+
+ memset(mqe, 0, sizeof(*mqe));
+ mqe->hdr.pyld_len = dev->stats_mem.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
+ mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
+
+ /* Cache the old stats */
+ memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
+ memset(req, 0, dev->stats_mem.size);
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
+ OCRDMA_CMD_GET_RDMA_STATS,
+ OCRDMA_SUBSYS_ROCE,
+ dev->stats_mem.size);
+ if (reset)
+ req->reset_stats = reset;
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
+ if (status)
+ /* Copy from cache, if mbox fails */
+ memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
+ else
+ ocrdma_le32_to_cpu(req, dev->stats_mem.size);
+
+ kfree(old_stats);
+ return status;
+}
+
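One subtlety above: dev->stats_mem.va serves as both the request and the response buffer, so the current counters are snapshotted into old_stats before the buffer is zeroed for the new request, and copied back if the mailbox call fails; stats readers then see the last good values rather than zeros on a transient error. The sequence, condensed:

	memcpy(old_stats, req, sizeof(*old_stats));	/* snapshot counters */
	memset(req, 0, dev->stats_mem.size);		/* buffer becomes the request */
	/* ... post OCRDMA_CMD_GET_RDMA_STATS ... */
	if (status)
		memcpy(req, old_stats, sizeof(*old_stats));	/* restore on failure */
	else
		ocrdma_le32_to_cpu(req, dev->stats_mem.size);	/* byte-swap fresh stats */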
+static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_dma_mem dma;
+ struct ocrdma_mqe *mqe;
+ struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
+ struct mgmt_hba_attribs *hba_attribs;
+
+ mqe = ocrdma_alloc_mqe();
+ if (!mqe)
+ return status;
+ memset(mqe, 0, sizeof(*mqe));
+
+ dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
+ dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
+ dma.size, &dma.pa, GFP_KERNEL);
+ if (!dma.va)
+ goto free_mqe;
+
+ mqe->hdr.pyld_len = dma.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
+ mqe->u.nonemb_req.sge[0].len = dma.size;
+
+ memset(dma.va, 0, dma.size);
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
+ OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
+ OCRDMA_SUBSYS_COMMON,
+ dma.size);
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
+ if (!status) {
+ ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
+ hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
+
+ dev->hba_port_num = hba_attribs->phy_port;
+ strncpy(dev->model_number,
+ hba_attribs->controller_model_number, 31);
+ }
+ dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
+free_mqe:
+ kfree(mqe);
+ return status;
+}
+
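ocrdma_mbx_get_ctrl_attribs() follows the usual kernel unwind idiom: allocate resources in order, jump to the label that frees everything allocated so far on failure (free_mqe above), and free transient resources on the success path as well. A minimal standalone sketch of the idiom, with hypothetical resources in place of the MQE and DMA buffer:

#include <stdlib.h>

int setup(void)
{
	int status = -1;	/* pessimistic default, as in the driver */
	void *a, *b;

	a = malloc(64);
	if (!a)
		return status;
	b = malloc(64);
	if (!b)
		goto free_a;	/* unwind only what already exists */

	/* ... use a and b; record success ... */
	status = 0;

	free(b);		/* transient resource, freed on both paths */
free_a:
	free(a);
	return status;
}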
static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
@@ -1157,6 +1309,35 @@ mbx_err:
return status;
}
+static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mqe *cmd;
+ struct ocrdma_get_phy_info_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
+ if (!cmd)
+ return status;
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
+ sizeof(*cmd));
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
+ dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
+ dev->phy.auto_speeds_supported =
+ le16_to_cpu(rsp->auto_speeds_supported);
+ dev->phy.fixed_speeds_supported =
+ le16_to_cpu(rsp->fixed_speeds_supported);
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
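The PHY query above is what feeds port_speed_string() at the top of this patch: the 16-bit speed bitmaps arrive little-endian from the firmware and are byte-swapped with le16_to_cpu() before being cached in dev->phy. For reference, a portable equivalent of that conversion (a sketch of the concept, not the kernel implementation, which compiles to a no-op on little-endian hosts):

#include <stdint.h>

/* Read a little-endian u16 from a byte buffer, independent of host
 * byte order -- the effect le16_to_cpu() achieves for these fields. */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}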
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
int status = -ENOMEM;
@@ -1226,7 +1407,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
- int i ;
+ int i;
int status = 0;
int max_ah;
struct ocrdma_create_ah_tbl *cmd;
@@ -1357,12 +1538,10 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
int i;
mutex_lock(&dev->dev_lock);
- for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->eq_tbl[i].q.id != eq_id)
- continue;
- dev->eq_tbl[i].cq_cnt -= 1;
- break;
- }
+ i = ocrdma_get_eq_table_index(dev, eq_id);
+ if (i == -EINVAL)
+ BUG();
+ dev->eq_tbl[i].cq_cnt -= 1;
mutex_unlock(&dev->dev_lock);
}
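ocrdma_get_eq_table_index() is not defined in these hunks; judging from the open-coded loop it replaces above, it presumably scans eq_tbl for the matching queue id, roughly:

	/* Hypothetical reconstruction based on the removed loop; the real
	 * helper is introduced elsewhere in this patch series. */
	static int ocrdma_get_eq_table_index(struct ocrdma_dev *dev, int eq_id)
	{
		int i;

		for (i = 0; i < dev->eq_cnt; i++)
			if (dev->eq_tbl[i].q.id == eq_id)
				return i;
		return -EINVAL;
	}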
@@ -1380,7 +1559,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
__func__, dev->id, dev->attr.max_cqe, entries);
return -EINVAL;
}
- if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
+ if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
return -EINVAL;
if (dpp_cq) {
@@ -1417,6 +1596,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cq->eqn = ocrdma_bind_eq(dev);
cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
cqe_count = cq->len / cqe_size;
+ cq->cqe_cnt = cqe_count;
if (cqe_count > 1024) {
/* Set cnt to 3 to indicate more than 1024 cq entries */
cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
@@ -1439,7 +1619,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
}
/* shared eq between all the consumer cqs. */
cmd->cmd.eqn = cq->eqn;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
if (dpp_cq)
cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
OCRDMA_CREATE_CQ_TYPE_SHIFT;
@@ -1484,12 +1664,9 @@ int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
OCRDMA_DESTROY_CQ_QID_MASK;
- ocrdma_unbind_eq(dev, cq->eqn);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
+ ocrdma_unbind_eq(dev, cq->eqn);
dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
-mbx_err:
kfree(cmd);
return status;
}
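The reordering above is a teardown-robustness fix: the EQ reference is now dropped after the firmware command rather than before it, and the CQ memory is freed unconditionally, where the old code leaked cq->va whenever the mailbox command failed. The resulting shape:

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	ocrdma_unbind_eq(dev, cq->eqn);		/* after the command now */
	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
						/* freed on all paths */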
@@ -2029,8 +2206,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
qp->rq_cq = cq;
- if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
- (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
+ if (pd->dpp_enabled && pd->num_dpp_qp) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
}
@@ -2099,7 +2275,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
status = ocrdma_query_gid(&qp->dev->ibdev, 1,
- ah_attr->grh.sgid_index, &sgid);
+ ah_attr->grh.sgid_index, &sgid);
if (status)
return status;
@@ -2127,8 +2303,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps)
+ struct ib_qp_attr *attrs, int attr_mask)
{
int status = 0;
@@ -2233,8 +2408,7 @@ pmtu_err:
}
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps)
+ struct ib_qp_attr *attrs, int attr_mask)
{
int status = -ENOMEM;
struct ocrdma_modify_qp *cmd;
@@ -2257,7 +2431,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
OCRDMA_QP_PARAMS_STATE_MASK;
}
- status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
+ status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
if (status)
goto mbx_err;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
@@ -2488,7 +2662,7 @@ static int ocrdma_create_eqs(struct ocrdma_dev *dev)
for (i = 0; i < num_eq; i++) {
status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
- OCRDMA_EQ_LEN);
+ OCRDMA_EQ_LEN);
if (status) {
status = -EINVAL;
break;
@@ -2533,6 +2707,13 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
status = ocrdma_mbx_create_ah_tbl(dev);
if (status)
goto conf_err;
+ status = ocrdma_mbx_get_phy_info(dev);
+ if (status)
+ goto conf_err;
+ status = ocrdma_mbx_get_ctrl_attribs(dev);
+ if (status)
+ goto conf_err;
+
return 0;
conf_err: