Diffstat (limited to 'drivers/infiniband/hw/ocrdma')
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h         22
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_abi.h     32
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c      15
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c     458
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h      13
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c     7
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h    210
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  883
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.h    6
9 files changed, 1111 insertions(+), 535 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index d540180a8e42..adc11d14f878 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -56,10 +56,12 @@ struct ocrdma_dev_attr {
u16 max_qp;
u16 max_wqe;
u16 max_rqe;
+ u16 max_srq;
u32 max_inline_data;
int max_send_sge;
int max_recv_sge;
int max_srq_sge;
+ int max_rdma_sge;
int max_mr;
u64 max_mr_size;
u32 max_num_mr_pbl;
@@ -130,8 +132,7 @@ struct ocrdma_dev {
struct ocrdma_cq **cq_tbl;
struct ocrdma_qp **qp_tbl;
- struct ocrdma_eq meq;
- struct ocrdma_eq *qp_eq_tbl;
+ struct ocrdma_eq *eq_tbl;
int eq_cnt;
u16 base_eqid;
u16 max_eq;
@@ -168,11 +169,12 @@ struct ocrdma_dev {
struct list_head entry;
struct rcu_head rcu;
int id;
+ u64 stag_arr[OCRDMA_MAX_STAG];
+ u16 pvid;
};
struct ocrdma_cq {
struct ib_cq ibcq;
- struct ocrdma_dev *dev;
struct ocrdma_cqe *va;
u32 phase;
u32 getp; /* pointer to pending wrs to
@@ -214,7 +216,6 @@ struct ocrdma_pd {
struct ocrdma_ah {
struct ib_ah ibah;
- struct ocrdma_dev *dev;
struct ocrdma_av *av;
u16 sgid_index;
u32 id;
@@ -234,7 +235,6 @@ struct ocrdma_qp_hwq_info {
struct ocrdma_srq {
struct ib_srq ibsrq;
- struct ocrdma_dev *dev;
u8 __iomem *db;
struct ocrdma_qp_hwq_info rq;
u64 *rqe_wr_id_tbl;
@@ -290,10 +290,11 @@ struct ocrdma_qp {
u32 qkey;
bool dpp_enabled;
u8 *ird_q_va;
+ bool signaled;
+ u16 db_cache;
};
struct ocrdma_hw_mr {
- struct ocrdma_dev *dev;
u32 lkey;
u8 fr_mr;
u8 remote_atomic;
@@ -317,15 +318,16 @@ struct ocrdma_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
struct ocrdma_hw_mr hwmr;
- struct ocrdma_pd *pd;
};
struct ocrdma_ucontext {
struct ib_ucontext ibucontext;
- struct ocrdma_dev *dev;
struct list_head mm_head;
struct mutex mm_list_lock; /* protects list entries of mm type */
+ struct ocrdma_pd *cntxt_pd;
+ int pd_in_use;
+
struct {
u32 *va;
dma_addr_t pa;
@@ -386,14 +388,14 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
- qp->id < 64) ? 24 : 16);
+ qp->id < 128) ? 24 : 16);
}
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
int cqe_valid;
cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
- return ((cqe_valid == cq->phase) ? 1 : 0);
+ return (cqe_valid == cq->phase);
}
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 517ab20b727c..fbac8eb44036 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -28,6 +28,9 @@
#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__
+#define OCRDMA_ABI_VERSION 1
+/* user kernel communication data structures. */
+
struct ocrdma_alloc_ucontext_resp {
u32 dev_id;
u32 wqe_size;
@@ -35,16 +38,16 @@ struct ocrdma_alloc_ucontext_resp {
u32 dpp_wqe_size;
u64 ah_tbl_page;
u32 ah_tbl_len;
- u32 rsvd;
- u8 fw_ver[32];
u32 rqe_size;
+ u8 fw_ver[32];
+ /* for future use/new features in progress */
u64 rsvd1;
-} __packed;
+ u64 rsvd2;
+};
-/* user kernel communication data structures. */
struct ocrdma_alloc_pd_ureq {
u64 rsvd1;
-} __packed;
+};
struct ocrdma_alloc_pd_uresp {
u32 id;
@@ -52,12 +55,12 @@ struct ocrdma_alloc_pd_uresp {
u32 dpp_page_addr_hi;
u32 dpp_page_addr_lo;
u64 rsvd1;
-} __packed;
+};
struct ocrdma_create_cq_ureq {
u32 dpp_cq;
- u32 rsvd;
-} __packed;
+ u32 rsvd; /* pad */
+};
#define MAX_CQ_PAGES 8
struct ocrdma_create_cq_uresp {
@@ -69,9 +72,10 @@ struct ocrdma_create_cq_uresp {
u64 db_page_addr;
u32 db_page_size;
u32 phase_change;
+ /* for future use/new features in progress */
u64 rsvd1;
u64 rsvd2;
-} __packed;
+};
#define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8
@@ -80,14 +84,14 @@ struct ocrdma_create_qp_ureq {
u8 enable_dpp_cq;
u8 rsvd;
u16 dpp_cq_id;
- u32 rsvd1;
+ u32 rsvd1; /* pad */
};
struct ocrdma_create_qp_uresp {
u16 qp_id;
u16 sq_dbid;
u16 rq_dbid;
- u16 resv0;
+ u16 resv0; /* pad */
u32 sq_page_size;
u32 rq_page_size;
u32 num_sq_pages;
@@ -98,19 +102,19 @@ struct ocrdma_create_qp_uresp {
u32 db_page_size;
u32 dpp_credit;
u32 dpp_offset;
- u32 rsvd1;
u32 num_wqe_allocated;
u32 num_rqe_allocated;
u32 db_sq_offset;
u32 db_rq_offset;
u32 db_shift;
+ u64 rsvd1;
u64 rsvd2;
u64 rsvd3;
} __packed;
struct ocrdma_create_srq_uresp {
u16 rq_dbid;
- u16 resv0;
+ u16 resv0; /* pad */
u32 resv1;
u32 rq_page_size;
@@ -126,6 +130,6 @@ struct ocrdma_create_srq_uresp {
u64 rsvd2;
u64 rsvd3;
-} __packed;
+};
#endif /* __OCRDMA_ABI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed7907..ee499d942257 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,19 +29,17 @@
#include <net/netevent.h>
#include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
-static inline int set_av_attr(struct ocrdma_ah *ah,
+static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, int pdid)
{
int status = 0;
u16 vlan_tag; bool vlan_enabled = false;
- struct ocrdma_dev *dev = ah->dev;
struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh;
int eth_sz;
@@ -52,6 +50,8 @@ static inline int set_av_attr(struct ocrdma_ah *ah,
ah->sgid_index = attr->grh.sgid_index;
vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
+ if (!vlan_tag || (vlan_tag > 0xFFF))
+ vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
@@ -93,7 +93,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
int status;
struct ocrdma_ah *ah;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
@@ -101,12 +101,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
ah = kzalloc(sizeof *ah, GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
- ah->dev = pd->dev;
status = ocrdma_alloc_av(dev, ah);
if (status)
goto av_err;
- status = set_av_attr(ah, attr, pd->id);
+ status = set_av_attr(dev, ah, attr, pd->id);
if (status)
goto av_conf_err;
@@ -127,7 +126,9 @@ av_err:
int ocrdma_destroy_ah(struct ib_ah *ibah)
{
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
- ocrdma_free_av(ah->dev, ah);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
+
+ ocrdma_free_av(dev, ah);
kfree(ah);
return 0;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0965278dd2ed..4ed8235d2d36 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -94,7 +94,7 @@ enum cqe_status {
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
- return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
+ return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}
static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
@@ -105,8 +105,7 @@ static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
- ((u8 *) dev->mq.cq.va +
- (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
+ (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
return NULL;
@@ -120,9 +119,7 @@ static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
- return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +
- (dev->mq.sq.head *
- sizeof(struct ocrdma_mqe)));
+ return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}
static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
@@ -132,8 +129,7 @@ static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
- return (void *)((u8 *) dev->mq.sq.va +
- (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
+ return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}
enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
@@ -181,7 +177,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
static int ocrdma_get_mbx_errno(u32 status)
{
- int err_num = -EFAULT;
+ int err_num;
u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
OCRDMA_MBX_RSP_STATUS_SHIFT;
u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
@@ -260,10 +256,11 @@ static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
break;
case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
- err_num = -EAGAIN;
+ err_num = -EINVAL;
break;
case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
- err_num = -EIO;
+ default:
+ err_num = -EINVAL;
break;
}
return err_num;
@@ -367,22 +364,6 @@ static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
}
}
-static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
- struct ocrdma_eq *eq)
-{
- /* assign vector and update vector id for next EQ */
- eq->vector = dev->nic_info.msix.start_vector;
- dev->nic_info.msix.start_vector += 1;
-}
-
-static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
-{
- /* this assumes that EQs are freed in exactly reverse order
- * as its allocation.
- */
- dev->nic_info.msix.start_vector -= 1;
-}
-
static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
int queue_type)
{
@@ -423,11 +404,8 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
memset(cmd, 0, sizeof(*cmd));
ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
sizeof(*cmd));
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- cmd->req.rsvd_version = 0;
- else
- cmd->req.rsvd_version = 2;
+ cmd->req.rsvd_version = 2;
cmd->num_pages = 4;
cmd->valid = OCRDMA_CREATE_EQ_VALID;
cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
@@ -438,12 +416,7 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
NULL);
if (!status) {
eq->q.id = rsp->vector_eqid & 0xffff;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- ocrdma_assign_eq_vect_gen2(dev, eq);
- else {
- eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
- dev->nic_info.msix.start_vector += 1;
- }
+ eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
eq->q.created = true;
}
return status;
@@ -486,8 +459,6 @@ static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
if (eq->q.created) {
ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- ocrdma_free_eq_vect_gen2(dev);
ocrdma_free_q(dev, &eq->q);
}
}
@@ -506,13 +477,12 @@ static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
_ocrdma_destroy_eq(dev, eq);
}
-static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
+static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
{
int i;
- /* deallocate the data path eqs */
for (i = 0; i < dev->eq_cnt; i++)
- ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
+ ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
}
static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
@@ -527,16 +497,21 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
+ cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+ cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+ OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
+ cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
+
cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
- cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);
+ cmd->eqn = eq->id;
+ cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
- ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
+ ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
cq->dma, PAGE_SIZE_4K);
status = be_roce_mcc_cmd(dev->nic_info.netdev,
cmd, sizeof(*cmd), NULL, NULL);
if (!status) {
- cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+ cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
cq->created = true;
}
return status;
@@ -569,7 +544,10 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
cmd->cqid_pages = num_pages;
cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
- cmd->async_event_bitmap = Bit(20);
+
+ cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE);
+ cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE);
+
cmd->async_cqid_ringsize = cq->id;
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
@@ -596,7 +574,7 @@ static int ocrdma_create_mq(struct ocrdma_dev *dev)
if (status)
goto alloc_err;
- status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
+ status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
if (status)
goto mbx_cq_free;
@@ -653,7 +631,7 @@ static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
if (qp == NULL)
BUG();
- ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
+ ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
}
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
@@ -746,11 +724,35 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.event_handler(&ib_evt,
qp->srq->ibsrq.
srq_context);
- } else if (dev_event)
+ } else if (dev_event) {
ib_dispatch_event(&ib_evt);
+ }
}
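+/* GRP5 async events report adapter attribute changes; the only one handled
+ * here is the port VLAN ID (PVID) state change, which is cached in dev->pvid
+ * for use when building address vectors.
+ */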
+static void ocrdma_process_grp5_async(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_ae_pvid_mcqe *evt;
+ int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
+ OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+
+ switch (type) {
+ case OCRDMA_ASYNC_EVENT_PVID_STATE:
+ evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
+ if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
+ dev->pvid = ((evt->tag_enabled &
+ OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
+ break;
+ default:
+		/* not an event of interest; ignore */
+ break;
+ }
+}
+
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
@@ -758,8 +760,10 @@ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
- if (evt_code == OCRDMA_ASYNC_EVE_CODE)
+ if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
ocrdma_dispatch_ibevent(dev, cqe);
+ else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+	else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+		ocrdma_process_grp5_async(dev, cqe);
else
pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
dev->id, evt_code);
@@ -957,9 +961,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
rsp = ocrdma_get_mqe_rsp(dev);
ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
if (cqe_status || ext_status) {
- pr_err
- ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
- __func__,
+ pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
+ __func__,
(rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
status = ocrdma_get_mbx_cqe_errno(cqe_status);
@@ -991,9 +994,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
+ attr->max_rdma_sge = (rsp->max_write_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
+ attr->max_srq =
+ (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
@@ -1013,6 +1022,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
attr->max_cqe = rsp->max_cq_cqes_per_cq &
OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
+ attr->max_cq = (rsp->max_cq_cqes_per_cq &
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
@@ -1045,7 +1057,6 @@ static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
return -EINVAL;
dev->base_eqid = conf->base_eqid;
dev->max_eq = conf->max_eq;
- dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
return 0;
}
@@ -1118,6 +1129,34 @@ mbx_err:
return status;
}
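+/* query the physical port speed via the common
+ * QUERY_NTWK_LINK_CONFIG_V1 mailbox command.
+ */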
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
+{
+ int status = -ENOMEM;
+ struct ocrdma_get_link_speed_rsp *rsp;
+ struct ocrdma_mqe *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ sizeof(*cmd));
+ if (!cmd)
+ return status;
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ ((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
+ *lnk_speed = rsp->phys_port_speed;
+
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
int status = -ENOMEM;
@@ -1296,19 +1335,19 @@ static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
u16 eq_id;
mutex_lock(&dev->dev_lock);
- cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
- eq_id = dev->qp_eq_tbl[0].q.id;
+ cq_cnt = dev->eq_tbl[0].cq_cnt;
+ eq_id = dev->eq_tbl[0].q.id;
/* find the EQ which has the least number of
* CQs associated with it.
*/
for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
- cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
- eq_id = dev->qp_eq_tbl[i].q.id;
+ if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
+ cq_cnt = dev->eq_tbl[i].cq_cnt;
+ eq_id = dev->eq_tbl[i].q.id;
selected_eq = i;
}
}
- dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
+ dev->eq_tbl[selected_eq].cq_cnt += 1;
mutex_unlock(&dev->dev_lock);
return eq_id;
}
@@ -1319,16 +1358,16 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
mutex_lock(&dev->dev_lock);
for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->qp_eq_tbl[i].q.id != eq_id)
+ if (dev->eq_tbl[i].q.id != eq_id)
continue;
- dev->qp_eq_tbl[i].cq_cnt -= 1;
+ dev->eq_tbl[i].cq_cnt -= 1;
break;
}
mutex_unlock(&dev->dev_lock);
}
int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
- int entries, int dpp_cq)
+ int entries, int dpp_cq, u16 pd_id)
{
int status = -ENOMEM; int max_hw_cqe;
struct pci_dev *pdev = dev->nic_info.pdev;
@@ -1336,8 +1375,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
struct ocrdma_create_cq_rsp *rsp;
u32 hw_pages, cqe_size, page_size, cqe_count;
- if (dpp_cq)
- return -EINVAL;
if (entries > dev->attr.max_cqe) {
pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
__func__, dev->id, dev->attr.max_cqe, entries);
@@ -1377,15 +1414,13 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cmd->cmd.pgsz_pgcnt |= hw_pages;
cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
- if (dev->eq_cnt < 0)
- goto eq_err;
cq->eqn = ocrdma_bind_eq(dev);
- cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+ cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
cqe_count = cq->len / cqe_size;
- if (cqe_count > 1024)
+ if (cqe_count > 1024) {
/* Set cnt to 3 to indicate more than 1024 cq entries */
cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
- else {
+ } else {
u8 count = 0;
switch (cqe_count) {
case 256:
@@ -1416,6 +1451,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cq->phase_change = true;
}
+ cmd->cmd.pd_id = pd_id; /* valid only for v3 */
ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -1427,7 +1463,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return 0;
mbx_err:
ocrdma_unbind_eq(dev, cq->eqn);
-eq_err:
dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
kfree(cmd);
@@ -1524,6 +1559,7 @@ static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
return -ENOMEM;
cmd->num_pbl_pdid =
pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
+ cmd->fr_mr = hwmr->fr_mr;
cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
@@ -1678,8 +1714,16 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
}
-int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
- enum ib_qp_state *old_ib_state)
+static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
+{
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+}
+
+int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
+ enum ib_qp_state *old_ib_state)
{
unsigned long flags;
int status = 0;
@@ -1696,96 +1740,15 @@ int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
return 1;
}
- switch (qp->state) {
- case OCRDMA_QPS_RST:
- switch (new_state) {
- case OCRDMA_QPS_RST:
- case OCRDMA_QPS_INIT:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_INIT:
- /* qps: INIT->XXX */
- switch (new_state) {
- case OCRDMA_QPS_INIT:
- case OCRDMA_QPS_RTR:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_RTR:
- /* qps: RTS->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_RTS:
- /* qps: RTS->XXX */
- switch (new_state) {
- case OCRDMA_QPS_SQD:
- case OCRDMA_QPS_SQE:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_SQD:
- /* qps: SQD->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- case OCRDMA_QPS_SQE:
- case OCRDMA_QPS_ERR:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_SQE:
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- case OCRDMA_QPS_ERR:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_ERR:
- /* qps: ERR->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RST:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- default:
- status = -EINVAL;
- break;
- };
- if (!status)
- qp->state = new_state;
+
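+	/* transition validation is left to the firmware; the driver only
+	 * resets the queue pointers on a move to INIT and flushes pending
+	 * work on a move to ERR.
+	 */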
+ if (new_state == OCRDMA_QPS_INIT) {
+ ocrdma_init_hwq_ptr(qp);
+ ocrdma_del_flush_qp(qp);
+ } else if (new_state == OCRDMA_QPS_ERR) {
+ ocrdma_flush_qp(qp);
+ }
+
+ qp->state = new_state;
spin_unlock_irqrestore(&qp->q_lock, flags);
return status;
@@ -1819,10 +1782,9 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
u32 max_wqe_allocated;
u32 max_sges = attrs->cap.max_send_sge;
- max_wqe_allocated = attrs->cap.max_send_wr;
- /* need to allocate one extra to for GEN1 family */
- if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
- max_wqe_allocated += 1;
+ /* QP1 may exceed 127 */
+ max_wqe_allocated = min_t(int, attrs->cap.max_send_wr + 1,
+ dev->attr.max_wqe);
status = ocrdma_build_q_conf(&max_wqe_allocated,
dev->attr.wqe_size, &hw_pages, &hw_page_size);
@@ -1934,6 +1896,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
dma_addr_t pa = 0;
int ird_page_size = dev->attr.ird_page_size;
int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
+ struct ocrdma_hdr_wqe *rqe;
+ int i = 0;
if (dev->attr.ird == 0)
return 0;
@@ -1945,6 +1909,15 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
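+	/* pre-format every IRD queue entry as an 8-DWORD, LKEY-type
+	 * WQE header before handing the queue to the device.
+	 */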
+ for (; i < ird_q_len / dev->attr.rqe_size; i++) {
+ rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
+ (i * dev->attr.rqe_size));
+ rqe->cw = 0;
+ rqe->cw |= 2;
+ rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
+ }
return 0;
}
@@ -2057,9 +2030,10 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
qp->rq_cq = cq;
if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
- (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
+ (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
+ }
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -2108,38 +2082,48 @@ int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
struct in6_addr in6;
memcpy(&in6, dgid, sizeof in6);
- if (rdma_is_multicast_addr(&in6))
+ if (rdma_is_multicast_addr(&in6)) {
rdma_get_mcast_mac(&in6, mac_addr);
- else if (rdma_link_local_addr(&in6))
+ } else if (rdma_link_local_addr(&in6)) {
rdma_get_ll_mac(&in6, mac_addr);
- else {
+ } else {
pr_err("%s() fail to resolve mac_addr.\n", __func__);
return -EINVAL;
}
return 0;
}
-static void ocrdma_set_av_params(struct ocrdma_qp *qp,
+static int ocrdma_set_av_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
struct ib_qp_attr *attrs)
{
+ int status;
struct ib_ah_attr *ah_attr = &attrs->ah_attr;
- union ib_gid sgid;
+ union ib_gid sgid, zgid;
u32 vlan_id;
u8 mac_addr[6];
+
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
- return;
+ return -EINVAL;
cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
(ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
+ cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
cmd->params.hop_lmt_rq_psn |=
(ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
- ocrdma_query_gid(&qp->dev->ibdev, 1,
+ status = ocrdma_query_gid(&qp->dev->ibdev, 1,
ah_attr->grh.sgid_index, &sgid);
+ if (status)
+ return status;
+
+ memset(&zgid, 0, sizeof(zgid));
+ if (!memcmp(&sgid, &zgid, sizeof(zgid)))
+ return -EINVAL;
+
qp->sgid_idx = ah_attr->grh.sgid_index;
memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
@@ -2155,6 +2139,7 @@ static void ocrdma_set_av_params(struct ocrdma_qp *qp,
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
}
+ return 0;
}
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
@@ -2163,8 +2148,6 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
enum ib_qp_state old_qps)
{
int status = 0;
- struct net_device *netdev = qp->dev->nic_info.netdev;
- int eth_mtu = iboe_get_mtu(netdev->mtu);
if (attr_mask & IB_QP_PKEY_INDEX) {
cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2176,9 +2159,11 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->params.qkey = attrs->qkey;
cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
}
- if (attr_mask & IB_QP_AV)
- ocrdma_set_av_params(qp, cmd, attrs);
- else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
+ if (attr_mask & IB_QP_AV) {
+ status = ocrdma_set_av_params(qp, cmd, attrs);
+ if (status)
+ return status;
+ } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
/* set the default mac address for UD, GSI QPs */
cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
(qp->dev->nic_info.mac_addr[1] << 8) |
@@ -2199,8 +2184,8 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
}
if (attr_mask & IB_QP_PATH_MTU) {
- if (ib_mtu_enum_to_int(eth_mtu) <
- ib_mtu_enum_to_int(attrs->path_mtu)) {
+ if (attrs->path_mtu < IB_MTU_256 ||
+ attrs->path_mtu > IB_MTU_4096) {
status = -EINVAL;
goto pmtu_err;
}
@@ -2283,10 +2268,12 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
OCRDMA_QP_PARAMS_STATE_SHIFT) &
OCRDMA_QP_PARAMS_STATE_MASK;
cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
- } else
+ } else {
cmd->params.max_sge_recv_flags |=
(qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
OCRDMA_QP_PARAMS_STATE_MASK;
+ }
+
status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
if (status)
goto mbx_err;
@@ -2324,7 +2311,7 @@ mbx_err:
return status;
}
-int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
struct ib_srq_init_attr *srq_attr,
struct ocrdma_pd *pd)
{
@@ -2334,7 +2321,6 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
struct ocrdma_create_srq_rsp *rsp;
struct ocrdma_create_srq *cmd;
dma_addr_t pa;
- struct ocrdma_dev *dev = srq->dev;
struct pci_dev *pdev = dev->nic_info.pdev;
u32 max_rqe_allocated;
@@ -2404,13 +2390,16 @@ int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
int status = -ENOMEM;
struct ocrdma_modify_srq *cmd;
- cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+ struct ocrdma_pd *pd = srq->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
if (!cmd)
return status;
cmd->id = srq->id;
cmd->limit_max_rqe |= srq_attr->srq_limit <<
OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
kfree(cmd);
return status;
}
@@ -2419,11 +2408,13 @@ int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
int status = -ENOMEM;
struct ocrdma_query_srq *cmd;
- cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+ struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
if (!cmd)
return status;
cmd->id = srq->rq.dbid;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status == 0) {
struct ocrdma_query_srq_rsp *rsp =
(struct ocrdma_query_srq_rsp *)cmd;
@@ -2448,7 +2439,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
if (!cmd)
return status;
cmd->id = srq->id;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (srq->rq.va)
dma_free_coherent(&pdev->dev, srq->rq.len,
srq->rq.va, srq->rq.pa);
@@ -2490,38 +2481,7 @@ int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
return 0;
}
-static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
-{
- int status;
- int irq;
- unsigned long flags = 0;
- int num_eq = 0;
-
- if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
- flags = IRQF_SHARED;
- else {
- num_eq = dev->nic_info.msix.num_vectors -
- dev->nic_info.msix.start_vector;
- /* minimum two vectors/eq are required for rdma to work.
- * one for control path and one for data path.
- */
- if (num_eq < 2)
- return -EBUSY;
- }
-
- status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
- if (status)
- return status;
- sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
- irq = ocrdma_get_irq(dev, &dev->meq);
- status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
- &dev->meq);
- if (status)
- _ocrdma_destroy_eq(dev, &dev->meq);
- return status;
-}
-
-static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
+static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
int num_eq, i, status = 0;
int irq;
@@ -2532,49 +2492,47 @@ static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
num_eq = 1;
flags = IRQF_SHARED;
- } else
+ } else {
num_eq = min_t(u32, num_eq, num_online_cpus());
- dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
- if (!dev->qp_eq_tbl)
+ }
+
+ if (!num_eq)
+ return -EINVAL;
+
+ dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
+ if (!dev->eq_tbl)
return -ENOMEM;
for (i = 0; i < num_eq; i++) {
- status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
+ status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
OCRDMA_EQ_LEN);
if (status) {
status = -EINVAL;
break;
}
- sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
+ sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
dev->id, i);
- irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
+ irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
status = request_irq(irq, ocrdma_irq_handler, flags,
- dev->qp_eq_tbl[i].irq_name,
- &dev->qp_eq_tbl[i]);
- if (status) {
- _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
- status = -EINVAL;
- break;
- }
+ dev->eq_tbl[i].irq_name,
+ &dev->eq_tbl[i]);
+ if (status)
+ goto done;
dev->eq_cnt += 1;
}
/* one eq is sufficient for data path to work */
- if (dev->eq_cnt >= 1)
- return 0;
- if (status)
- ocrdma_destroy_qp_eqs(dev);
+ return 0;
+done:
+ ocrdma_destroy_eqs(dev);
return status;
}
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
int status;
- /* set up control path eq */
- status = ocrdma_create_mq_eq(dev);
- if (status)
- return status;
- /* set up data path eq */
- status = ocrdma_create_qp_eqs(dev);
+
+ /* create the eqs */
+ status = ocrdma_create_eqs(dev);
if (status)
goto qpeq_err;
status = ocrdma_create_mq(dev);
@@ -2597,9 +2555,8 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
conf_err:
ocrdma_destroy_mq(dev);
mq_err:
- ocrdma_destroy_qp_eqs(dev);
+ ocrdma_destroy_eqs(dev);
qpeq_err:
- ocrdma_destroy_eq(dev, &dev->meq);
pr_err("%s() status=%d\n", __func__, status);
return status;
}
@@ -2608,10 +2565,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
ocrdma_mbx_delete_ah_tbl(dev);
- /* cleanup the data path eqs */
- ocrdma_destroy_qp_eqs(dev);
+ /* cleanup the eqs */
+ ocrdma_destroy_eqs(dev);
/* cleanup the control path */
ocrdma_destroy_mq(dev);
- ocrdma_destroy_eq(dev, &dev->meq);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index be5db77404db..f2a89d4cc7c4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -78,6 +78,11 @@ static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
#endif
}
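+/* each PD owns one doorbell page, laid out sequentially after unmapped_db */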
+static inline u64 ocrdma_get_db_addr(struct ocrdma_dev *dev, u32 pdid)
+{
+ return dev->nic_info.unmapped_db + (pdid * dev->nic_info.db_page_size);
+}
+
int ocrdma_init_hw(struct ocrdma_dev *);
void ocrdma_cleanup_hw(struct ocrdma_dev *);
@@ -86,6 +91,7 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped);
/* verbs specific mailbox commands */
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);
int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
@@ -100,7 +106,7 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
u32 pd_id, int acc);
int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
- int entries, int dpp_cq);
+ int entries, int dpp_cq, u16 pd_id);
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);
int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
@@ -112,8 +118,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
struct ocrdma_qp_params *param);
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
-
-int ocrdma_mbx_create_srq(struct ocrdma_srq *,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
struct ib_srq_init_attr *,
struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
@@ -123,7 +128,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
-int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state,
+int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
enum ib_qp_state *old_ib_state);
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index ded416f1adea..56e004940f18 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -39,6 +39,7 @@
#include "ocrdma_ah.h"
#include "be_roce.h"
#include "ocrdma_hw.h"
+#include "ocrdma_abi.h"
MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
@@ -265,6 +266,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION;
dev->ibdev.uverbs_cmd_mask =
OCRDMA_UVERBS(GET_CONTEXT) |
OCRDMA_UVERBS(QUERY_DEVICE) |
@@ -326,9 +328,14 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.req_notify_cq = ocrdma_arm_cq;
dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
+ dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr;
dev->ibdev.dereg_mr = ocrdma_dereg_mr;
dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
+ dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr;
+ dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
+ dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;
+
/* mandatory to support user space verbs consumer. */
dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 36b062da2aea..9f9570ec3c2e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -70,6 +70,7 @@ enum {
#define OCRDMA_SUBSYS_COMMON 1
enum {
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1 = 5,
OCRDMA_CMD_CREATE_CQ = 12,
OCRDMA_CMD_CREATE_EQ = 13,
OCRDMA_CMD_CREATE_MQ = 21,
@@ -91,15 +92,15 @@ enum {
#define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048
+#define OCRDMA_MAX_STAG 8192
enum {
OCRDMA_DB_RQ_OFFSET = 0xE0,
- OCRDMA_DB_GEN2_RQ1_OFFSET = 0x100,
- OCRDMA_DB_GEN2_RQ2_OFFSET = 0xC0,
+ OCRDMA_DB_GEN2_RQ_OFFSET = 0x100,
OCRDMA_DB_SQ_OFFSET = 0x60,
OCRDMA_DB_GEN2_SQ_OFFSET = 0x1C0,
OCRDMA_DB_SRQ_OFFSET = OCRDMA_DB_RQ_OFFSET,
- OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ1_OFFSET,
+ OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET,
OCRDMA_DB_CQ_OFFSET = 0x120,
OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET,
OCRDMA_DB_MQ_OFFSET = 0x140
@@ -143,8 +144,11 @@ enum {
# 2: 16K Bytes
# 3: 32K Bytes
# 4: 64K Bytes
+# 5: 128K Bytes
+# 6: 256K Bytes
+# 7: 512K Bytes
*/
-#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (5)
+#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (8)
#define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES)
#define MAX_OCRDMA_QP_PAGES (8)
@@ -177,7 +181,7 @@ struct ocrdma_mbx_hdr {
u32 timeout; /* in seconds */
u32 cmd_len;
u32 rsvd_version;
-} __packed;
+};
enum {
OCRDMA_MBX_RSP_OPCODE_SHIFT = 0,
@@ -197,7 +201,7 @@ struct ocrdma_mbx_rsp {
u32 status;
u32 rsp_len;
u32 add_rsp_len;
-} __packed;
+};
enum {
OCRDMA_MQE_EMBEDDED = 1,
@@ -208,7 +212,7 @@ struct ocrdma_mqe_sge {
u32 pa_lo;
u32 pa_hi;
u32 len;
-} __packed;
+};
enum {
OCRDMA_MQE_HDR_EMB_SHIFT = 0,
@@ -225,12 +229,12 @@ struct ocrdma_mqe_hdr {
u32 tag_lo;
u32 tag_hi;
u32 rsvd3;
-} __packed;
+};
struct ocrdma_mqe_emb_cmd {
struct ocrdma_mbx_hdr mch;
u8 pyld[220];
-} __packed;
+};
struct ocrdma_mqe {
struct ocrdma_mqe_hdr hdr;
@@ -242,7 +246,7 @@ struct ocrdma_mqe {
u8 cmd[236];
struct ocrdma_mbx_rsp rsp;
} u;
-} __packed;
+};
#define OCRDMA_EQ_LEN 4096
#define OCRDMA_MQ_CQ_LEN 256
@@ -259,12 +263,12 @@ struct ocrdma_mqe {
struct ocrdma_delete_q_req {
struct ocrdma_mbx_hdr req;
u32 id;
-} __packed;
+};
struct ocrdma_pa {
u32 lo;
u32 hi;
-} __packed;
+};
#define MAX_OCRDMA_EQ_PAGES (8)
struct ocrdma_create_eq_req {
@@ -275,7 +279,7 @@ struct ocrdma_create_eq_req {
u32 delay;
u32 rsvd;
struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_EQ_VALID = Bit(29),
@@ -310,7 +314,7 @@ struct ocrdma_mcqe {
u32 tag_lo;
u32 tag_hi;
u32 valid_ae_cmpl_cons;
-} __packed;
+};
enum {
OCRDMA_AE_MCQE_QPVALID = Bit(31),
@@ -332,7 +336,21 @@ struct ocrdma_ae_mcqe {
u32 cqvalid_cqid;
u32 evt_tag;
u32 valid_ae_event;
-} __packed;
+};
+
+enum {
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT = 0,
+ OCRDMA_AE_PVID_MCQE_ENABLED_MASK = 0xFF,
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT = 16,
+ OCRDMA_AE_PVID_MCQE_TAG_MASK = 0xFFFF << OCRDMA_AE_PVID_MCQE_TAG_SHIFT
+};
+
+struct ocrdma_ae_pvid_mcqe {
+ u32 tag_enabled;
+ u32 event_tag;
+ u32 rsvd1;
+ u32 rsvd2;
+};
enum {
OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16,
@@ -356,7 +374,7 @@ struct ocrdma_ae_mpa_mcqe {
u32 w1;
u32 w2;
u32 valid_ae_event;
-} __packed;
+};
enum {
OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT = 0,
@@ -382,9 +400,11 @@ struct ocrdma_ae_qp_mcqe {
u32 w1;
u32 w2;
u32 valid_ae_event;
-} __packed;
+};
-#define OCRDMA_ASYNC_EVE_CODE 0x14
+#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
+#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
+#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_CQ_ERROR = 0x00,
@@ -487,7 +507,8 @@ struct ocrdma_mbx_query_config {
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
- u64 max_mr_size;
+ u32 max_mr_size_lo;
+ u32 max_mr_size_hi;
u32 max_num_mr_pbl;
u32 max_mw;
u32 max_fmr;
@@ -502,14 +523,14 @@ struct ocrdma_mbx_query_config {
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
-} __packed;
+};
struct ocrdma_fw_ver_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u8 running_ver[32];
-} __packed;
+};
struct ocrdma_fw_conf_rsp {
struct ocrdma_mqe_hdr hdr;
@@ -535,14 +556,41 @@ struct ocrdma_fw_conf_rsp {
u32 base_eqid;
u32 max_eq;
-} __packed;
+};
enum {
OCRDMA_FN_MODE_RDMA = 0x4
};
+struct ocrdma_get_link_speed_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u8 pt_port_num;
+ u8 link_duplex;
+ u8 phys_port_speed;
+ u8 phys_port_fault;
+ u16 rsvd1;
+ u16 qos_lnk_speed;
+ u8 logical_lnk_status;
+ u8 rsvd2[3];
+};
+
+enum {
+ OCRDMA_PHYS_LINK_SPEED_ZERO = 0x0,
+ OCRDMA_PHYS_LINK_SPEED_10MBPS = 0x1,
+ OCRDMA_PHYS_LINK_SPEED_100MBPS = 0x2,
+ OCRDMA_PHYS_LINK_SPEED_1GBPS = 0x3,
+ OCRDMA_PHYS_LINK_SPEED_10GBPS = 0x4,
+ OCRDMA_PHYS_LINK_SPEED_20GBPS = 0x5,
+ OCRDMA_PHYS_LINK_SPEED_25GBPS = 0x6,
+ OCRDMA_PHYS_LINK_SPEED_40GBPS = 0x7,
+ OCRDMA_PHYS_LINK_SPEED_100GBPS = 0x8
+};
+
enum {
OCRDMA_CREATE_CQ_VER2 = 2,
+ OCRDMA_CREATE_CQ_VER3 = 3,
OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF,
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16,
@@ -576,7 +624,8 @@ struct ocrdma_create_cq_cmd {
u32 pgsz_pgcnt;
u32 ev_cnt_flags;
u32 eqn;
- u32 cqe_count;
+ u16 cqe_count;
+ u16 pd_id;
u32 rsvd6;
struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
};
@@ -584,7 +633,7 @@ struct ocrdma_create_cq_cmd {
struct ocrdma_create_cq {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_create_cq_cmd cmd;
-} __packed;
+};
enum {
OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
@@ -593,12 +642,12 @@ enum {
struct ocrdma_create_cq_cmd_rsp {
struct ocrdma_mbx_rsp rsp;
u32 cq_id;
-} __packed;
+};
struct ocrdma_create_cq_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_create_cq_cmd_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22,
@@ -617,12 +666,12 @@ struct ocrdma_create_mq_req {
u32 async_cqid_valid;
u32 rsvd;
struct ocrdma_pa pa[8];
-} __packed;
+};
struct ocrdma_create_mq_rsp {
struct ocrdma_mbx_rsp rsp;
u32 id;
-} __packed;
+};
enum {
OCRDMA_DESTROY_CQ_QID_SHIFT = 0,
@@ -637,12 +686,12 @@ struct ocrdma_destroy_cq {
struct ocrdma_mbx_hdr req;
u32 bypass_flush_qid;
-} __packed;
+};
struct ocrdma_destroy_cq_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_QPT_GSI = 1,
@@ -766,7 +815,7 @@ struct ocrdma_create_qp_req {
u32 dpp_credits_cqid;
u32 rpir_lkey;
struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT = 0,
@@ -820,18 +869,18 @@ struct ocrdma_create_qp_rsp {
u32 max_ord_ird;
u32 sq_rq_id;
u32 dpp_response;
-} __packed;
+};
struct ocrdma_destroy_qp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 qp_id;
-} __packed;
+};
struct ocrdma_destroy_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_MODIFY_QP_ID_SHIFT = 0,
@@ -975,7 +1024,7 @@ struct ocrdma_qp_params {
u32 dmac_b0_to_b3;
u32 vlan_dmac_b4_to_b5;
u32 qkey;
-} __packed;
+};
struct ocrdma_modify_qp {
@@ -986,7 +1035,7 @@ struct ocrdma_modify_qp {
u32 flags;
u32 rdma_flags;
u32 num_outstanding_atomic_rd;
-} __packed;
+};
enum {
OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT = 0,
@@ -1007,7 +1056,7 @@ struct ocrdma_modify_qp_rsp {
u32 max_wqe_rqe;
u32 max_ord_ird;
-} __packed;
+};
struct ocrdma_query_qp {
struct ocrdma_mqe_hdr hdr;
@@ -1016,13 +1065,13 @@ struct ocrdma_query_qp {
#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
u32 qp_id;
-} __packed;
+};
struct ocrdma_query_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
struct ocrdma_qp_params params;
-} __packed;
+};
enum {
OCRDMA_CREATE_SRQ_PD_ID_SHIFT = 0,
@@ -1051,7 +1100,7 @@ struct ocrdma_create_srq {
u32 max_sge_rqe;
u32 pages_rqe_sz;
struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT = 0,
@@ -1070,7 +1119,7 @@ struct ocrdma_create_srq_rsp {
u32 id;
u32 max_sge_rqe_allocated;
-} __packed;
+};
enum {
OCRDMA_MODIFY_SRQ_ID_SHIFT = 0,
@@ -1089,7 +1138,7 @@ struct ocrdma_modify_srq {
u32 id;
u32 limit_max_rqe;
-} __packed;
+};
enum {
OCRDMA_QUERY_SRQ_ID_SHIFT = 0,
@@ -1101,7 +1150,7 @@ struct ocrdma_query_srq {
struct ocrdma_mbx_rsp req;
u32 id;
-} __packed;
+};
enum {
OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT = 0,
@@ -1123,7 +1172,7 @@ struct ocrdma_query_srq_rsp {
u32 max_rqe_pdid;
u32 srq_lmt_max_sge;
-} __packed;
+};
enum {
OCRDMA_DESTROY_SRQ_ID_SHIFT = 0,
@@ -1135,7 +1184,7 @@ struct ocrdma_destroy_srq {
struct ocrdma_mbx_rsp req;
u32 id;
-} __packed;
+};
enum {
OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
@@ -1147,7 +1196,7 @@ struct ocrdma_alloc_pd {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 enable_dpp_rsvd;
-} __packed;
+};
enum {
OCRDMA_ALLOC_PD_RSP_DPP = Bit(16),
@@ -1159,18 +1208,18 @@ struct ocrdma_alloc_pd_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 dpp_page_pdid;
-} __packed;
+};
struct ocrdma_dealloc_pd {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 id;
-} __packed;
+};
struct ocrdma_dealloc_pd_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_ADDR_CHECK_ENABLE = 1,
@@ -1206,7 +1255,7 @@ struct ocrdma_alloc_lkey {
u32 pdid;
u32 pbl_sz_flags;
-} __packed;
+};
struct ocrdma_alloc_lkey_rsp {
struct ocrdma_mqe_hdr hdr;
@@ -1214,7 +1263,7 @@ struct ocrdma_alloc_lkey_rsp {
u32 lrkey;
u32 num_pbl_rsvd;
-} __packed;
+};
struct ocrdma_dealloc_lkey {
struct ocrdma_mqe_hdr hdr;
@@ -1222,12 +1271,12 @@ struct ocrdma_dealloc_lkey {
u32 lkey;
u32 rsvd_frmr;
-} __packed;
+};
struct ocrdma_dealloc_lkey_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
#define MAX_OCRDMA_NSMR_PBL (u32)22
#define MAX_OCRDMA_PBL_SIZE 65536
@@ -1273,7 +1322,7 @@ struct ocrdma_reg_nsmr {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr cmd;
- u32 lrkey_key_index;
+ u32 fr_mr;
u32 num_pbl_pdid;
u32 flags_hpage_pbe_sz;
u32 totlen_low;
@@ -1283,7 +1332,7 @@ struct ocrdma_reg_nsmr {
u32 va_loaddr;
u32 va_hiaddr;
struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_CONT_PBL_SHIFT = 0,
@@ -1305,12 +1354,12 @@ struct ocrdma_reg_nsmr_cont {
u32 last;
struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
struct ocrdma_pbe {
u32 pa_hi;
u32 pa_lo;
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT = 16,
@@ -1322,7 +1371,7 @@ struct ocrdma_reg_nsmr_rsp {
u32 lrkey;
u32 num_pbl;
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT = 0,
@@ -1342,7 +1391,7 @@ struct ocrdma_reg_nsmr_cont_rsp {
u32 lrkey_key_index;
u32 num_pbl;
-} __packed;
+};
enum {
OCRDMA_ALLOC_MW_PD_ID_SHIFT = 0,
@@ -1354,7 +1403,7 @@ struct ocrdma_alloc_mw {
struct ocrdma_mbx_hdr req;
u32 pdid;
-} __packed;
+};
enum {
OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT = 0,
@@ -1366,7 +1415,7 @@ struct ocrdma_alloc_mw_rsp {
struct ocrdma_mbx_rsp rsp;
u32 lrkey_index;
-} __packed;
+};
struct ocrdma_attach_mcast {
struct ocrdma_mqe_hdr hdr;
@@ -1375,12 +1424,12 @@ struct ocrdma_attach_mcast {
u8 mgid[16];
u32 mac_b0_to_b3;
u32 vlan_mac_b4_to_b5;
-} __packed;
+};
struct ocrdma_attach_mcast_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
struct ocrdma_detach_mcast {
struct ocrdma_mqe_hdr hdr;
@@ -1389,12 +1438,12 @@ struct ocrdma_detach_mcast {
u8 mgid[16];
u32 mac_b0_to_b3;
u32 vlan_mac_b4_to_b5;
-} __packed;
+};
struct ocrdma_detach_mcast_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_CREATE_AH_NUM_PAGES_SHIFT = 19,
@@ -1418,24 +1467,24 @@ struct ocrdma_create_ah_tbl {
u32 ah_conf;
struct ocrdma_pa tbl_addr[8];
-} __packed;
+};
struct ocrdma_create_ah_tbl_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 ahid;
-} __packed;
+};
struct ocrdma_delete_ah_tbl {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 ahid;
-} __packed;
+};
struct ocrdma_delete_ah_tbl_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_EQE_VALID_SHIFT = 0,
@@ -1448,7 +1497,7 @@ enum {
struct ocrdma_eqe {
u32 id_valid;
-} __packed;
+};
enum OCRDMA_CQE_STATUS {
OCRDMA_CQE_SUCCESS = 0,
@@ -1532,14 +1581,14 @@ struct ocrdma_cqe {
} cmn;
};
u32 flags_status_srcqpn; /* w3 */
-} __packed;
+};
struct ocrdma_sge {
u32 addr_hi;
u32 addr_lo;
u32 lrkey;
u32 len;
-} __packed;
+};
enum {
OCRDMA_FLAG_SIG = 0x1,
@@ -1563,6 +1612,7 @@ enum OCRDMA_WQE_OPCODE {
OCRDMA_SEND = 0x00,
OCRDMA_CMP_SWP = 0x14,
OCRDMA_BIND_MW = 0x10,
+ OCRDMA_FR_MR = 0x11,
OCRDMA_RESV1 = 0x0A,
OCRDMA_LKEY_INV = 0x15,
OCRDMA_FETCH_ADD = 0x13,
@@ -1600,14 +1650,26 @@ struct ocrdma_hdr_wqe {
u32 lkey;
};
u32 total_len;
-} __packed;
+};
struct ocrdma_ewqe_ud_hdr {
u32 rsvd_dest_qpn;
u32 qkey;
u32 rsvd_ahid;
u32 rsvd;
-} __packed;
+};
+
+/* extended WQE that follows the hdr_wqe in a fast-register (FR_MR) request */
+struct ocrdma_ewqe_fr {
+ u32 va_hi;
+ u32 va_lo;
+ u32 fbo_hi;
+ u32 fbo_lo;
+ u32 size_sge;
+ u32 num_sges;
+ u32 rsvd;
+ u32 rsvd2;
+};
struct ocrdma_eth_basic {
u8 dmac[6];
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index dcfbab177faa..6e982bb43c31 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -75,14 +75,15 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->vendor_part_id = dev->nic_info.pdev->device;
attr->hw_ver = 0;
attr->max_qp = dev->attr.max_qp;
- attr->max_ah = dev->attr.max_qp;
+ attr->max_ah = OCRDMA_MAX_AH;
attr->max_qp_wr = dev->attr.max_wqe;
attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_LOCAL_DMA_LKEY;
+ IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
attr->max_sge_rd = 0;
attr->max_cq = dev->attr.max_cq;
@@ -96,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->max_qp_rd_atom =
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
- attr->max_srq = (dev->attr.max_qp - 1);
+ attr->max_srq = dev->attr.max_srq;
attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
@@ -105,6 +106,45 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
return 0;
}
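+/* map the reported physical port speed to equivalent IB speed/width values */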
+static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
+ u8 *ib_speed, u8 *ib_width)
+{
+ int status;
+ u8 speed;
+
+ status = ocrdma_mbx_get_link_speed(dev, &speed);
+ if (status)
+ speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
+
+ switch (speed) {
+ case OCRDMA_PHYS_LINK_SPEED_1GBPS:
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_10GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_20GBPS:
+ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_40GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+	}
+}
+
int ocrdma_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
@@ -141,8 +181,8 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
- props->active_width = IB_WIDTH_1X;
- props->active_speed = 4;
+ get_link_speed_and_width(dev, &props->active_speed,
+ &props->active_width);
props->max_msg_sz = 0x80000000;
props->max_vl_num = 4;
return 0;
@@ -186,7 +226,7 @@ static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
mutex_lock(&uctx->mm_list_lock);
list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
continue;
list_del(&mm->entry);
@@ -204,7 +244,7 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
mutex_lock(&uctx->mm_list_lock);
list_for_each_entry(mm, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
continue;
found = true;
@@ -214,6 +254,108 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
return found;
}
+static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct ocrdma_pd *pd = NULL;
+ int status = 0;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata && uctx) {
+ pd->dpp_enabled =
+ dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY;
+ pd->num_dpp_qp =
+ pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ }
+
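+	/* retry without DPP resources if a DPP-enabled PD allocation fails */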
+retry:
+ status = ocrdma_mbx_alloc_pd(dev, pd);
+ if (status) {
+ if (pd->dpp_enabled) {
+ pd->dpp_enabled = false;
+ pd->num_dpp_qp = 0;
+ goto retry;
+ } else {
+ kfree(pd);
+ return ERR_PTR(status);
+ }
+ }
+
+ return pd;
+}
+
+static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
+ struct ocrdma_pd *pd)
+{
+	return (uctx->cntxt_pd == pd);
+}
+
+static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
+ struct ocrdma_pd *pd)
+{
+ int status = 0;
+
+ status = ocrdma_mbx_dealloc_pd(dev, pd);
+ kfree(pd);
+ return status;
+}
+
+static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ int status = 0;
+
+ uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
+ if (IS_ERR(uctx->cntxt_pd)) {
+ status = PTR_ERR(uctx->cntxt_pd);
+ uctx->cntxt_pd = NULL;
+ goto err;
+ }
+
+ uctx->cntxt_pd->uctx = uctx;
+ uctx->cntxt_pd->ibpd.device = &dev->ibdev;
+err:
+ return status;
+}
+
+static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ int status = 0;
+ struct ocrdma_pd *pd = uctx->cntxt_pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ BUG_ON(uctx->pd_in_use);
+ uctx->cntxt_pd = NULL;
+ status = _ocrdma_dealloc_pd(dev, pd);
+ return status;
+}
+
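+/* hand out the per-ucontext PD exclusively; pd_in_use prevents it from
+ * being borrowed twice at once.
+ */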
+static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ struct ocrdma_pd *pd = NULL;
+
+ mutex_lock(&uctx->mm_list_lock);
+ if (!uctx->pd_in_use) {
+ uctx->pd_in_use = true;
+ pd = uctx->cntxt_pd;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+
+ return pd;
+}
+
+static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ mutex_lock(&uctx->mm_list_lock);
+ uctx->pd_in_use = false;
+ mutex_unlock(&uctx->mm_list_lock);
+}
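The two helpers above hand out the per-ucontext PD to at most one owner at a time: mm_list_lock guards pd_in_use, so the first ib_alloc_pd() caller gets the cached cntxt_pd and later callers fall back to a private PD. The intended pairing, as used by ocrdma_alloc_pd()/ocrdma_dealloc_pd() further below:

    pd = ocrdma_get_ucontext_pd(uctx);               /* NULL if already claimed */
    if (!pd)
            pd = _ocrdma_alloc_pd(dev, uctx, udata); /* private PD instead */
    ...
    if (is_ucontext_pd(uctx, pd))
            ocrdma_release_ucontext_pd(uctx);        /* just drop the claim */
    else
            _ocrdma_dealloc_pd(dev, pd);             /* actually free it */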
+
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
@@ -229,7 +371,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
- ctx->dev = dev;
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
@@ -242,18 +383,23 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
+ memset(&resp, 0, sizeof(resp));
resp.ah_tbl_len = ctx->ah_tbl.len;
resp.ah_tbl_page = ctx->ah_tbl.pa;
status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
if (status)
goto map_err;
+
+ status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
+ if (status)
+ goto pd_err;
+
resp.dev_id = dev->id;
resp.max_inline_data = dev->attr.max_inline_data;
resp.wqe_size = dev->attr.wqe_size;
resp.rqe_size = dev->attr.rqe_size;
resp.dpp_wqe_size = dev->attr.wqe_size;
- resp.rsvd = 0;
memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -262,6 +408,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
return &ctx->ibucontext;
cpy_err:
+pd_err:
ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
@@ -272,9 +419,13 @@ map_err:
int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
+ int status = 0;
struct ocrdma_mm *mm, *tmp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
- struct pci_dev *pdev = uctx->dev->nic_info.pdev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+
+ status = ocrdma_dealloc_ucontext_pd(uctx);
ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -285,13 +436,13 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
kfree(mm);
}
kfree(uctx);
- return 0;
+ return status;
}
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
- struct ocrdma_dev *dev = ucontext->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
unsigned long len = (vma->vm_end - vma->vm_start);
@@ -307,7 +458,10 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
dev->nic_info.db_total_size)) &&
(len <= dev->nic_info.db_page_size)) {
- /* doorbell mapping */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
len, vma->vm_page_prot);
} else if (dev->nic_info.dpp_unmapped_len &&
@@ -315,19 +469,20 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
dev->nic_info.dpp_unmapped_len)) &&
(len <= dev->nic_info.dpp_unmapped_len)) {
- /* dpp area mapping */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
len, vma->vm_page_prot);
} else {
- /* queue memory mapping */
status = remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff, len, vma->vm_page_prot);
}
return status;
}
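The first two branches above are plain range checks against the per-PD layout: PD n's doorbell page sits at unmapped_db + n * db_page_size (exactly what the ocrdma_get_db_addr() helper used further down computes). With a 4 KB db_page_size, for example, PD 3's doorbell page spans [unmapped_db + 0x3000, unmapped_db + 0x4000), so a vm_pgoff falling in that window is remapped uncached, a DPP offset is remapped write-combined, and anything else is treated as ordinary queue memory.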
-static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
+static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
@@ -338,21 +493,21 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
struct ocrdma_alloc_pd_uresp rsp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ memset(&rsp, 0, sizeof(rsp));
rsp.id = pd->id;
rsp.dpp_enabled = pd->dpp_enabled;
- db_page_addr = pd->dev->nic_info.unmapped_db +
- (pd->id * pd->dev->nic_info.db_page_size);
- db_page_size = pd->dev->nic_info.db_page_size;
+ db_page_addr = ocrdma_get_db_addr(dev, pd->id);
+ db_page_size = dev->nic_info.db_page_size;
status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
if (status)
return status;
if (pd->dpp_enabled) {
- dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
- (pd->id * OCRDMA_DPP_PAGE_SIZE);
+ dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
+ (pd->id * PAGE_SIZE);
status = ocrdma_add_mmap(uctx, dpp_page_addr,
- OCRDMA_DPP_PAGE_SIZE);
+ PAGE_SIZE);
if (status)
goto dpp_map_err;
rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
@@ -368,7 +523,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
ucopy_err:
if (pd->dpp_enabled)
- ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
+ ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
return status;
@@ -380,76 +535,75 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
{
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd;
+ struct ocrdma_ucontext *uctx = NULL;
int status;
+ u8 is_uctx_pd = false;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
- pd->dev = dev;
if (udata && context) {
- pd->dpp_enabled = (dev->nic_info.dev_family ==
- OCRDMA_GEN2_FAMILY) ? true : false;
- pd->num_dpp_qp =
- pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ uctx = get_ocrdma_ucontext(context);
+ pd = ocrdma_get_ucontext_pd(uctx);
+ if (pd) {
+ is_uctx_pd = true;
+ goto pd_mapping;
+ }
}
- status = ocrdma_mbx_alloc_pd(dev, pd);
- if (status) {
- kfree(pd);
- return ERR_PTR(status);
+
+ pd = _ocrdma_alloc_pd(dev, uctx, udata);
+ if (IS_ERR(pd)) {
+ status = PTR_ERR(pd);
+ goto exit;
}
+pd_mapping:
if (udata && context) {
- status = ocrdma_copy_pd_uresp(pd, context, udata);
+ status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
if (status)
goto err;
}
return &pd->ibpd;
err:
- ocrdma_dealloc_pd(&pd->ibpd);
+ if (is_uctx_pd) {
+ ocrdma_release_ucontext_pd(uctx);
+ } else {
+ status = ocrdma_mbx_dealloc_pd(dev, pd);
+ kfree(pd);
+ }
+exit:
return ERR_PTR(status);
}
int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
- int status;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_ucontext *uctx = NULL;
+ int status = 0;
u64 usr_db;
- status = ocrdma_mbx_dealloc_pd(dev, pd);
- if (pd->uctx) {
+ uctx = pd->uctx;
+ if (uctx) {
u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
- (pd->id * OCRDMA_DPP_PAGE_SIZE);
+ (pd->id * PAGE_SIZE);
if (pd->dpp_enabled)
- ocrdma_del_mmap(pd->uctx, dpp_db, OCRDMA_DPP_PAGE_SIZE);
- usr_db = dev->nic_info.unmapped_db +
- (pd->id * dev->nic_info.db_page_size);
+ ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
+ usr_db = ocrdma_get_db_addr(dev, pd->id);
ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
+
+ if (is_ucontext_pd(uctx, pd)) {
+ ocrdma_release_ucontext_pd(uctx);
+ return status;
+ }
}
- kfree(pd);
+ status = _ocrdma_dealloc_pd(dev, pd);
return status;
}
-static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
- int acc, u32 num_pbls,
- u32 addr_check)
+static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
int status;
- struct ocrdma_mr *mr;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
-
- if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
- pr_err("%s(%d) leaving err, invalid access rights\n",
- __func__, dev->id);
- return ERR_PTR(-EINVAL);
- }
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return ERR_PTR(-ENOMEM);
- mr->hwmr.dev = dev;
mr->hwmr.fr_mr = 0;
mr->hwmr.local_rd = 1;
mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
@@ -459,25 +613,38 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
mr->hwmr.num_pbls = num_pbls;
- status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
- if (status) {
- kfree(mr);
- return ERR_PTR(-ENOMEM);
- }
- mr->pd = pd;
+ status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
+ if (status)
+ return status;
+
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
- return mr;
+ return 0;
}
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
+ int status;
struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
- if (IS_ERR(mr))
- return ERR_CAST(mr);
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
+ pr_err("%s err, invalid access rights\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
+ OCRDMA_ADDR_CHECK_DISABLE);
+ if (status) {
+ kfree(mr);
+ return ERR_PTR(status);
+ }
return &mr->ibmr;
}
@@ -501,7 +668,8 @@ static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
}
}
-static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
+static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 num_pbes)
{
u32 num_pbls = 0;
u32 idx = 0;
@@ -517,7 +685,7 @@ static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
num_pbls = num_pbls / (pbl_size / sizeof(u64));
idx++;
- } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);
+ } while (num_pbls >= dev->attr.max_num_mr_pbl);
mr->hwmr.num_pbes = num_pbes;
mr->hwmr.num_pbls = num_pbls;
@@ -612,13 +780,12 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
u64 usr_addr, int acc, struct ib_udata *udata)
{
int status = -ENOMEM;
- struct ocrdma_dev *dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_mr *mr;
struct ocrdma_pd *pd;
u32 num_pbes;
pd = get_ocrdma_pd(ibpd);
- dev = pd->dev;
if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
return ERR_PTR(-EINVAL);
@@ -626,14 +793,13 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->hwmr.dev = dev;
mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
}
num_pbes = ib_umem_page_count(mr->umem);
- status = ocrdma_get_pbl_info(mr, num_pbes);
+ status = ocrdma_get_pbl_info(dev, mr, num_pbes);
if (status)
goto umem_err;
@@ -653,7 +819,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
if (status)
goto mbx_err;
- mr->pd = pd;
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
@@ -670,7 +835,7 @@ umem_err:
int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
- struct ocrdma_dev *dev = mr->hwmr.dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
int status;
status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
@@ -685,28 +850,29 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
return status;
}
-static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
+static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ struct ib_udata *udata,
struct ib_ucontext *ib_ctx)
{
int status;
- struct ocrdma_ucontext *uctx;
+ struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
struct ocrdma_create_cq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.cq_id = cq->id;
- uresp.page_size = cq->len;
+ uresp.page_size = PAGE_ALIGN(cq->len);
uresp.num_pages = 1;
uresp.max_hw_cqe = cq->max_hw_cqe;
uresp.page_addr[0] = cq->pa;
- uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
- uresp.db_page_size = cq->dev->nic_info.db_page_size;
+ uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
+ uresp.db_page_size = dev->nic_info.db_page_size;
uresp.phase_change = cq->phase_change ? 1 : 0;
status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (status) {
pr_err("%s(%d) copy error cqid=0x%x.\n",
- __func__, cq->dev->id, cq->id);
+ __func__, dev->id, cq->id);
goto err;
}
- uctx = get_ocrdma_ucontext(ib_ctx);
status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
if (status)
goto err;
@@ -726,6 +892,8 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
{
struct ocrdma_cq *cq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+ struct ocrdma_ucontext *uctx = NULL;
+ u16 pd_id = 0;
int status;
struct ocrdma_create_cq_ureq ureq;
@@ -742,15 +910,19 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
spin_lock_init(&cq->comp_handler_lock);
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
- cq->dev = dev;
- status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
+ if (ib_ctx) {
+ uctx = get_ocrdma_ucontext(ib_ctx);
+ pd_id = uctx->cntxt_pd->id;
+ }
+
+ status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
if (status) {
kfree(cq);
return ERR_PTR(status);
}
if (ib_ctx) {
- status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
+ status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
if (status)
goto ctx_err;
}
@@ -784,13 +956,17 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
int status;
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
- struct ocrdma_dev *dev = cq->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ int pdid = 0;
status = ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
- ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
- ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
+ pdid = cq->ucontext->cntxt_pd->id;
+ ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
+ PAGE_ALIGN(cq->len));
+ ocrdma_del_mmap(cq->ucontext,
+ ocrdma_get_db_addr(dev, pdid),
dev->nic_info.db_page_size);
}
dev->cq_tbl[cq->id] = NULL;
@@ -818,14 +994,17 @@ static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
- if (attrs->qp_type != IB_QPT_GSI &&
- attrs->qp_type != IB_QPT_RC &&
- attrs->qp_type != IB_QPT_UD) {
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->qp_type != IB_QPT_RC) &&
+ (attrs->qp_type != IB_QPT_UC) &&
+ (attrs->qp_type != IB_QPT_UD)) {
pr_err("%s(%d) unsupported qp type=0x%x requested\n",
__func__, dev->id, attrs->qp_type);
return -EINVAL;
}
- if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
+ /* Skip the check for QP1 to support CM size of 128 */
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
__func__, dev->id, attrs->cap.max_send_wr);
pr_err("%s(%d) supported send_wr=0x%x\n",
@@ -876,11 +1055,9 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
/* verify consumer QPs are not trying to use GSI QP's CQ */
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
- (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
- (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
- (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
+ (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
- __func__, dev->id);
+ __func__, dev->id);
return -EINVAL;
}
}
@@ -903,13 +1080,13 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.qp_id = qp->id;
uresp.sq_dbid = qp->sq.dbid;
uresp.num_sq_pages = 1;
- uresp.sq_page_size = qp->sq.len;
+ uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
uresp.sq_page_addr[0] = qp->sq.pa;
uresp.num_wqe_allocated = qp->sq.max_cnt;
if (!srq) {
uresp.rq_dbid = qp->rq.dbid;
uresp.num_rq_pages = 1;
- uresp.rq_page_size = qp->rq.len;
+ uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
uresp.rq_page_addr[0] = qp->rq.pa;
uresp.num_rqe_allocated = qp->rq.max_cnt;
}
@@ -917,9 +1094,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.db_page_size = dev->nic_info.db_page_size;
if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
- uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
- OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
- uresp.db_shift = (qp->id < 128) ? 24 : 16;
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+ uresp.db_shift = 24;
} else {
uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
@@ -962,8 +1138,7 @@ static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
OCRDMA_DB_GEN2_SQ_OFFSET;
qp->rq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
- ((qp->id < 128) ?
- OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
+ OCRDMA_DB_GEN2_RQ_OFFSET;
} else {
qp->sq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
@@ -1004,6 +1179,7 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->rq.max_sges = attrs->cap.max_recv_sge;
qp->state = OCRDMA_QPS_RST;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
}
@@ -1024,7 +1200,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
int status;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_qp *qp;
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_create_qp_ureq ureq;
u16 dpp_credit_lmt, dpp_offset;
@@ -1044,6 +1220,9 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
}
qp->dev = dev;
ocrdma_set_qp_init_params(qp, pd, attrs);
+ if (udata == NULL)
+ qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
+ OCRDMA_QP_FAST_REG);
mutex_lock(&dev->dev_lock);
status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
@@ -1054,8 +1233,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
/* user space QP's wr_id table are managed in library */
if (udata == NULL) {
- qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
- OCRDMA_QP_FAST_REG);
status = ocrdma_alloc_wr_id_tbl(qp);
if (status)
goto map_err;
@@ -1091,6 +1268,17 @@ gen_err:
return ERR_PTR(status);
}
+
+static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
+{
+ if (qp->db_cache) {
+ u32 val = qp->rq.dbid | (qp->db_cache <<
+ ocrdma_get_num_posted_shift(qp));
+ iowrite32(val, qp->rq_db);
+ qp->db_cache = 0;
+ }
+}
+
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask)
{
@@ -1102,13 +1290,16 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp = get_ocrdma_qp(ibqp);
dev = qp->dev;
if (attr_mask & IB_QP_STATE)
- status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
+ status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
/* if new and previous states are same hw doesn't need to
* know about it.
*/
if (status < 0)
return status;
status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
+ if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
+ ocrdma_flush_rq_db(qp);
+
return status;
}
@@ -1213,7 +1404,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
sizeof(params.dgid));
@@ -1276,23 +1467,17 @@ static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
- int free_cnt;
- if (q->head >= q->tail)
- free_cnt = (q->max_cnt - q->head) + q->tail;
- else
- free_cnt = q->tail - q->head;
- return free_cnt;
+ return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}
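The single-expression free count replaces the old two-branch form. Assuming max_wqe_idx == max_cnt - 1 (the last valid slot index), it simplifies to

    free = (max_cnt - 1 - (head - tail)) mod max_cnt

i.e. capacity minus the in-flight entries, always holding one slot back so a full ring stays distinguishable from an empty one. For max_cnt = 16, head = 5, tail = 2: ((15 - 5) + 2) % 16 = 12 free slots alongside the 3 outstanding entries.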
static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
- return (qp->sq.tail == qp->sq.head &&
- ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
+ return (qp->sq.tail == qp->sq.head);
}
static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
- return (qp->rq.tail == qp->rq.head) ? 1 : 0;
+ return (qp->rq.tail == qp->rq.head);
}
static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
@@ -1358,17 +1543,18 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
*/
discard_cnt += 1;
cqe->cmn.qpn = 0;
- if (is_cqe_for_sq(cqe))
+ if (is_cqe_for_sq(cqe)) {
ocrdma_hwq_inc_tail(&qp->sq);
- else {
+ } else {
if (qp->srq) {
spin_lock_irqsave(&qp->srq->q_lock, flags);
ocrdma_hwq_inc_tail(&qp->srq->rq);
ocrdma_srq_toggle_bit(qp->srq, cur_getp);
spin_unlock_irqrestore(&qp->srq->q_lock, flags);
- } else
+ } else {
ocrdma_hwq_inc_tail(&qp->rq);
+ }
}
skip_cqe:
cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
@@ -1376,7 +1562,7 @@ skip_cqe:
spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}
-static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
int found = false;
unsigned long flags;
@@ -1442,9 +1628,11 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
mutex_unlock(&dev->dev_lock);
if (pd->uctx) {
- ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
+ ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
+ PAGE_ALIGN(qp->sq.len));
if (!qp->srq)
- ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
+ ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
+ PAGE_ALIGN(qp->rq.len));
}
ocrdma_del_flush_qp(qp);
@@ -1455,21 +1643,23 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
return status;
}
-static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
+static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
+ struct ib_udata *udata)
{
int status;
struct ocrdma_create_srq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.rq_dbid = srq->rq.dbid;
uresp.num_rq_pages = 1;
uresp.rq_page_addr[0] = srq->rq.pa;
uresp.rq_page_size = srq->rq.len;
- uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
- (srq->pd->id * srq->dev->nic_info.db_page_size);
- uresp.db_page_size = srq->dev->nic_info.db_page_size;
+ uresp.db_page_addr = dev->nic_info.unmapped_db +
+ (srq->pd->id * dev->nic_info.db_page_size);
+ uresp.db_page_size = dev->nic_info.db_page_size;
uresp.num_rqe_allocated = srq->rq.max_cnt;
- if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
+ if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
uresp.db_shift = 24;
} else {
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
@@ -1492,7 +1682,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
{
int status = -ENOMEM;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_srq *srq;
if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
@@ -1505,10 +1695,9 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
return ERR_PTR(status);
spin_lock_init(&srq->q_lock);
- srq->dev = dev;
srq->pd = pd;
srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
- status = ocrdma_mbx_create_srq(srq, init_attr, pd);
+ status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
if (status)
goto err;
@@ -1535,7 +1724,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
}
if (udata) {
- status = ocrdma_copy_srq_uresp(srq, udata);
+ status = ocrdma_copy_srq_uresp(dev, srq, udata);
if (status)
goto arm_err;
}
@@ -1581,15 +1770,15 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
int status;
struct ocrdma_srq *srq;
- struct ocrdma_dev *dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
srq = get_ocrdma_srq(ibsrq);
- dev = srq->dev;
status = ocrdma_mbx_destroy_srq(dev, srq);
if (srq->pd->uctx)
- ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
+ ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
+ PAGE_ALIGN(srq->rq.len));
kfree(srq->idx_bit_fields);
kfree(srq->rqe_wr_id_tbl);
@@ -1631,23 +1820,43 @@ static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
memset(sge, 0, sizeof(*sge));
}
+static inline u32 ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
+{
+ u32 total_len = 0, i;
+
+ for (i = 0; i < num_sge; i++)
+ total_len += sg_list[i].length;
+ return total_len;
+}
+
+
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
struct ocrdma_sge *sge,
struct ib_send_wr *wr, u32 wqe_size)
{
- if (wr->send_flags & IB_SEND_INLINE) {
- if (wr->sg_list[0].length > qp->max_inline_data) {
+ int i;
+ char *dpp_addr;
+
+ if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
+ hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
+ if (unlikely(hdr->total_len > qp->max_inline_data)) {
pr_err("%s() supported_len=0x%x,\n"
" unspported len req=0x%x\n", __func__,
- qp->max_inline_data, wr->sg_list[0].length);
+ qp->max_inline_data, hdr->total_len);
return -EINVAL;
}
- memcpy(sge,
- (void *)(unsigned long)wr->sg_list[0].addr,
- wr->sg_list[0].length);
- hdr->total_len = wr->sg_list[0].length;
+ dpp_addr = (char *)sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(dpp_addr,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ dpp_addr += wr->sg_list[i].length;
+ }
+
wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
+ if (hdr->total_len == 0)
+ wqe_size += sizeof(struct ocrdma_sge);
hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
} else {
ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
@@ -1672,8 +1881,9 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
ocrdma_build_ud_hdr(qp, hdr, wr);
sge = (struct ocrdma_sge *)(hdr + 2);
wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
- } else
+ } else {
sge = (struct ocrdma_sge *)(hdr + 1);
+ }
status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
return status;
@@ -1716,6 +1926,96 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
ext_rw->len = hdr->total_len;
}
+static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
+ struct ocrdma_hw_mr *hwmr)
+{
+ int i;
+ u64 buf_addr = 0;
+ int num_pbes;
+ struct ocrdma_pbe *pbe;
+
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+
+ /* go through the OS phy regions & fill hw pbe entries into pbls. */
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ /* number of pbes can be more for one OS buf, when
+ * buffers are of different sizes.
+ * split the ib_buf to one or more pbes.
+ */
+ buf_addr = wr->wr.fast_reg.page_list->page_list[i];
+ pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
+ num_pbes += 1;
+ pbe++;
+
+ /* if the pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (num_pbes == (hwmr->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0; /* restart the count in the new pbl */
+ }
+ }
+ return;
+}
+
+static int get_encoded_page_size(int pg_sz)
+{
+ /* Max size is 256 MB: 4096 << 16 */
+ int i = 0;
+ for (; i < 17; i++)
+ if (pg_sz == (4096 << i))
+ break;
+ return i;
+}
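get_encoded_page_size() thus returns log2(pg_sz / 4096), the page size expressed as a shift relative to 4 KB and capped at 16 (4096 << 16 = 256 MB): 4096 encodes to 0, 8192 to 1, and 65536 (4096 << 4) to 4.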
+
+
+static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+ struct ib_send_wr *wr)
+{
+ u64 fbo;
+ struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
+ struct ocrdma_mr *mr;
+ u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
+
+ wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
+
+ if ((wr->wr.fast_reg.page_list_len >
+ qp->dev->attr.max_pages_per_frmr) ||
+ (wr->wr.fast_reg.length > 0xffffffffULL))
+ return -EINVAL;
+
+ hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+
+ BUG_ON(wr->wr.fast_reg.page_list_len == 0);
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
+ hdr->lkey = wr->wr.fast_reg.rkey;
+ hdr->total_len = wr->wr.fast_reg.length;
+
+ fbo = wr->wr.fast_reg.iova_start -
+ (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
+
+ fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
+ fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
+ fast_reg->fbo_hi = upper_32_bits(fbo);
+ fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
+ fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
+ fast_reg->size_sge =
+ get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
+ mr = (struct ocrdma_mr *) (unsigned long) qp->dev->stag_arr[(hdr->lkey >> 8) &
+ (OCRDMA_MAX_STAG - 1)];
+ build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
+ return 0;
+}
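The fbo (first-byte offset) computed above is the distance from the first page boundary to the start of the registered range: with iova_start = 0x10000234 and page_list[0] = 0x10000000, fbo = 0x234. The hardware adds that offset back on top of the page-aligned PBEs programmed by build_frmr_pbes().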
+
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
u32 val = qp->sq.dbid | (1 << 16);
@@ -1747,7 +2047,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
hdr = ocrdma_hwq_head(&qp->sq);
hdr->cw = 0;
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
if (wr->send_flags & IB_SEND_FENCE)
hdr->cw |=
@@ -1785,10 +2085,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_LOCAL_INV:
hdr->cw |=
(OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
- hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
+ hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
+ sizeof(struct ocrdma_sge)) /
OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
hdr->lkey = wr->ex.invalidate_rkey;
break;
+ case IB_WR_FAST_REG_MR:
+ status = ocrdma_build_fr(qp, hdr, wr);
+ break;
default:
status = -EINVAL;
break;
@@ -1797,7 +2101,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
break;
}
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
else
qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
@@ -1821,7 +2125,10 @@ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
- iowrite32(val, qp->rq_db);
+ if (qp->state != OCRDMA_QPS_INIT)
+ iowrite32(val, qp->rq_db);
+ else
+ qp->db_cache++;
}
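Paired with ocrdma_flush_rq_db() earlier in this patch, this defers RQ doorbells while the QP is still in INIT: posts only bump qp->db_cache, and the accumulated count is written out once _ocrdma_modify_qp() transitions the QP to RTR. The doorbell word packs the queue id in the low bits and the posted count above the shift; e.g. dbid = 5 with three cached posts and a shift of 24 flushes as 5 | (3 << 24) = 0x03000005.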
static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
@@ -1955,7 +2262,7 @@ int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
- enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;
+ enum ib_wc_status ibwc_status;
switch (status) {
case OCRDMA_CQE_GENERAL_ERR:
@@ -2052,6 +2359,9 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
case OCRDMA_SEND:
ibwc->opcode = IB_WC_SEND;
break;
+ case OCRDMA_FR_MR:
+ ibwc->opcode = IB_WC_FAST_REG_MR;
+ break;
case OCRDMA_LKEY_INV:
ibwc->opcode = IB_WC_LOCAL_INV;
break;
@@ -2105,7 +2415,7 @@ static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
ibwc->status = ocrdma_to_ibwc_err(status);
ocrdma_flush_qp(qp);
- ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);
+ ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
/* if wqe/rqe pending for which cqe needs to be returned,
* trigger inflating it.
@@ -2190,7 +2500,8 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
ocrdma_update_wc(qp, ibwc, tail);
*polled = true;
}
- wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+ wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
+ OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
if (tail != wqe_idx)
expand = true; /* Coalesced CQE can't be consumed yet */
@@ -2239,7 +2550,8 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
u32 wqe_idx;
srq = get_ocrdma_srq(qp->ibqp.srq);
- wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+ OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
spin_lock_irqsave(&srq->q_lock, flags);
ocrdma_srq_toggle_bit(srq, wqe_idx);
@@ -2296,9 +2608,9 @@ static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
- if (qp->ibqp.srq)
+ if (qp->ibqp.srq) {
ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
- else {
+ } else {
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
ocrdma_hwq_inc_tail(&qp->rq);
}
@@ -2311,13 +2623,14 @@ static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
bool expand = false;
ibwc->wc_flags = 0;
- if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_UD_STATUS_MASK) >>
OCRDMA_CQE_UD_STATUS_SHIFT;
- else
+ } else {
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+ }
if (status == OCRDMA_CQE_SUCCESS) {
*polled = true;
@@ -2335,9 +2648,10 @@ static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
if (cq->phase_change) {
if (cur_getp == 0)
cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
- } else
+ } else {
/* clear valid bit */
cqe->flags_status_srcqpn = 0;
+ }
}
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
@@ -2348,7 +2662,7 @@ static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
bool expand = false;
int polled_hw_cqes = 0;
struct ocrdma_qp *qp = NULL;
- struct ocrdma_dev *dev = cq->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
struct ocrdma_cqe *cqe;
u16 cur_getp; bool polled = false; bool stop = false;
@@ -2414,8 +2728,9 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
ocrdma_hwq_inc_tail(&qp->rq);
- } else
+ } else {
return err_cqes;
+ }
ibwc->byte_len = 0;
ibwc->status = IB_WC_WR_FLUSH_ERR;
ibwc = ibwc + 1;
@@ -2428,14 +2743,11 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
int cqes_to_poll = num_entries;
- struct ocrdma_cq *cq = NULL;
- unsigned long flags;
- struct ocrdma_dev *dev;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
int num_os_cqe = 0, err_cqes = 0;
struct ocrdma_qp *qp;
-
- cq = get_ocrdma_cq(ibcq);
- dev = cq->dev;
+ unsigned long flags;
/* poll cqes from adapter CQ */
spin_lock_irqsave(&cq->cq_lock, flags);
@@ -2466,16 +2778,14 @@ int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
- struct ocrdma_cq *cq;
- unsigned long flags;
- struct ocrdma_dev *dev;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
u16 cq_id;
u16 cur_getp;
struct ocrdma_cqe *cqe;
+ unsigned long flags;
- cq = get_ocrdma_cq(ibcq);
cq_id = cq->id;
- dev = cq->dev;
spin_lock_irqsave(&cq->cq_lock, flags);
if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
@@ -2497,3 +2807,226 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
spin_unlock_irqrestore(&cq->cq_lock, flags);
return 0;
}
+
+struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ int status;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+
+ if (max_page_list_len > dev->attr.max_pages_per_frmr)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
+ if (status)
+ goto pbl_err;
+ mr->hwmr.fr_mr = 1;
+ mr->hwmr.remote_rd = 0;
+ mr->hwmr.remote_wr = 0;
+ mr->hwmr.local_rd = 0;
+ mr->hwmr.local_wr = 0;
+ mr->hwmr.mw_bind = 0;
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto pbl_err;
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
+ if (status)
+ goto mbx_err;
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
+ (unsigned long) mr;
+ return &mr->ibmr;
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+pbl_err:
+ kfree(mr);
+ return ERR_PTR(status);
+}
+
+struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
+ *ibdev,
+ int page_list_len)
+{
+ struct ib_fast_reg_page_list *frmr_list;
+ int size;
+
+ size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
+ frmr_list = kzalloc(size, GFP_KERNEL);
+ if (!frmr_list)
+ return ERR_PTR(-ENOMEM);
+ frmr_list->page_list = (u64 *)(frmr_list + 1);
+ return frmr_list;
+}
+
+void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
+{
+ kfree(page_list);
+}
+
+#define MAX_KERNEL_PBE_SIZE 65536
+static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
+ int buf_cnt, u32 *pbe_size)
+{
+ u64 total_size = 0;
+ u64 buf_size = 0;
+ int i;
+ *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
+ *pbe_size = roundup_pow_of_two(*pbe_size);
+
+ /* find the smallest PBE size that we can have */
+ for (i = 0; i < buf_cnt; i++) {
+ /* first addr may not be page aligned, so ignore checking */
+ if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
+ (buf_list[i].size & ~PAGE_MASK))) {
+ return 0;
+ }
+
+ /* if the configured PBE size is greater than the chosen one,
+ * reduce the PBE size.
+ */
+ buf_size = roundup(buf_list[i].size, PAGE_SIZE);
+ /* pbe_size has to be a power-of-two multiple of 4K: 1, 2, 4, 8... */
+ buf_size = roundup_pow_of_two(buf_size);
+ if (*pbe_size > buf_size)
+ *pbe_size = buf_size;
+
+ total_size += buf_size;
+ }
+ *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
+ (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
+
+ /* num_pbes = total_size / (*pbe_size); this is implemented below. */
+
+ return total_size >> ilog2(*pbe_size);
+}
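A worked pass through the sizing loop, assuming 4 KB pages: for buffers of 8 KB and 12 KB, the initial pbe_size is roundup_pow_of_two(8 KB) = 8 KB; the second buffer rounds up to 16 KB, which is larger, so pbe_size stays 8 KB. total_size accumulates 8 KB + 16 KB = 24 KB and the function returns 24 KB >> ilog2(8 KB) = 3 PBEs. A return of 0 (from the alignment check) tells ocrdma_reg_kernel_mr() below that the buffer list cannot be described with fixed-size PBEs.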
+
+static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
+ u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
+ struct ocrdma_hw_mr *hwmr)
+{
+ int i;
+ int idx;
+ int pbes_per_buf = 0;
+ u64 buf_addr = 0;
+ int num_pbes;
+ struct ocrdma_pbe *pbe;
+ int total_num_pbes = 0;
+
+ if (!hwmr->num_pbes)
+ return;
+
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+
+ /* go through the OS phy regions & fill hw pbe entries into pbls. */
+ for (i = 0; i < ib_buf_cnt; i++) {
+ buf_addr = buf_list[i].addr;
+ pbes_per_buf =
+ roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
+ pbe_size;
+ hwmr->len += buf_list[i].size;
+ /* number of pbes can be more for one OS buf, when
+ * buffers are of different sizes.
+ * split the ib_buf to one or more pbes.
+ */
+ for (idx = 0; idx < pbes_per_buf; idx++) {
+ /* we program always page aligned addresses,
+ * first unaligned address is taken care by fbo.
+ */
+ if (i == 0) {
+ /* for non zero fbo, assign the
+ * start of the page.
+ */
+ pbe->pa_lo =
+ cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi =
+ cpu_to_le32((u32) upper_32_bits(buf_addr));
+ } else {
+ pbe->pa_lo =
+ cpu_to_le32((u32) (buf_addr & 0xffffffff));
+ pbe->pa_hi =
+ cpu_to_le32((u32) upper_32_bits(buf_addr));
+ }
+ buf_addr += pbe_size;
+ num_pbes += 1;
+ total_num_pbes += 1;
+ pbe++;
+
+ if (total_num_pbes == hwmr->num_pbes)
+ goto mr_tbl_done;
+ /* if the pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+ }
+ }
+ }
+mr_tbl_done:
+ return;
+}
+
+struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
+ struct ib_phys_buf *buf_list,
+ int buf_cnt, int acc, u64 *iova_start)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ u32 num_pbes;
+ u32 pbe_size = 0;
+
+ if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(status);
+
+ num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
+ if (num_pbes == 0) {
+ status = -EINVAL;
+ goto pbl_err;
+ }
+ status = ocrdma_get_pbl_info(dev, mr, num_pbes);
+ if (status)
+ goto pbl_err;
+
+ mr->hwmr.pbe_size = pbe_size;
+ mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
+ mr->hwmr.va = *iova_start;
+ mr->hwmr.local_rd = 1;
+ mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
+
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto pbl_err;
+ build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
+ &mr->hwmr);
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
+ if (status)
+ goto mbx_err;
+
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ return &mr->ibmr;
+
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+pbl_err:
+ kfree(mr);
+ return ERR_PTR(status);
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 633f03d80274..b8f7853fd36c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -72,6 +72,7 @@ int ocrdma_query_qp(struct ib_qp *,
struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int ocrdma_destroy_qp(struct ib_qp *);
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
struct ib_udata *);
@@ -89,5 +90,10 @@ struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *);
+struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len);
+struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
+ *ibdev,
+ int page_list_len);
+void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);
#endif /* __OCRDMA_VERBS_H__ */