Diffstat (limited to 'drivers/infiniband/hw/qedr')
-rw-r--r--  drivers/infiniband/hw/qedr/Kconfig        |   2
-rw-r--r--  drivers/infiniband/hw/qedr/main.c         | 110
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h         |  41
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_iw_cm.c   |  26
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_roce_cm.c |  17
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_roce_cm.h |   5
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c        | 726
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.h        |  31
8 files changed, 564 insertions, 394 deletions
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
index 9c30325e1414..9e31d13b03c3 100644
--- a/drivers/infiniband/hw/qedr/Kconfig
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -6,6 +6,6 @@ config INFINIBAND_QEDR
select QED_LL2
select QED_OOO
select QED_RDMA
- ---help---
+ help
This driver provides low-level InfiniBand over Ethernet
support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dcdc85a1ab25..ba0c3e4c07d8 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -53,7 +53,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define QEDR_WQ_MULTIPLIER_DFT (3)
-static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
enum ib_event_type type)
{
struct ib_event ibev;
@@ -66,7 +66,7 @@ static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
}
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -81,7 +81,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
-static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -100,7 +100,7 @@ static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -110,7 +110,6 @@ static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
if (err)
return err;
- immutable->pkey_tbl_len = 1;
immutable->gid_tbl_len = 1;
immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
immutable->max_mad_size = 0;
@@ -125,7 +124,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct qedr_dev *dev =
rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
+ return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -135,10 +134,9 @@ static ssize_t hca_type_show(struct device *device,
struct qedr_dev *dev =
rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
- dev->pdev->device,
- rdma_protocol_iwarp(&dev->ibdev, 1) ?
- "iWARP" : "RoCE");
+ return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
+ rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
+ "RoCE");
}
static DEVICE_ATTR_RO(hca_type);
@@ -178,7 +176,10 @@ static int qedr_iw_register_device(struct qedr_dev *dev)
}
static const struct ib_device_ops qedr_roce_dev_ops = {
+ .alloc_xrcd = qedr_alloc_xrcd,
+ .dealloc_xrcd = qedr_dealloc_xrcd,
.get_port_immutable = qedr_roce_port_immutable,
+ .query_pkey = qedr_query_pkey,
};
static void qedr_roce_register_device(struct qedr_dev *dev)
@@ -207,6 +208,7 @@ static const struct ib_device_ops qedr_dev_ops = {
.destroy_cq = qedr_destroy_cq,
.destroy_qp = qedr_destroy_qp,
.destroy_srq = qedr_destroy_srq,
+ .device_group = &qedr_attr_group,
.get_dev_fw_str = qedr_get_dev_fw_str,
.get_dma_mr = qedr_get_dma_mr,
.get_link_layer = qedr_link_layer,
@@ -221,18 +223,18 @@ static const struct ib_device_ops qedr_dev_ops = {
.post_srq_recv = qedr_post_srq_recv,
.process_mad = qedr_process_mad,
.query_device = qedr_query_device,
- .query_pkey = qedr_query_pkey,
.query_port = qedr_query_port,
.query_qp = qedr_query_qp,
.query_srq = qedr_query_srq,
.reg_user_mr = qedr_reg_user_mr,
.req_notify_cq = qedr_arm_cq,
- .resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};
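
The new INIT_RDMA_OBJ_SIZE() entries let the RDMA core allocate the driver structures itself: the macro records the size of the qedr container and the offset of the embedded ib_* member, the core allocates the container and passes the member pointer down, and the driver recovers its wrapper with container_of(). A minimal sketch of the pattern, with illustrative names that are not part of this patch:

struct example_qp {
	struct ib_qp ibqp;	/* embedded core-owned object */
	u32 qp_id;		/* driver-private state */
};

static inline struct example_qp *to_example_qp(struct ib_qp *ibqp)
{
	/* the core allocated sizeof(struct example_qp) and handed us
	 * &container->ibqp; recover the container by member offset */
	return container_of(ibqp, struct example_qp, ibqp);
}

This is why qedr_create_qp() in verbs.c below changes from returning a struct ib_qp * to filling in a core-allocated QP and returning an errno.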
@@ -243,31 +245,6 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.node_guid = dev->attr.node_guid;
memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
- dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
- QEDR_UVERBS(QUERY_DEVICE) |
- QEDR_UVERBS(QUERY_PORT) |
- QEDR_UVERBS(ALLOC_PD) |
- QEDR_UVERBS(DEALLOC_PD) |
- QEDR_UVERBS(CREATE_COMP_CHANNEL) |
- QEDR_UVERBS(CREATE_CQ) |
- QEDR_UVERBS(RESIZE_CQ) |
- QEDR_UVERBS(DESTROY_CQ) |
- QEDR_UVERBS(REQ_NOTIFY_CQ) |
- QEDR_UVERBS(CREATE_QP) |
- QEDR_UVERBS(MODIFY_QP) |
- QEDR_UVERBS(QUERY_QP) |
- QEDR_UVERBS(DESTROY_QP) |
- QEDR_UVERBS(CREATE_SRQ) |
- QEDR_UVERBS(DESTROY_SRQ) |
- QEDR_UVERBS(QUERY_SRQ) |
- QEDR_UVERBS(MODIFY_SRQ) |
- QEDR_UVERBS(POST_SRQ_RECV) |
- QEDR_UVERBS(REG_MR) |
- QEDR_UVERBS(DEREG_MR) |
- QEDR_UVERBS(POLL_CQ) |
- QEDR_UVERBS(POST_SEND) |
- QEDR_UVERBS(POST_RECV);
-
if (IS_IWARP(dev)) {
rc = qedr_iw_register_device(dev);
if (rc)
@@ -280,21 +257,21 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.num_comp_vectors = dev->num_cnq;
dev->ibdev.dev.parent = &dev->pdev->dev;
- rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
if (rc)
return rc;
- return ib_register_device(&dev->ibdev, "qedr%d");
+ dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}
/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -346,9 +323,14 @@ static void qedr_free_resources(struct qedr_dev *dev)
static int qedr_alloc_resources(struct qedr_dev *dev)
{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .elem_size = sizeof(struct regpair *),
+ };
struct qedr_cnq *cnq;
__le16 *cons_pi;
- u16 n_entries;
int i, rc;
dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
@@ -362,6 +344,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
if (IS_IWARP(dev)) {
xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+ if (!dev->iwarp_wq) {
+ rc = -ENOMEM;
+ goto err1;
+ }
}
/* Allocate Status blocks for CNQ */
@@ -369,7 +355,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
GFP_KERNEL);
if (!dev->sb_array) {
rc = -ENOMEM;
- goto err1;
+ goto err_destroy_wq;
}
dev->cnq_array = kcalloc(dev->num_cnq,
@@ -382,7 +368,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
/* Allocate CNQ PBLs */
- n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+ params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
+ QEDR_ROCE_MAX_CNQ_SIZE);
+
for (i = 0; i < dev->num_cnq; i++) {
cnq = &dev->cnq_array[i];
@@ -391,13 +379,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
if (rc)
goto err3;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- n_entries,
- sizeof(struct regpair *),
- &cnq->pbl, NULL);
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
+ &params);
if (rc)
goto err4;
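
The chain allocation API change visible here replaces the old eight-argument chain_alloc() with a struct qed_chain_init_params filled by designated initializers plus a three-argument call; the same conversion repeats for CQs, SRQs, and QPs in verbs.c. A hedged sketch of the resulting shape, collapsing this hunk into one hypothetical helper:

static int example_alloc_cnq_chain(struct qedr_dev *dev, struct qedr_cnq *cnq)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.elem_size	= sizeof(struct regpair *),
		.num_elems	= min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
					QEDR_ROCE_MAX_CNQ_SIZE),
	};

	/* one struct pointer replaces the old parameter list */
	return dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl, &params);
}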
@@ -423,6 +406,9 @@ err3:
kfree(dev->cnq_array);
err2:
kfree(dev->sb_array);
+err_destroy_wq:
+ if (IS_IWARP(dev))
+ destroy_workqueue(dev->iwarp_wq);
err1:
kfree(dev->sgid_tbl);
return rc;
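
The new err_destroy_wq label fixes a leak: once create_singlethread_workqueue() had succeeded, any later allocation failure returned without destroying the iWARP workqueue. A minimal sketch of the unwind ordering, assuming the names from this hunk and eliding the other allocations:

static int example_alloc_resources(struct qedr_dev *dev)
{
	int rc;

	dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	if (!dev->iwarp_wq)
		return -ENOMEM;

	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err_destroy_wq;	/* unwind in reverse order */
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(dev->iwarp_wq);
	return rc;
}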
@@ -521,7 +507,6 @@ static void qedr_sync_free_irqs(struct qedr_dev *dev)
if (dev->int_info.msix_cnt) {
idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
vector = dev->int_info.msix[idx].vector;
- synchronize_irq(vector);
free_irq(vector, &dev->cnq_array[i]);
}
}
@@ -601,7 +586,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
/* Part 2 - check capabilities */
- page_size = ~dev->attr.page_size_caps + 1;
+ page_size = ~qed_attr->page_size_caps + 1;
if (page_size > PAGE_SIZE) {
DP_ERR(dev,
"Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
@@ -632,7 +617,6 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
attr->max_mr_size = qed_attr->max_mr_size;
attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
attr->max_mw = qed_attr->max_mw;
- attr->max_fmr = qed_attr->max_fmr;
attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
attr->max_pd = qed_attr->max_pd;
@@ -705,6 +689,18 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
event.event = IB_EVENT_SRQ_ERR;
event_type = EVENT_TYPE_SRQ;
break;
+ case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
default:
DP_ERR(dev, "unsupported event %d on handle=%llx\n",
e_code, roce_handle64);
@@ -776,6 +772,7 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
}
xa_unlock_irqrestore(&dev->srqs, flags);
DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
+ break;
default:
break;
}
@@ -1026,6 +1023,13 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
case QEDE_CHANGE_ADDR:
qedr_mac_address_change(dev);
break;
+ case QEDE_CHANGE_MTU:
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ if (dev->ndev->mtu != dev->iwarp_max_mtu)
+ DP_NOTICE(dev,
+ "Mtu was changed from %d to %d. This will not take affect for iWARP until qedr is reloaded\n",
+ dev->iwarp_max_mtu, dev->ndev->mtu);
+ break;
default:
pr_err("Event not supported\n");
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 5488dbd59d3c..db9ef3e1eb97 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -103,7 +103,6 @@ struct qedr_device_attr {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
@@ -236,6 +235,7 @@ struct qedr_ucontext {
u32 dpi_size;
u16 dpi;
bool db_rec;
+ u8 edpm_mode;
};
union db_prod32 {
@@ -310,6 +310,11 @@ struct qedr_pd {
struct qedr_ucontext *uctx;
};
+struct qedr_xrcd {
+ struct ib_xrcd ibxrcd;
+ u16 xrcd_id;
+};
+
struct qedr_qp_hwq_info {
/* WQE Elements */
struct qed_chain pbl;
@@ -345,10 +350,10 @@ struct qedr_srq_hwq_info {
u32 wqe_prod;
u32 sge_prod;
u32 wr_prod_cnt;
- u32 wr_cons_cnt;
+ atomic_t wr_cons_cnt;
u32 num_elems;
- u32 *virt_prod_pair_addr;
+ struct rdma_srq_producers *virt_prod_pair_addr;
dma_addr_t phy_prod_pair_addr;
};
@@ -361,6 +366,7 @@ struct qedr_srq {
struct ib_umem *prod_umem;
u16 srq_id;
u32 srq_limit;
+ bool is_xrc;
/* lock to protect srq recv post */
spinlock_t lock;
};
@@ -412,6 +418,7 @@ struct qedr_qp {
u32 sq_psn;
u32 qkey;
u32 dest_qp_num;
+ u8 timeout;
/* Relevant to qps created from kernel space only (ULPs) */
u8 prev_wqe_size;
@@ -449,6 +456,7 @@ struct qedr_qp {
/* synchronization objects used with iwarp ep */
struct kref refcnt;
struct completion iwarp_cm_comp;
+ struct completion qp_rel_comp;
unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};
@@ -573,6 +581,11 @@ static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
return container_of(ibpd, struct qedr_pd, ibpd);
}
+static inline struct qedr_xrcd *get_qedr_xrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct qedr_xrcd, ibxrcd);
+}
+
static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct qedr_cq, ibcq);
@@ -598,6 +611,28 @@ static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct qedr_srq, ibsrq);
}
+static inline bool qedr_qp_has_srq(struct qedr_qp *qp)
+{
+ return qp->srq;
+}
+
+static inline bool qedr_qp_has_sq(struct qedr_qp *qp)
+{
+ if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT)
+ return false;
+
+ return true;
+}
+
+static inline bool qedr_qp_has_rq(struct qedr_qp *qp)
+{
+ if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp))
+ return false;
+
+ return true;
+}
+
static inline struct qedr_user_mmap_entry *
get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 792eecd206b6..a51fc6854984 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
{
struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
- kfree(qp);
+ complete(&qp->qp_rel_comp);
}
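
Because the qedr_qp container is now allocated and freed by the RDMA core (see the INIT_RDMA_OBJ_SIZE change in main.c), the kref release callback may no longer kfree() the QP; it only signals a completion, and qedr_destroy_qp() waits on it so the QP is not torn down while an iWARP event handler still holds a reference. A simplified sketch of the handshake:

static void example_free_qp(struct kref *ref)
{
	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);

	complete(&qp->qp_rel_comp);	/* last reference dropped */
}

static void example_destroy_qp(struct qedr_qp *qp)
{
	kref_put(&qp->refcnt, example_free_qp);
	/* block until all async users drop their references; only
	 * after this may the core free the qedr_qp container */
	wait_for_completion(&qp->qp_rel_comp);
}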
static void
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
- event.private_data_len = params->cm_info->private_data_len;
- event.private_data = (void *)params->cm_info->private_data;
+ /* Only connect_request and reply have valid private data;
+ * for the rest of the events it may be left over from
+ * connection establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request.
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
}
if (ep->cm_id)
@@ -627,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
- &qp->iwarp_cm_flags))
+ &qp->iwarp_cm_flags)) {
+ rc = -ENODEV;
goto err; /* QP already being destroyed */
+ }
rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
if (rc) {
@@ -718,6 +729,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
listener->qed_handle);
cm_id->rem_ref(cm_id);
+ kfree(listener);
return rc;
}
@@ -727,7 +739,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct qedr_dev *dev = ep->dev;
struct qedr_qp *qp;
struct qed_iwarp_accept_in params;
- int rc = 0;
+ int rc;
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
@@ -750,8 +762,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
params.ord = conn_param->ord;
if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
- &qp->iwarp_cm_flags))
+ &qp->iwarp_cm_flags)) {
+ rc = -EINVAL;
goto err; /* QP already destroyed */
+ }
rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
if (rc) {
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index f5542d703ef9..05307c1488b8 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -319,20 +319,19 @@ err1:
return rc;
}
-struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
- struct ib_qp_init_attr *attrs,
- struct qedr_qp *qp)
+int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp)
{
int rc;
rc = qedr_check_gsi_qp_attrs(dev, attrs);
if (rc)
- return ERR_PTR(rc);
+ return rc;
rc = qedr_ll2_start(dev, attrs, qp);
if (rc) {
DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
- return ERR_PTR(rc);
+ return rc;
}
/* create QP */
@@ -359,7 +358,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
- return &qp->ibqp;
+ return 0;
err:
kfree(qp->rqe_wr_id);
@@ -368,7 +367,7 @@ err:
if (rc)
DP_ERR(dev, "create gsi qp: failed destroy on create\n");
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
int qedr_destroy_gsi_qp(struct qedr_dev *dev)
@@ -586,8 +585,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
qedr_inc_sw_prod(&qp->sq);
DP_DEBUG(qp->dev, QEDR_MSG_GSI,
- "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
- wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+ "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode,
+ wr->wr_id);
} else {
DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
rc = -EAGAIN;
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.h b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
index d46dcd3f6424..f3432f035ec6 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
@@ -50,9 +50,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
-struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
- struct ib_qp_init_attr *attrs,
- struct qedr_qp *qp);
+int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp);
void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
int qedr_destroy_gsi_qp(struct qedr_dev *dev);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 484b555150e0..d745ce9dc88a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -72,7 +72,7 @@ static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
return ib_copy_to_udata(udata, src, min_len);
}
-int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
return -EINVAL;
@@ -81,7 +81,7 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return 0;
}
-int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *sgid)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
@@ -134,8 +134,11 @@ int qedr_query_device(struct ib_device *ibdev,
attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+ attr->device_cap_flags |= IB_DEVICE_XRC;
attr->max_send_sge = qattr->max_sge;
attr->max_recv_sge = qattr->max_sge;
attr->max_sge_rd = qattr->max_sge;
@@ -145,8 +148,6 @@ int qedr_query_device(struct ib_device *ibdev,
attr->max_mw = qattr->max_mw;
attr->max_pd = qattr->max_pd;
attr->atomic_cap = dev->atomic_cap;
- attr->max_fmr = qattr->max_fmr;
- attr->max_map_per_fmr = 16;
attr->max_qp_init_rd_atom =
1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
attr->max_qp_rd_atom =
@@ -159,13 +160,13 @@ int qedr_query_device(struct ib_device *ibdev,
attr->local_ca_ack_delay = qattr->dev_ack_delay;
attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
- attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+ attr->max_pkeys = qattr->max_pkey;
attr->max_ah = qattr->max_ah;
return 0;
}
-static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
u8 *ib_width)
{
switch (speed) {
@@ -210,7 +211,8 @@ static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
}
}
-int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
{
struct qedr_dev *dev;
struct qed_rdma_port *rdma_port;
@@ -233,16 +235,16 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
attr->max_mtu = IB_MTU_4096;
- attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
attr->lid = 0;
attr->lmc = 0;
attr->sm_lid = 0;
attr->sm_sl = 0;
attr->ip_gids = true;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
attr->gid_tbl_len = 1;
- attr->pkey_tbl_len = 1;
} else {
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
attr->gid_tbl_len = QEDR_MAX_SGID;
attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
}
@@ -277,7 +279,8 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
DP_ERR(dev, "Problem copying data from user space\n");
return -EFAULT;
}
-
+ ctx->edpm_mode = !!(ureq.context_flags &
+ QEDR_ALLOC_UCTX_EDPM_MODE);
ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
}
@@ -318,11 +321,15 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
else
uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
- QEDR_DPM_TYPE_ROCE_LEGACY;
+ QEDR_DPM_TYPE_ROCE_LEGACY |
+ QEDR_DPM_TYPE_ROCE_EDPM_MODE;
- uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
- uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
- uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
+ if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
+ uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
+ uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
+ uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
+ uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
+ }
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
@@ -469,15 +476,33 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return 0;
}
-void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd);
DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+ return 0;
}
+
+int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
+
+ return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
+}
+
+int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
+ u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
+
+ dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
+ return 0;
+}
static void qedr_free_pbl(struct qedr_dev *dev,
struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
@@ -598,11 +623,9 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
int pbe_cnt, total_num_pbes = 0;
- u32 fw_pg_cnt, fw_pg_per_umem_pg;
struct qedr_pbl *pbl_tbl;
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
struct regpair *pbe;
- u64 pg_addr;
if (!pbl_info->num_pbes)
return;
@@ -623,32 +646,25 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
pbe_cnt = 0;
- fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
-
- for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- pg_addr = sg_page_iter_dma_address(&sg_iter);
- for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
- pbe->lo = cpu_to_le32(pg_addr);
- pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+ rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
+ u64 pg_addr = rdma_block_iter_dma_address(&biter);
- pg_addr += BIT(pg_shift);
- pbe_cnt++;
- total_num_pbes++;
- pbe++;
+ pbe->lo = cpu_to_le32(pg_addr);
+ pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
- if (total_num_pbes == pbl_info->num_pbes)
- return;
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
- /* If the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct regpair *)pbl_tbl->va;
- pbe_cnt = 0;
- }
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
- fw_pg_cnt++;
+ /* If the given pbl is full of pbes, move to the next pbl. */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
}
}
}
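
rdma_umem_for_each_dma_block() yields one DMA address per pg_shift-sized block, so the old two-level loop over umem pages and firmware sub-pages collapses into a single walk. A minimal sketch of the iterator, assuming a populated struct ib_umem:

static void example_walk_umem(struct ib_umem *umem, u32 pg_shift)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block(umem, &biter, BIT(pg_shift)) {
		u64 pg_addr = rdma_block_iter_dma_address(&biter);

		/* program one PBE (lo/hi 32-bit halves) per block */
		(void)pg_addr;
	}
}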
@@ -790,9 +806,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
return PTR_ERR(q->umem);
}
- fw_pages = ib_umem_page_count(q->umem) <<
- (PAGE_SHIFT - FW_PAGE_SHIFT);
-
+ fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
if (rc)
goto err0;
@@ -893,6 +907,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
udata, struct qedr_ucontext, ibucontext);
struct qed_rdma_destroy_cq_out_params destroy_oparams;
struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qed_chain_init_params chain_params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = sizeof(union rdma_cqe),
+ };
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_create_cq_in_params params;
struct qedr_create_cq_ureq ureq = {};
@@ -910,6 +930,9 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
"create_cq: called from %s. entries=%d, vector=%d\n",
udata ? "User Lib" : "Kernel", entries, vector);
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
if (entries > QEDR_MAX_CQES) {
DP_ERR(dev,
"create cq: the number of entries %d is too high. Must be equal or below %d.\n",
@@ -919,6 +942,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chain_entries = qedr_align_cq_entries(entries);
chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ chain_params.num_elems = chain_entries;
/* calc db offset. user will add DPI base, kernel will add db addr */
db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
@@ -953,13 +977,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- chain_entries,
- sizeof(union rdma_cqe),
- &cq->pbl, NULL);
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
+ &chain_params);
if (rc)
goto err0;
@@ -995,7 +1014,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
/* Generate doorbell address. */
cq->db.data.icid = cq->icid;
cq->db_addr = dev->db_addr + db_offset;
- cq->db.data.params = DB_AGG_CMD_SET <<
+ cq->db.data.params = DB_AGG_CMD_MAX <<
RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
/* point to the very last element, passing it we will toggle */
@@ -1034,20 +1053,10 @@ err0:
return -EINVAL;
}
-int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
-{
- struct qedr_dev *dev = get_qedr_dev(ibcq->device);
- struct qedr_cq *cq = get_qedr_cq(ibcq);
-
- DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
-
- return 0;
-}
-
#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
#define QEDR_DESTROY_CQ_ITER_DURATION (10)
-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibcq->device);
struct qed_rdma_destroy_cq_out_params oparams;
@@ -1062,7 +1071,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
/* GSIs CQs are handled by driver, so they don't exist in the FW */
if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
- return;
+ return 0;
}
iparams.icid = cq->icid;
@@ -1110,6 +1119,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
* Since the destroy CQ ramrod has also been received on the EQ we can
* be certain that there's no event handler in process.
*/
+ return 0;
}
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
@@ -1142,7 +1152,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
SET_FIELD(qp_params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
break;
- case RDMA_NETWORK_IB:
+ case RDMA_NETWORK_ROCE_V1:
memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
@@ -1162,6 +1172,8 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
qp_params->roce_mode = ROCE_V2_IPV4;
break;
+ default:
+ return -EINVAL;
}
for (i = 0; i < 4; i++) {
@@ -1182,11 +1194,14 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
struct qedr_device_attr *qattr = &dev->attr;
/* QP0... attrs->qp_type == IB_QPT_GSI */
- if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+ if (attrs->qp_type != IB_QPT_RC &&
+ attrs->qp_type != IB_QPT_GSI &&
+ attrs->qp_type != IB_QPT_XRC_INI &&
+ attrs->qp_type != IB_QPT_XRC_TGT) {
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: unsupported qp type=0x%x requested\n",
attrs->qp_type);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (attrs->cap.max_send_wr > qattr->max_sqe) {
@@ -1217,12 +1232,21 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
return -EINVAL;
}
- /* Unprivileged user space cannot create special QP */
- if (udata && attrs->qp_type == IB_QPT_GSI) {
- DP_ERR(dev,
- "create qp: userspace can't create special QPs of type=0x%x\n",
- attrs->qp_type);
- return -EINVAL;
+ /* Verify that consumer QPs are not trying to use the GSI QP's CQ.
+ * A TGT QP isn't associated with an RQ/SQ.
+ */
+ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
+ (attrs->qp_type != IB_QPT_XRC_TGT) &&
+ (attrs->qp_type != IB_QPT_XRC_INI)) {
+ struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
+ struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
+
+ if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
+ (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
+ DP_ERR(dev,
+ "create qp: consumer QP cannot use GSI CQs.\n");
+ return -EINVAL;
+ }
}
return 0;
@@ -1244,8 +1268,8 @@ static int qedr_copy_srq_uresp(struct qedr_dev *dev,
}
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
- struct qedr_create_qp_uresp *uresp,
- struct qedr_qp *qp)
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
{
/* iWARP requires two doorbells per RQ. */
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
@@ -1287,8 +1311,12 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
int rc;
memset(uresp, 0, sizeof(*uresp));
- qedr_copy_sq_uresp(dev, uresp, qp);
- qedr_copy_rq_uresp(dev, uresp, qp);
+
+ if (qedr_qp_has_sq(qp))
+ qedr_copy_sq_uresp(dev, uresp, qp);
+
+ if (qedr_qp_has_rq(qp))
+ qedr_copy_rq_uresp(dev, uresp, qp);
uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp->qp_id = qp->qp_id;
@@ -1302,6 +1330,15 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
return rc;
}
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+ qed_chain_reset(&qph->pbl);
+ qph->prod = 0;
+ qph->cons = 0;
+ qph->wqe_cons = 0;
+ qph->db_data.data.value = cpu_to_le16(0);
+}
+
static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct qedr_qp *qp,
struct qedr_pd *pd,
@@ -1311,19 +1348,32 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
kref_init(&qp->refcnt);
init_completion(&qp->iwarp_cm_comp);
+ init_completion(&qp->qp_rel_comp);
}
+
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
- qp->sq.max_sges = attrs->cap.max_send_sge;
qp->state = QED_ROCE_QP_STATE_RESET;
+
+ qp->prev_wqe_size = 0;
+
qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
- qp->sq_cq = get_qedr_cq(attrs->send_cq);
qp->dev = dev;
+ if (qedr_qp_has_sq(qp)) {
+ qedr_reset_qp_hwq_info(&qp->sq);
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+ }
- if (attrs->srq) {
+ if (attrs->srq)
qp->srq = get_qedr_srq(attrs->srq);
- } else {
+
+ if (qedr_qp_has_rq(qp)) {
+ qedr_reset_qp_hwq_info(&qp->rq);
qp->rq_cq = get_qedr_cq(attrs->recv_cq);
qp->rq.max_sges = attrs->cap.max_recv_sge;
DP_DEBUG(dev, QEDR_MSG_QP,
@@ -1342,30 +1392,26 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
- int rc;
+ int rc = 0;
- qp->sq.db = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
- qp->sq.db_data.data.icid = qp->icid + 1;
- rc = qedr_db_recovery_add(dev, qp->sq.db,
- &qp->sq.db_data,
- DB_REC_WIDTH_32B,
- DB_REC_KERNEL);
- if (rc)
- return rc;
+ if (qedr_qp_has_sq(qp)) {
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid + 1;
+ rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc)
+ return rc;
+ }
- if (!qp->srq) {
+ if (qedr_qp_has_rq(qp)) {
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
-
- rc = qedr_db_recovery_add(dev, qp->rq.db,
- &qp->rq.db_data,
- DB_REC_WIDTH_32B,
- DB_REC_KERNEL);
- if (rc)
- qedr_db_recovery_del(dev, qp->sq.db,
- &qp->sq.db_data);
+ rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc && qedr_qp_has_sq(qp))
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
}
return rc;
@@ -1388,6 +1434,10 @@ static int qedr_check_srq_params(struct qedr_dev *dev,
DP_ERR(dev,
"create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
attrs->attr.max_sge, qattr->max_sge);
+ }
+
+ if (!udata && attrs->srq_type == IB_SRQT_XRC) {
+ DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
return -EINVAL;
}
@@ -1437,7 +1487,7 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
return PTR_ERR(srq->prod_umem);
}
- sg = srq->prod_umem->sg_head.sgl;
+ sg = srq->prod_umem->sgt_append.sgt.sgl;
srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
return 0;
@@ -1448,6 +1498,12 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
struct ib_srq_init_attr *init_attr)
{
struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
+ };
dma_addr_t phy_prod_pair_addr;
u32 num_elems;
void *va;
@@ -1466,13 +1522,9 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
hw_srq->virt_prod_pair_addr = va;
num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- num_elems,
- QEDR_SRQ_WQE_ELEM_SIZE,
- &hw_srq->pbl, NULL);
+ params.num_elems = num_elems;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
if (rc)
goto err0;
@@ -1505,11 +1557,16 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
"create SRQ called from %s (pd %p)\n",
(udata) ? "User lib" : "kernel", pd);
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC)
+ return -EOPNOTSUPP;
+
rc = qedr_check_srq_params(dev, init_attr, udata);
if (rc)
return -EINVAL;
srq->dev = dev;
+ srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
hw_srq = &srq->hw_srq;
spin_lock_init(&srq->lock);
@@ -1551,6 +1608,14 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
in_params.prod_pair_addr = phy_prod_pair_addr;
in_params.num_pages = page_cnt;
in_params.page_size = page_size;
+ if (srq->is_xrc) {
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
+ struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
+
+ in_params.is_xrc = 1;
+ in_params.xrcd_id = xrcd->xrcd_id;
+ in_params.cq_cid = cq->icid;
+ }
rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
if (rc)
@@ -1585,7 +1650,7 @@ err0:
return -EFAULT;
}
-void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct qed_rdma_destroy_srq_in_params in_params = {};
struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
@@ -1593,6 +1658,7 @@ void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
xa_erase_irq(&dev->srqs, srq->srq_id);
in_params.srq_id = srq->srq_id;
+ in_params.is_xrc = srq->is_xrc;
dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
if (ibsrq->uobject)
@@ -1603,6 +1669,7 @@ void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
DP_DEBUG(dev, QEDR_MSG_SRQ,
"destroy srq: destroyed srq with srq_id=0x%0x\n",
srq->srq_id);
+ return 0;
}
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -1643,6 +1710,20 @@ int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return 0;
}
+static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
+{
+ switch (ib_qp_type) {
+ case IB_QPT_RC:
+ return QED_RDMA_QP_TYPE_RC;
+ case IB_QPT_XRC_INI:
+ return QED_RDMA_QP_TYPE_XRC_INI;
+ case IB_QPT_XRC_TGT:
+ return QED_RDMA_QP_TYPE_XRC_TGT;
+ default:
+ return QED_RDMA_QP_TYPE_INVAL;
+ }
+}
+
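
qedr_ib_to_qed_qp_type() funnels the supported connected QP types into the qed core's enum, with QED_RDMA_QP_TYPE_INVAL as the catch-all for everything else. A usage sketch with a hypothetical caller (the check is illustrative; the patch rejects unsupported types earlier, in qedr_check_qp_attrs()):

static int example_set_qp_type(struct qed_rdma_create_qp_in_params *params,
			       enum ib_qp_type type)
{
	params->qp_type = qedr_ib_to_qed_qp_type(type);
	if (params->qp_type == QED_RDMA_QP_TYPE_INVAL)
		return -EOPNOTSUPP;	/* e.g. IB_QPT_UD is not mapped */

	return 0;
}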
static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
struct qedr_pd *pd,
@@ -1657,20 +1738,27 @@ qedr_init_common_qp_in_params(struct qedr_dev *dev,
params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
- params->pd = pd->pd_id;
- params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
- params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+ params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
params->stats_queue = 0;
- params->srq_id = 0;
- params->use_srq = false;
- if (!qp->srq) {
+ if (pd) {
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ }
+
+ if (qedr_qp_has_sq(qp))
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+
+ if (qedr_qp_has_rq(qp))
params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
- } else {
+ if (qedr_qp_has_srq(qp)) {
params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
params->srq_id = qp->srq->srq_id;
params->use_srq = true;
+ } else {
+ params->srq_id = 0;
+ params->use_srq = false;
}
}
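
The qedr_qp_has_sq()/qedr_qp_has_rq()/qedr_qp_has_srq() helpers added in qedr.h centralize which queues a given QP type owns: no SQ for GSI and XRC_TGT, no RQ for GSI, XRC_INI, XRC_TGT, or SRQ-attached QPs. An illustrative sketch of how they gate queue setup, mirroring the hunk above:

static void example_fill_cq_ids(struct qedr_qp *qp,
				struct ib_qp_init_attr *attrs,
				struct qed_rdma_create_qp_in_params *params)
{
	if (qedr_qp_has_sq(qp))
		params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;

	if (qedr_qp_has_rq(qp))
		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;

	if (qedr_qp_has_srq(qp)) {
		params->srq_id = qp->srq->srq_id;
		params->use_srq = true;
	}
}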
@@ -1684,8 +1772,10 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
"rq_len=%zd"
"\n",
qp,
- qp->usq.buf_addr,
- qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
+ qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
+ qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
+ qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
+ qedr_qp_has_sq(qp) ? qp->urq.buf_len : 0);
}
static inline void
@@ -1711,11 +1801,15 @@ static void qedr_cleanup_user(struct qedr_dev *dev,
struct qedr_ucontext *ctx,
struct qedr_qp *qp)
{
- ib_umem_release(qp->usq.umem);
- qp->usq.umem = NULL;
+ if (qedr_qp_has_sq(qp)) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ }
- ib_umem_release(qp->urq.umem);
- qp->urq.umem = NULL;
+ if (qedr_qp_has_rq(qp)) {
+ ib_umem_release(qp->urq.umem);
+ qp->urq.umem = NULL;
+ }
if (rdma_protocol_roce(&dev->ibdev, 1)) {
qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
@@ -1750,28 +1844,38 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
{
struct qed_rdma_create_qp_in_params in_params;
struct qed_rdma_create_qp_out_params out_params;
- struct qedr_pd *pd = get_qedr_pd(ibpd);
- struct qedr_create_qp_uresp uresp;
- struct qedr_ucontext *ctx = NULL;
- struct qedr_create_qp_ureq ureq;
+ struct qedr_create_qp_uresp uresp = {};
+ struct qedr_create_qp_ureq ureq = {};
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
- int rc = -EINVAL;
+ struct qedr_ucontext *ctx = NULL;
+ struct qedr_pd *pd = NULL;
+ int rc = 0;
qp->create_type = QEDR_QP_CREATE_USER;
- memset(&ureq, 0, sizeof(ureq));
- rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
- if (rc) {
- DP_ERR(dev, "Problem copying data from user space\n");
- return rc;
+
+ if (ibpd) {
+ pd = get_qedr_pd(ibpd);
+ ctx = pd->uctx;
}
- /* SQ - read access only (0) */
- rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, true, 0, alloc_and_init);
- if (rc)
- return rc;
+ if (udata) {
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return rc;
+ }
+ }
- if (!qp->srq) {
+ if (qedr_qp_has_sq(qp)) {
+ /* SQ - read access only (0) */
+ rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
+ ureq.sq_len, true, 0, alloc_and_init);
+ if (rc)
+ return rc;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
/* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, true, 0, alloc_and_init);
@@ -1783,13 +1887,28 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
in_params.qp_handle_lo = ureq.qp_handle_lo;
in_params.qp_handle_hi = ureq.qp_handle_hi;
- in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
- in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
- if (!qp->srq) {
+
+ if (qp->qp_type == IB_QPT_XRC_TGT) {
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
+
+ in_params.xrcd_id = xrcd->xrcd_id;
+ in_params.qp_handle_lo = qp->qp_id;
+ in_params.use_srq = 1;
+ }
+
+ if (qedr_qp_has_sq(qp)) {
+ in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
+ in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
}
+ if (ctx)
+ SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
+
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
&in_params, &out_params);
@@ -1804,41 +1923,45 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
- if (rc)
- goto err;
+ if (udata) {
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
+ if (rc)
+ goto err;
+ }
/* db offset was calculated in copy_qp_uresp, now set in the user q */
- ctx = pd->uctx;
- qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
- qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+ if (qedr_qp_has_sq(qp)) {
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
/* calculate the db_rec_db2 data since it is constant so no
- * need to reflect from user
+ * need to reflect from user
*/
qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
qp->urq.db_rec_db2_data.data.value =
cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
- }
-
- rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
- &qp->usq.db_rec_data->db_data,
- DB_REC_WIDTH_32B,
- DB_REC_USER);
- if (rc)
- goto err;
- rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
- &qp->urq.db_rec_data->db_data,
- DB_REC_WIDTH_32B,
- DB_REC_USER);
- if (rc)
- goto err;
-
- if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
&qp->urq.db_rec_db2_data,
DB_REC_WIDTH_32B,
@@ -1847,7 +1970,6 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
goto err;
}
qedr_qp_user_print(dev, qp);
-
return rc;
err:
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -1903,29 +2025,28 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_elems, u32 n_rq_elems)
{
struct qed_rdma_create_qp_out_params out_params;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
int rc;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_sq_elems,
- QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl, NULL);
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
if (rc)
return rc;
in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_rq_elems,
- QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl, NULL);
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
if (rc)
return rc;
@@ -1951,14 +2072,19 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_elems, u32 n_rq_elems)
{
struct qed_rdma_create_qp_out_params out_params;
- struct qed_chain_ext_pbl ext_pbl;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
int rc;
in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
QEDR_SQE_ELEMENT_SIZE,
+ QED_CHAIN_PAGE_SIZE,
QED_CHAIN_MODE_PBL);
in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
QEDR_RQE_ELEMENT_SIZE,
+ QED_CHAIN_PAGE_SIZE,
QED_CHAIN_MODE_PBL);
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
@@ -1968,31 +2094,24 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
return -EINVAL;
/* Now we allocate the chain */
- ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
- ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_sq_elems,
- QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl, &ext_pbl);
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.sq_pbl_virt;
+ params.ext_pbl_phys = out_params.sq_pbl_phys;
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
if (rc)
goto err;
- ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
- ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
-
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_rq_elems,
- QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl, &ext_pbl);
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.rq_pbl_virt;
+ params.ext_pbl_phys = out_params.rq_pbl_phys;
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
if (rc)
goto err;
@@ -2106,22 +2225,52 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
return rc;
}
-struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_udata *udata)
{
- struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ int rc;
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ }
+
+ if (qp->create_type == QEDR_QP_CREATE_USER)
+ qedr_cleanup_user(dev, ctx, qp);
+ else
+ qedr_cleanup_kernel(dev, qp);
+
+ return 0;
+}
+
+int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_xrcd *xrcd = NULL;
+ struct ib_pd *ibpd = ibqp->pd;
struct qedr_pd *pd = get_qedr_pd(ibpd);
- struct qedr_qp *qp;
- struct ib_qp *ibqp;
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
int rc = 0;
+ if (attrs->create_flags)
+ return -EOPNOTSUPP;
+
+ if (attrs->qp_type == IB_QPT_XRC_TGT)
+ xrcd = get_qedr_xrcd(attrs->xrcd);
+ else
+ pd = get_qedr_pd(ibpd);
+
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
udata ? "user library" : "kernel", pd);
rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
if (rc)
- return ERR_PTR(rc);
+ return rc;
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
@@ -2131,43 +2280,32 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
get_qedr_cq(attrs->recv_cq),
attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- DP_ERR(dev, "create qp: failed allocating memory\n");
- return ERR_PTR(-ENOMEM);
- }
-
qedr_set_common_qp_params(dev, qp, pd, attrs);
- if (attrs->qp_type == IB_QPT_GSI) {
- ibqp = qedr_create_gsi_qp(dev, attrs, qp);
- if (IS_ERR(ibqp))
- kfree(qp);
- return ibqp;
- }
+ if (attrs->qp_type == IB_QPT_GSI)
+ return qedr_create_gsi_qp(dev, attrs, qp);
- if (udata)
+ if (udata || xrcd)
rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
else
rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
if (rc)
- goto err;
+ return rc;
qp->ibqp.qp_num = qp->qp_id;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
if (rc)
- goto err;
+ goto out_free_qp_resources;
}
- return &qp->ibqp;
-
-err:
- kfree(qp);
+ return 0;
- return ERR_PTR(-EFAULT);
+out_free_qp_resources:
+ qedr_free_qp_resources(dev, qp, udata);
+ return -EFAULT;
}
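
Note the new .create_qp contract implied by this hunk: the core allocates the qedr_qp via INIT_RDMA_OBJ_SIZE before calling the driver and frees it when the driver returns an error, so the handler takes the pre-allocated struct ib_qp, returns a plain errno, and never kfree()s the QP or wraps it in ERR_PTR(). A hedged sketch with the setup details elided into a hypothetical helper:

static int example_create_qp(struct ib_qp *ibqp,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);	/* core-allocated */
	int rc;

	rc = example_setup_queues(qp, attrs, udata);	/* hypothetical */
	if (rc)
		return rc;	/* the core frees the container */

	qp->ibqp.qp_num = qp->qp_id;
	return 0;
}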
static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
@@ -2212,15 +2350,6 @@ static enum qed_roce_qp_state qedr_get_state_from_ibqp(
}
}
-static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
-{
- qed_chain_reset(&qph->pbl);
- qph->prod = 0;
- qph->cons = 0;
- qph->wqe_cons = 0;
- qph->db_data.data.value = cpu_to_le16(0);
-}
-
static int qedr_update_qp_state(struct qedr_dev *dev,
struct qedr_qp *qp,
enum qed_roce_qp_state cur_state,
@@ -2235,9 +2364,6 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
case QED_ROCE_QP_STATE_RESET:
switch (new_state) {
case QED_ROCE_QP_STATE_INIT:
- qp->prev_wqe_size = 0;
- qedr_reset_qp_hwq_info(&qp->sq);
- qedr_reset_qp_hwq_info(&qp->rq);
break;
default:
status = -EINVAL;
@@ -2342,6 +2468,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
"modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
attr->qp_state);
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
old_qp_state = qedr_get_ibqp_state(qp->state);
if (attr_mask & IB_QP_STATE)
new_qp_state = attr->qp_state;
@@ -2484,6 +2613,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1 << max_t(int, attr->timeout - 8, 0);
else
qp_params.ack_timeout = 0;
+
+ qp->timeout = attr->timeout;
}
if (attr_mask & IB_QP_RETRY_CNT) {
@@ -2608,15 +2739,18 @@ int qedr_query_qp(struct ib_qp *ibqp,
int rc = 0;
memset(&params, 0, sizeof(params));
-
- rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
- if (rc)
- goto err;
-
memset(qp_attr, 0, sizeof(*qp_attr));
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
- qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ } else {
+ qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
+ }
+
qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
qp_attr->path_mig_state = IB_MIG_MIGRATED;
@@ -2630,7 +2764,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+ qp_attr->cap.max_inline_data = dev->attr.max_inline;
qp_init_attr->cap = qp_attr->cap;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
@@ -2640,7 +2774,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
rdma_ah_set_sl(&qp_attr->ah_attr, 0);
- qp_attr->timeout = params.timeout;
+ qp_attr->timeout = qp->timeout;
qp_attr->rnr_retry = params.rnr_retry;
qp_attr->retry_cnt = params.retry_cnt;
qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
@@ -2665,28 +2799,6 @@ err:
return rc;
}
-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
- struct ib_udata *udata)
-{
- struct qedr_ucontext *ctx =
- rdma_udata_to_drv_context(udata, struct qedr_ucontext,
- ibucontext);
- int rc;
-
- if (qp->qp_type != IB_QPT_GSI) {
- rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
- if (rc)
- return rc;
- }
-
- if (qp->create_type == QEDR_QP_CREATE_USER)
- qedr_cleanup_user(dev, ctx, qp);
- else
- qedr_cleanup_kernel(dev, qp);
-
- return 0;
-}
-
int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -2744,27 +2856,30 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_free_qp_resources(dev, qp, udata);
- if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
qedr_iw_qp_rem_ref(&qp->ibqp);
+ wait_for_completion(&qp->qp_rel_comp);
+ }
return 0;
}
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
- rdma_copy_ah_attr(&ah->attr, attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
return 0;
}
-void qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
rdma_destroy_ah_attr(&ah->attr);
+ return 0;
}
static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
@@ -2855,7 +2970,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
goto err0;
}
- rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+ rc = init_mr_info(dev, &mr->info,
+ ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
if (rc)
goto err1;
@@ -2864,7 +2980,11 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
goto err1;
}
@@ -2882,10 +3002,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
mr->hw_mr.page_size_log = PAGE_SHIFT;
- mr->hw_mr.fbo = ib_umem_offset(mr->umem);
mr->hw_mr.length = len;
mr->hw_mr.vaddr = usr_addr;
- mr->hw_mr.zbva = false;
mr->hw_mr.phy_mr = false;
mr->hw_mr.dma_mr = false;
@@ -2961,8 +3079,12 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
- goto err0;
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
+ goto err1;
}
/* Index only, 18 bit long, lkey = itid << 8 | key */
@@ -2978,17 +3100,15 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
mr->hw_mr.pbl_ptr = 0;
mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
- mr->hw_mr.fbo = 0;
mr->hw_mr.length = 0;
mr->hw_mr.vaddr = 0;
- mr->hw_mr.zbva = false;
mr->hw_mr.phy_mr = true;
mr->hw_mr.dma_mr = false;
rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
if (rc) {
DP_ERR(dev, "roce register tid returned an error %d\n", rc);
- goto err1;
+ goto err2;
}
mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
@@ -2997,15 +3117,17 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
return mr;
-err1:
+err2:
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
err0:
kfree(mr);
return ERR_PTR(rc);
}
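
The relabelling restores the usual kernel unwind discipline: each error
label releases exactly what was allocated before the failing step, and
control falls through so later labels also run the earlier cleanups. A
minimal sketch of the layering with placeholder helpers (alloc_pbl,
alloc_tid, register_tid and friends stand in for the driver calls):

#include <linux/err.h>
#include <linux/slab.h>

struct demo_mr { int dummy; };

static int alloc_pbl(struct demo_mr *mr);	/* step 1, undone by err1 */
static int alloc_tid(struct demo_mr *mr);	/* step 2, undone by err2 */
static int register_tid(struct demo_mr *mr);	/* step 3, no undo needed */
static void free_pbl(struct demo_mr *mr);
static void free_tid(struct demo_mr *mr);

static struct demo_mr *demo_alloc_mr(void)
{
	struct demo_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);		/* step 0 */
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rc = alloc_pbl(mr);
	if (rc)
		goto err0;
	rc = alloc_tid(mr);
	if (rc)
		goto err1;
	rc = register_tid(mr);
	if (rc)
		goto err2;

	return mr;

err2:
	free_tid(mr);	/* falls through to release the pbl too */
err1:
	free_pbl(mr);
err0:
	kfree(mr);
	return ERR_PTR(rc);
}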
struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata)
+ u32 max_num_sg)
{
struct qedr_mr *mr;
@@ -3093,7 +3215,11 @@ struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
goto err1;
}
@@ -3522,7 +3648,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
break;
case IB_WR_RDMA_READ_WITH_INV:
SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
- /* fallthrough -- same is identical to RDMA READ */
+ fallthrough; /* handled identically to RDMA READ */
case IB_WR_RDMA_READ:
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
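
The fallthrough pseudo-keyword replaces the bare comment so the intent is
visible to the compiler's implicit-fallthrough warning, not just to readers.
A minimal sketch of the idiom outside this driver:

#include <linux/compiler_attributes.h>	/* defines fallthrough */

static int demo_flags(int op)
{
	int flags = 0;

	switch (op) {
	case 1:
		flags |= 0x1;	/* op 1 does extra setup ... */
		fallthrough;	/* ... then shares op 2's handling */
	case 2:
		flags |= 0x2;
		break;
	default:
		break;
	}
	return flags;
}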
@@ -3688,7 +3814,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
* count and consumer count and subtract it from max
* work request supported so that we get elements left.
*/
- used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
+ used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
return hw_srq->max_wr - used;
}
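
The occupancy math depends on unsigned wrap-around: producer minus consumer
stays correct modulo 2^32 even after either counter wraps, and the consumer
is now read atomically because the CQ polling path increments it outside the
SRQ producer lock. A standalone sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_elems_left(uint32_t prod, uint32_t cons, uint32_t max_wr)
{
	uint32_t used = prod - cons;	/* well defined modulo 2^32 */

	return max_wr - used;
}

int main(void)
{
	/* The producer has wrapped past UINT32_MAX; the distance between
	 * the counters is still 5, so 3 of 8 elements remain.
	 */
	printf("%u\n", demo_elems_left(3, UINT32_MAX - 1, 8)); /* prints 3 */
	return 0;
}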
@@ -3703,7 +3829,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
unsigned long flags;
int status = 0;
u32 num_sge;
- u32 offset;
spin_lock_irqsave(&srq->lock, flags);
@@ -3716,7 +3841,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (!qedr_srq_elem_left(hw_srq) ||
wr->num_sge > srq->hw_srq.max_sges) {
DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
- hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
+ hw_srq->wr_prod_cnt,
+ atomic_read(&hw_srq->wr_cons_cnt),
wr->num_sge, srq->hw_srq.max_sges);
status = -ENOMEM;
*bad_wr = wr;
@@ -3750,22 +3876,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
hw_srq->sge_prod++;
}
- /* Flush WQE and SGE information before
+ /* Update WQE and SGE information before
* updating producer.
*/
- wmb();
+ dma_wmb();
/* SRQ producer is 8 bytes. Need to update SGE producer index
* in first 4 bytes and need to update WQE producer in
* next 4 bytes.
*/
- *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
- offset = offsetof(struct rdma_srq_producers, wqe_prod);
- *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
- hw_srq->wqe_prod;
+ srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
+ /* Make sure sge producer is updated first */
+ dma_wmb();
+ srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
- /* Flush producer after updating it. */
- wmb();
wr = wr->next;
}
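
The rewrite drops the heavyweight wmb() pair and the offsetof() byte poke in
favour of typed little-endian stores separated by dma_wmb(): the first
barrier orders the WQE/SGE payload before the SGE producer, the second
orders the SGE producer before the WQE producer the device polls. A
condensed sketch of the publish sequence, assuming the two-__le32 layout of
struct rdma_srq_producers described in the comment above:

#include <asm/barrier.h>	/* dma_wmb() */
#include <asm/byteorder.h>	/* cpu_to_le32() */
#include <linux/types.h>

struct demo_srq_producers {
	__le32 sge_prod;	/* first 4 bytes of the 8-byte producer */
	__le32 wqe_prod;	/* next 4 bytes */
};

static void demo_publish(struct demo_srq_producers *virt,
			 u32 sge_prod, u32 wqe_prod)
{
	/* WQE and SGE payload writes precede this call. */
	dma_wmb();		/* payload visible before either producer */

	virt->sge_prod = cpu_to_le32(sge_prod);
	dma_wmb();		/* sge_prod visible before wqe_prod */

	virt->wqe_prod = cpu_to_le32(wqe_prod);
}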
@@ -3789,12 +3913,6 @@ int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if (qp->state == QED_ROCE_QP_STATE_RESET) {
- spin_unlock_irqrestore(&qp->q_lock, flags);
- *bad_wr = wr;
- return -EINVAL;
- }
-
while (wr) {
int i;
@@ -4184,7 +4302,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
} else {
__process_resp_one(dev, qp, cq, wc, resp, wr_id);
}
- srq->hw_srq.wr_cons_cnt++;
+ atomic_inc(&srq->hw_srq.wr_cons_cnt);
return 1;
}
@@ -4358,7 +4476,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in,
struct ib_mad *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 18027844eb87..081753df79ef 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -34,12 +34,13 @@
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata);
-int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
-int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid);
-int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
@@ -47,15 +48,15 @@ void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma);
void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-
+int qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
+int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
- struct ib_udata *);
+int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
@@ -67,12 +68,12 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr,
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_recv_wr);
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-void qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
@@ -84,18 +85,18 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata);
+ u32 max_num_sg);
int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
const struct ib_send_wr **bad_wr);
int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in_mad,
struct ib_mad *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index);
-int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+int qedr_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
#endif