author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2019-07-20 07:07:56 +0300
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2019-07-20 07:07:56 +0300
commit		c39f2d9db0fd81ea20bb5cce9b3f082ca63753e2 (patch)
tree		8e80ed5601b4fb8880a2ca8e08802bc8b1f850bd /drivers/infiniband/hw/vmw_pvrdma
parent		Merge branch 'next' into for-linus (diff)
parent		Input: alps - fix a mismatch between a condition check and its comment (diff)
download	linux-dev-c39f2d9db0fd81ea20bb5cce9b3f082ca63753e2.tar.xz
		linux-dev-c39f2d9db0fd81ea20bb5cce9b3f082ca63753e2.zip
Merge branch 'next' into for-linus
Prepare second round of input updates for 5.3 merge window.
Diffstat (limited to 'drivers/infiniband/hw/vmw_pvrdma')
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/Kconfig	1
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/Makefile	1
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c	17
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c	28
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c	15
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c	3
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c	43
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c	56
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h	27
9 files changed, 80 insertions, 111 deletions
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Kconfig b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
index 5a9790ac0ede..b99c9f0fc06a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/Kconfig
+++ b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_VMWARE_PVRDMA
tristate "VMware Paravirtualized RDMA Driver"
depends on NETDEVICES && ETHERNET && PCI && INET && VMXNET3
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile
index 2f52e0a044a0..0f5fa4e8cfd0 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/Makefile
+++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_srq.o pvrdma_verbs.o
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 104c7db4704f..d7deb19a2800 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -49,6 +49,7 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
#include "pvrdma.h"
@@ -93,7 +94,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
* pvrdma_create_cq - create completion queue
* @ibdev: the device
* @attr: completion queue attributes
- * @context: user context
* @udata: user data
*
* @return: ib_cq completion queue pointer on success,
@@ -101,7 +101,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
*/
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@@ -116,6 +115,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
struct pvrdma_create_cq_resp cq_resp = {0};
struct pvrdma_create_cq ucmd;
+ struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct pvrdma_ucontext, ibucontext);
BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -133,7 +134,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}
cq->ibcq.cqe = entries;
- cq->is_kernel = !context;
+ cq->is_kernel = !udata;
if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -185,8 +186,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
cmd->nchunks = npages;
- cmd->ctx_handle = (context) ?
- (u64)to_vucontext(context)->ctx_handle : 0;
+ cmd->ctx_handle = context ? context->ctx_handle : 0;
cmd->cqe = entries;
cmd->pdir_dma = cq->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
@@ -204,13 +204,13 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (!cq->is_kernel) {
- cq->uar = &(to_vucontext(context)->uar);
+ cq->uar = &context->uar;
/* Copy udata back. */
if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back udata\n");
- pvrdma_destroy_cq(&cq->ibcq);
+ pvrdma_destroy_cq(&cq->ibcq, udata);
return ERR_PTR(-EINVAL);
}
}
@@ -245,10 +245,11 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
/**
* pvrdma_destroy_cq - destroy completion queue
* @cq: the completion queue to destroy.
+ * @udata: user data or null for kernel object
*
* @return: 0 for success.
*/
-int pvrdma_destroy_cq(struct ib_cq *cq)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct pvrdma_cq *vcq = to_vcq(cq);
union pvrdma_cmd_req req;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index ec41400fec0c..40182297f87f 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -143,24 +143,6 @@ static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
- u8 port_num)
-{
- struct net_device *netdev;
- struct pvrdma_dev *dev = to_vdev(ibdev);
-
- if (port_num != 1)
- return NULL;
-
- rcu_read_lock();
- netdev = dev->netdev;
- if (netdev)
- dev_hold(netdev);
- rcu_read_unlock();
-
- return netdev;
-}
-
static const struct ib_device_ops pvrdma_dev_ops = {
.add_gid = pvrdma_add_gid,
.alloc_mr = pvrdma_alloc_mr,
@@ -179,7 +161,6 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.get_dev_fw_str = pvrdma_get_fw_ver_str,
.get_dma_mr = pvrdma_get_dma_mr,
.get_link_layer = pvrdma_port_link_layer,
- .get_netdev = pvrdma_get_netdev,
.get_port_immutable = pvrdma_port_immutable,
.map_mr_sg = pvrdma_map_mr_sg,
.mmap = pvrdma_mmap,
@@ -195,6 +176,8 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};
@@ -204,6 +187,8 @@ static const struct ib_device_ops pvrdma_dev_srq_ops = {
.destroy_srq = pvrdma_destroy_srq,
.modify_srq = pvrdma_modify_srq,
.query_srq = pvrdma_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
};
static int pvrdma_register_device(struct pvrdma_dev *dev)
@@ -277,6 +262,9 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
goto err_qp_free;
}
dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
+ ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
+ if (ret)
+ return ret;
spin_lock_init(&dev->srq_tbl_lock);
rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
@@ -720,6 +708,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_UNREGISTER:
+ ib_device_set_netdev(&dev->ib_dev, NULL, 1);
dev_put(dev->netdev);
dev->netdev = NULL;
break;
@@ -731,6 +720,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
if ((dev->netdev == NULL) &&
(pci_get_drvdata(pdev_net) == ndev)) {
/* this is our netdev */
+ ib_device_set_netdev(&dev->ib_dev, ndev, 1);
dev->netdev = ndev;
dev_hold(ndev);
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index a85884e90e84..65dc47ffb8f3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -119,7 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
- int ret;
+ int ret, npages;
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -133,9 +133,10 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(umem);
}
- if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ npages = ib_umem_num_pages(umem);
+ if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
- umem->npages);
+ npages);
ret = -EINVAL;
goto err_umem;
}
@@ -150,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mmr.size = length;
mr->umem = umem;
- ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
if (ret) {
dev_warn(&dev->pdev->dev,
"could not allocate page directory\n");
@@ -167,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
cmd->length = length;
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->access_flags = access_flags;
- cmd->nchunks = umem->npages;
+ cmd->nchunks = npages;
cmd->pdir_dma = mr->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
@@ -201,7 +202,7 @@ err_umem:
* @return: ib_mr pointer on success, otherwise returns an errno.
*/
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg)
+ u32 max_num_sg, struct ib_udata *udata)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
struct pvrdma_user_mr *mr;
@@ -272,7 +273,7 @@ freemr:
*
* @return: 0 on success.
*/
-int pvrdma_dereg_mr(struct ib_mr *ibmr)
+int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct pvrdma_user_mr *mr = to_vmr(ibmr);
struct pvrdma_dev *dev = to_vdev(ibmr->device);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 08f4257169bd..0eaaead5baec 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -446,10 +446,11 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
/**
* pvrdma_destroy_qp - destroy a queue pair
* @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
*
* @return: 0 on success.
*/
-int pvrdma_destroy_qp(struct ib_qp *qp)
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct pvrdma_qp *vqp = to_vqp(qp);
union pvrdma_cmd_req req;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 951d9d68107a..6cac0c88cf39 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -94,19 +94,18 @@ int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
* @init_attr: shared receive queue attributes
* @udata: user data
*
- * @return: the ib_srq pointer on success, otherwise returns an errno.
+ * @return: 0 on success, otherwise returns an errno.
*/
-struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct pvrdma_srq *srq = NULL;
- struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_srq *srq = to_vsrq(ibsrq);
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
union pvrdma_cmd_req req;
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
- struct pvrdma_create_srq_resp srq_resp = {0};
+ struct pvrdma_create_srq_resp srq_resp = {};
struct pvrdma_create_srq ucmd;
unsigned long flags;
int ret;
@@ -115,31 +114,25 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
/* No support for kernel clients. */
dev_warn(&dev->pdev->dev,
"no shared receive queue support for kernel client\n");
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (init_attr->srq_type != IB_SRQT_BASIC) {
dev_warn(&dev->pdev->dev,
"shared receive queue type %d not supported\n",
init_attr->srq_type);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
dev_warn(&dev->pdev->dev,
"shared receive queue size invalid\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
- return ERR_PTR(-ENOMEM);
-
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = -ENOMEM;
- goto err_srq;
- }
+ return -ENOMEM;
spin_lock_init(&srq->lock);
refcount_set(&srq->refcnt, 1);
@@ -181,7 +174,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
cmd->srq_type = init_attr->srq_type;
cmd->nchunks = srq->npages;
- cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
cmd->attrs.max_wr = init_attr->attr.max_wr;
cmd->attrs.max_sge = init_attr->attr.max_sge;
cmd->attrs.srq_limit = init_attr->attr.srq_limit;
@@ -204,21 +197,20 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
/* Copy udata back. */
if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
- pvrdma_destroy_srq(&srq->ibsrq);
- return ERR_PTR(-EINVAL);
+ pvrdma_destroy_srq(&srq->ibsrq, udata);
+ return -EINVAL;
}
- return &srq->ibsrq;
+ return 0;
err_page_dir:
pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
ib_umem_release(srq->umem);
err_srq:
- kfree(srq);
atomic_dec(&dev->num_srqs);
- return ERR_PTR(ret);
+ return ret;
}
static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
@@ -246,10 +238,11 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
/**
* pvrdma_destroy_srq - destroy shared receive queue
* @srq: the shared receive queue to destroy
+ * @udata: user data or null for kernel object
*
* @return: 0 for success.
*/
-int pvrdma_destroy_srq(struct ib_srq *srq)
+void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct pvrdma_srq *vsrq = to_vsrq(srq);
union pvrdma_cmd_req req;
@@ -268,8 +261,6 @@ int pvrdma_destroy_srq(struct ib_srq *srq)
ret);
pvrdma_free_srq(dev, vsrq);
-
- return 0;
}
/**
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 42fe821f8d58..faf7ecd7b3fa 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>
+#include <rdma/uverbs_ioctl.h>
#include "pvrdma.h"
@@ -70,8 +71,6 @@ int pvrdma_query_device(struct ib_device *ibdev,
if (uhw->inlen || uhw->outlen)
return -EINVAL;
- memset(props, 0, sizeof(*props));
-
props->fw_ver = dev->dsr->caps.fw_ver;
props->sys_image_guid = dev->dsr->caps.sys_image_guid;
props->max_mr_size = dev->dsr->caps.max_mr_size;
@@ -421,13 +420,11 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
/**
* pvrdma_alloc_pd - allocate protection domain
* @ibpd: PD pointer
- * @context: user context
* @udata: user data
*
* @return: the ib_pd protection domain pointer on success, otherwise errno.
*/
-int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
- struct ib_udata *udata)
+int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct pvrdma_pd *pd = to_vpd(ibpd);
@@ -438,13 +435,15 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
struct pvrdma_alloc_pd_resp pd_resp = {0};
int ret;
+ struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct pvrdma_ucontext, ibucontext);
/* Check allowed max pds */
if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
return -ENOMEM;
cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
- cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
+ cmd->ctx_handle = context ? context->ctx_handle : 0;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
if (ret < 0) {
dev_warn(&dev->pdev->dev,
@@ -453,16 +452,16 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
goto err;
}
- pd->privileged = !context;
+ pd->privileged = !udata;
pd->pd_handle = resp->pd_handle;
pd->pdn = resp->pd_handle;
pd_resp.pdn = resp->pd_handle;
- if (context) {
+ if (udata) {
if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back protection domain\n");
- pvrdma_dealloc_pd(&pd->ibpd);
+ pvrdma_dealloc_pd(&pd->ibpd, udata);
return -EFAULT;
}
}
@@ -478,10 +477,11 @@ err:
/**
* pvrdma_dealloc_pd - deallocate protection domain
* @pd: the protection domain to be released
+ * @udata: user data or null for kernel object
*
* @return: 0 on success, otherwise errno.
*/
-void pvrdma_dealloc_pd(struct ib_pd *pd)
+void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
union pvrdma_cmd_req req = {};
@@ -507,34 +507,28 @@ void pvrdma_dealloc_pd(struct ib_pd *pd)
* @udata: user data blob
* @flags: create address handle flags (see enum rdma_create_ah_flags)
*
- * @return: the ib_ah pointer on success, otherwise errno.
+ * @return: 0 on success, otherwise errno.
*/
-struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata)
{
- struct pvrdma_dev *dev = to_vdev(pd->device);
- struct pvrdma_ah *ah;
+ struct pvrdma_dev *dev = to_vdev(ibah->device);
+ struct pvrdma_ah *ah = to_vah(ibah);
const struct ib_global_route *grh;
u8 port_num = rdma_ah_get_port_num(ah_attr);
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
grh = rdma_ah_read_grh(ah_attr);
if ((ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
- return ERR_PTR(-ENOMEM);
-
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah) {
- atomic_dec(&dev->num_ahs);
- return ERR_PTR(-ENOMEM);
- }
+ return -ENOMEM;
- ah->av.port_pd = to_vpd(pd)->pd_handle | (port_num << 24);
+ ah->av.port_pd = to_vpd(ibah->pd)->pd_handle | (port_num << 24);
ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr);
ah->av.src_path_bits |= 0x80;
ah->av.gid_index = grh->sgid_index;
@@ -544,11 +538,7 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
memcpy(ah->av.dgid, grh->dgid.raw, 16);
memcpy(ah->av.dmac, ah_attr->roce.dmac, ETH_ALEN);
- ah->ibah.device = pd->device;
- ah->ibah.pd = pd;
- ah->ibah.uobject = NULL;
-
- return &ah->ibah;
+ return 0;
}
/**
@@ -556,14 +546,10 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
* @ah: the address handle to destroyed
* @flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
*
- * @return: 0 on success.
*/
-int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
+void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
struct pvrdma_dev *dev = to_vdev(ah->device);
- kfree(to_vah(ah));
atomic_dec(&dev->num_ahs);
-
- return 0;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 607aa131d67c..9d7b021e1c59 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -398,36 +398,33 @@ int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
-int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata);
-void pvrdma_dealloc_pd(struct ib_pd *ibpd);
+int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
-int pvrdma_dereg_mr(struct ib_mr *mr);
+int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg);
+ u32 max_num_sg, struct ib_udata *udata);
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
-int pvrdma_destroy_cq(struct ib_cq *cq);
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
-struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
-int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+ struct ib_udata *udata);
+void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
+int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int pvrdma_destroy_srq(struct ib_srq *srq);
+void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
@@ -436,7 +433,7 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-int pvrdma_destroy_qp(struct ib_qp *qp);
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,