author	Naresh Gottumukkala <bgottumukkala@emulex.com>	2013-08-26 15:27:44 +0530
committer	Roland Dreier <roland@purestorage.com>	2013-09-02 21:18:32 -0700
commit	cffce99051b80c90630a9fff662a1b25e278069d (patch)
tree	58257cc50f34458649c412d5a723063d53b417a3	/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
parent	RDMA/ocrdma: FRMA code cleanup (diff)
RDMA/ocrdma: Don't use PD 0 for userspace CQ DB
The Create_CQ verb doesn't provide a PD pointer, so until now we have been creating all CQ DB regions (both userspace and kernel) from PD0. This results in mmapping PD0 into applications, and a rogue userspace application can mess things up. An even more serious issue is that the be2net NIC also uses PD0.

This patch addresses the problem as follows:

1) Create a PD page for every userspace application when alloc_ucontext is called. This is destroyed in dealloc_ucontext.

2) All CQs for that context use the PD allocated in its ucontext.

3) The first create_pd call from an application returns the PD from its ucontext (no new PD is created).

4) Subsequent create_pd calls from the application create new PDs.

Signed-off-by: Naresh Gottumukkala <bgottumukkala@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
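The allocation policy in steps 1)-4) can be modeled outside the driver. Below is a minimal, standalone C sketch of that policy only; the names here (ucontext, cntxt_pd, cntxt_pd_used, alloc_ucontext, create_pd) are illustrative stand-ins, not the driver's actual symbols:

	/* Standalone model of the per-ucontext PD policy described above.
	 * All names are illustrative; they are not ocrdma's real symbols. */
	#include <stdbool.h>
	#include <stdio.h>

	struct pd {
		int id;
	};

	struct ucontext {
		struct pd cntxt_pd;	/* PD created up front in alloc_ucontext */
		bool cntxt_pd_used;	/* already handed to a create_pd call? */
	};

	static int next_pd_id = 1;	/* PD 0 stays reserved (be2net/kernel) */

	/* alloc_ucontext: every userspace context gets its own PD immediately,
	 * so its CQ doorbell pages never map PD 0. */
	static void alloc_ucontext(struct ucontext *uc)
	{
		uc->cntxt_pd.id = next_pd_id++;
		uc->cntxt_pd_used = false;
	}

	/* create_pd: the first call returns the context's pre-allocated PD;
	 * subsequent calls allocate fresh PDs. */
	static struct pd create_pd(struct ucontext *uc)
	{
		struct pd new_pd;

		if (!uc->cntxt_pd_used) {
			uc->cntxt_pd_used = true;
			return uc->cntxt_pd;
		}
		new_pd.id = next_pd_id++;
		return new_pd;
	}

	int main(void)
	{
		struct ucontext uc;

		alloc_ucontext(&uc);
		/* Every CQ created under this context passes uc.cntxt_pd.id as
		 * the new pd_id argument of ocrdma_mbx_create_cq(). */
		printf("CQ DB PD:         %d\n", uc.cntxt_pd.id);
		printf("first create_pd:  %d\n", create_pd(&uc).id);	/* same PD */
		printf("second create_pd: %d\n", create_pd(&uc).id);	/* new PD */
		return 0;
	}

The design point is that PD 0 is never handed to userspace: the context's private PD backs the CQ doorbell mapping, and it doubles as the application's first PD so the up-front allocation is not wasted.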
Diffstat (limited to 'drivers/infiniband/hw/ocrdma/ocrdma_hw.c')
-rw-r--r--	drivers/infiniband/hw/ocrdma/ocrdma_hw.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 16ce664dc466..618c2124e619 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1309,7 +1309,7 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
 }
 
 int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
-			 int entries, int dpp_cq)
+			 int entries, int dpp_cq, u16 pd_id)
 {
 	int status = -ENOMEM; int max_hw_cqe;
 	struct pci_dev *pdev = dev->nic_info.pdev;
@@ -1357,7 +1357,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
 	cq->eqn = ocrdma_bind_eq(dev);
 
-	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
 	cqe_count = cq->len / cqe_size;
 	if (cqe_count > 1024) {
 		/* Set cnt to 3 to indicate more than 1024 cq entries */
@@ -1393,6 +1393,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 		cq->phase_change = true;
 	}
+	cmd->cmd.pd_id = pd_id; /* valid only for v3 */
 	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
 
 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
 	if (status)
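For context, the new pd_id argument has to be supplied by the callers of ocrdma_mbx_create_cq(). A plausible sketch of the userspace-vs-kernel split at the ocrdma_create_cq() call site is shown below; this is an assumption drawn from the commit message, and get_ocrdma_ucontext(), cntxt_pd, udata and dpp_cq here are not guaranteed to match the verbatim ocrdma_verbs.c change in this series:

	u16 pd_id = 0;

	/* Userspace CQs take the PD created at alloc_ucontext time
	 * (assumed accessor and field names); kernel CQs keep PD 0. */
	if (udata && ib_ctx)
		pd_id = get_ocrdma_ucontext(ib_ctx)->cntxt_pd->id;

	status = ocrdma_mbx_create_cq(dev, cq, entries, dpp_cq, pd_id);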