author    Bob Pearson <rpearsonhpe@gmail.com>    2021-09-14 11:42:03 -0500
committer Jason Gunthorpe <jgg@nvidia.com>       2021-09-24 10:14:59 -0300
commit    ae6e843fe08d0ea8e158815809dcc20e3a1afc22 (patch)
tree      723ed25b8a0edfde968f72ace5f837daf880e61d /drivers/infiniband/sw/rxe/rxe_qp.c
parent    RDMA/bnxt_re: Check if the vlan is valid before reporting (diff)
RDMA/rxe: Add memory barriers to kernel queues
Earlier patches added memory barriers to protect user space to kernel
space communications. The user space queues were previously shown to
have occasional memory synchronization errors, which were removed by
adding smp_load_acquire and smp_store_release barriers. This patch
extends that protection to the case where queues are shared between
kernel space threads.

This patch also extends the queue types to include kernel ULP queues,
which access the other end of the queues in kernel verbs calls like
poll_cq and post_send/recv.

Link: https://lore.kernel.org/r/20210914164206.19768-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
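The barrier pairing the commit message describes is the standard acquire/release
producer/consumer pattern. Below is a minimal sketch using C11 atomics as
userspace stand-ins for the kernel's smp_load_acquire()/smp_store_release();
the ring layout and the names (struct ring, ring_push, ring_pop, RING_SIZE)
are illustrative assumptions, not the rxe queue structures.

/*
 * Minimal single-producer/single-consumer ring using C11
 * acquire/release atomics. Indices are free-running and masked
 * into the slot array, so full/empty are distinguishable.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 64    /* power of two, so masking wraps */

struct ring {
        _Atomic unsigned int producer_index;
        _Atomic unsigned int consumer_index;
        int slots[RING_SIZE];
};

static bool ring_push(struct ring *r, int val)
{
        unsigned int prod = atomic_load_explicit(&r->producer_index,
                                                 memory_order_relaxed);
        unsigned int cons = atomic_load_explicit(&r->consumer_index,
                                                 memory_order_acquire);

        if (prod - cons == RING_SIZE)
                return false;   /* full */

        r->slots[prod & (RING_SIZE - 1)] = val;
        /* release: the slot write is visible before the new index */
        atomic_store_explicit(&r->producer_index, prod + 1,
                              memory_order_release);
        return true;
}

static bool ring_pop(struct ring *r, int *val)
{
        unsigned int cons = atomic_load_explicit(&r->consumer_index,
                                                 memory_order_relaxed);
        /* acquire: pairs with the producer's release store */
        unsigned int prod = atomic_load_explicit(&r->producer_index,
                                                 memory_order_acquire);

        if (cons == prod)
                return false;   /* empty */

        *val = r->slots[cons & (RING_SIZE - 1)];
        atomic_store_explicit(&r->consumer_index, cons + 1,
                              memory_order_release);
        return true;
}

The acquire load of the peer's index pairs with the peer's release store, so
once the new index is observed, the slot contents published by that index are
guaranteed visible; this holds whether the peer is a user process or another
kernel thread, which is why the same pattern extends to the kernel queues here.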
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_qp.c')
-rw-r--r--    drivers/infiniband/sw/rxe/rxe_qp.c    12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index fa97ce9eaea3..c8f4790083d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -229,7 +229,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
 	wqe_size += sizeof(struct rxe_send_wqe);
 
-	type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+	type = QUEUE_TYPE_FROM_CLIENT;
 	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
 			wqe_size, type);
 	if (!qp->sq.queue)
@@ -246,12 +246,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 		return err;
 	}
 
-	if (qp->is_user)
-		qp->req.wqe_index = producer_index(qp->sq.queue,
-						QUEUE_TYPE_FROM_USER);
-	else
-		qp->req.wqe_index = producer_index(qp->sq.queue,
-						QUEUE_TYPE_KERNEL);
+	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
+					QUEUE_TYPE_FROM_CLIENT);
 
 	qp->req.state = QP_STATE_RESET;
 	qp->req.opcode = -1;
@@ -291,7 +287,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
 			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
 
-		type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+		type = QUEUE_TYPE_FROM_CLIENT;
 		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
 					wqe_size, type);
 		if (!qp->rq.queue)
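The six-line branch removed in the second hunk is folded into
queue_get_producer(), which dispatches on the queue type fixed at init instead
of testing qp->is_user at every call site. An illustrative sketch of the shape
of such a helper follows; the real one lives in rxe_queue.h and also handles
driver-facing queue types, so the cases and field accesses here follow the
diff but are an assumption, not a verbatim copy.

static inline u32 queue_get_producer(const struct rxe_queue *q,
				     enum queue_type type)
{
	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* the index is written by the client end (user space
		 * or a kernel ULP); the acquire pairs with the
		 * client's smp_store_release() so the WQE contents
		 * are visible before the index that publishes them
		 */
		return smp_load_acquire(&q->buf->producer_index);
	case QUEUE_TYPE_TO_CLIENT:
		/* this end is the only writer of the producer index,
		 * so a plain read of its own store is sufficient
		 */
		return q->buf->producer_index;
	}
	return 0;
}

Encoding the barrier choice in the queue type means the acquire is applied
exactly where an index crosses a thread or address-space boundary, and nowhere
else, which is why the call sites above collapse to a single line.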