author		Bob Pearson <rpearsonhpe@gmail.com>	2021-09-14 11:42:03 -0500
committer	Jason Gunthorpe <jgg@nvidia.com>	2021-09-24 10:14:59 -0300
commit		ae6e843fe08d0ea8e158815809dcc20e3a1afc22 (patch)
tree		723ed25b8a0edfde968f72ace5f837daf880e61d /drivers/infiniband/sw/rxe/rxe_req.c
parent		RDMA/bnxt_re: Check if the vlan is valid before reporting (diff)
RDMA/rxe: Add memory barriers to kernel queues
Earlier patches added memory barriers to protect communications between user space and kernel space. The user space queues were previously shown to have occasional memory synchronization errors, which were removed by adding smp_load_acquire and smp_store_release barriers. This patch extends that protection to the case where queues are shared between kernel space threads.

This patch also extends the queue types to include kernel ULP queues, which access the other end of the queues in kernel verbs calls like poll_cq and post_send/recv.

Link: https://lore.kernel.org/r/20210914164206.19768-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
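As an illustration of the barrier pattern described above, here is a minimal sketch of a single-producer, single-consumer ring whose shared index is published with smp_store_release() and read with smp_load_acquire(). It is a hypothetical example, not the rxe implementation: the demo_queue type and demo_* helpers are invented names that only mirror the style of the rxe accessors such as queue_get_producer().

/*
 * Hypothetical sketch only -- not the rxe code. The index written by
 * one thread is read by the other through an acquire/release pair so
 * that the queue entry filled in before the index update is guaranteed
 * to be visible to the reader.
 */
#include <linux/types.h>	/* u32, u64 */
#include <asm/barrier.h>	/* smp_load_acquire(), smp_store_release() */

struct demo_queue {
	u32 producer_index;	/* written by producer, read by consumer */
	u32 consumer_index;	/* written by consumer, read by producer */
	u32 index_mask;		/* depth - 1, with depth a power of two */
	u64 data[];		/* queue entries */
};

/* consumer side: observe the index advanced by the producer thread */
static inline u32 demo_queue_get_producer(const struct demo_queue *q)
{
	return smp_load_acquire(&q->producer_index);
}

/*
 * producer side: fill the entry first, then publish the new index.
 * A full-queue check against consumer_index is omitted for brevity.
 */
static inline void demo_queue_post(struct demo_queue *q, u64 val)
{
	u32 prod = q->producer_index;	/* only the producer writes this */

	q->data[prod & q->index_mask] = val;
	smp_store_release(&q->producer_index, prod + 1);
}

The release store guarantees that the write to data[] is visible before the new producer_index value; the acquire load on the other side guarantees the reader sees the entry once it sees the index. Dropping either half allows a consumer on another CPU to observe the advanced index before the entry itself, which is the kind of synchronization error this patch closes for kernel-to-kernel queues as well.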
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_req.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_req.c	37
1 file changed, 14 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 3894197a82f6..801e36cefc29 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -49,21 +49,16 @@ static void req_retry(struct rxe_qp *qp)
 	unsigned int cons;
 	unsigned int prod;
 
-	if (qp->is_user) {
-		cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
-		prod = producer_index(q, QUEUE_TYPE_FROM_USER);
-	} else {
-		cons = consumer_index(q, QUEUE_TYPE_KERNEL);
-		prod = producer_index(q, QUEUE_TYPE_KERNEL);
-	}
+	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 
 	qp->req.wqe_index = cons;
 	qp->req.psn = qp->comp.psn;
 	qp->req.opcode = -1;
 
 	for (wqe_index = cons; wqe_index != prod;
-			wqe_index = next_index(q, wqe_index)) {
-		wqe = addr_from_index(qp->sq.queue, wqe_index);
+			wqe_index = queue_next_index(q, wqe_index)) {
+		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
 		mask = wr_opcode_mask(wqe->wr.opcode, qp);
 
 		if (wqe->state == wqe_state_posted)
@@ -121,15 +116,9 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	unsigned int cons;
 	unsigned int prod;
 
-	if (qp->is_user) {
-		wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
-		cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
-		prod = producer_index(q, QUEUE_TYPE_FROM_USER);
-	} else {
-		wqe = queue_head(q, QUEUE_TYPE_KERNEL);
-		cons = consumer_index(q, QUEUE_TYPE_KERNEL);
-		prod = producer_index(q, QUEUE_TYPE_KERNEL);
-	}
+	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
+	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 
 	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
 		/* check to see if we are drained;
@@ -170,7 +159,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	if (index == prod)
 		return NULL;
 
-	wqe = addr_from_index(q, index);
+	wqe = queue_addr_from_index(q, index);
 
 	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
 		      qp->req.state == QP_STATE_DRAINED) &&
@@ -560,7 +549,8 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 	qp->req.opcode = pkt->opcode;
 
 	if (pkt->mask & RXE_END_MASK)
-		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+		qp->req.wqe_index = queue_next_index(qp->sq.queue,
+						     qp->req.wqe_index);
 
 	qp->need_req_skb = 0;
 
@@ -614,7 +604,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 
 	wqe->state = wqe_state_done;
 	wqe->status = IB_WC_SUCCESS;
-	qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 
 	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
 	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -645,7 +635,8 @@ next_wqe:
 		goto exit;
 
 	if (unlikely(qp->req.state == QP_STATE_RESET)) {
-		qp->req.wqe_index = consumer_index(q, q->type);
+		qp->req.wqe_index = queue_get_consumer(q,
+					QUEUE_TYPE_FROM_CLIENT);
 		qp->req.opcode = -1;
 		qp->req.need_rd_atomic = 0;
 		qp->req.wait_psn = 0;
@@ -711,7 +702,7 @@ next_wqe:
 			wqe->last_psn = qp->req.psn;
 			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
 			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
-			qp->req.wqe_index = next_index(qp->sq.queue,
+			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;