author		Bob Pearson <rpearsonhpe@gmail.com>	2021-05-27 14:47:46 -0500
committer	Jason Gunthorpe <jgg@nvidia.com>	2021-06-03 15:53:01 -0300
commit		59daff49f25fbb3197c03c879e23a31ddd23d98f
tree		280429a279bac368f0982e47b01c33da32f9bb7c /drivers/infiniband/sw
parent		Merge branch 'irdma' into rdma.git for-next
RDMA/rxe: Add a type flag to rxe_queue structs
To create optimal code we only want to use smp_load_acquire() and
smp_store_release() for user indices in the rxe_queue APIs, since kernel
indices are protected by locks which also act as memory barriers. By
adding a type to the queues we can determine which indices need to be
protected.

Link: https://lore.kernel.org/r/20210527194748.662636-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
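For illustration, a minimal sketch (not part of this patch) of how index
accessors could branch on the new type flag. The helper names here are
hypothetical; the real accessors only arrive in later patches of this
series.

/*
 * Hypothetical helpers sketching the intent of the type flag;
 * producer_index lives in the shared struct rxe_queue_buf.
 */
static inline u32 queue_get_producer(const struct rxe_queue *q)
{
	/*
	 * On a QUEUE_TYPE_FROM_USER queue (SQ/RQ/SRQ) user space
	 * advances the producer index, so pair this load with user
	 * space's release store.
	 */
	if (q->type == QUEUE_TYPE_FROM_USER)
		return smp_load_acquire(&q->buf->producer_index);

	/* kernel-owned index: the queue locks already order accesses */
	return q->buf->producer_index;
}

static inline void queue_set_producer(struct rxe_queue *q, u32 index)
{
	/*
	 * On a QUEUE_TYPE_TO_USER queue (CQ) user space consumes the
	 * entries, so the entry data must be visible before the index.
	 */
	if (q->type == QUEUE_TYPE_TO_USER)
		smp_store_release(&q->buf->producer_index, index);
	else
		q->buf->producer_index = index;
}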
Diffstat (limited to 'drivers/infiniband/sw')
 drivers/infiniband/sw/rxe/rxe_cq.c    |  4 +++-
 drivers/infiniband/sw/rxe/rxe_qp.c    | 12 ++++++++----
 drivers/infiniband/sw/rxe/rxe_queue.c |  8 ++++----
 drivers/infiniband/sw/rxe/rxe_queue.h | 13 ++++++++++---
 drivers/infiniband/sw/rxe/rxe_srq.c   |  4 +++-
 5 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index b315ebf041ac..1d4d8a31bc12 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -59,9 +59,11 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 		     struct rxe_create_cq_resp __user *uresp)
 {
 	int err;
+	enum queue_type type;
 
+	type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
 	cq->queue = rxe_queue_init(rxe, &cqe,
-				   sizeof(struct rxe_cqe));
+				   sizeof(struct rxe_cqe), type);
 	if (!cq->queue) {
 		pr_warn("unable to create cq\n");
 		return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 34ae957a315c..9bd6bf8f9bd9 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -206,6 +206,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 {
 	int err;
 	int wqe_size;
+	enum queue_type type;
 
 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
 	if (err < 0)
@@ -231,7 +232,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
 	wqe_size += sizeof(struct rxe_send_wqe);
 
-	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
+	type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
+			wqe_size, type);
 	if (!qp->sq.queue)
 		return -ENOMEM;
 
@@ -273,6 +276,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 {
 	int err;
 	int wqe_size;
+	enum queue_type type;
 
 	if (!qp->srq) {
 		qp->rq.max_wr = init->cap.max_recv_wr;
@@ -283,9 +287,9 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
 			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
 
-		qp->rq.queue = rxe_queue_init(rxe,
-					      &qp->rq.max_wr,
-					      wqe_size);
+		type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
+				wqe_size, type);
 		if (!qp->rq.queue)
 			return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index fa69241b1187..8f844d0b9e77 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -52,9 +52,8 @@ inline void rxe_queue_reset(struct rxe_queue *q)
 	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
 }
 
-struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
-				 int *num_elem,
-				 unsigned int elem_size)
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+			unsigned int elem_size, enum queue_type type)
 {
 	struct rxe_queue *q;
 	size_t buf_size;
@@ -69,6 +68,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 		goto err1;
 
 	q->rxe = rxe;
+	q->type = type;
 
 	/* used in resize, only need to copy used part of queue */
 	q->elem_size = elem_size;
@@ -136,7 +136,7 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 	int err;
 	unsigned long flags = 0, flags1;
 
-	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
+	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
 	if (!new_q)
 		return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 2902ca7b288c..4512745419f8 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -19,6 +19,13 @@
  * of the queue is one less than the number of element slots
  */
 
+/* type of queue */
+enum queue_type {
+	QUEUE_TYPE_KERNEL,
+	QUEUE_TYPE_TO_USER,
+	QUEUE_TYPE_FROM_USER,
+};
+
 struct rxe_queue {
 	struct rxe_dev *rxe;
 	struct rxe_queue_buf *buf;
@@ -27,6 +34,7 @@ struct rxe_queue {
 	size_t elem_size;
 	unsigned int log2_elem_size;
 	u32 index_mask;
+	enum queue_type type;
 };
 
 int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
@@ -35,9 +43,8 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 
 void rxe_queue_reset(struct rxe_queue *q);
 
-struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
-				 int *num_elem,
-				 unsigned int elem_size);
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+			unsigned int elem_size, enum queue_type type);
 
 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 		     unsigned int elem_size, struct ib_udata *udata,
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 41b0d1e11baf..52c5593741ec 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -78,6 +78,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	int err;
 	int srq_wqe_size;
 	struct rxe_queue *q;
+	enum queue_type type;
 
 	srq->ibsrq.event_handler = init->event_handler;
 	srq->ibsrq.srq_context = init->srq_context;
@@ -91,8 +92,9 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	spin_lock_init(&srq->rq.producer_lock);
 	spin_lock_init(&srq->rq.consumer_lock);
 
+	type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
 	q = rxe_queue_init(rxe, &srq->rq.max_wr,
-			   srq_wqe_size);
+			   srq_wqe_size, type);
 	if (!q) {
 		pr_warn("unable to allocate queue for srq\n");
 		return -ENOMEM;