path: root/drivers/infiniband/sw/rxe/rxe_comp.c
author     Bob Pearson <rpearsonhpe@gmail.com>  2021-05-27 14:47:48 -0500
committer  Jason Gunthorpe <jgg@nvidia.com>  2021-06-03 15:53:01 -0300
commit     5bcf5a59c41e19141783c7305d420a5e36c937b2 (patch)
tree       899a03f565ca4445b39db55d7a04132cacab9ad3 /drivers/infiniband/sw/rxe/rxe_comp.c
parent     RDMA/rxe: Protect user space index loads/stores (diff)
download   linux-dev-5bcf5a59c41e19141783c7305d420a5e36c937b2.tar.xz
           linux-dev-5bcf5a59c41e19141783c7305d420a5e36c937b2.zip
RDMA/rxe: Protect kernel index from user space
In order to prevent user space from modifying the index that belongs to the kernel for shared queues, let the kernel use a local copy of the index and copy any new values of that index to the shared rxe_queue_buf struct.

This adds more switch statements, which decreases the performance of the queue API. Move the type into the parameter list for these functions so that the compiler can optimize out the switch statements when the explicit type is known. Modify all the calls in the driver on performance paths to pass in the explicit queue type.

Link: https://lore.kernel.org/r/20210527194748.662636-4-rpearsonhpe@gmail.com
Link: https://lore.kernel.org/linux-rdma/20210526165239.GP1002214@nvidia.com/
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
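As a rough illustration of the idea described above, the sketch below shows how an explicit, compile-time-constant queue-type parameter lets an inlined accessor keep a kernel-local copy of the index while the compiler folds the switch away. The names here (example_queue, example_queue_head, example_advance_consumer, buf_producer, buf_consumer) are simplified stand-ins, not the actual rxe_queue.h API; only the QUEUE_TYPE_* idea mirrors the diff below.

/* Minimal sketch, not the kernel implementation: illustrative names only. */
#include <stdint.h>
#include <stddef.h>

enum example_queue_type {        /* mirrors the QUEUE_TYPE_* values in the diff */
	EX_QUEUE_TYPE_KERNEL,    /* both indices are kernel-private */
	EX_QUEUE_TYPE_FROM_USER, /* user space produces, kernel consumes */
};

struct example_queue {
	uint32_t index_mask;             /* ring size is a power of two */
	uint32_t producer_index;         /* kernel-local copies of the indices */
	uint32_t consumer_index;
	volatile uint32_t *buf_producer; /* indices in the buffer shared with user space */
	volatile uint32_t *buf_consumer;
	uint8_t *slots;
	size_t slot_size;
};

/* Return the element at the consumer index, or NULL if the queue is empty.
 * When 'type' is a compile-time constant the switch is optimized out after
 * inlining. */
static inline void *example_queue_head(struct example_queue *q,
				       enum example_queue_type type)
{
	uint32_t cons = q->consumer_index;
	uint32_t prod;

	switch (type) {
	case EX_QUEUE_TYPE_FROM_USER:
		/* the producer index is under user-space control: load it once
		 * and mask it so a hostile value cannot point past the ring */
		prod = *q->buf_producer & q->index_mask;
		break;
	case EX_QUEUE_TYPE_KERNEL:
	default:
		prod = q->producer_index;
		break;
	}

	if (prod == cons)
		return NULL;
	return q->slots + (size_t)(cons & q->index_mask) * q->slot_size;
}

/* Advance the kernel-owned consumer index; for a queue shared with user
 * space, publish the new value to the shared buffer as well. */
static inline void example_advance_consumer(struct example_queue *q,
					    enum example_queue_type type)
{
	q->consumer_index = (q->consumer_index + 1) & q->index_mask;

	switch (type) {
	case EX_QUEUE_TYPE_FROM_USER:
		*q->buf_consumer = q->consumer_index; /* user space may read this */
		break;
	case EX_QUEUE_TYPE_KERNEL:
	default:
		/* nothing extra: the kernel-local copy is the only reader */
		break;
	}
}

Because get_wqe() and do_complete() pass QUEUE_TYPE_FROM_USER or QUEUE_TYPE_KERNEL as constants, the switch costs nothing on those hot paths; rxe_drain_resp_pkts() is not performance critical, so it can look the type up from the queue itself (q->type), as the last hunk below does.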
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_comp.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c  31
1 file changed, 21 insertions, 10 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 2af26737d32d..32e587c47637 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -141,7 +141,10 @@ static inline enum comp_state get_wqe(struct rxe_qp *qp,
 	/* we come here whether or not we found a response packet to see if
 	 * there are any posted WQEs
 	 */
-	wqe = queue_head(qp->sq.queue);
+	if (qp->is_user)
+		wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_USER);
+	else
+		wqe = queue_head(qp->sq.queue, QUEUE_TYPE_KERNEL);
 	*wqe_p = wqe;
 
 	/* no WQE or requester has not started it yet */
@@ -414,16 +417,23 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	struct rxe_cqe cqe;
+	bool post;
+
+	/* do we need to post a completion */
+	post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
+			(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+			wqe->status != IB_WC_SUCCESS);
 
-	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
-	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-	    wqe->status != IB_WC_SUCCESS) {
+	if (post)
 		make_send_cqe(qp, wqe, &cqe);
-		advance_consumer(qp->sq.queue);
+
+	if (qp->is_user)
+		advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_USER);
+	else
+		advance_consumer(qp->sq.queue, QUEUE_TYPE_KERNEL);
+
+	if (post)
 		rxe_cq_post(qp->scq, &cqe, 0);
-	} else {
-		advance_consumer(qp->sq.queue);
-	}
 
 	if (wqe->wr.opcode == IB_WR_SEND ||
 	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
@@ -511,6 +521,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
 {
 	struct sk_buff *skb;
 	struct rxe_send_wqe *wqe;
+	struct rxe_queue *q = qp->sq.queue;
 
 	while ((skb = skb_dequeue(&qp->resp_pkts))) {
 		rxe_drop_ref(qp);
@@ -518,12 +529,12 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
 		ib_device_put(qp->ibqp.device);
 	}
 
-	while ((wqe = queue_head(qp->sq.queue))) {
+	while ((wqe = queue_head(q, q->type))) {
 		if (notify) {
 			wqe->status = IB_WC_WR_FLUSH_ERR;
 			do_complete(qp, wqe);
 		} else {
-			advance_consumer(qp->sq.queue);
+			advance_consumer(q, q->type);
 		}
 	}
 }