Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  546
1 file changed, 225 insertions(+), 321 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9dd4bd7aea92..88825edc7dce 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1,41 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
+
#include "rxe.h"
-#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"
@@ -53,16 +26,13 @@ static int rxe_query_device(struct ib_device *dev,
}
static int rxe_query_port(struct ib_device *dev,
- u8 port_num, struct ib_port_attr *attr)
+ u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_port *port;
int rc;
- port = &rxe->port;
-
/* *attr being zeroed by the caller, avoid zeroing it here */
- *attr = port->attr;
+ *attr = rxe->port.attr;
mutex_lock(&rxe->usdev_lock);
rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
@@ -81,24 +51,13 @@ static int rxe_query_port(struct ib_device *dev,
}
static int rxe_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey)
+ u32 port_num, u16 index, u16 *pkey)
{
- struct rxe_dev *rxe = to_rdev(device);
- struct rxe_port *port;
-
- port = &rxe->port;
-
- if (unlikely(index >= port->attr.pkey_tbl_len)) {
- dev_warn(device->dev.parent, "invalid index = %d\n",
- index);
- goto err1;
- }
+ if (index > 0)
+ return -EINVAL;
- *pkey = port->pkey_tbl[index];
+ *pkey = IB_DEFAULT_PKEY_FULL;
return 0;
-
-err1:
- return -EINVAL;
}
static int rxe_modify_device(struct ib_device *dev,
@@ -122,7 +81,7 @@ static int rxe_modify_device(struct ib_device *dev,
}
static int rxe_modify_port(struct ib_device *dev,
- u8 port_num, int mask, struct ib_port_modify *attr)
+ u32 port_num, int mask, struct ib_port_modify *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
struct rxe_port *port;
@@ -139,29 +98,27 @@ static int rxe_modify_port(struct ib_device *dev,
}
static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
- u8 port_num)
+ u32 port_num)
{
- struct rxe_dev *rxe = to_rdev(dev);
-
- return rxe_link_layer(rxe, port_num);
+ return IB_LINK_LAYER_ETHERNET;
}
-static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
- struct rxe_dev *rxe = to_rdev(uctx->device);
- struct rxe_ucontext *uc = to_ruc(uctx);
+ struct rxe_dev *rxe = to_rdev(ibuc->device);
+ struct rxe_ucontext *uc = to_ruc(ibuc);
- return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
+ return rxe_add_to_pool(&rxe->uc_pool, uc);
}
static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
struct rxe_ucontext *uc = to_ruc(ibuc);
- rxe_drop_ref(uc);
+ rxe_cleanup(uc);
}
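
Throughout this diff, the pelem-based pool calls (rxe_add_to_pool(&pool, &obj->pelem) paired with rxe_drop_ref()) give way to calls that take the object itself, with a split create/finalize/cleanup lifecycle. A minimal userspace model of that lifecycle, using illustrative names rather than the real rxe pool API:

/* Model: add_to_pool() makes the object tracked but not yet visible,
 * finalize() publishes it for lookup, cleanup() unpublishes it and
 * drops the final reference.  Not the kernel API; a sketch only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	atomic_bool visible;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	atomic_init(&o->refcnt, 1);	/* caller's reference */
	atomic_init(&o->visible, false);
	return o;
}

static void obj_finalize(struct obj *o)
{
	/* publish: lookups may now find the object */
	atomic_store_explicit(&o->visible, true, memory_order_release);
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)	/* was last ref */
		free(o);
}

static void obj_cleanup(struct obj *o)
{
	/* unpublish first so no new lookup succeeds, then drop the ref */
	atomic_store(&o->visible, false);
	obj_put(o);
}

int main(void)
{
	struct obj *o = obj_alloc();

	obj_finalize(o);
	obj_cleanup(o);
	printf("lifecycle ok\n");
	return 0;
}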
-static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
+static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
struct ib_port_immutable *immutable)
{
int err;
@@ -185,33 +142,64 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
+ return rxe_add_to_pool(&rxe->pd_pool, pd);
}
-static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
- rxe_drop_ref(pd);
+ rxe_cleanup(pd);
+ return 0;
}
-static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
- u32 flags, struct ib_udata *udata)
+static int rxe_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
- int err;
struct rxe_dev *rxe = to_rdev(ibah->device);
struct rxe_ah *ah = to_rah(ibah);
+ struct rxe_create_ah_resp __user *uresp = NULL;
+ int err;
- err = rxe_av_chk_attr(rxe, attr);
+ if (udata) {
+ /* test if new user provider */
+ if (udata->outlen >= sizeof(*uresp))
+ uresp = udata->outbuf;
+ ah->is_user = true;
+ } else {
+ ah->is_user = false;
+ }
+
+ err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
if (err)
return err;
- err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
+ err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err)
return err;
- rxe_init_av(attr, &ah->av);
+ /* create index > 0 */
+ ah->ah_num = ah->elem.index;
+
+ if (uresp) {
+ /* only if new user provider */
+ err = copy_to_user(&uresp->ah_num, &ah->ah_num,
+ sizeof(uresp->ah_num));
+ if (err) {
+ rxe_cleanup(ah);
+ return -EFAULT;
+ }
+ } else if (ah->is_user) {
+ /* only if old user provider */
+ ah->ah_num = 0;
+ }
+
+ rxe_init_av(init_attr->ah_attr, &ah->av);
+ rxe_finalize(ah);
+
return 0;
}
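
Note how rxe_create_ah() detects a new-style user provider purely by the size of the response buffer: only a provider built against rxe_create_ah_resp passes outlen >= sizeof(*uresp), and only that provider gets ah_num back. A minimal sketch of this length-based ABI extension, with invented struct and function names:

#include <stdio.h>
#include <string.h>

struct resp_v2 { unsigned int ah_num; };	/* stands in for rxe_create_ah_resp */

static int create_ah(void *outbuf, size_t outlen, unsigned int ah_num)
{
	if (outbuf && outlen >= sizeof(struct resp_v2)) {
		struct resp_v2 r = { .ah_num = ah_num };

		memcpy(outbuf, &r, sizeof(r));	/* new provider: report the index */
		return 0;
	}
	return 0;	/* old provider: smaller buffer, nothing to report */
}

int main(void)
{
	struct resp_v2 r = { 0 };

	create_ah(&r, sizeof(r), 5);
	printf("ah_num=%u\n", r.ah_num);
	return 0;
}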
@@ -239,11 +227,13 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
- rxe_drop_ref(ah);
+ rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+
+ return 0;
}
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
@@ -253,8 +243,10 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
u32 length;
struct rxe_recv_wqe *recv_wqe;
int num_sge = ibwr->num_sge;
+ int full;
- if (unlikely(queue_full(rq->queue))) {
+ full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
+ if (unlikely(full)) {
err = -ENOMEM;
goto err1;
}
@@ -268,9 +260,8 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- recv_wqe = producer_addr(rq->queue);
+ recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
recv_wqe->wr_id = ibwr->wr_id;
- recv_wqe->num_sge = num_sge;
memcpy(recv_wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
@@ -281,12 +272,8 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
- /* make sure all changes to the work queue are written before we
- * update the producer pointer
- */
- smp_wmb();
+ queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
- advance_producer(rq->queue);
return 0;
err1:
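
The open-coded smp_wmb() between filling the WQE and bumping the producer index is gone; the ordering now lives inside queue_advance_producer(). A userspace model of the single-producer ring pattern this implies, using C11 release/acquire atomics (names are illustrative, not the rxe_queue.h API):

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8			/* power of two */

struct ring {
	_Atomic unsigned int prod;
	_Atomic unsigned int cons;
	int slot[RING_SIZE];
};

static int ring_full(struct ring *r)
{
	unsigned int p = atomic_load_explicit(&r->prod, memory_order_relaxed);
	unsigned int c = atomic_load_explicit(&r->cons, memory_order_acquire);

	return p - c == RING_SIZE;
}

static void ring_post(struct ring *r, int val)
{
	unsigned int p = atomic_load_explicit(&r->prod, memory_order_relaxed);

	r->slot[p & (RING_SIZE - 1)] = val;
	/* release: the payload write becomes visible before the new index,
	 * which is the job the removed smp_wmb() used to do by hand
	 */
	atomic_store_explicit(&r->prod, p + 1, memory_order_release);
}

int main(void)
{
	struct ring r = { 0 };

	if (!ring_full(&r))
		ring_post(&r, 42);
	printf("posted, prod=%u\n", atomic_load(&r.prod));
	return 0;
}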
@@ -308,27 +295,29 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
uresp = udata->outbuf;
}
- err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+ if (init->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ err = rxe_srq_chk_init(rxe, init);
if (err)
- goto err1;
+ return err;
- err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+ err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err)
- goto err1;
+ return err;
- rxe_add_ref(pd);
+ rxe_get(pd);
srq->pd = pd;
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err)
- goto err2;
+ goto err_cleanup;
return 0;
-err2:
- rxe_drop_ref(pd);
- rxe_drop_ref(srq);
-err1:
+err_cleanup:
+ rxe_cleanup(srq);
+
return err;
}
@@ -352,16 +341,12 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
err = rxe_srq_chk_attr(rxe, srq, attr, mask);
if (err)
- goto err1;
+ return err;
err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
if (err)
- goto err1;
-
+ return err;
return 0;
-
-err1:
- return err;
}
static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
@@ -377,23 +362,20 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return 0;
}
-static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);
- if (srq->rq.queue)
- rxe_queue_cleanup(srq->rq.queue);
-
- rxe_drop_ref(srq->pd);
- rxe_drop_ref(srq);
+ rxe_cleanup(srq);
+ return 0;
}
static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
int err = 0;
- unsigned long flags;
struct rxe_srq *srq = to_rsrq(ibsrq);
+ unsigned long flags;
spin_lock_irqsave(&srq->rq.producer_lock, flags);
@@ -412,54 +394,51 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
return err;
}
-static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init,
- struct ib_udata *udata)
+static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata)
{
int err;
- struct rxe_dev *rxe = to_rdev(ibpd->device);
- struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_qp *qp;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_pd *pd = to_rpd(ibqp->pd);
+ struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_create_qp_resp __user *uresp = NULL;
if (udata) {
if (udata->outlen < sizeof(*uresp))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
uresp = udata->outbuf;
}
+ if (init->create_flags)
+ return -EOPNOTSUPP;
+
err = rxe_qp_chk_init(rxe, init);
if (err)
- goto err1;
-
- qp = rxe_alloc(&rxe->qp_pool);
- if (!qp) {
- err = -ENOMEM;
- goto err1;
- }
+ return err;
if (udata) {
- if (udata->inlen) {
- err = -EINVAL;
- goto err2;
- }
- qp->is_user = 1;
+ if (udata->inlen)
+ return -EINVAL;
+
+ qp->is_user = true;
+ } else {
+ qp->is_user = false;
}
- rxe_add_index(qp);
+ err = rxe_add_to_pool(&rxe->qp_pool, qp);
+ if (err)
+ return err;
- err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
+ err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err)
- goto err3;
+ goto qp_init;
- return &qp->ibqp;
+ rxe_finalize(qp);
+ return 0;
-err3:
- rxe_drop_index(qp);
-err2:
- rxe_drop_ref(qp);
-err1:
- return ERR_PTR(err);
+qp_init:
+ rxe_cleanup(qp);
+ return err;
}
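
rxe_create_qp() now receives a core-allocated ib_qp embedded in the driver's rxe_qp (see the INIT_RDMA_OBJ_SIZE(ib_qp, ...) entry added further down), so the driver recovers its wrapper with container_of(), which is what to_rqp() boils down to. A self-contained illustration with stand-in struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_qp_model { int qp_num; };		/* "core" object */

struct rxe_qp_model {
	struct ib_qp_model ibqp;		/* embeds the core object */
	int driver_state;
};

int main(void)
{
	struct rxe_qp_model qp = { .ibqp = { .qp_num = 7 } };
	struct ib_qp_model *ibqp = &qp.ibqp;	/* what the core hands the driver */
	struct rxe_qp_model *back =
		container_of(ibqp, struct rxe_qp_model, ibqp);

	printf("qp_num=%d state=%d\n", back->ibqp.qp_num, back->driver_state);
	return 0;
}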
static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -469,6 +448,9 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_qp *qp = to_rqp(ibqp);
+ if (mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err)
goto err1;
@@ -477,6 +459,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (err)
goto err1;
+ if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+ qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ qp->ibqp.qp_num,
+ qp->attr.dest_qp_num);
+
return 0;
err1:
@@ -497,10 +484,13 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct rxe_qp *qp = to_rqp(ibqp);
+ int ret;
+
+ ret = rxe_qp_chk_destroy(qp);
+ if (ret)
+ return ret;
- rxe_qp_destroy(qp);
- rxe_drop_index(qp);
- rxe_drop_ref(qp);
+ rxe_cleanup(qp);
return 0;
}
@@ -535,15 +525,16 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
const struct ib_send_wr *ibwr)
{
wr->wr_id = ibwr->wr_id;
- wr->num_sge = ibwr->num_sge;
wr->opcode = ibwr->opcode;
wr->send_flags = ibwr->send_flags;
if (qp_type(qp) == IB_QPT_UD ||
- qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI) {
+ struct ib_ah *ibah = ud_wr(ibwr)->ah;
+
wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
+ wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
if (qp_type(qp) == IB_QPT_GSI)
wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
if (wr->opcode == IB_WR_SEND_WITH_IMM)
@@ -552,7 +543,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE_WITH_IMM:
wr->ex.imm_data = ibwr->ex.imm_data;
- /* fall through */
+ fallthrough;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
@@ -587,37 +578,37 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
}
}
-static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+ const struct ib_send_wr *ibwr)
+{
+ struct ib_sge *sge = ibwr->sg_list;
+ u8 *p = wqe->dma.inline_data;
+ int i;
+
+ for (i = 0; i < ibwr->num_sge; i++, sge++) {
+ memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+ p += sge->length;
+ }
+}
+
+static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length,
struct rxe_send_wqe *wqe)
{
int num_sge = ibwr->num_sge;
- struct ib_sge *sge;
- int i;
- u8 *p;
init_send_wr(qp, &wqe->wr, ibwr);
- if (qp_type(qp) == IB_QPT_UD ||
- qp_type(qp) == IB_QPT_SMI ||
- qp_type(qp) == IB_QPT_GSI)
- memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
-
- if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
- p = wqe->dma.inline_data;
-
- sge = ibwr->sg_list;
- for (i = 0; i < num_sge; i++, sge++) {
- memcpy(p, (void *)(uintptr_t)sge->addr,
- sge->length);
-
- p += sge->length;
- }
- } else if (mask & WR_REG_MASK) {
+ /* local operation */
+ if (unlikely(mask & WR_LOCAL_OP_MASK)) {
wqe->mask = mask;
wqe->state = wqe_state_posted;
- return 0;
- } else
+ return;
+ }
+
+ if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+ copy_inline_data_to_wqe(wqe, ibwr);
+ else
memcpy(wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
@@ -631,8 +622,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
wqe->dma.sge_offset = 0;
wqe->state = wqe_state_posted;
wqe->ssn = atomic_add_return(1, &qp->ssn);
-
- return 0;
}
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -642,6 +631,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
struct rxe_sq *sq = &qp->sq;
struct rxe_send_wqe *send_wqe;
unsigned long flags;
+ int full;
err = validate_send_wr(qp, ibwr, mask, length);
if (err)
@@ -649,31 +639,21 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
spin_lock_irqsave(&qp->sq.sq_lock, flags);
- if (unlikely(queue_full(sq->queue))) {
- err = -ENOMEM;
- goto err1;
+ full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
+
+ if (unlikely(full)) {
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ return -ENOMEM;
}
- send_wqe = producer_addr(sq->queue);
+ send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
+ init_send_wqe(qp, ibwr, mask, length, send_wqe);
- err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
- if (unlikely(err))
- goto err1;
+ queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
- /*
- * make sure all changes to the work queue are
- * written before we update the producer pointer
- */
- smp_wmb();
-
- advance_producer(sq->queue);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
return 0;
-
-err1:
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
- return err;
}
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
@@ -683,6 +663,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
unsigned int mask;
unsigned int length = 0;
int i;
+ struct ib_send_wr *next;
while (wr) {
mask = wr_opcode_mask(wr->opcode, qp);
@@ -699,6 +680,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
break;
}
+ next = wr->next;
+
length = 0;
for (i = 0; i < wr->num_sge; i++)
length += wr->sg_list[i].length;
@@ -709,7 +692,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
*bad_wr = wr;
break;
}
- wr = wr->next;
+ wr = next;
}
rxe_run_task(&qp->req.task, 1);
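
The loop above now caches wr->next before posting each WR: once post_one_send() hands the WR to the send queue it can complete immediately, after which the caller may reuse or free the WR's memory, so reading wr->next afterwards would be a use-after-free. A minimal standalone demonstration of the pattern:

#include <stdio.h>
#include <string.h>

struct wr { int id; struct wr *next; };

static void post(struct wr *w)
{
	/* after this point the caller may recycle *w */
	printf("posted wr %d\n", w->id);
	memset(w, 0, sizeof(*w));		/* simulate reuse */
}

static void post_all(struct wr *w)
{
	while (w) {
		struct wr *next = w->next;	/* read before posting */

		post(w);
		w = next;			/* safe even though *w was reused */
	}
}

int main(void)
{
	struct wr b = { 2, NULL }, a = { 1, &b };

	post_all(&a);
	return 0;
}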
@@ -798,7 +781,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
if (attr->flags)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err)
@@ -809,16 +792,23 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (err)
return err;
- return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
+ return rxe_add_to_pool(&rxe->cq_pool, cq);
}
-static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);
+ /* See IBA C11-17: The CI shall return an error if this Verb is
+ * invoked while a Work Queue is still associated with the CQ.
+ */
+ if (atomic_read(&cq->num_wq))
+ return -EINVAL;
+
rxe_cq_disable(cq);
- rxe_drop_ref(cq);
+ rxe_cleanup(cq);
+ return 0;
}
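
The destroy path now enforces the cited IBA C11-17 rule with a per-CQ count of associated work queues (presumably incremented whenever a QP or SRQ binds the CQ). A small sketch of the guard, assuming an illustrative cq struct rather than the real rxe_cq:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct cq { atomic_int num_wq; };

static int cq_destroy(struct cq *cq)
{
	if (atomic_load(&cq->num_wq))
		return -EINVAL;	/* a work queue still points at this CQ */
	/* ... tear down the CQ ... */
	return 0;
}

int main(void)
{
	struct cq cq;

	atomic_init(&cq.num_wq, 1);
	printf("destroy -> %d\n", cq_destroy(&cq));	/* -EINVAL */
	atomic_store(&cq.num_wq, 0);
	printf("destroy -> %d\n", cq_destroy(&cq));	/* 0 */
	return 0;
}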
static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
@@ -857,12 +847,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
for (i = 0; i < num_entries; i++) {
- cqe = queue_head(cq->queue);
+ cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
break;
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
- advance_consumer(cq->queue);
+ queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
spin_unlock_irqrestore(&cq->cq_lock, flags);
@@ -872,7 +862,9 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
struct rxe_cq *cq = to_rcq(ibcq);
- int count = queue_count(cq->queue);
+ int count;
+
+ count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
return (count > wc_cnt) ? wc_cnt : count;
}
@@ -880,14 +872,17 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
- unsigned long irq_flags;
int ret = 0;
+ int empty;
+ unsigned long irq_flags;
spin_lock_irqsave(&cq->cq_lock, irq_flags);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
- if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
+ empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
@@ -899,31 +894,19 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
- int err;
+ struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
- if (!mr) {
- err = -ENOMEM;
- goto err1;
- }
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
- rxe_add_index(mr);
+ rxe_get(pd);
+ mr->ibmr.pd = ibpd;
- rxe_add_ref(pd);
-
- err = rxe_mem_init_dma(pd, access, mr);
- if (err)
- goto err2;
+ rxe_mr_init_dma(access, mr);
+ rxe_finalize(mr);
return &mr->ibmr;
-
-err2:
- rxe_drop_ref(pd);
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
-err1:
- return ERR_PTR(err);
}
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
@@ -935,7 +918,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
if (!mr) {
@@ -943,42 +926,30 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
goto err2;
}
- rxe_add_index(mr);
- rxe_add_ref(pd);
+ rxe_get(pd);
+ mr->ibmr.pd = ibpd;
- err = rxe_mem_init_user(pd, start, length, iova,
- access, udata, mr);
+ err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
if (err)
goto err3;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err3:
- rxe_drop_ref(pd);
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
+ rxe_cleanup(mr);
err2:
return ERR_PTR(err);
}
-static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-{
- struct rxe_mem *mr = to_rmr(ibmr);
-
- mr->state = RXE_MEM_STATE_ZOMBIE;
- rxe_drop_ref(mr->pd);
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
- return 0;
-}
-
static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata)
+ u32 max_num_sg)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
int err;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -990,27 +961,26 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
goto err1;
}
- rxe_add_index(mr);
+ rxe_get(pd);
+ mr->ibmr.pd = ibpd;
- rxe_add_ref(pd);
-
- err = rxe_mem_init_fast(pd, max_num_sg, mr);
+ err = rxe_mr_init_fast(max_num_sg, mr);
if (err)
goto err2;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err2:
- rxe_drop_ref(pd);
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
+ rxe_cleanup(mr);
err1:
return ERR_PTR(err);
}
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
struct rxe_map *map;
struct rxe_phys_buf *buf;
@@ -1030,56 +1000,27 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
int n;
mr->nbuf = 0;
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
- mr->va = ibmr->iova;
- mr->iova = ibmr->iova;
- mr->length = ibmr->length;
mr->page_shift = ilog2(ibmr->page_size);
mr->page_mask = ibmr->page_size - 1;
- mr->offset = mr->iova & mr->page_mask;
+ mr->offset = ibmr->iova & mr->page_mask;
return n;
}
-static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
-{
- int err;
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_qp *qp = to_rqp(ibqp);
- struct rxe_mc_grp *grp;
-
- /* takes a ref on grp if successful */
- err = rxe_mcast_get_grp(rxe, mgid, &grp);
- if (err)
- return err;
-
- err = rxe_mcast_add_grp_elem(rxe, qp, grp);
-
- rxe_drop_ref(grp);
- return err;
-}
-
-static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
-{
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_qp *qp = to_rqp(ibqp);
-
- return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
-}
-
static ssize_t parent_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct rxe_dev *rxe =
rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
- return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
+ return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR_RO(parent);
@@ -1107,8 +1048,9 @@ static const struct ib_device_ops rxe_dev_ops = {
.driver_id = RDMA_DRIVER_RXE,
.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,
- .alloc_hw_stats = rxe_ib_alloc_hw_stats,
+ .alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
.alloc_mr = rxe_alloc_mr,
+ .alloc_mw = rxe_alloc_mw,
.alloc_pd = rxe_alloc_pd,
.alloc_ucontext = rxe_alloc_ucontext,
.attach_mcast = rxe_attach_mcast,
@@ -1116,7 +1058,9 @@ static const struct ib_device_ops rxe_dev_ops = {
.create_cq = rxe_create_cq,
.create_qp = rxe_create_qp,
.create_srq = rxe_create_srq,
+ .create_user_ah = rxe_create_ah,
.dealloc_driver = rxe_dealloc,
+ .dealloc_mw = rxe_dealloc_mw,
.dealloc_pd = rxe_dealloc_pd,
.dealloc_ucontext = rxe_dealloc_ucontext,
.dereg_mr = rxe_dereg_mr,
@@ -1125,6 +1069,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.destroy_qp = rxe_destroy_qp,
.destroy_srq = rxe_destroy_srq,
.detach_mcast = rxe_detach_mcast,
+ .device_group = &rxe_attr_group,
.enable_driver = rxe_enable_driver,
.get_dma_mr = rxe_get_dma_mr,
.get_hw_stats = rxe_ib_get_hw_stats,
@@ -1155,80 +1100,39 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
+ INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
int err;
struct ib_device *dev = &rxe->ib_dev;
- struct crypto_shash *tfm;
- strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+ strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
dev->num_comp_vectors = num_possible_cpus();
- dev->dev.parent = rxe_dma_device(rxe);
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
- dev->dev.dma_ops = &dma_virt_ops;
- dev->dev.dma_parms = &rxe->dma_parms;
- rxe->dma_parms = (struct device_dma_parameters)
- { .max_segment_size = SZ_2G };
- dma_coerce_mask_and_coherent(&dev->dev,
- dma_get_required_mask(&dev->dev));
-
- dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
- | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
- | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
- | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
- | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
- ;
+
+ dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
+ BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
ib_set_device_ops(dev, &rxe_dev_ops);
err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
if (err)
return err;
- tfm = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(tfm)) {
- pr_err("failed to allocate crc algorithm err:%ld\n",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
- rxe->tfm = tfm;
+ err = rxe_icrc_init(rxe);
+ if (err)
+ return err;
- rdma_set_device_sysfs_group(dev, &rxe_attr_group);
- err = ib_register_device(dev, ibdev_name);
+ err = ib_register_device(dev, ibdev_name, NULL);
if (err)
pr_warn("%s failed with error %d\n", __func__, err);