Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.c')
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_verbs.c | 181
1 file changed, 85 insertions, 96 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0aa0d7e52773..88825edc7dce 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -7,8 +7,8 @@
 #include <linux/dma-mapping.h>
 #include <net/addrconf.h>
 #include <rdma/uverbs_ioctl.h>
+
 #include "rxe.h"
-#include "rxe_loc.h"
 #include "rxe_queue.h"
 #include "rxe_hw_counters.h"

@@ -115,7 +115,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
 {
	struct rxe_ucontext *uc = to_ruc(ibuc);

-	rxe_drop_ref(uc);
+	rxe_cleanup(uc);
 }

 static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
@@ -149,7 +149,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
	struct rxe_pd *pd = to_rpd(ibpd);

-	rxe_drop_ref(pd);
+	rxe_cleanup(pd);

	return 0;
 }
@@ -176,21 +176,20 @@ static int rxe_create_ah(struct ib_ah *ibah,
	if (err)
		return err;

-	err = rxe_add_to_pool(&rxe->ah_pool, ah);
+	err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
+			init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
	if (err)
		return err;

	/* create index > 0 */
-	rxe_add_index(ah);
-	ah->ah_num = ah->pelem.index;
+	ah->ah_num = ah->elem.index;

	if (uresp) {
		/* only if new user provider */
		err = copy_to_user(&uresp->ah_num, &ah->ah_num,
					 sizeof(uresp->ah_num));
		if (err) {
-			rxe_drop_index(ah);
-			rxe_drop_ref(ah);
+			rxe_cleanup(ah);
			return -EFAULT;
		}
	} else if (ah->is_user) {
@@ -199,6 +198,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
	}

	rxe_init_av(init_attr->ah_attr, &ah->av);
+	rxe_finalize(ah);
+
	return 0;
 }

@@ -230,8 +231,8 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 {
	struct rxe_ah *ah = to_rah(ibah);

-	rxe_drop_index(ah);
-	rxe_drop_ref(ah);
+	rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+
	return 0;
 }

@@ -261,7 +262,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);

	recv_wqe->wr_id = ibwr->wr_id;
-	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));
@@ -289,36 +289,35 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

-	if (init->srq_type != IB_SRQT_BASIC)
-		return -EOPNOTSUPP;
-
	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

-	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+	if (init->srq_type != IB_SRQT_BASIC)
+		return -EOPNOTSUPP;
+
+	err = rxe_srq_chk_init(rxe, init);
	if (err)
-		goto err1;
+		return err;

	err = rxe_add_to_pool(&rxe->srq_pool, srq);
	if (err)
-		goto err1;
+		return err;

-	rxe_add_ref(pd);
+	rxe_get(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
-		goto err2;
+		goto err_cleanup;

	return 0;

-err2:
-	rxe_drop_ref(pd);
-	rxe_drop_ref(srq);
-err1:
+err_cleanup:
+	rxe_cleanup(srq);
+
	return err;
 }

@@ -342,16 +341,12 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
-		goto err1;
+		return err;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
-		goto err1;
-
+		return err;
	return 0;
-
-err1:
-	return err;
 }

 static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
@@ -371,11 +366,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
	struct rxe_srq *srq = to_rsrq(ibsrq);

-	if (srq->rq.queue)
-		rxe_queue_cleanup(srq->rq.queue);
-
-	rxe_drop_ref(srq->pd);
-	rxe_drop_ref(srq);
+	rxe_cleanup(srq);

	return 0;
 }
@@ -383,8 +374,8 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
 {
	int err = 0;
-	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);
+	unsigned long flags;

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

@@ -438,16 +429,15 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
	if (err)
		return err;

-	rxe_add_index(qp);
	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
	if (err)
		goto qp_init;

+	rxe_finalize(qp);
	return 0;

 qp_init:
-	rxe_drop_index(qp);
-	rxe_drop_ref(qp);
+	rxe_cleanup(qp);
	return err;
 }

@@ -469,6 +459,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	if (err)
		goto err1;

+	if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+		qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+						  qp->ibqp.qp_num,
+						  qp->attr.dest_qp_num);
+
	return 0;

 err1:
@@ -489,10 +484,13 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
	struct rxe_qp *qp = to_rqp(ibqp);
+	int ret;
+
+	ret = rxe_qp_chk_destroy(qp);
+	if (ret)
+		return ret;

-	rxe_qp_destroy(qp);
-	rxe_drop_index(qp);
-	rxe_drop_ref(qp);
+	rxe_cleanup(qp);
	return 0;
 }

@@ -527,12 +525,10 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
 {
	wr->wr_id = ibwr->wr_id;
-	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
-	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		struct ib_ah *ibah = ud_wr(ibwr)->ah;

@@ -803,9 +799,15 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
	struct rxe_cq *cq = to_rcq(ibcq);

+	/* See IBA C11-17: The CI shall return an error if this Verb is
+	 * invoked while a Work Queue is still associated with the CQ.
+	 */
+	if (atomic_read(&cq->num_wq))
+		return -EINVAL;
+
	rxe_cq_disable(cq);

-	rxe_drop_ref(cq);
+	rxe_cleanup(cq);
	return 0;
 }

@@ -870,9 +872,9 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
	struct rxe_cq *cq = to_rcq(ibcq);
-	unsigned long irq_flags;
	int ret = 0;
	int empty;
+	unsigned long irq_flags;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
@@ -898,9 +900,11 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
	if (!mr)
		return ERR_PTR(-ENOMEM);

-	rxe_add_index(mr);
-	rxe_add_ref(pd);
-	rxe_mr_init_dma(pd, access, mr);
+	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
+
+	rxe_mr_init_dma(access, mr);
+	rxe_finalize(mr);

	return &mr->ibmr;
 }
@@ -922,20 +926,20 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
		goto err2;
	}

-	rxe_add_index(mr);
-	rxe_add_ref(pd);
+	rxe_get(pd);
+	mr->ibmr.pd = ibpd;

-	err = rxe_mr_init_user(pd, start, length, iova, access, mr);
+	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
	if (err)
		goto err3;

+	rxe_finalize(mr);
+
	return &mr->ibmr;

 err3:
-	rxe_drop_ref(pd);
-	rxe_drop_index(mr);
-	rxe_drop_ref(mr);
+	rxe_cleanup(mr);
 err2:
	return ERR_PTR(err);
 }

@@ -957,72 +961,57 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
		goto err1;
	}

-	rxe_add_index(mr);
+	rxe_get(pd);
+	mr->ibmr.pd = ibpd;

-	rxe_add_ref(pd);
-
-	err = rxe_mr_init_fast(pd, max_num_sg, mr);
+	err = rxe_mr_init_fast(max_num_sg, mr);
	if (err)
		goto err2;

+	rxe_finalize(mr);
+
	return &mr->ibmr;

 err2:
-	rxe_drop_ref(pd);
-	rxe_drop_index(mr);
-	rxe_drop_ref(mr);
+	rxe_cleanup(mr);
 err1:
	return ERR_PTR(err);
 }

-/* build next_map_set from scatterlist
- * The IB_WR_REG_MR WR will swap map_sets
- */
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
-			 int sg_nents, unsigned int *sg_offset)
+static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
 {
	struct rxe_mr *mr = to_rmr(ibmr);
-	struct rxe_map_set *set = mr->next_map_set;
-	int n;
+	struct rxe_map *map;
+	struct rxe_phys_buf *buf;

-	set->nbuf = 0;
+	if (unlikely(mr->nbuf == mr->num_buf))
+		return -ENOMEM;

-	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page);
+	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
+	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

-	set->va = ibmr->iova;
-	set->iova = ibmr->iova;
-	set->length = ibmr->length;
-	set->page_shift = ilog2(ibmr->page_size);
-	set->page_mask = ibmr->page_size - 1;
-	set->offset = set->iova & set->page_mask;
+	buf->addr = addr;
+	buf->size = ibmr->page_size;
+	mr->nbuf++;

-	return n;
+	return 0;
 }

-static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+			 int sg_nents, unsigned int *sg_offset)
 {
-	int err;
-	struct rxe_dev *rxe = to_rdev(ibqp->device);
-	struct rxe_qp *qp = to_rqp(ibqp);
-	struct rxe_mc_grp *grp;
-
-	/* takes a ref on grp if successful */
-	err = rxe_mcast_get_grp(rxe, mgid, &grp);
-	if (err)
-		return err;
+	struct rxe_mr *mr = to_rmr(ibmr);
+	int n;

-	err = rxe_mcast_add_grp_elem(rxe, qp, grp);
+	mr->nbuf = 0;

-	rxe_drop_ref(grp);
-	return err;
-}
+	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

-static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
-{
-	struct rxe_dev *rxe = to_rdev(ibqp->device);
-	struct rxe_qp *qp = to_rqp(ibqp);
+	mr->page_shift = ilog2(ibmr->page_size);
+	mr->page_mask = ibmr->page_size - 1;
+	mr->offset = ibmr->iova & mr->page_mask;

-	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
+	return n;
 }

 static ssize_t parent_show(struct device *device,
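
The biggest functional change above is on the memory-registration path: the map_set based rxe_map_mr_sg() is replaced by a small rxe_set_page() callback that ib_sg_to_pages() invokes once per page. The fragment below is a minimal consumer-side sketch of the path that ends up in that code: a kernel ULP allocates a fast-registration MR (which lands in rxe_alloc_mr() on an rxe device), maps an already DMA-mapped scatterlist with ib_map_mr_sg() (which calls the driver's rxe_map_mr_sg() and, through ib_sg_to_pages(), rxe_set_page() for each page), and then posts an IB_WR_REG_MR work request. This sketch is not part of the patch; the function name example_fast_reg(), the access flags, and the trimmed-down error handling are illustrative assumptions.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int example_fast_reg(struct ib_pd *pd, struct ib_qp *qp,
			    struct scatterlist *sgl, int nents)
{
	struct ib_reg_wr reg_wr = {};
	const struct ib_send_wr *bad_wr;
	struct ib_mr *mr;
	int n;

	/* ends up in rxe_alloc_mr() for an rxe device */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* drives rxe_map_mr_sg(), which calls rxe_set_page() once per page */
	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n < 0 || n < nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* the REG_MR WR must complete before later WRs that use this MR */
	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}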
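rxe_set_page() stores each page address in a two-level table: entry nbuf goes into map[nbuf / RXE_BUF_PER_MAP] at slot buf[nbuf % RXE_BUF_PER_MAP], and rxe_map_mr_sg() afterwards derives page_shift, page_mask and the starting offset from ibmr->page_size and ibmr->iova. The standalone fragment below (plain userspace C, not driver code) only illustrates that divide/modulo addressing; BUF_PER_MAP, the array sizes, the page addresses and the printf harness are invented for the example.

#include <stdio.h>

#define BUF_PER_MAP 8	/* stand-in for RXE_BUF_PER_MAP */

struct phys_buf { unsigned long addr; unsigned long size; };
struct map { struct phys_buf buf[BUF_PER_MAP]; };

static struct map maps[3];

int main(void)
{
	unsigned int nbuf;

	/* mimic ib_sg_to_pages() handing over one 4 KiB page at a time */
	for (nbuf = 0; nbuf < 20; nbuf++) {
		struct map *m = &maps[nbuf / BUF_PER_MAP];
		struct phys_buf *b = &m->buf[nbuf % BUF_PER_MAP];

		b->addr = 0x100000UL + nbuf * 4096UL;
		b->size = 4096;
		printf("page %2u -> map %u slot %u addr 0x%lx\n",
		       nbuf, nbuf / BUF_PER_MAP, nbuf % BUF_PER_MAP, b->addr);
	}
	return 0;
}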