Diffstat (limited to 'drivers/infiniband/sw')
39 files changed, 2089 insertions, 2247 deletions
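Note on the largest change in this range: the rxe_mcast.c rework further down drops the old mc_grp/mc_elem pools and instead indexes multicast groups (struct rxe_mcg) in a red-black tree keyed by mgid, with one struct rxe_mca per QP attachment linked on mcg->qp_list. As a rough, illustrative sketch only (not part of the series), a receive-side consumer of the new layout might look like the following; the function name and the skb replication step are hypothetical, while the structures and helpers (rxe_lookup_mcg, rxe_cleanup_mcg, rxe->mcg_lock, mcg->qp_list) are the ones introduced in the rxe_mcast.c hunks below.

static void rxe_rcv_mcast_sketch(struct rxe_dev *rxe, struct sk_buff *skb,
				 union ib_gid *mgid)
{
	struct rxe_mcg *mcg;
	struct rxe_mca *mca;

	/* rxe_lookup_mcg() takes a kref on the mcg when it finds one */
	mcg = rxe_lookup_mcg(rxe, mgid);
	if (!mcg)
		return;		/* no QP is attached to this mgid */

	spin_lock_bh(&rxe->mcg_lock);
	list_for_each_entry(mca, &mcg->qp_list, qp_list) {
		/* replicate the packet to each attached QP here, e.g. by
		 * cloning skb and handing the clone to mca->qp's recv path
		 */
	}
	spin_unlock_bh(&rxe->mcg_lock);

	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
}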
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 3305f2744bfa..3acab569fbb9 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -2775,7 +2775,7 @@ void rvt_qp_iter(struct rvt_dev_info *rdi, EXPORT_SYMBOL(rvt_qp_iter); /* - * This should be called with s_lock held. + * This should be called with s_lock and r_lock held. */ void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status) @@ -3073,6 +3073,8 @@ do_write: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; + if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) + goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, @@ -3132,7 +3134,9 @@ send_comp: rvp->n_loop_pkts++; flush_send: sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; + spin_lock(&sqp->r_lock); rvt_send_complete(sqp, wqe, send_status); + spin_unlock(&sqp->r_lock); if (local_ops) { atomic_dec(&sqp->local_ops_pending); local_ops = 0; @@ -3186,9 +3190,15 @@ serr: spin_unlock_irqrestore(&qp->r_lock, flags); serr_no_r_lock: spin_lock_irqsave(&sqp->s_lock, flags); + spin_lock(&sqp->r_lock); rvt_send_complete(sqp, wqe, send_status); + spin_unlock(&sqp->r_lock); if (sqp->ibqp.qp_type == IB_QPT_RC) { - int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + int lastwqe; + + spin_lock(&sqp->r_lock); + lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + spin_unlock(&sqp->r_lock); sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 59481ae39505..d61f8de7f21c 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -15,7 +15,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("RDMA Verbs Transport Library"); -static int rvt_init(void) +static int __init rvt_init(void) { int ret = rvt_driver_cq_init(); @@ -26,7 +26,7 @@ static int rvt_init(void) } module_init(rvt_init); -static void rvt_cleanup(void) +static void __exit rvt_cleanup(void) { rvt_cq_exit(); } diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile index 1e24673e9318..5395a581f4bb 100644 --- a/drivers/infiniband/sw/rxe/Makefile +++ b/drivers/infiniband/sw/rxe/Makefile @@ -22,5 +22,4 @@ rdma_rxe-y := \ rxe_mcast.o \ rxe_task.o \ rxe_net.o \ - rxe_sysfs.o \ rxe_hw_counters.o diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 8e0f9c489cab..51daac5c4feb 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib"); MODULE_DESCRIPTION("Soft RDMA transport"); MODULE_LICENSE("Dual BSD/GPL"); -bool rxe_initialized; - /* free resources for a rxe device all objects created for this device must * have been destroyed */ @@ -30,8 +28,8 @@ void rxe_dealloc(struct ib_device *ib_dev) rxe_pool_cleanup(&rxe->cq_pool); rxe_pool_cleanup(&rxe->mr_pool); rxe_pool_cleanup(&rxe->mw_pool); - rxe_pool_cleanup(&rxe->mc_grp_pool); - rxe_pool_cleanup(&rxe->mc_elem_pool); + + WARN_ON(!RB_EMPTY_ROOT(&rxe->mcg_tree)); if (rxe->tfm) crypto_free_shash(rxe->tfm); @@ -48,6 +46,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_qp = RXE_MAX_QP; rxe->attr.max_qp_wr = RXE_MAX_QP_WR; rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS; + rxe->attr.kernel_cap_flags = IBK_ALLOW_USER_UNREG; 
rxe->attr.max_send_sge = RXE_MAX_SGE; rxe->attr.max_recv_sge = RXE_MAX_SGE; rxe->attr.max_sge_rd = RXE_MAX_SGE_RD; @@ -116,106 +115,37 @@ static void rxe_init_ports(struct rxe_dev *rxe) } /* init pools of managed objects */ -static int rxe_init_pools(struct rxe_dev *rxe) +static void rxe_init_pools(struct rxe_dev *rxe) { - int err; - - err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC, - rxe->max_ucontext); - if (err) - goto err1; - - err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD, - rxe->attr.max_pd); - if (err) - goto err2; - - err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH, - rxe->attr.max_ah); - if (err) - goto err3; - - err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ, - rxe->attr.max_srq); - if (err) - goto err4; - - err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP, - rxe->attr.max_qp); - if (err) - goto err5; - - err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ, - rxe->attr.max_cq); - if (err) - goto err6; - - err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR, - rxe->attr.max_mr); - if (err) - goto err7; - - err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW, - rxe->attr.max_mw); - if (err) - goto err8; - - err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP, - rxe->attr.max_mcast_grp); - if (err) - goto err9; - - err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM, - rxe->attr.max_total_mcast_qp_attach); - if (err) - goto err10; - - return 0; - -err10: - rxe_pool_cleanup(&rxe->mc_grp_pool); -err9: - rxe_pool_cleanup(&rxe->mw_pool); -err8: - rxe_pool_cleanup(&rxe->mr_pool); -err7: - rxe_pool_cleanup(&rxe->cq_pool); -err6: - rxe_pool_cleanup(&rxe->qp_pool); -err5: - rxe_pool_cleanup(&rxe->srq_pool); -err4: - rxe_pool_cleanup(&rxe->ah_pool); -err3: - rxe_pool_cleanup(&rxe->pd_pool); -err2: - rxe_pool_cleanup(&rxe->uc_pool); -err1: - return err; + rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC); + rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD); + rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH); + rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ); + rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP); + rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ); + rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR); + rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW); } /* initialize rxe device state */ -static int rxe_init(struct rxe_dev *rxe) +static void rxe_init(struct rxe_dev *rxe) { - int err; - /* init default device parameters */ rxe_init_device_param(rxe); rxe_init_ports(rxe); - - err = rxe_init_pools(rxe); - if (err) - return err; + rxe_init_pools(rxe); /* init pending mmap list */ spin_lock_init(&rxe->mmap_offset_lock); spin_lock_init(&rxe->pending_lock); INIT_LIST_HEAD(&rxe->pending_mmaps); - mutex_init(&rxe->usdev_lock); + /* init multicast support */ + spin_lock_init(&rxe->mcg_lock); + rxe->mcg_tree = RB_ROOT; - return 0; + mutex_init(&rxe->usdev_lock); } void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) @@ -237,12 +167,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) */ int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name) { - int err; - - err = rxe_init(rxe); - if (err) - return err; - + rxe_init(rxe); rxe_set_mtu(rxe, mtu); return rxe_register_device(rxe, ibdev_name); @@ -290,7 +215,6 @@ static int __init rxe_module_init(void) return err; rdma_link_register(&rxe_link_ops); - rxe_initialized = true; pr_info("loaded\n"); return 0; } @@ -301,7 +225,6 @@ static void __exit rxe_module_exit(void) ib_unregister_driver(RDMA_DRIVER_RXE); rxe_net_exit(); - rxe_initialized = false; pr_info("unloaded\n"); } diff --git 
a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index 1bb3fb618bf5..30fbdf3bc76a 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -12,7 +12,6 @@ #endif #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/module.h> #include <linux/skbuff.h> #include <rdma/ib_verbs.h> @@ -39,8 +38,6 @@ #define RXE_ROCE_V2_SPORT (0xc000) -extern bool rxe_initialized; - void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name); diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c index 38c7b6fb39d7..3b05314ca739 100644 --- a/drivers/infiniband/sw/rxe/rxe_av.c +++ b/drivers/infiniband/sw/rxe/rxe_av.c @@ -99,11 +99,14 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr) av->network_type = type; } -struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt) +struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp) { struct rxe_ah *ah; u32 ah_num; + if (ahp) + *ahp = NULL; + if (!pkt || !pkt->qp) return NULL; @@ -117,10 +120,22 @@ struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt) if (ah_num) { /* only new user provider or kernel client */ ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num); - if (!ah || ah->ah_num != ah_num || rxe_ah_pd(ah) != pkt->qp->pd) { + if (!ah) { pr_warn("Unable to find AH matching ah_num\n"); return NULL; } + + if (rxe_ah_pd(ah) != pkt->qp->pd) { + pr_warn("PDs don't match for AH and QP\n"); + rxe_put(ah); + return NULL; + } + + if (ahp) + *ahp = ah; + else + rxe_put(ah); + return &ah->av; } diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index d771ba8449a1..fb0c008af78c 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -114,6 +114,8 @@ void retransmit_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, retrans_timer); + pr_debug("%s: fired for qp#%d\n", __func__, qp->elem.index); + if (qp->valid) { qp->comp.timeout = 1; rxe_run_task(&qp->comp.task, 1); @@ -458,8 +460,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - unsigned long flags; - if (wqe->has_rd_atomic) { wqe->has_rd_atomic = 0; atomic_inc(&qp->req.rd_atomic); @@ -472,11 +472,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, if (unlikely(qp->req.state == QP_STATE_DRAIN)) { /* state_lock used by requester & completer */ - spin_lock_irqsave(&qp->state_lock, flags); + spin_lock_bh(&qp->state_lock); if ((qp->req.state == QP_STATE_DRAIN) && (qp->comp.psn == qp->req.psn)) { qp->req.state = QP_STATE_DRAINED; - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -488,7 +488,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, qp->ibqp.qp_context); } } else { - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); } } @@ -528,7 +528,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify) struct rxe_queue *q = qp->sq.queue; while ((skb = skb_dequeue(&qp->resp_pkts))) { - rxe_drop_ref(qp); + rxe_put(qp); kfree_skb(skb); ib_device_put(qp->ibqp.device); } @@ -550,7 +550,7 @@ static void free_pkt(struct rxe_pkt_info *pkt) struct ib_device *dev = qp->ibqp.device; kfree_skb(skb); - rxe_drop_ref(qp); + rxe_put(qp); ib_device_put(dev); } @@ -562,16 +562,16 @@ int rxe_completer(void *arg) struct sk_buff *skb = NULL; 
struct rxe_pkt_info *pkt = NULL; enum comp_state state; - int ret = 0; + int ret; - rxe_add_ref(qp); + if (!rxe_get(qp)) + return -EAGAIN; - if (!qp->valid || qp->req.state == QP_STATE_ERROR || - qp->req.state == QP_STATE_RESET) { + if (!qp->valid || qp->comp.state == QP_STATE_ERROR || + qp->comp.state == QP_STATE_RESET) { rxe_drain_resp_pkts(qp, qp->valid && - qp->req.state == QP_STATE_ERROR); - ret = -EAGAIN; - goto done; + qp->comp.state == QP_STATE_ERROR); + goto exit; } if (qp->comp.timeout) { @@ -581,10 +581,8 @@ int rxe_completer(void *arg) qp->comp.timeout_retry = 0; } - if (qp->req.need_retry) { - ret = -EAGAIN; - goto done; - } + if (qp->req.need_retry) + goto exit; state = COMPST_GET_ACK; @@ -677,8 +675,7 @@ int rxe_completer(void *arg) qp->qp_timeout_jiffies) mod_timer(&qp->retrans_timer, jiffies + qp->qp_timeout_jiffies); - ret = -EAGAIN; - goto done; + goto exit; case COMPST_ERROR_RETRY: /* we come here if the retry timer fired and we did @@ -690,10 +687,8 @@ int rxe_completer(void *arg) */ /* there is nothing to retry in this case */ - if (!wqe || (wqe->state == wqe_state_posted)) { - ret = -EAGAIN; - goto done; - } + if (!wqe || (wqe->state == wqe_state_posted)) + goto exit; /* if we've started a retry, don't start another * retry sequence, unless this is a timeout. @@ -731,18 +726,21 @@ int rxe_completer(void *arg) break; case COMPST_RNR_RETRY: + /* we come here if we received an RNR NAK */ if (qp->comp.rnr_retry > 0) { if (qp->comp.rnr_retry != 7) qp->comp.rnr_retry--; - qp->req.need_retry = 1; + /* don't start a retry flow until the + * rnr timer has fired + */ + qp->req.wait_for_rnr_timer = 1; pr_debug("qp#%d set rnr nak timer\n", qp_num(qp)); mod_timer(&qp->rnr_nak_timer, jiffies + rnrnak_jiffies(aeth_syn(pkt) & ~AETH_TYPE_MASK)); - ret = -EAGAIN; - goto done; + goto exit; } else { rxe_counter_inc(rxe, RXE_CNT_RNR_RETRY_EXCEEDED); @@ -755,15 +753,23 @@ int rxe_completer(void *arg) WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS); do_complete(qp, wqe); rxe_qp_error(qp); - ret = -EAGAIN; - goto done; + goto exit; } } + /* A non-zero return value will cause rxe_do_task to + * exit its loop and end the tasklet. 
A zero return + * will continue looping and return to rxe_completer + */ done: + ret = 0; + goto out; +exit: + ret = -EAGAIN; +out: if (pkt) free_pkt(pkt); - rxe_drop_ref(qp); + rxe_put(qp); return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index 6848426c074f..b1a0ab3cd4bd 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -19,16 +19,16 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, } if (cqe > rxe->attr.max_cqe) { - pr_warn("cqe(%d) > max_cqe(%d)\n", - cqe, rxe->attr.max_cqe); + pr_debug("cqe(%d) > max_cqe(%d)\n", + cqe, rxe->attr.max_cqe); goto err1; } if (cq) { count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT); if (cqe < count) { - pr_warn("cqe(%d) < current # elements in queue (%d)", - cqe, count); + pr_debug("cqe(%d) < current # elements in queue (%d)", + cqe, count); goto err1; } } @@ -106,9 +106,9 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) { struct ib_event ev; - unsigned long flags; int full; void *addr; + unsigned long flags; spin_lock_irqsave(&cq->cq_lock, flags); @@ -150,9 +150,9 @@ void rxe_cq_disable(struct rxe_cq *cq) spin_unlock_irqrestore(&cq->cq_lock, flags); } -void rxe_cq_cleanup(struct rxe_pool_entry *arg) +void rxe_cq_cleanup(struct rxe_pool_elem *elem) { - struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem); + struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); if (cq->queue) rxe_queue_cleanup(cq->queue); diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c index e03af3012590..46bb07c5c4df 100644 --- a/drivers/infiniband/sw/rxe/rxe_icrc.c +++ b/drivers/infiniband/sw/rxe/rxe_icrc.c @@ -151,18 +151,8 @@ int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt) payload_size(pkt) + bth_pad(pkt)); icrc = ~icrc; - if (unlikely(icrc != pkt_icrc)) { - if (skb->protocol == htons(ETH_P_IPV6)) - pr_warn_ratelimited("bad ICRC from %pI6c\n", - &ipv6_hdr(skb)->saddr); - else if (skb->protocol == htons(ETH_P_IP)) - pr_warn_ratelimited("bad ICRC from %pI4\n", - &ip_hdr(skb)->saddr); - else - pr_warn_ratelimited("bad ICRC from unknown\n"); - + if (unlikely(icrc != pkt_icrc)) return -EINVAL; - } return 0; } diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 1ca43b859d80..c2a5c8814a48 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -19,7 +19,7 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr); void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr); -struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt); +struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp); /* rxe_cq.c */ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, @@ -37,21 +37,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited); void rxe_cq_disable(struct rxe_cq *cq); -void rxe_cq_cleanup(struct rxe_pool_entry *arg); +void rxe_cq_cleanup(struct rxe_pool_elem *elem); /* rxe_mcast.c */ -int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, - struct rxe_mc_grp **grp_p); - -int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, - struct rxe_mc_grp *grp); - -int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, - union ib_gid *mgid); - -void rxe_drop_all_mcast_groups(struct rxe_qp *qp); - -void rxe_mc_cleanup(struct rxe_pool_entry *arg); +struct rxe_mcg *rxe_lookup_mcg(struct 
rxe_dev *rxe, union ib_gid *mgid); +int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid); +int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid); +void rxe_cleanup_mcg(struct kref *kref); /* rxe_mmap.c */ struct rxe_mmap_info { @@ -72,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); /* rxe_mr.c */ u8 rxe_get_next_key(u32 last_key); -void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr); -int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, +void rxe_mr_init_dma(int access, struct rxe_mr *mr); +int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, int access, struct rxe_mr *mr); -int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr); +int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr); int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, enum rxe_mr_copy_dir dir); int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma, @@ -85,11 +77,10 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, enum rxe_mr_lookup_type type); int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length); int advance_dma_data(struct rxe_dma_info *dma, unsigned int length); -int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey); +int rxe_invalidate_mr(struct rxe_qp *qp, u32 key); int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe); -int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr); int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); -void rxe_mr_cleanup(struct rxe_pool_entry *arg); +void rxe_mr_cleanup(struct rxe_pool_elem *elem); /* rxe_mw.c */ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); @@ -97,41 +88,32 @@ int rxe_dealloc_mw(struct ib_mw *ibmw); int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe); int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey); struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey); -void rxe_mw_cleanup(struct rxe_pool_entry *arg); +void rxe_mw_cleanup(struct rxe_pool_elem *elem); /* rxe_net.c */ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt); -int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb); +int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt, + struct sk_buff *skb); int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct sk_buff *skb); const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num); -int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid); -int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid); /* rxe_qp.c */ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init); - int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct ib_qp_init_attr *init, struct rxe_create_qp_resp __user *uresp, struct ib_pd *ibpd, struct ib_udata *udata); - int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init); - int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_attr *attr, int mask); - int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata); - int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask); - void rxe_qp_error(struct rxe_qp *qp); - -void rxe_qp_destroy(struct rxe_qp *qp); - -void rxe_qp_cleanup(struct rxe_pool_entry *arg); +int rxe_qp_chk_destroy(struct rxe_qp *qp); +void rxe_qp_cleanup(struct rxe_pool_elem *elem); static inline int qp_num(struct rxe_qp *qp) { @@ -162,7 
+144,7 @@ static inline int rcv_wqe_size(int max_sge) max_sge * sizeof(struct ib_sge); } -void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res); +void free_rd_atomic_resource(struct resp_res *res); static inline void rxe_advance_resp_resource(struct rxe_qp *qp) { @@ -175,18 +157,16 @@ void retransmit_timer(struct timer_list *t); void rnr_nak_timer(struct timer_list *t); /* rxe_srq.c */ -#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT) - -int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, - struct ib_srq_attr *attr, enum ib_srq_attr_mask mask); - +int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init); int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_init_attr *init, struct ib_udata *udata, struct rxe_create_srq_resp __user *uresp); - +int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, + struct ib_srq_attr *attr, enum ib_srq_attr_mask mask); int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata); +void rxe_srq_cleanup(struct rxe_pool_elem *elem); void rxe_dealloc(struct ib_device *ib_dev); diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c index 1c1d1b53312d..86cc2e18a7fd 100644 --- a/drivers/infiniband/sw/rxe/rxe_mcast.c +++ b/drivers/infiniband/sw/rxe/rxe_mcast.c @@ -1,179 +1,479 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* + * Copyright (c) 2022 Hewlett Packard Enterprise, Inc. All rights reserved. * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. */ +/* + * rxe_mcast.c implements driver support for multicast transport. + * It is based on two data structures struct rxe_mcg ('mcg') and + * struct rxe_mca ('mca'). An mcg is allocated each time a qp is + * attached to a new mgid for the first time. These are indexed by + * a red-black tree using the mgid. This data structure is searched + * for the mcg when a multicast packet is received and when another + * qp is attached to the same mgid. It is cleaned up when the last qp + * is detached from the mcg. Each time a qp is attached to an mcg an + * mca is created. It holds a pointer to the qp and is added to a list + * of qp's that are attached to the mcg. The qp_list is used to replicate + * mcast packets in the rxe receive path. 
+ */ + #include "rxe.h" -#include "rxe_loc.h" -/* caller should hold mc_grp_pool->pool_lock */ -static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe, - struct rxe_pool *pool, - union ib_gid *mgid) +/** + * rxe_mcast_add - add multicast address to rxe device + * @rxe: rxe device object + * @mgid: multicast address as a gid + * + * Returns 0 on success else an error + */ +static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) { - int err; - struct rxe_mc_grp *grp; + unsigned char ll_addr[ETH_ALEN]; - grp = rxe_alloc_locked(&rxe->mc_grp_pool); - if (!grp) - return ERR_PTR(-ENOMEM); + ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); - INIT_LIST_HEAD(&grp->qp_list); - spin_lock_init(&grp->mcg_lock); - grp->rxe = rxe; - rxe_add_key_locked(grp, mgid); + return dev_mc_add(rxe->ndev, ll_addr); +} - err = rxe_mcast_add(rxe, mgid); - if (unlikely(err)) { - rxe_drop_key_locked(grp); - rxe_drop_ref(grp); - return ERR_PTR(err); +/** + * rxe_mcast_del - delete multicast address from rxe device + * @rxe: rxe device object + * @mgid: multicast address as a gid + * + * Returns 0 on success else an error + */ +static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid) +{ + unsigned char ll_addr[ETH_ALEN]; + + ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); + + return dev_mc_del(rxe->ndev, ll_addr); +} + +/** + * __rxe_insert_mcg - insert an mcg into red-black tree (rxe->mcg_tree) + * @mcg: mcg object with an embedded red-black tree node + * + * Context: caller must hold a reference to mcg and rxe->mcg_lock and + * is responsible to avoid adding the same mcg twice to the tree. + */ +static void __rxe_insert_mcg(struct rxe_mcg *mcg) +{ + struct rb_root *tree = &mcg->rxe->mcg_tree; + struct rb_node **link = &tree->rb_node; + struct rb_node *node = NULL; + struct rxe_mcg *tmp; + int cmp; + + while (*link) { + node = *link; + tmp = rb_entry(node, struct rxe_mcg, node); + + cmp = memcmp(&tmp->mgid, &mcg->mgid, sizeof(mcg->mgid)); + if (cmp > 0) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; } - return grp; + rb_link_node(&mcg->node, node, link); + rb_insert_color(&mcg->node, tree); } -int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, - struct rxe_mc_grp **grp_p) +/** + * __rxe_remove_mcg - remove an mcg from red-black tree holding lock + * @mcg: mcast group object with an embedded red-black tree node + * + * Context: caller must hold a reference to mcg and rxe->mcg_lock + */ +static void __rxe_remove_mcg(struct rxe_mcg *mcg) { - int err; - struct rxe_mc_grp *grp; - struct rxe_pool *pool = &rxe->mc_grp_pool; - unsigned long flags; + rb_erase(&mcg->node, &mcg->rxe->mcg_tree); +} - if (rxe->attr.max_mcast_qp_attach == 0) - return -EINVAL; +/** + * __rxe_lookup_mcg - lookup mcg in rxe->mcg_tree while holding lock + * @rxe: rxe device object + * @mgid: multicast IP address + * + * Context: caller must hold rxe->mcg_lock + * Returns: mcg on success and takes a ref to mcg else NULL + */ +static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe, + union ib_gid *mgid) +{ + struct rb_root *tree = &rxe->mcg_tree; + struct rxe_mcg *mcg; + struct rb_node *node; + int cmp; + + node = tree->rb_node; - write_lock_irqsave(&pool->pool_lock, flags); + while (node) { + mcg = rb_entry(node, struct rxe_mcg, node); - grp = rxe_pool_get_key_locked(pool, mgid); - if (grp) - goto done; + cmp = memcmp(&mcg->mgid, mgid, sizeof(*mgid)); - grp = create_grp(rxe, pool, mgid); - if (IS_ERR(grp)) { - write_unlock_irqrestore(&pool->pool_lock, flags); - err = PTR_ERR(grp); - 
return err; + if (cmp > 0) + node = node->rb_left; + else if (cmp < 0) + node = node->rb_right; + else + break; } -done: - write_unlock_irqrestore(&pool->pool_lock, flags); - *grp_p = grp; - return 0; + if (node) { + kref_get(&mcg->ref_cnt); + return mcg; + } + + return NULL; } -int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, - struct rxe_mc_grp *grp) +/** + * rxe_lookup_mcg - lookup up mcg in red-back tree + * @rxe: rxe device object + * @mgid: multicast IP address + * + * Returns: mcg if found else NULL + */ +struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid) { + struct rxe_mcg *mcg; + + spin_lock_bh(&rxe->mcg_lock); + mcg = __rxe_lookup_mcg(rxe, mgid); + spin_unlock_bh(&rxe->mcg_lock); + + return mcg; +} + +/** + * __rxe_init_mcg - initialize a new mcg + * @rxe: rxe device + * @mgid: multicast address as a gid + * @mcg: new mcg object + * + * Context: caller should hold rxe->mcg lock + */ +static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid, + struct rxe_mcg *mcg) +{ + kref_init(&mcg->ref_cnt); + memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid)); + INIT_LIST_HEAD(&mcg->qp_list); + mcg->rxe = rxe; + + /* caller holds a ref on mcg but that will be + * dropped when mcg goes out of scope. We need to take a ref + * on the pointer that will be saved in the red-black tree + * by __rxe_insert_mcg and used to lookup mcg from mgid later. + * Inserting mcg makes it visible to outside so this should + * be done last after the object is ready. + */ + kref_get(&mcg->ref_cnt); + __rxe_insert_mcg(mcg); +} + +/** + * rxe_get_mcg - lookup or allocate a mcg + * @rxe: rxe device object + * @mgid: multicast IP address as a gid + * + * Returns: mcg on success else ERR_PTR(error) + */ +static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid) +{ + struct rxe_mcg *mcg, *tmp; int err; - struct rxe_mc_elem *elem; - /* check to see of the qp is already a member of the group */ - spin_lock_bh(&qp->grp_lock); - spin_lock_bh(&grp->mcg_lock); - list_for_each_entry(elem, &grp->qp_list, qp_list) { - if (elem->qp == qp) { - err = 0; - goto out; - } - } + if (rxe->attr.max_mcast_grp == 0) + return ERR_PTR(-EINVAL); + + /* check to see if mcg already exists */ + mcg = rxe_lookup_mcg(rxe, mgid); + if (mcg) + return mcg; - if (grp->num_qp >= rxe->attr.max_mcast_qp_attach) { + /* check to see if we have reached limit */ + if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) { err = -ENOMEM; - goto out; + goto err_dec; } - elem = rxe_alloc_locked(&rxe->mc_elem_pool); - if (!elem) { + /* speculative alloc of new mcg */ + mcg = kzalloc(sizeof(*mcg), GFP_KERNEL); + if (!mcg) { err = -ENOMEM; - goto out; + goto err_dec; + } + + spin_lock_bh(&rxe->mcg_lock); + /* re-check to see if someone else just added it */ + tmp = __rxe_lookup_mcg(rxe, mgid); + if (tmp) { + spin_unlock_bh(&rxe->mcg_lock); + atomic_dec(&rxe->mcg_num); + kfree(mcg); + return tmp; + } + + __rxe_init_mcg(rxe, mgid, mcg); + spin_unlock_bh(&rxe->mcg_lock); + + /* add mcast address outside of lock */ + err = rxe_mcast_add(rxe, mgid); + if (!err) + return mcg; + + kfree(mcg); +err_dec: + atomic_dec(&rxe->mcg_num); + return ERR_PTR(err); +} + +/** + * rxe_cleanup_mcg - cleanup mcg for kref_put + * @kref: struct kref embnedded in mcg + */ +void rxe_cleanup_mcg(struct kref *kref) +{ + struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt); + + kfree(mcg); +} + +/** + * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock + * @mcg: the mcg object + * + * Context: caller is 
holding rxe->mcg_lock + * no qp's are attached to mcg + */ +static void __rxe_destroy_mcg(struct rxe_mcg *mcg) +{ + struct rxe_dev *rxe = mcg->rxe; + + /* remove mcg from red-black tree then drop ref */ + __rxe_remove_mcg(mcg); + kref_put(&mcg->ref_cnt, rxe_cleanup_mcg); + + atomic_dec(&rxe->mcg_num); +} + +/** + * rxe_destroy_mcg - destroy mcg object + * @mcg: the mcg object + * + * Context: no qp's are attached to mcg + */ +static void rxe_destroy_mcg(struct rxe_mcg *mcg) +{ + /* delete mcast address outside of lock */ + rxe_mcast_del(mcg->rxe, &mcg->mgid); + + spin_lock_bh(&mcg->rxe->mcg_lock); + __rxe_destroy_mcg(mcg); + spin_unlock_bh(&mcg->rxe->mcg_lock); +} + +/** + * __rxe_init_mca - initialize a new mca holding lock + * @qp: qp object + * @mcg: mcg object + * @mca: empty space for new mca + * + * Context: caller must hold references on qp and mcg, rxe->mcg_lock + * and pass memory for new mca + * + * Returns: 0 on success else an error + */ +static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg, + struct rxe_mca *mca) +{ + struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + int n; + + n = atomic_inc_return(&rxe->mcg_attach); + if (n > rxe->attr.max_total_mcast_qp_attach) { + atomic_dec(&rxe->mcg_attach); + return -ENOMEM; + } + + n = atomic_inc_return(&mcg->qp_num); + if (n > rxe->attr.max_mcast_qp_attach) { + atomic_dec(&mcg->qp_num); + atomic_dec(&rxe->mcg_attach); + return -ENOMEM; } - /* each qp holds a ref on the grp */ - rxe_add_ref(grp); + atomic_inc(&qp->mcg_num); + + rxe_get(qp); + mca->qp = qp; + + list_add_tail(&mca->qp_list, &mcg->qp_list); + + return 0; +} - grp->num_qp++; - elem->qp = qp; - elem->grp = grp; +/** + * rxe_attach_mcg - attach qp to mcg if not already attached + * @qp: qp object + * @mcg: mcg object + * + * Context: caller must hold reference on qp and mcg. 
+ * Returns: 0 on success else an error + */ +static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp) +{ + struct rxe_dev *rxe = mcg->rxe; + struct rxe_mca *mca, *tmp; + int err; - list_add(&elem->qp_list, &grp->qp_list); - list_add(&elem->grp_list, &qp->grp_list); + /* check to see if the qp is already a member of the group */ + spin_lock_bh(&rxe->mcg_lock); + list_for_each_entry(mca, &mcg->qp_list, qp_list) { + if (mca->qp == qp) { + spin_unlock_bh(&rxe->mcg_lock); + return 0; + } + } + spin_unlock_bh(&rxe->mcg_lock); - err = 0; + /* speculative alloc new mca without using GFP_ATOMIC */ + mca = kzalloc(sizeof(*mca), GFP_KERNEL); + if (!mca) + return -ENOMEM; + + spin_lock_bh(&rxe->mcg_lock); + /* re-check to see if someone else just attached qp */ + list_for_each_entry(tmp, &mcg->qp_list, qp_list) { + if (tmp->qp == qp) { + kfree(mca); + err = 0; + goto out; + } + } + + err = __rxe_init_mca(qp, mcg, mca); + if (err) + kfree(mca); out: - spin_unlock_bh(&grp->mcg_lock); - spin_unlock_bh(&qp->grp_lock); + spin_unlock_bh(&rxe->mcg_lock); return err; } -int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, - union ib_gid *mgid) +/** + * __rxe_cleanup_mca - cleanup mca object holding lock + * @mca: mca object + * @mcg: mcg object + * + * Context: caller must hold a reference to mcg and rxe->mcg_lock + */ +static void __rxe_cleanup_mca(struct rxe_mca *mca, struct rxe_mcg *mcg) { - struct rxe_mc_grp *grp; - struct rxe_mc_elem *elem, *tmp; + list_del(&mca->qp_list); - grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid); - if (!grp) - goto err1; + atomic_dec(&mcg->qp_num); + atomic_dec(&mcg->rxe->mcg_attach); + atomic_dec(&mca->qp->mcg_num); + rxe_put(mca->qp); - spin_lock_bh(&qp->grp_lock); - spin_lock_bh(&grp->mcg_lock); + kfree(mca); +} + +/** + * rxe_detach_mcg - detach qp from mcg + * @mcg: mcg object + * @qp: qp object + * + * Returns: 0 on success else an error if qp is not attached. + */ +static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp) +{ + struct rxe_dev *rxe = mcg->rxe; + struct rxe_mca *mca, *tmp; - list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) { - if (elem->qp == qp) { - list_del(&elem->qp_list); - list_del(&elem->grp_list); - grp->num_qp--; + spin_lock_bh(&rxe->mcg_lock); + list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) { + if (mca->qp == qp) { + __rxe_cleanup_mca(mca, mcg); - spin_unlock_bh(&grp->mcg_lock); - spin_unlock_bh(&qp->grp_lock); - rxe_drop_ref(elem); - rxe_drop_ref(grp); /* ref held by QP */ - rxe_drop_ref(grp); /* ref from get_key */ + /* if the number of qp's attached to the + * mcast group falls to zero go ahead and + * tear it down. 
This will not free the + * object since we are still holding a ref + * from the caller + */ + if (atomic_read(&mcg->qp_num) <= 0) + __rxe_destroy_mcg(mcg); + + spin_unlock_bh(&rxe->mcg_lock); return 0; } } - spin_unlock_bh(&grp->mcg_lock); - spin_unlock_bh(&qp->grp_lock); - rxe_drop_ref(grp); /* ref from get_key */ -err1: + /* we didn't find the qp on the list */ + spin_unlock_bh(&rxe->mcg_lock); return -EINVAL; } -void rxe_drop_all_mcast_groups(struct rxe_qp *qp) +/** + * rxe_attach_mcast - attach qp to multicast group (see IBA-11.3.1) + * @ibqp: (IB) qp object + * @mgid: multicast IP address + * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6) + * + * Returns: 0 on success else an errno + */ +int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid) { - struct rxe_mc_grp *grp; - struct rxe_mc_elem *elem; + int err; + struct rxe_dev *rxe = to_rdev(ibqp->device); + struct rxe_qp *qp = to_rqp(ibqp); + struct rxe_mcg *mcg; - while (1) { - spin_lock_bh(&qp->grp_lock); - if (list_empty(&qp->grp_list)) { - spin_unlock_bh(&qp->grp_lock); - break; - } - elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem, - grp_list); - list_del(&elem->grp_list); - spin_unlock_bh(&qp->grp_lock); - - grp = elem->grp; - spin_lock_bh(&grp->mcg_lock); - list_del(&elem->qp_list); - grp->num_qp--; - spin_unlock_bh(&grp->mcg_lock); - rxe_drop_ref(grp); - rxe_drop_ref(elem); - } + /* takes a ref on mcg if successful */ + mcg = rxe_get_mcg(rxe, mgid); + if (IS_ERR(mcg)) + return PTR_ERR(mcg); + + err = rxe_attach_mcg(mcg, qp); + + /* if we failed to attach the first qp to mcg tear it down */ + if (atomic_read(&mcg->qp_num) == 0) + rxe_destroy_mcg(mcg); + + kref_put(&mcg->ref_cnt, rxe_cleanup_mcg); + + return err; } -void rxe_mc_cleanup(struct rxe_pool_entry *arg) +/** + * rxe_detach_mcast - detach qp from multicast group (see IBA-11.3.2) + * @ibqp: address of (IB) qp object + * @mgid: multicast IP address + * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6) + * + * Returns: 0 on success else an errno + */ +int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid) { - struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem); - struct rxe_dev *rxe = grp->rxe; + struct rxe_dev *rxe = to_rdev(ibqp->device); + struct rxe_qp *qp = to_rqp(ibqp); + struct rxe_mcg *mcg; + int err; - rxe_drop_key(grp); - rxe_mcast_delete(rxe, &grp->mgid); + mcg = rxe_lookup_mcg(rxe, mgid); + if (!mcg) + return -EINVAL; + + err = rxe_detach_mcg(mcg, qp); + kref_put(&mcg->ref_cnt, rxe_cleanup_mcg); + + return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index 035f226af133..9149b6095429 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -4,7 +4,6 @@ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. 
*/ -#include <linux/module.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/errno.h> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 53271df10e47..502e9ada99b3 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -24,7 +24,7 @@ u8 rxe_get_next_key(u32 last_key) int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) { - struct rxe_map_set *set = mr->cur_map_set; + switch (mr->type) { case IB_MR_TYPE_DMA: @@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) case IB_MR_TYPE_USER: case IB_MR_TYPE_MEM_REG: - if (iova < set->iova || length > set->length || - iova > set->iova + set->length - length) + if (iova < mr->ibmr.iova || length > mr->ibmr.length || + iova > mr->ibmr.iova + mr->ibmr.length - length) return -EFAULT; return 0; @@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) static void rxe_mr_init(int access, struct rxe_mr *mr) { - u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1); + u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1); u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0; /* set ibmr->l/rkey and also copy into private l/rkey @@ -65,106 +65,56 @@ static void rxe_mr_init(int access, struct rxe_mr *mr) mr->map_shift = ilog2(RXE_BUF_PER_MAP); } -static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set) -{ - int i; - - for (i = 0; i < num_map; i++) - kfree(set->map[i]); - - kfree(set->map); - kfree(set); -} - -static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp) +static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf) { int i; - struct rxe_map_set *set; + int num_map; + struct rxe_map **map = mr->map; - set = kmalloc(sizeof(*set), GFP_KERNEL); - if (!set) - goto err_out; + num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP; - set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL); - if (!set->map) - goto err_free_set; + mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL); + if (!mr->map) + goto err1; for (i = 0; i < num_map; i++) { - set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL); - if (!set->map[i]) - goto err_free_map; + mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL); + if (!mr->map[i]) + goto err2; } - *setp = set; - - return 0; - -err_free_map: - for (i--; i >= 0; i--) - kfree(set->map[i]); - - kfree(set->map); -err_free_set: - kfree(set); -err_out: - return -ENOMEM; -} - -/** - * rxe_mr_alloc() - Allocate memory map array(s) for MR - * @mr: Memory region - * @num_buf: Number of buffer descriptors to support - * @both: If non zero allocate both mr->map and mr->next_map - * else just allocate mr->map. 
Used for fast MRs - * - * Return: 0 on success else an error - */ -static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both) -{ - int ret; - int num_map; - BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP)); - num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP; mr->map_shift = ilog2(RXE_BUF_PER_MAP); mr->map_mask = RXE_BUF_PER_MAP - 1; + mr->num_buf = num_buf; - mr->max_buf = num_map * RXE_BUF_PER_MAP; mr->num_map = num_map; - - ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set); - if (ret) - goto err_out; - - if (both) { - ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set); - if (ret) { - rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); - goto err_out; - } - } + mr->max_buf = num_map * RXE_BUF_PER_MAP; return 0; -err_out: +err2: + for (i--; i >= 0; i--) + kfree(mr->map[i]); + + kfree(mr->map); +err1: return -ENOMEM; } -void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr) +void rxe_mr_init_dma(int access, struct rxe_mr *mr) { rxe_mr_init(access, mr); - mr->ibmr.pd = &pd->ibpd; mr->access = access; mr->state = RXE_MR_STATE_VALID; mr->type = IB_MR_TYPE_DMA; } -int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, +int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, int access, struct rxe_mr *mr) { - struct rxe_map_set *set; struct rxe_map **map; struct rxe_phys_buf *buf = NULL; struct ib_umem *umem; @@ -172,8 +122,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int num_buf; void *vaddr; int err; + int i; - umem = ib_umem_get(pd->ibpd.device, start, length, access); + umem = ib_umem_get(&rxe->ib_dev, start, length, access); if (IS_ERR(umem)) { pr_warn("%s: Unable to pin memory region err = %d\n", __func__, (int)PTR_ERR(umem)); @@ -185,20 +136,18 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, rxe_mr_init(access, mr); - err = rxe_mr_alloc(mr, num_buf, 0); + err = rxe_mr_alloc(mr, num_buf); if (err) { pr_warn("%s: Unable to allocate memory for map\n", __func__); goto err_release_umem; } - set = mr->cur_map_set; - set->page_shift = PAGE_SHIFT; - set->page_mask = PAGE_SIZE - 1; - - num_buf = 0; - map = set->map; + mr->page_shift = PAGE_SHIFT; + mr->page_mask = PAGE_SIZE - 1; + num_buf = 0; + map = mr->map; if (length > 0) { buf = map[0]->buf; @@ -221,42 +170,39 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, buf->size = PAGE_SIZE; num_buf++; buf++; + } } - mr->ibmr.pd = &pd->ibpd; mr->umem = umem; mr->access = access; + mr->offset = ib_umem_offset(umem); mr->state = RXE_MR_STATE_VALID; mr->type = IB_MR_TYPE_USER; - set->length = length; - set->iova = iova; - set->va = start; - set->offset = ib_umem_offset(umem); - return 0; err_cleanup_map: - rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); + for (i = 0; i < mr->num_map; i++) + kfree(mr->map[i]); + kfree(mr->map); err_release_umem: ib_umem_release(umem); err_out: return err; } -int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr) +int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr) { int err; /* always allow remote access for FMRs */ rxe_mr_init(IB_ACCESS_REMOTE, mr); - err = rxe_mr_alloc(mr, max_pages, 1); + err = rxe_mr_alloc(mr, max_pages); if (err) goto err1; - mr->ibmr.pd = &pd->ibpd; mr->max_buf = max_pages; mr->state = RXE_MR_STATE_FREE; mr->type = IB_MR_TYPE_MEM_REG; @@ -270,24 +216,21 @@ err1: static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out, size_t *offset_out) { - struct rxe_map_set *set = mr->cur_map_set; - size_t 
offset = iova - set->iova + set->offset; + size_t offset = iova - mr->ibmr.iova + mr->offset; int map_index; int buf_index; u64 length; - struct rxe_map *map; - if (likely(set->page_shift)) { - *offset_out = offset & set->page_mask; - offset >>= set->page_shift; + if (likely(mr->page_shift)) { + *offset_out = offset & mr->page_mask; + offset >>= mr->page_shift; *n_out = offset & mr->map_mask; *m_out = offset >> mr->map_shift; } else { map_index = 0; buf_index = 0; - map = set->map[map_index]; - length = map->buf[buf_index].size; + length = mr->map[map_index]->buf[buf_index].size; while (offset >= length) { offset -= length; @@ -297,8 +240,7 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out, map_index++; buf_index = 0; } - map = set->map[map_index]; - length = map->buf[buf_index].size; + length = mr->map[map_index]->buf[buf_index].size; } *m_out = map_index; @@ -319,7 +261,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) goto out; } - if (!mr->cur_map_set) { + if (!mr->map) { addr = (void *)(uintptr_t)iova; goto out; } @@ -332,13 +274,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) lookup_iova(mr, iova, &m, &n, &offset); - if (offset + length > mr->cur_map_set->map[m]->buf[n].size) { + if (offset + length > mr->map[m]->buf[n].size) { pr_warn("crosses page boundary\n"); addr = NULL; goto out; } - addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset; + addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset; out: return addr; @@ -374,7 +316,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, return 0; } - WARN_ON_ONCE(!mr->cur_map_set); + WARN_ON_ONCE(!mr->map); err = mr_check_range(mr, iova, length); if (err) { @@ -384,7 +326,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, lookup_iova(mr, iova, &m, &i, &offset); - map = mr->cur_map_set->map + m; + map = mr->map + m; buf = map[0]->buf + i; while (length > 0) { @@ -461,7 +403,7 @@ int copy_data( if (offset >= sge->length) { if (mr) { - rxe_drop_ref(mr); + rxe_put(mr); mr = NULL; } sge++; @@ -506,13 +448,13 @@ int copy_data( dma->resid = resid; if (mr) - rxe_drop_ref(mr); + rxe_put(mr); return 0; err2: if (mr) - rxe_drop_ref(mr); + rxe_put(mr); err1: return err; } @@ -571,29 +513,29 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, (type == RXE_LOOKUP_REMOTE && mr->rkey != key) || mr_pd(mr) != pd || (access && !(access & mr->access)) || mr->state != RXE_MR_STATE_VALID)) { - rxe_drop_ref(mr); + rxe_put(mr); mr = NULL; } return mr; } -int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey) +int rxe_invalidate_mr(struct rxe_qp *qp, u32 key) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct rxe_mr *mr; int ret; - mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8); + mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8); if (!mr) { - pr_err("%s: No MR for rkey %#x\n", __func__, rkey); + pr_err("%s: No MR for key %#x\n", __func__, key); ret = -EINVAL; goto err; } - if (rkey != mr->rkey) { - pr_err("%s: rkey (%#x) doesn't match mr->rkey (%#x)\n", - __func__, rkey, mr->rkey); + if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) { + pr_err("%s: wr key (%#x) doesn't match mr key (%#x)\n", + __func__, key, (mr->rkey ? 
mr->rkey : mr->lkey)); ret = -EINVAL; goto err_drop_ref; } @@ -615,7 +557,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey) ret = 0; err_drop_ref: - rxe_drop_ref(mr); + rxe_put(mr); err: return ret; } @@ -630,9 +572,8 @@ err: int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe) { struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr); - u32 key = wqe->wr.wr.reg.key & 0xff; + u32 key = wqe->wr.wr.reg.key; u32 access = wqe->wr.wr.reg.access; - struct rxe_map_set *set; /* user can only register MR in free state */ if (unlikely(mr->state != RXE_MR_STATE_FREE)) { @@ -648,36 +589,19 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe) return -EINVAL; } + /* user is only allowed to change key portion of l/rkey */ + if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) { + pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n", + __func__, key, mr->lkey); + return -EINVAL; + } + mr->access = access; - mr->lkey = (mr->lkey & ~0xff) | key; - mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0; + mr->lkey = key; + mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0; + mr->ibmr.iova = wqe->wr.wr.reg.mr->iova; mr->state = RXE_MR_STATE_VALID; - set = mr->cur_map_set; - mr->cur_map_set = mr->next_map_set; - mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova; - mr->next_map_set = set; - - return 0; -} - -int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr) -{ - struct rxe_mr *mr = to_rmr(ibmr); - struct rxe_map_set *set = mr->next_map_set; - struct rxe_map *map; - struct rxe_phys_buf *buf; - - if (unlikely(set->nbuf == mr->num_buf)) - return -ENOMEM; - - map = set->map[set->nbuf / RXE_BUF_PER_MAP]; - buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP]; - - buf->addr = addr; - buf->size = ibmr->page_size; - set->nbuf++; - return 0; } @@ -685,29 +609,27 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct rxe_mr *mr = to_rmr(ibmr); - if (atomic_read(&mr->num_mw) > 0) { - pr_warn("%s: Attempt to deregister an MR while bound to MWs\n", - __func__); + /* See IBA 10.6.7.2.6 */ + if (atomic_read(&mr->num_mw) > 0) return -EINVAL; - } - mr->state = RXE_MR_STATE_INVALID; - rxe_drop_ref(mr_pd(mr)); - rxe_drop_index(mr); - rxe_drop_ref(mr); + rxe_cleanup(mr); return 0; } -void rxe_mr_cleanup(struct rxe_pool_entry *arg) +void rxe_mr_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem); + struct rxe_mr *mr = container_of(elem, typeof(*mr), elem); + int i; + rxe_put(mr_pd(mr)); ib_umem_release(mr->umem); - if (mr->cur_map_set) - rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); + if (mr->map) { + for (i = 0; i < mr->num_map; i++) + kfree(mr->map[i]); - if (mr->next_map_set) - rxe_mr_free_map_set(mr->num_map, mr->next_map_set); + kfree(mr->map); + } } diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c index 9534a7fe1a98..902b7df7aaed 100644 --- a/drivers/infiniband/sw/rxe/rxe_mw.c +++ b/drivers/infiniband/sw/rxe/rxe_mw.c @@ -3,6 +3,14 @@ * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved. */ +/* + * The rdma_rxe driver supports type 1 or type 2B memory windows. + * Type 1 MWs are created by ibv_alloc_mw() verbs calls and bound by + * ibv_bind_mw() calls. Type 2 MWs are also created by ibv_alloc_mw() + * but bound by bind_mw work requests. The ibv_bind_mw() call is converted + * by libibverbs to a bind_mw work request. 
+ */ + #include "rxe.h" int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) @@ -12,58 +20,29 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) struct rxe_dev *rxe = to_rdev(ibmw->device); int ret; - rxe_add_ref(pd); + rxe_get(pd); ret = rxe_add_to_pool(&rxe->mw_pool, mw); if (ret) { - rxe_drop_ref(pd); + rxe_put(pd); return ret; } - rxe_add_index(mw); - mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1); + mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? RXE_MW_STATE_FREE : RXE_MW_STATE_VALID; spin_lock_init(&mw->lock); - return 0; -} - -static void rxe_do_dealloc_mw(struct rxe_mw *mw) -{ - if (mw->mr) { - struct rxe_mr *mr = mw->mr; - - mw->mr = NULL; - atomic_dec(&mr->num_mw); - rxe_drop_ref(mr); - } + rxe_finalize(mw); - if (mw->qp) { - struct rxe_qp *qp = mw->qp; - - mw->qp = NULL; - rxe_drop_ref(qp); - } - - mw->access = 0; - mw->addr = 0; - mw->length = 0; - mw->state = RXE_MW_STATE_INVALID; + return 0; } int rxe_dealloc_mw(struct ib_mw *ibmw) { struct rxe_mw *mw = to_rmw(ibmw); - struct rxe_pd *pd = to_rpd(ibmw->pd); - unsigned long flags; - spin_lock_irqsave(&mw->lock, flags); - rxe_do_dealloc_mw(mw); - spin_unlock_irqrestore(&mw->lock, flags); - - rxe_drop_ref(mw); - rxe_drop_ref(pd); + rxe_cleanup(mw); return 0; } @@ -71,8 +50,6 @@ int rxe_dealloc_mw(struct ib_mw *ibmw) static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr) { - u32 key = wqe->wr.wr.mw.rkey & 0xff; - if (mw->ibmw.type == IB_MW_TYPE_1) { if (unlikely(mw->state != RXE_MW_STATE_VALID)) { pr_err_once( @@ -110,11 +87,6 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, } } - if (unlikely(key == (mw->rkey & 0xff))) { - pr_err_once("attempt to bind MW with same key\n"); - return -EINVAL; - } - /* remaining checks only apply to a nonzero MR */ if (!mr) return 0; @@ -136,21 +108,21 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) && !(mr->access & IB_ACCESS_LOCAL_WRITE))) { pr_err_once( - "attempt to bind an writeable MW to an MR without local write access\n"); + "attempt to bind an Writable MW to an MR without local write access\n"); return -EINVAL; } /* C10-75 */ if (mw->access & IB_ZERO_BASED) { - if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) { + if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) { pr_err_once( "attempt to bind a ZB MW outside of the MR\n"); return -EINVAL; } } else { - if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) || + if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) || ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > - (mr->cur_map_set->iova + mr->cur_map_set->length)))) { + (mr->ibmr.iova + mr->ibmr.length)))) { pr_err_once( "attempt to bind a VA MW outside of the MR\n"); return -EINVAL; @@ -172,7 +144,7 @@ static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, mw->length = wqe->wr.wr.mw.length; if (mw->mr) { - rxe_drop_ref(mw->mr); + rxe_put(mw->mr); atomic_dec(&mw->mr->num_mw); mw->mr = NULL; } @@ -180,11 +152,11 @@ static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (mw->length) { mw->mr = mr; atomic_inc(&mr->num_mw); - rxe_add_ref(mr); + rxe_get(mr); } if (mw->ibmw.type == IB_MW_TYPE_2) { - rxe_add_ref(qp); + rxe_get(qp); mw->qp = qp; } } @@ -197,7 +169,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) struct rxe_dev *rxe = 
to_rdev(qp->ibqp.device); u32 mw_rkey = wqe->wr.wr.mw.mw_rkey; u32 mr_lkey = wqe->wr.wr.mw.mr_lkey; - unsigned long flags; mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8); if (unlikely(!mw)) { @@ -225,7 +196,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) mr = NULL; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_bind_mw(qp, wqe, mw, mr); if (ret) @@ -233,12 +204,12 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) rxe_do_bind_mw(qp, wqe, mw, mr); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); err_drop_mr: if (mr) - rxe_drop_ref(mr); + rxe_put(mr); err_drop_mw: - rxe_drop_ref(mw); + rxe_put(mw); err: return ret; } @@ -263,13 +234,13 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw) /* valid type 2 MW will always have a QP pointer */ qp = mw->qp; mw->qp = NULL; - rxe_drop_ref(qp); + rxe_put(qp); /* valid type 2 MW will always have an MR pointer */ mr = mw->mr; mw->mr = NULL; atomic_dec(&mr->num_mw); - rxe_drop_ref(mr); + rxe_put(mr); mw->access = 0; mw->addr = 0; @@ -280,7 +251,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw) int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); - unsigned long flags; struct rxe_mw *mw; int ret; @@ -295,7 +265,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) goto err_drop_ref; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_invalidate_mw(qp, mw); if (ret) @@ -303,9 +273,9 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) rxe_do_invalidate_mw(mw); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); err_drop_ref: - rxe_drop_ref(mw); + rxe_put(mw); err: return ret; } @@ -326,16 +296,37 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) (mw->length == 0) || (access && !(access & mw->access)) || mw->state != RXE_MW_STATE_VALID)) { - rxe_drop_ref(mw); + rxe_put(mw); return NULL; } return mw; } -void rxe_mw_cleanup(struct rxe_pool_entry *elem) +void rxe_mw_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem); + struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); + struct rxe_pd *pd = to_rpd(mw->ibmw.pd); + + rxe_put(pd); + + if (mw->mr) { + struct rxe_mr *mr = mw->mr; + + mw->mr = NULL; + atomic_dec(&mr->num_mw); + rxe_put(mr); + } + + if (mw->qp) { + struct rxe_qp *qp = mw->qp; + + mw->qp = NULL; + rxe_put(qp); + } - rxe_drop_index(mw); + mw->access = 0; + mw->addr = 0; + mw->length = 0; + mw->state = RXE_MW_STATE_INVALID; } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 2cb810cb890a..35f327b9d4b8 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -20,28 +20,6 @@ static struct rxe_recv_sockets recv_sockets; -int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) -{ - int err; - unsigned char ll_addr[ETH_ALEN]; - - ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); - err = dev_mc_add(rxe->ndev, ll_addr); - - return err; -} - -int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid) -{ - int err; - unsigned char ll_addr[ETH_ALEN]; - - ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); - err = dev_mc_del(rxe->ndev, ll_addr); - - return err; -} - static struct dst_entry *rxe_find_route4(struct net_device *ndev, struct in_addr *saddr, struct in_addr *daddr) @@ -167,7 +145,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff 
*skb) goto drop; if (skb_linearize(skb)) { - pr_err("skb_linearize failed\n"); ib_device_put(&rxe->ib_dev); goto drop; } @@ -293,13 +270,13 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb, ip6h->payload_len = htons(skb->len - sizeof(*ip6h)); } -static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb) +static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt, + struct sk_buff *skb) { struct rxe_qp *qp = pkt->qp; struct dst_entry *dst; bool xnet = false; __be16 df = htons(IP_DF); - struct rxe_av *av = rxe_get_av(pkt); struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr; struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr; @@ -319,11 +296,11 @@ static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb) return 0; } -static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb) +static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt, + struct sk_buff *skb) { struct rxe_qp *qp = pkt->qp; struct dst_entry *dst; - struct rxe_av *av = rxe_get_av(pkt); struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr; struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr; @@ -344,16 +321,17 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb) return 0; } -int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb) +int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt, + struct sk_buff *skb) { int err = 0; if (skb->protocol == htons(ETH_P_IP)) - err = prepare4(pkt, skb); + err = prepare4(av, pkt, skb); else if (skb->protocol == htons(ETH_P_IPV6)) - err = prepare6(pkt, skb); + err = prepare6(av, pkt, skb); - if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac)) + if (ether_addr_equal(skb->dev->dev_addr, av->dmac)) pkt->mask |= RXE_LOOPBACK_MASK; return err; @@ -369,7 +347,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb) skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) rxe_run_task(&qp->req.task, 1); - rxe_drop_ref(qp); + rxe_put(qp); } static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) @@ -379,7 +357,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) skb->destructor = rxe_skb_tx_dtor; skb->sk = pkt->qp->sk->sk; - rxe_add_ref(pkt->qp); + rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) { @@ -389,7 +367,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) } else { pr_err("Unknown layer 3 protocol: %d\n", skb->protocol); atomic_dec(&pkt->qp->skb_out); - rxe_drop_ref(pkt->qp); + rxe_put(pkt->qp); kfree_skb(skb); return -EINVAL; } @@ -444,7 +422,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, else err = rxe_send(skb, pkt); if (err) { - rxe->xmit_errors++; rxe_counter_inc(rxe, RXE_CNT_SEND_ERR); return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 3ef5a10a6efd..d4ba4d506f17 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -29,7 +29,6 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = { [IB_WR_SEND] = { .name = "IB_WR_SEND", .mask = { - [IB_QPT_SMI] = WR_INLINE_MASK | WR_SEND_MASK, [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK, [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK, [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK, @@ -39,7 +38,6 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = { [IB_WR_SEND_WITH_IMM] = { .name = "IB_WR_SEND_WITH_IMM", .mask = { - [IB_QPT_SMI] = WR_INLINE_MASK | WR_SEND_MASK, [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK, [IB_QPT_RC] = 
WR_INLINE_MASK | WR_SEND_MASK, [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK, @@ -108,8 +106,8 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = { struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { [IB_OPCODE_RC_SEND_FIRST] = { .name = "IB_OPCODE_RC_SEND_FIRST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -117,9 +115,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { } }, [IB_OPCODE_RC_SEND_MIDDLE] = { - .name = "IB_OPCODE_RC_SEND_MIDDLE]", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .name = "IB_OPCODE_RC_SEND_MIDDLE", + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -128,8 +126,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_LAST] = { .name = "IB_OPCODE_RC_SEND_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -138,21 +136,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_SEND_ONLY] = { .name = "IB_OPCODE_RC_SEND_ONLY", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -161,33 +159,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_RC_RDMA_WRITE_FIRST", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = 
RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -196,8 +194,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_RC_RDMA_WRITE_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -206,69 +204,69 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_REQUEST] = { .name = "IB_OPCODE_RC_RDMA_READ_REQUEST", - .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = { @@ -282,109 +280,110 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = { .name = 
"IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RC_ACKNOWLEDGE", - .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE", - .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_ATMACK] = RXE_BTH_BYTES - + RXE_AETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMACK_BYTES + RXE_AETH_BYTES, + [RXE_ATMACK] = RXE_BTH_BYTES + + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMACK_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_COMPARE_SWAP] = { .name = "IB_OPCODE_RC_COMPARE_SWAP", - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_ATMETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMETH_BYTES, } }, [IB_OPCODE_RC_FETCH_ADD] = { .name = "IB_OPCODE_RC_FETCH_ADD", - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_ATMETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMETH_BYTES, } }, [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = { .name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE", - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IETH_BYTES, } }, [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = { .name = "IB_OPCODE_RC_SEND_ONLY_INV", - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | 
RXE_RWR_MASK | RXE_SEND_MASK - | RXE_END_MASK | RXE_START_MASK, + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_END_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IETH_BYTES, } }, /* UC */ [IB_OPCODE_UC_SEND_FIRST] = { .name = "IB_OPCODE_UC_SEND_FIRST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -393,8 +392,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_MIDDLE] = { .name = "IB_OPCODE_UC_SEND_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -403,8 +402,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_LAST] = { .name = "IB_OPCODE_UC_SEND_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -413,21 +412,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_SEND_ONLY] = { .name = "IB_OPCODE_UC_SEND_ONLY", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -436,33 +435,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_UC_RDMA_WRITE_FIRST", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + 
RXE_RETH_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -471,8 +470,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_UC_RDMA_WRITE_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -481,460 +480,460 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, /* RD */ [IB_OPCODE_RD_SEND_FIRST] = { .name = "IB_OPCODE_RD_SEND_FIRST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_MIDDLE] = { .name = "IB_OPCODE_RD_SEND_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK 
| + RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_LAST] = { .name = "IB_OPCODE_RD_SEND_LAST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK - | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_SEND_ONLY] = { .name = "IB_OPCODE_RD_SEND_ONLY", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - 
[RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_RD_RDMA_WRITE_FIRST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_RD_RDMA_WRITE_LAST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + 
RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES - + RXE_DETH_BYTES + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES + + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_REQUEST] = { .name = "IB_OPCODE_RD_RDMA_READ_REQUEST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_REQ_MASK | RXE_READ_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_REQ_MASK | RXE_READ_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + 
RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK - | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | + RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK - | RXE_ACK_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | + RXE_ACK_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_ACKNOWLEDGE] = { .name = "IB_OPCODE_RD_ACKNOWLEDGE", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK | + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = 
RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMACK] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMACK] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_COMPARE_SWAP] = { .name = "RD_COMPARE_SWAP", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK - | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | + RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, [RXE_PAYLOAD] = RXE_BTH_BYTES + - + RXE_ATMETH_BYTES - + RXE_DETH_BYTES + - + RXE_RDETH_BYTES, + RXE_ATMETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_FETCH_ADD] = { .name = "IB_OPCODE_RD_FETCH_ADD", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK - | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | + RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, [RXE_PAYLOAD] = RXE_BTH_BYTES + - + RXE_ATMETH_BYTES - + RXE_DETH_BYTES + - + RXE_RDETH_BYTES, + RXE_ATMETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, /* UD */ [IB_OPCODE_UD_SEND_ONLY] = { .name = "IB_OPCODE_UD_SEND_ONLY", - .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_DETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_DETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index 
918270e34a35..86c7a8bf3cbb 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h @@ -50,9 +50,7 @@ enum rxe_device_param { | IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_SRQ_RESIZE | IB_DEVICE_MEM_MGT_EXTENSIONS - | IB_DEVICE_ALLOW_USER_UNREG | IB_DEVICE_MEM_WINDOW - | IB_DEVICE_MEM_WINDOW_TYPE_2A | IB_DEVICE_MEM_WINDOW_TYPE_2B, RXE_MAX_SGE = 32, RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) + @@ -107,6 +105,12 @@ enum rxe_device_param { RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64, RXE_INFLIGHT_SKBS_PER_QP_LOW = 16, + /* Max number of iterations of each tasklet * before yielding the cpu to let other * work make progress */ + RXE_MAX_ITERATIONS = 1024, + /* Delay before calling arbiter timer */ RXE_NSEC_ARB_TIMER_DELAY = 200, diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index 2e80bb6aa957..f50620f5a0a1 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -5,499 +5,298 @@ */ #include "rxe.h" -#include "rxe_loc.h" + +#define RXE_POOL_TIMEOUT (200) +#define RXE_POOL_ALIGN (16) static const struct rxe_type_info { const char *name; size_t size; size_t elem_offset; - void (*cleanup)(struct rxe_pool_entry *obj); - enum rxe_pool_flags flags; + void (*cleanup)(struct rxe_pool_elem *elem); u32 min_index; u32 max_index; - size_t key_offset; - size_t key_size; + u32 max_elem; } rxe_type_info[RXE_NUM_TYPES] = { [RXE_TYPE_UC] = { - .name = "rxe-uc", + .name = "uc", .size = sizeof(struct rxe_ucontext), - .elem_offset = offsetof(struct rxe_ucontext, pelem), - .flags = RXE_POOL_NO_ALLOC, + .elem_offset = offsetof(struct rxe_ucontext, elem), + .min_index = 1, + .max_index = UINT_MAX, + .max_elem = UINT_MAX, }, [RXE_TYPE_PD] = { - .name = "rxe-pd", + .name = "pd", .size = sizeof(struct rxe_pd), - .elem_offset = offsetof(struct rxe_pd, pelem), - .flags = RXE_POOL_NO_ALLOC, + .elem_offset = offsetof(struct rxe_pd, elem), + .min_index = 1, + .max_index = UINT_MAX, + .max_elem = UINT_MAX, }, [RXE_TYPE_AH] = { - .name = "rxe-ah", + .name = "ah", .size = sizeof(struct rxe_ah), - .elem_offset = offsetof(struct rxe_ah, pelem), - .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, + .elem_offset = offsetof(struct rxe_ah, elem), .min_index = RXE_MIN_AH_INDEX, .max_index = RXE_MAX_AH_INDEX, + .max_elem = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1, }, [RXE_TYPE_SRQ] = { - .name = "rxe-srq", + .name = "srq", .size = sizeof(struct rxe_srq), - .elem_offset = offsetof(struct rxe_srq, pelem), - .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, + .elem_offset = offsetof(struct rxe_srq, elem), + .cleanup = rxe_srq_cleanup, .min_index = RXE_MIN_SRQ_INDEX, .max_index = RXE_MAX_SRQ_INDEX, + .max_elem = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1, }, [RXE_TYPE_QP] = { - .name = "rxe-qp", + .name = "qp", .size = sizeof(struct rxe_qp), - .elem_offset = offsetof(struct rxe_qp, pelem), + .elem_offset = offsetof(struct rxe_qp, elem), .cleanup = rxe_qp_cleanup, - .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_QP_INDEX, .max_index = RXE_MAX_QP_INDEX, + .max_elem = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1, }, [RXE_TYPE_CQ] = { - .name = "rxe-cq", + .name = "cq", .size = sizeof(struct rxe_cq), - .elem_offset = offsetof(struct rxe_cq, pelem), - .flags = RXE_POOL_NO_ALLOC, + .elem_offset = offsetof(struct rxe_cq, elem), .cleanup = rxe_cq_cleanup, + .min_index = 1, + .max_index = UINT_MAX, + .max_elem = UINT_MAX, }, [RXE_TYPE_MR] = { - .name = "rxe-mr", + .name = "mr", .size = sizeof(struct rxe_mr), - .elem_offset =
offsetof(struct rxe_mr, pelem), + .elem_offset = offsetof(struct rxe_mr, elem), .cleanup = rxe_mr_cleanup, - .flags = RXE_POOL_INDEX, .min_index = RXE_MIN_MR_INDEX, .max_index = RXE_MAX_MR_INDEX, + .max_elem = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1, }, [RXE_TYPE_MW] = { - .name = "rxe-mw", + .name = "mw", .size = sizeof(struct rxe_mw), - .elem_offset = offsetof(struct rxe_mw, pelem), + .elem_offset = offsetof(struct rxe_mw, elem), .cleanup = rxe_mw_cleanup, - .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_MW_INDEX, .max_index = RXE_MAX_MW_INDEX, - }, - [RXE_TYPE_MC_GRP] = { - .name = "rxe-mc_grp", - .size = sizeof(struct rxe_mc_grp), - .elem_offset = offsetof(struct rxe_mc_grp, pelem), - .cleanup = rxe_mc_cleanup, - .flags = RXE_POOL_KEY, - .key_offset = offsetof(struct rxe_mc_grp, mgid), - .key_size = sizeof(union ib_gid), - }, - [RXE_TYPE_MC_ELEM] = { - .name = "rxe-mc_elem", - .size = sizeof(struct rxe_mc_elem), - .elem_offset = offsetof(struct rxe_mc_elem, pelem), + .max_elem = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1, }, }; -static inline const char *pool_name(struct rxe_pool *pool) -{ - return rxe_type_info[pool->type].name; -} - -static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) -{ - int err = 0; - - if ((max - min + 1) < pool->max_elem) { - pr_warn("not enough indices for max_elem\n"); - err = -EINVAL; - goto out; - } - - pool->index.max_index = max; - pool->index.min_index = min; - - pool->index.table = bitmap_zalloc(max - min + 1, GFP_KERNEL); - if (!pool->index.table) { - err = -ENOMEM; - goto out; - } - -out: - return err; -} - -int rxe_pool_init( - struct rxe_dev *rxe, - struct rxe_pool *pool, - enum rxe_elem_type type, - unsigned int max_elem) +void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool, + enum rxe_elem_type type) { - int err = 0; - size_t size = rxe_type_info[type].size; + const struct rxe_type_info *info = &rxe_type_info[type]; memset(pool, 0, sizeof(*pool)); pool->rxe = rxe; + pool->name = info->name; pool->type = type; - pool->max_elem = max_elem; - pool->elem_size = ALIGN(size, RXE_POOL_ALIGN); - pool->flags = rxe_type_info[type].flags; - pool->index.tree = RB_ROOT; - pool->key.tree = RB_ROOT; - pool->cleanup = rxe_type_info[type].cleanup; + pool->max_elem = info->max_elem; + pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN); + pool->elem_offset = info->elem_offset; + pool->cleanup = info->cleanup; atomic_set(&pool->num_elem, 0); - rwlock_init(&pool->pool_lock); - - if (rxe_type_info[type].flags & RXE_POOL_INDEX) { - err = rxe_pool_init_index(pool, - rxe_type_info[type].max_index, - rxe_type_info[type].min_index); - if (err) - goto out; - } - - if (rxe_type_info[type].flags & RXE_POOL_KEY) { - pool->key.key_offset = rxe_type_info[type].key_offset; - pool->key.key_size = rxe_type_info[type].key_size; - } - -out: - return err; + xa_init_flags(&pool->xa, XA_FLAGS_ALLOC); + pool->limit.min = info->min_index; + pool->limit.max = info->max_index; } void rxe_pool_cleanup(struct rxe_pool *pool) { - if (atomic_read(&pool->num_elem) > 0) - pr_warn("%s pool destroyed with unfree'd elem\n", - pool_name(pool)); - - bitmap_free(pool->index.table); -} - -static u32 alloc_index(struct rxe_pool *pool) -{ - u32 index; - u32 range = pool->index.max_index - pool->index.min_index + 1; - - index = find_next_zero_bit(pool->index.table, range, pool->index.last); - if (index >= range) - index = find_first_zero_bit(pool->index.table, range); - - WARN_ON_ONCE(index >= range); - set_bit(index, pool->index.table); - pool->index.last = 
index; - return index + pool->index.min_index; -} - -static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new) -{ - struct rb_node **link = &pool->index.tree.rb_node; - struct rb_node *parent = NULL; - struct rxe_pool_entry *elem; - - while (*link) { - parent = *link; - elem = rb_entry(parent, struct rxe_pool_entry, index_node); - - if (elem->index == new->index) { - pr_warn("element already exists!\n"); - return -EINVAL; - } - - if (elem->index > new->index) - link = &(*link)->rb_left; - else - link = &(*link)->rb_right; - } - - rb_link_node(&new->index_node, parent, link); - rb_insert_color(&new->index_node, &pool->index.tree); - - return 0; -} - -static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new) -{ - struct rb_node **link = &pool->key.tree.rb_node; - struct rb_node *parent = NULL; - struct rxe_pool_entry *elem; - int cmp; - - while (*link) { - parent = *link; - elem = rb_entry(parent, struct rxe_pool_entry, key_node); - - cmp = memcmp((u8 *)elem + pool->key.key_offset, - (u8 *)new + pool->key.key_offset, pool->key.key_size); - - if (cmp == 0) { - pr_warn("key already exists!\n"); - return -EINVAL; - } - - if (cmp > 0) - link = &(*link)->rb_left; - else - link = &(*link)->rb_right; - } - - rb_link_node(&new->key_node, parent, link); - rb_insert_color(&new->key_node, &pool->key.tree); - - return 0; -} - -int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key) -{ - struct rxe_pool *pool = elem->pool; - int err; - - memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size); - err = rxe_insert_key(pool, elem); - - return err; -} - -int __rxe_add_key(struct rxe_pool_entry *elem, void *key) -{ - struct rxe_pool *pool = elem->pool; - unsigned long flags; - int err; - - write_lock_irqsave(&pool->pool_lock, flags); - err = __rxe_add_key_locked(elem, key); - write_unlock_irqrestore(&pool->pool_lock, flags); - - return err; -} - -void __rxe_drop_key_locked(struct rxe_pool_entry *elem) -{ - struct rxe_pool *pool = elem->pool; - - rb_erase(&elem->key_node, &pool->key.tree); -} - -void __rxe_drop_key(struct rxe_pool_entry *elem) -{ - struct rxe_pool *pool = elem->pool; - unsigned long flags; - - write_lock_irqsave(&pool->pool_lock, flags); - __rxe_drop_key_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); + WARN_ON(!xa_empty(&pool->xa)); } -int __rxe_add_index_locked(struct rxe_pool_entry *elem) -{ - struct rxe_pool *pool = elem->pool; - int err; - - elem->index = alloc_index(pool); - err = rxe_insert_index(pool, elem); - - return err; -} - -int __rxe_add_index(struct rxe_pool_entry *elem) +void *rxe_alloc(struct rxe_pool *pool) { - struct rxe_pool *pool = elem->pool; - unsigned long flags; + struct rxe_pool_elem *elem; + void *obj; int err; - write_lock_irqsave(&pool->pool_lock, flags); - err = __rxe_add_index_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); - - return err; -} - -void __rxe_drop_index_locked(struct rxe_pool_entry *elem) -{ - struct rxe_pool *pool = elem->pool; - - clear_bit(elem->index - pool->index.min_index, pool->index.table); - rb_erase(&elem->index_node, &pool->index.tree); -} - -void __rxe_drop_index(struct rxe_pool_entry *elem) -{ - struct rxe_pool *pool = elem->pool; - unsigned long flags; - - write_lock_irqsave(&pool->pool_lock, flags); - __rxe_drop_index_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); -} - -void *rxe_alloc_locked(struct rxe_pool *pool) -{ - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rxe_pool_entry *elem; - u8 
*obj; + if (WARN_ON(!(pool->type == RXE_TYPE_MR))) + return NULL; if (atomic_inc_return(&pool->num_elem) > pool->max_elem) - goto out_cnt; + goto err_cnt; - obj = kzalloc(info->size, GFP_ATOMIC); + obj = kzalloc(pool->elem_size, GFP_KERNEL); if (!obj) - goto out_cnt; + goto err_cnt; - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); + elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset); elem->pool = pool; + elem->obj = obj; kref_init(&elem->ref_cnt); + init_completion(&elem->complete); + + /* allocate index in array but leave pointer as NULL so it + * can't be looked up until rxe_finalize() is called + */ + err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit, + &pool->next, GFP_KERNEL); + if (err < 0) + goto err_free; return obj; -out_cnt: +err_free: + kfree(obj); +err_cnt: atomic_dec(&pool->num_elem); return NULL; } -void *rxe_alloc(struct rxe_pool *pool) +int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem, + bool sleepable) { - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rxe_pool_entry *elem; - u8 *obj; - - if (atomic_inc_return(&pool->num_elem) > pool->max_elem) - goto out_cnt; + int err; + gfp_t gfp_flags; - obj = kzalloc(info->size, GFP_KERNEL); - if (!obj) - goto out_cnt; + if (WARN_ON(pool->type == RXE_TYPE_MR)) + return -EINVAL; - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); + if (atomic_inc_return(&pool->num_elem) > pool->max_elem) + goto err_cnt; elem->pool = pool; + elem->obj = (u8 *)elem - pool->elem_offset; kref_init(&elem->ref_cnt); + init_completion(&elem->complete); - return obj; - -out_cnt: - atomic_dec(&pool->num_elem); - return NULL; -} - -int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem) -{ - if (atomic_inc_return(&pool->num_elem) > pool->max_elem) - goto out_cnt; + /* AH objects are unique in that the create_ah verb + * can be called in atomic context. If the create_ah + * call is not sleepable use GFP_ATOMIC. + */ + gfp_flags = sleepable ? 
GFP_KERNEL : GFP_ATOMIC; - elem->pool = pool; - kref_init(&elem->ref_cnt); + if (sleepable) + might_sleep(); + err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit, + &pool->next, gfp_flags); + if (err < 0) + goto err_cnt; return 0; -out_cnt: +err_cnt: atomic_dec(&pool->num_elem); return -EINVAL; } -void rxe_elem_release(struct kref *kref) +void *rxe_pool_get_index(struct rxe_pool *pool, u32 index) { - struct rxe_pool_entry *elem = - container_of(kref, struct rxe_pool_entry, ref_cnt); - struct rxe_pool *pool = elem->pool; - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - u8 *obj; + struct rxe_pool_elem *elem; + struct xarray *xa = &pool->xa; + void *obj; + + rcu_read_lock(); + elem = xa_load(xa, index); + if (elem && kref_get_unless_zero(&elem->ref_cnt)) + obj = elem->obj; + else + obj = NULL; + rcu_read_unlock(); - if (pool->cleanup) - pool->cleanup(elem); + return obj; +} - if (!(pool->flags & RXE_POOL_NO_ALLOC)) { - obj = (u8 *)elem - info->elem_offset; - kfree(obj); - } +static void rxe_elem_release(struct kref *kref) +{ + struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt); - atomic_dec(&pool->num_elem); + complete(&elem->complete); } -void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) +int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable) { - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rb_node *node; - struct rxe_pool_entry *elem; - u8 *obj; - - node = pool->index.tree.rb_node; - - while (node) { - elem = rb_entry(node, struct rxe_pool_entry, index_node); - - if (elem->index > index) - node = node->rb_left; - else if (elem->index < index) - node = node->rb_right; - else - break; - } - - if (node) { - kref_get(&elem->ref_cnt); - obj = (u8 *)elem - info->elem_offset; + struct rxe_pool *pool = elem->pool; + struct xarray *xa = &pool->xa; + static int timeout = RXE_POOL_TIMEOUT; + int ret, err = 0; + void *xa_ret; + + if (sleepable) + might_sleep(); + + /* erase xarray entry to prevent looking up + * the pool elem from its index + */ + xa_ret = xa_erase(xa, elem->index); + WARN_ON(xa_err(xa_ret)); + + /* if this is the last call to rxe_put complete the + * object. It is safe to touch obj->elem after this since + * it is freed below + */ + __rxe_put(elem); + + /* wait until all references to the object have been + * dropped before final object specific cleanup and + * return to rdma-core + */ + if (sleepable) { + if (!completion_done(&elem->complete) && timeout) { + ret = wait_for_completion_timeout(&elem->complete, + timeout); + + /* Shouldn't happen. There are still references to + * the object but, rather than deadlock, free the + * object or pass back to rdma-core. + */ + if (WARN_ON(!ret)) + err = -EINVAL; + } } else { - obj = NULL; + unsigned long until = jiffies + timeout; + + /* AH objects are unique in that the destroy_ah verb + * can be called in atomic context. 
This delay + * replaces the wait_for_completion call above + * when the destroy_ah call is not sleepable + */ + while (!completion_done(&elem->complete) && + time_before(jiffies, until)) + mdelay(1); + + if (WARN_ON(!completion_done(&elem->complete))) + err = -EINVAL; } - return obj; -} + if (pool->cleanup) + pool->cleanup(elem); -void *rxe_pool_get_index(struct rxe_pool *pool, u32 index) -{ - u8 *obj; - unsigned long flags; + if (pool->type == RXE_TYPE_MR) + kfree_rcu(elem->obj); - read_lock_irqsave(&pool->pool_lock, flags); - obj = rxe_pool_get_index_locked(pool, index); - read_unlock_irqrestore(&pool->pool_lock, flags); + atomic_dec(&pool->num_elem); - return obj; + return err; } -void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) +int __rxe_get(struct rxe_pool_elem *elem) { - const struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rb_node *node; - struct rxe_pool_entry *elem; - u8 *obj; - int cmp; - - node = pool->key.tree.rb_node; - - while (node) { - elem = rb_entry(node, struct rxe_pool_entry, key_node); - - cmp = memcmp((u8 *)elem + pool->key.key_offset, - key, pool->key.key_size); - - if (cmp > 0) - node = node->rb_left; - else if (cmp < 0) - node = node->rb_right; - else - break; - } - - if (node) { - kref_get(&elem->ref_cnt); - obj = (u8 *)elem - info->elem_offset; - } else { - obj = NULL; - } - - return obj; + return kref_get_unless_zero(&elem->ref_cnt); } -void *rxe_pool_get_key(struct rxe_pool *pool, void *key) +int __rxe_put(struct rxe_pool_elem *elem) { - u8 *obj; - unsigned long flags; + return kref_put(&elem->ref_cnt, rxe_elem_release); +} - read_lock_irqsave(&pool->pool_lock, flags); - obj = rxe_pool_get_key_locked(pool, key); - read_unlock_irqrestore(&pool->pool_lock, flags); +void __rxe_finalize(struct rxe_pool_elem *elem) +{ + void *xa_ret; - return obj; + xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL); + WARN_ON(xa_err(xa_ret)); } diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h index 8ecd9f870aea..9d83cb32092f 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.h +++ b/drivers/infiniband/sw/rxe/rxe_pool.h @@ -7,15 +7,6 @@ #ifndef RXE_POOL_H #define RXE_POOL_H -#define RXE_POOL_ALIGN (16) -#define RXE_POOL_CACHE_FLAGS (0) - -enum rxe_pool_flags { - RXE_POOL_INDEX = BIT(1), - RXE_POOL_KEY = BIT(2), - RXE_POOL_NO_ALLOC = BIT(4), -}; - enum rxe_elem_type { RXE_TYPE_UC, RXE_TYPE_PD, @@ -25,137 +16,70 @@ enum rxe_elem_type { RXE_TYPE_CQ, RXE_TYPE_MR, RXE_TYPE_MW, - RXE_TYPE_MC_GRP, - RXE_TYPE_MC_ELEM, RXE_NUM_TYPES, /* keep me last */ }; -struct rxe_pool_entry; - -struct rxe_pool_entry { +struct rxe_pool_elem { struct rxe_pool *pool; + void *obj; struct kref ref_cnt; struct list_head list; - - /* only used if keyed */ - struct rb_node key_node; - - /* only used if indexed */ - struct rb_node index_node; + struct completion complete; u32 index; }; struct rxe_pool { struct rxe_dev *rxe; - rwlock_t pool_lock; /* protects pool add/del/search */ - size_t elem_size; - void (*cleanup)(struct rxe_pool_entry *obj); - enum rxe_pool_flags flags; + const char *name; + void (*cleanup)(struct rxe_pool_elem *elem); enum rxe_elem_type type; unsigned int max_elem; atomic_t num_elem; + size_t elem_size; + size_t elem_offset; - /* only used if indexed */ - struct { - struct rb_root tree; - unsigned long *table; - u32 last; - u32 max_index; - u32 min_index; - } index; - - /* only used if keyed */ - struct { - struct rb_root tree; - size_t key_offset; - size_t key_size; - } key; + struct xarray xa; + struct 
xa_limit limit; + u32 next; }; /* initialize a pool of objects with given limit on * number of elements. gets parameters from rxe_type_info * pool elements will be allocated out of a slab cache */ -int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool, - enum rxe_elem_type type, u32 max_elem); +void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool, + enum rxe_elem_type type); /* free resources from object pool */ void rxe_pool_cleanup(struct rxe_pool *pool); -/* allocate an object from pool holding and not holding the pool lock */ -void *rxe_alloc_locked(struct rxe_pool *pool); - +/* allocate an object from pool */ void *rxe_alloc(struct rxe_pool *pool); /* connect already allocated object to pool */ -int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem); - -#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem) - -/* assign an index to an indexed object and insert object into - * pool's rb tree holding and not holding the pool_lock - */ -int __rxe_add_index_locked(struct rxe_pool_entry *elem); - -#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem) - -int __rxe_add_index(struct rxe_pool_entry *elem); - -#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem) - -/* drop an index and remove object from rb tree - * holding and not holding the pool_lock - */ -void __rxe_drop_index_locked(struct rxe_pool_entry *elem); - -#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem) - -void __rxe_drop_index(struct rxe_pool_entry *elem); - -#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem) - -/* assign a key to a keyed object and insert object into - * pool's rb tree holding and not holding pool_lock - */ -int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key); - -#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key) - -int __rxe_add_key(struct rxe_pool_entry *elem, void *key); - -#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key) - -/* remove elem from rb tree holding and not holding the pool_lock */ -void __rxe_drop_key_locked(struct rxe_pool_entry *elem); - -#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem) - -void __rxe_drop_key(struct rxe_pool_entry *elem); - -#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem) - -/* lookup an indexed object from index holding and not holding the pool_lock. - * takes a reference on object - */ -void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index); +int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem, + bool sleepable); +#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem, true) +#define rxe_add_to_pool_ah(pool, obj, sleepable) __rxe_add_to_pool(pool, \ + &(obj)->elem, sleepable) +/* lookup an indexed object from index. takes a reference on object */ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index); -/* lookup keyed object from key holding and not holding the pool_lock. 
- * takes a reference on the objecti - */ -void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key); +int __rxe_get(struct rxe_pool_elem *elem); +#define rxe_get(obj) __rxe_get(&(obj)->elem) -void *rxe_pool_get_key(struct rxe_pool *pool, void *key); +int __rxe_put(struct rxe_pool_elem *elem); +#define rxe_put(obj) __rxe_put(&(obj)->elem) -/* cleanup an object when all references are dropped */ -void rxe_elem_release(struct kref *kref); +int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable); +#define rxe_cleanup(obj) __rxe_cleanup(&(obj)->elem, true) +#define rxe_cleanup_ah(obj, sleepable) __rxe_cleanup(&(obj)->elem, sleepable) -/* take a reference on an object */ -#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt) +#define rxe_read(obj) kref_read(&(obj)->elem.ref_cnt) -/* drop a reference on an object */ -#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release) +void __rxe_finalize(struct rxe_pool_elem *elem); +#define rxe_finalize(obj) __rxe_finalize(&(obj)->elem) #endif /* RXE_POOL_H */ diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 54b8711321c1..a62bab88415c 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -19,34 +19,34 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap, int has_srq) { if (cap->max_send_wr > rxe->attr.max_qp_wr) { - pr_warn("invalid send wr = %d > %d\n", - cap->max_send_wr, rxe->attr.max_qp_wr); + pr_debug("invalid send wr = %u > %d\n", + cap->max_send_wr, rxe->attr.max_qp_wr); goto err1; } if (cap->max_send_sge > rxe->attr.max_send_sge) { - pr_warn("invalid send sge = %d > %d\n", - cap->max_send_sge, rxe->attr.max_send_sge); + pr_debug("invalid send sge = %u > %d\n", + cap->max_send_sge, rxe->attr.max_send_sge); goto err1; } if (!has_srq) { if (cap->max_recv_wr > rxe->attr.max_qp_wr) { - pr_warn("invalid recv wr = %d > %d\n", - cap->max_recv_wr, rxe->attr.max_qp_wr); + pr_debug("invalid recv wr = %u > %d\n", + cap->max_recv_wr, rxe->attr.max_qp_wr); goto err1; } if (cap->max_recv_sge > rxe->attr.max_recv_sge) { - pr_warn("invalid recv sge = %d > %d\n", - cap->max_recv_sge, rxe->attr.max_recv_sge); + pr_debug("invalid recv sge = %u > %d\n", + cap->max_recv_sge, rxe->attr.max_recv_sge); goto err1; } } if (cap->max_inline_data > rxe->max_inline_data) { - pr_warn("invalid max inline data = %d > %d\n", - cap->max_inline_data, rxe->max_inline_data); + pr_debug("invalid max inline data = %u > %d\n", + cap->max_inline_data, rxe->max_inline_data); goto err1; } @@ -63,7 +63,6 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init) int port_num = init->port_num; switch (init->qp_type) { - case IB_QPT_SMI: case IB_QPT_GSI: case IB_QPT_RC: case IB_QPT_UC: @@ -74,28 +73,23 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init) } if (!init->recv_cq || !init->send_cq) { - pr_warn("missing cq\n"); + pr_debug("missing cq\n"); goto err1; } if (rxe_qp_chk_cap(rxe, cap, !!init->srq)) goto err1; - if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) { + if (init->qp_type == IB_QPT_GSI) { if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) { - pr_warn("invalid port = %d\n", port_num); + pr_debug("invalid port = %d\n", port_num); goto err1; } port = &rxe->port; - if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) { - pr_warn("SMI QP exists for port %d\n", port_num); - goto err1; - } - if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) { - pr_warn("GSI QP exists for port %d\n", port_num); + 
pr_debug("GSI QP exists for port %d\n", port_num); goto err1; } } @@ -126,21 +120,15 @@ static void free_rd_atomic_resources(struct rxe_qp *qp) for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { struct resp_res *res = &qp->resp.resources[i]; - free_rd_atomic_resource(qp, res); + free_rd_atomic_resource(res); } kfree(qp->resp.resources); qp->resp.resources = NULL; } } -void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res) +void free_rd_atomic_resource(struct resp_res *res) { - if (res->type == RXE_ATOMIC_MASK) { - kfree_skb(res->atomic.skb); - } else if (res->type == RXE_READ_MASK) { - if (res->read.mr) - rxe_drop_ref(res->read.mr); - } res->type = 0; } @@ -152,7 +140,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp) if (qp->resp.resources) { for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { res = &qp->resp.resources[i]; - free_rd_atomic_resource(qp, res); + free_rd_atomic_resource(res); } } } @@ -167,16 +155,10 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, qp->attr.path_mtu = 1; qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu); - qpn = qp->pelem.index; + qpn = qp->elem.index; port = &rxe->port; switch (init->qp_type) { - case IB_QPT_SMI: - qp->ibqp.qp_num = 0; - port->qp_smi_index = qpn; - qp->attr.port_num = init->port_num; - break; - case IB_QPT_GSI: qp->ibqp.qp_num = 1; port->qp_gsi_index = qpn; @@ -188,11 +170,16 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, break; } - INIT_LIST_HEAD(&qp->grp_list); - - spin_lock_init(&qp->grp_lock); spin_lock_init(&qp->state_lock); + spin_lock_init(&qp->req.task.state_lock); + spin_lock_init(&qp->resp.task.state_lock); + spin_lock_init(&qp->comp.task.state_lock); + + spin_lock_init(&qp->sq.sq_lock); + spin_lock_init(&qp->rq.producer_lock); + spin_lock_init(&qp->rq.consumer_lock); + atomic_set(&qp->ssn, 0); atomic_set(&qp->skb_out, 0); } @@ -217,8 +204,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, * the port number must be in the Dynamic Ports range * (0xc000 - 0xffff). */ - qp->src_port = RXE_ROCE_V2_SPORT + - (hash_32_generic(qp_num(qp), 14) & 0x3fff); + qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff); qp->sq.max_wr = init->cap.max_send_wr; /* These caps are limited by rxe_qp_chk_cap() done by the caller */ @@ -250,15 +236,15 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, QUEUE_TYPE_FROM_CLIENT); qp->req.state = QP_STATE_RESET; + qp->comp.state = QP_STATE_RESET; qp->req.opcode = -1; qp->comp.opcode = -1; - spin_lock_init(&qp->sq.sq_lock); skb_queue_head_init(&qp->req_pkts); - rxe_init_task(rxe, &qp->req.task, qp, + rxe_init_task(&qp->req.task, qp, rxe_requester, "req"); - rxe_init_task(rxe, &qp->comp.task, qp, + rxe_init_task(&qp->comp.task, qp, rxe_completer, "comp"); qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */ @@ -304,12 +290,9 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, } } - spin_lock_init(&qp->rq.producer_lock); - spin_lock_init(&qp->rq.consumer_lock); - skb_queue_head_init(&qp->resp_pkts); - rxe_init_task(rxe, &qp->resp.task, qp, + rxe_init_task(&qp->resp.task, qp, rxe_responder, "resp"); qp->resp.opcode = OPCODE_NONE; @@ -331,17 +314,20 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct rxe_cq *scq = to_rcq(init->send_cq); struct rxe_srq *srq = init->srq ? 
to_rsrq(init->srq) : NULL; - rxe_add_ref(pd); - rxe_add_ref(rcq); - rxe_add_ref(scq); + rxe_get(pd); + rxe_get(rcq); + rxe_get(scq); if (srq) - rxe_add_ref(srq); + rxe_get(srq); qp->pd = pd; qp->rcq = rcq; qp->scq = scq; qp->srq = srq; + atomic_inc(&rcq->num_wq); + atomic_inc(&scq->num_wq); + rxe_qp_init_misc(rxe, qp, init); err = rxe_qp_init_req(rxe, qp, init, udata, uresp); @@ -361,16 +347,19 @@ err2: rxe_queue_cleanup(qp->sq.queue); qp->sq.queue = NULL; err1: + atomic_dec(&rcq->num_wq); + atomic_dec(&scq->num_wq); + qp->pd = NULL; qp->rcq = NULL; qp->scq = NULL; qp->srq = NULL; if (srq) - rxe_drop_ref(srq); - rxe_drop_ref(scq); - rxe_drop_ref(rcq); - rxe_drop_ref(pd); + rxe_put(srq); + rxe_put(scq); + rxe_put(rcq); + rxe_put(pd); return err; } @@ -413,7 +402,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) { - pr_warn("invalid mask or state for qp\n"); + pr_debug("invalid mask or state for qp\n"); goto err1; } @@ -427,7 +416,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, if (mask & IB_QP_PORT) { if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) { - pr_warn("invalid port %d\n", attr->port_num); + pr_debug("invalid port %d\n", attr->port_num); goto err1; } } @@ -442,12 +431,12 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr)) goto err1; if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) { - pr_warn("invalid alt port %d\n", attr->alt_port_num); + pr_debug("invalid alt port %d\n", attr->alt_port_num); goto err1; } if (attr->alt_timeout > 31) { - pr_warn("invalid QP alt timeout %d > 31\n", - attr->alt_timeout); + pr_debug("invalid QP alt timeout %d > 31\n", + attr->alt_timeout); goto err1; } } @@ -468,17 +457,16 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, if (mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) { - pr_warn("invalid max_rd_atomic %d > %d\n", - attr->max_rd_atomic, - rxe->attr.max_qp_rd_atom); + pr_debug("invalid max_rd_atomic %d > %d\n", + attr->max_rd_atomic, + rxe->attr.max_qp_rd_atom); goto err1; } } if (mask & IB_QP_TIMEOUT) { if (attr->timeout > 31) { - pr_warn("invalid QP timeout %d > 31\n", - attr->timeout); + pr_debug("invalid QP timeout %d > 31\n", attr->timeout); goto err1; } } @@ -504,6 +492,7 @@ static void rxe_qp_reset(struct rxe_qp *qp) /* move qp to the reset state */ qp->req.state = QP_STATE_RESET; + qp->comp.state = QP_STATE_RESET; qp->resp.state = QP_STATE_RESET; /* let state machines reset themselves drain work and packet queues @@ -521,6 +510,7 @@ static void rxe_qp_reset(struct rxe_qp *qp) atomic_set(&qp->ssn, 0); qp->req.opcode = -1; qp->req.need_retry = 0; + qp->req.wait_for_rnr_timer = 0; qp->req.noack_pkts = 0; qp->resp.msn = 0; qp->resp.opcode = -1; @@ -529,7 +519,7 @@ static void rxe_qp_reset(struct rxe_qp *qp) qp->resp.sent_psn_nak = 0; if (qp->resp.mr) { - rxe_drop_ref(qp->resp.mr); + rxe_put(qp->resp.mr); qp->resp.mr = NULL; } @@ -566,6 +556,7 @@ void rxe_qp_error(struct rxe_qp *qp) { qp->req.state = QP_STATE_ERROR; qp->resp.state = QP_STATE_ERROR; + qp->comp.state = QP_STATE_ERROR; qp->attr.qp_state = IB_QPS_ERR; /* drain work and packet queues */ @@ -703,6 +694,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, pr_debug("qp#%d state -> INIT\n", qp_num(qp)); qp->req.state = QP_STATE_INIT; qp->resp.state = QP_STATE_INIT; + qp->comp.state = QP_STATE_INIT; break; case 
IB_QPS_RTR: @@ -713,6 +705,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, case IB_QPS_RTS: pr_debug("qp#%d state -> RTS\n", qp_num(qp)); qp->req.state = QP_STATE_READY; + qp->comp.state = QP_STATE_READY; break; case IB_QPS_SQD: @@ -771,9 +764,25 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) return 0; } -/* called by the destroy qp verb */ -void rxe_qp_destroy(struct rxe_qp *qp) +int rxe_qp_chk_destroy(struct rxe_qp *qp) { + /* See IBA o10-2.2.3 + * An attempt to destroy a QP while attached to a mcast group + * will fail immediately. + */ + if (atomic_read(&qp->mcg_num)) { + pr_debug("Attempt to destroy QP while attached to multicast group\n"); + return -EBUSY; + } + + return 0; +} + +/* called when the last reference to the qp is dropped */ +static void rxe_qp_do_cleanup(struct work_struct *work) +{ + struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); + qp->valid = 0; qp->qp_timeout_jiffies = 0; rxe_cleanup_task(&qp->resp.task); @@ -787,54 +796,54 @@ void rxe_qp_destroy(struct rxe_qp *qp) rxe_cleanup_task(&qp->comp.task); /* flush out any receive wr's or pending requests */ - __rxe_do_task(&qp->req.task); + if (qp->req.task.func) + __rxe_do_task(&qp->req.task); + if (qp->sq.queue) { __rxe_do_task(&qp->comp.task); __rxe_do_task(&qp->req.task); } -} - -/* called when the last reference to the qp is dropped */ -static void rxe_qp_do_cleanup(struct work_struct *work) -{ - struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); - - rxe_drop_all_mcast_groups(qp); if (qp->sq.queue) rxe_queue_cleanup(qp->sq.queue); if (qp->srq) - rxe_drop_ref(qp->srq); + rxe_put(qp->srq); if (qp->rq.queue) rxe_queue_cleanup(qp->rq.queue); - if (qp->scq) - rxe_drop_ref(qp->scq); - if (qp->rcq) - rxe_drop_ref(qp->rcq); - if (qp->pd) - rxe_drop_ref(qp->pd); + if (qp->scq) { + atomic_dec(&qp->scq->num_wq); + rxe_put(qp->scq); + } - if (qp->resp.mr) { - rxe_drop_ref(qp->resp.mr); - qp->resp.mr = NULL; + if (qp->rcq) { + atomic_dec(&qp->rcq->num_wq); + rxe_put(qp->rcq); } + if (qp->pd) + rxe_put(qp->pd); + + if (qp->resp.mr) + rxe_put(qp->resp.mr); + if (qp_type(qp) == IB_QPT_RC) sk_dst_reset(qp->sk->sk); free_rd_atomic_resources(qp); - kernel_sock_shutdown(qp->sk, SHUT_RDWR); - sock_release(qp->sk); + if (qp->sk) { + kernel_sock_shutdown(qp->sk, SHUT_RDWR); + sock_release(qp->sk); + } } /* called when the last reference to the qp is dropped */ -void rxe_qp_cleanup(struct rxe_pool_entry *arg) +void rxe_qp_cleanup(struct rxe_pool_elem *elem) { - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + struct rxe_qp *qp = container_of(elem, typeof(*qp), elem); execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); } diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 6e6e023c1b45..d6dbf5a0058d 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -112,23 +112,25 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q, unsigned int num_elem) { enum queue_type type = q->type; + u32 new_prod; u32 prod; u32 cons; if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type))) return -EINVAL; - prod = queue_get_producer(new_q, type); + new_prod = queue_get_producer(new_q, type); + prod = queue_get_producer(q, type); cons = queue_get_consumer(q, type); - while (!queue_empty(q, type)) { - memcpy(queue_addr_from_index(new_q, prod), + while ((prod - cons) & q->index_mask) { + memcpy(queue_addr_from_index(new_q, 
new_prod), queue_addr_from_index(q, cons), new_q->elem_size); - prod = queue_next_index(new_q, prod); + new_prod = queue_next_index(new_q, new_prod); cons = queue_next_index(q, cons); } - new_q->buf->producer_index = prod; + new_q->buf->producer_index = new_prod; q->buf->consumer_index = cons; /* update private index copies */ @@ -151,7 +153,8 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, struct rxe_queue *new_q; unsigned int num_elem = *num_elem_p; int err; - unsigned long flags = 0, flags1; + unsigned long producer_flags; + unsigned long consumer_flags; new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type); if (!new_q) @@ -165,17 +168,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, goto err1; } - spin_lock_irqsave(consumer_lock, flags1); + spin_lock_irqsave(consumer_lock, consumer_flags); if (producer_lock) { - spin_lock_irqsave(producer_lock, flags); + spin_lock_irqsave(producer_lock, producer_flags); err = resize_finish(q, new_q, num_elem); - spin_unlock_irqrestore(producer_lock, flags); + spin_unlock_irqrestore(producer_lock, producer_flags); } else { err = resize_finish(q, new_q, num_elem); } - spin_unlock_irqrestore(consumer_lock, flags1); + spin_unlock_irqrestore(consumer_lock, consumer_flags); rxe_queue_cleanup(new_q); /* new/old dep on err */ if (err) diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index 6227112ef7a2..ed44042782fa 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h @@ -7,9 +7,6 @@ #ifndef RXE_QUEUE_H #define RXE_QUEUE_H -/* for definition of shared struct rxe_queue_buf */ -#include <uapi/rdma/rdma_user_rxe.h> - /* Implements a simple circular buffer that is shared between user * and the driver and can be resized. 
The requested element size is * rounded up to a power of 2 and the number of elements in the buffer @@ -53,6 +50,8 @@ enum queue_type { QUEUE_TYPE_FROM_DRIVER, }; +struct rxe_queue_buf; + struct rxe_queue { struct rxe_dev *rxe; struct rxe_queue_buf *buf; diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 6a6cc1fa90e4..434a693cd4a5 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -16,48 +16,36 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, unsigned int pkt_type; if (unlikely(!qp->valid)) - goto err1; + return -EINVAL; pkt_type = pkt->opcode & 0xe0; switch (qp_type(qp)) { case IB_QPT_RC: - if (unlikely(pkt_type != IB_OPCODE_RC)) { - pr_warn_ratelimited("bad qp type\n"); - goto err1; - } + if (unlikely(pkt_type != IB_OPCODE_RC)) + return -EINVAL; break; case IB_QPT_UC: - if (unlikely(pkt_type != IB_OPCODE_UC)) { - pr_warn_ratelimited("bad qp type\n"); - goto err1; - } + if (unlikely(pkt_type != IB_OPCODE_UC)) + return -EINVAL; break; case IB_QPT_UD: - case IB_QPT_SMI: case IB_QPT_GSI: - if (unlikely(pkt_type != IB_OPCODE_UD)) { - pr_warn_ratelimited("bad qp type\n"); - goto err1; - } + if (unlikely(pkt_type != IB_OPCODE_UD)) + return -EINVAL; break; default: - pr_warn_ratelimited("unsupported qp type\n"); - goto err1; + return -EINVAL; } if (pkt->mask & RXE_REQ_MASK) { if (unlikely(qp->resp.state != QP_STATE_READY)) - goto err1; + return -EINVAL; } else if (unlikely(qp->req.state < QP_STATE_READY || - qp->req.state > QP_STATE_DRAINED)) { - goto err1; - } + qp->req.state > QP_STATE_DRAINED)) + return -EINVAL; return 0; - -err1: - return -EINVAL; } static void set_bad_pkey_cntr(struct rxe_port *port) @@ -85,26 +73,20 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, pkt->pkey_index = 0; if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) { - pr_warn_ratelimited("bad pkey = 0x%x\n", pkey); set_bad_pkey_cntr(port); - goto err1; + return -EINVAL; } if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) { u32 qkey = (qpn == 1) ? 
GSI_QKEY : qp->attr.qkey; if (unlikely(deth_qkey(pkt) != qkey)) { - pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n", - deth_qkey(pkt), qkey, qpn); set_qkey_viol_cntr(port); - goto err1; + return -EINVAL; } } return 0; - -err1: - return -EINVAL; } static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, @@ -113,13 +95,10 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb = PKT_TO_SKB(pkt); if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC) - goto done; + return 0; - if (unlikely(pkt->port_num != qp->attr.port_num)) { - pr_warn_ratelimited("port %d != qp port %d\n", - pkt->port_num, qp->attr.port_num); - goto err1; - } + if (unlikely(pkt->port_num != qp->attr.port_num)) + return -EINVAL; if (skb->protocol == htons(ETH_P_IP)) { struct in_addr *saddr = @@ -127,19 +106,9 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct in_addr *daddr = &qp->pri_av.dgid_addr._sockaddr_in.sin_addr; - if (ip_hdr(skb)->daddr != saddr->s_addr) { - pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n", - &ip_hdr(skb)->daddr, - &saddr->s_addr); - goto err1; - } - - if (ip_hdr(skb)->saddr != daddr->s_addr) { - pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n", - &ip_hdr(skb)->saddr, - &daddr->s_addr); - goto err1; - } + if ((ip_hdr(skb)->daddr != saddr->s_addr) || + (ip_hdr(skb)->saddr != daddr->s_addr)) + return -EINVAL; } else if (skb->protocol == htons(ETH_P_IPV6)) { struct in6_addr *saddr = @@ -147,24 +116,12 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct in6_addr *daddr = &qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr; - if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) { - pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n", - &ipv6_hdr(skb)->daddr, saddr); - goto err1; - } - - if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) { - pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n", - &ipv6_hdr(skb)->saddr, daddr); - goto err1; - } + if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) || + memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) + return -EINVAL; } -done: return 0; - -err1: - return -EINVAL; } static int hdr_check(struct rxe_pkt_info *pkt) @@ -176,24 +133,18 @@ static int hdr_check(struct rxe_pkt_info *pkt) int index; int err; - if (unlikely(bth_tver(pkt) != BTH_TVER)) { - pr_warn_ratelimited("bad tver\n"); + if (unlikely(bth_tver(pkt) != BTH_TVER)) goto err1; - } - if (unlikely(qpn == 0)) { - pr_warn_once("QP 0 not supported"); + if (unlikely(qpn == 0)) goto err1; - } if (qpn != IB_MULTICAST_QPN) { index = (qpn == 1) ? 
port->qp_gsi_index : qpn; qp = rxe_pool_get_index(&rxe->qp_pool, index); - if (unlikely(!qp)) { - pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn); + if (unlikely(!qp)) goto err1; - } err = check_type_state(rxe, pkt, qp); if (unlikely(err)) @@ -207,17 +158,15 @@ static int hdr_check(struct rxe_pkt_info *pkt) if (unlikely(err)) goto err2; } else { - if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) { - pr_warn_ratelimited("no grh for mcast qpn\n"); + if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) goto err1; - } } pkt->qp = qp; return 0; err2: - rxe_drop_ref(qp); + rxe_put(qp); err1: return -EINVAL; } @@ -233,8 +182,8 @@ static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb) static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) { struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); - struct rxe_mc_grp *mcg; - struct rxe_mc_elem *mce; + struct rxe_mcg *mcg; + struct rxe_mca *mca; struct rxe_qp *qp; union ib_gid dgid; int err; @@ -246,19 +195,19 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid)); /* lookup mcast group corresponding to mgid, takes a ref */ - mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid); + mcg = rxe_lookup_mcg(rxe, &dgid); if (!mcg) goto drop; /* mcast group not registered */ - spin_lock_bh(&mcg->mcg_lock); + spin_lock_bh(&rxe->mcg_lock); /* this is unreliable datagram service so we let * failures to deliver a multicast packet to a * single QP happen and just move on and try * the rest of them on the list */ - list_for_each_entry(mce, &mcg->qp_list, qp_list) { - qp = mce->qp; + list_for_each_entry(mca, &mcg->qp_list, qp_list) { + qp = mca->qp; /* validate qp for incoming packet */ err = check_type_state(rxe, pkt, qp); @@ -273,7 +222,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) * skb and pass to the QP. Pass the original skb to * the last QP in the list. */ - if (mce->qp_list.next != &mcg->qp_list) { + if (mca->qp_list.next != &mcg->qp_list) { struct sk_buff *cskb; struct rxe_pkt_info *cpkt; @@ -288,19 +237,19 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) cpkt = SKB_TO_PKT(cskb); cpkt->qp = qp; - rxe_add_ref(qp); + rxe_get(qp); rxe_rcv_pkt(cpkt, cskb); } else { pkt->qp = qp; - rxe_add_ref(qp); + rxe_get(qp); rxe_rcv_pkt(pkt, skb); skb = NULL; /* mark consumed */ } } - spin_unlock_bh(&mcg->mcg_lock); + spin_unlock_bh(&rxe->mcg_lock); - rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. 
*/ + kref_put(&mcg->ref_cnt, rxe_cleanup_mcg); if (likely(!skb)) return; @@ -365,10 +314,8 @@ void rxe_rcv(struct sk_buff *skb) if (unlikely(skb->len < RXE_BTH_BYTES)) goto drop; - if (rxe_chk_dgid(rxe, skb) < 0) { - pr_warn_ratelimited("failed checking dgid\n"); + if (rxe_chk_dgid(rxe, skb) < 0) goto drop; - } pkt->opcode = bth_opcode(pkt); pkt->psn = bth_psn(pkt); @@ -397,7 +344,7 @@ void rxe_rcv(struct sk_buff *skb) drop: if (pkt->qp) - rxe_drop_ref(pkt->qp); + rxe_put(pkt->qp); kfree_skb(skb); ib_device_put(&rxe->ib_dev); diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 0c9d2af15f3d..f63771207970 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -15,8 +15,7 @@ static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode); static inline void retry_first_write_send(struct rxe_qp *qp, - struct rxe_send_wqe *wqe, - unsigned int mask, int npsn) + struct rxe_send_wqe *wqe, int npsn) { int i; @@ -33,8 +32,6 @@ static inline void retry_first_write_send(struct rxe_qp *qp, } else { advance_dma_data(&wqe->dma, to_send); } - if (mask & WR_WRITE_MASK) - wqe->iova += qp->mtu; } } @@ -85,7 +82,7 @@ static void req_retry(struct rxe_qp *qp) if (mask & WR_WRITE_OR_SEND_MASK) { npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK; - retry_first_write_send(qp, wqe, mask, npsn); + retry_first_write_send(qp, wqe, npsn); } if (mask & WR_READ_MASK) { @@ -103,14 +100,17 @@ void rnr_nak_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer); - pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp)); + pr_debug("%s: fired for qp#%d\n", __func__, qp_num(qp)); + + /* request a send queue retry */ + qp->req.need_retry = 1; + qp->req.wait_for_rnr_timer = 0; rxe_run_task(&qp->req.task, 1); } static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) { struct rxe_send_wqe *wqe; - unsigned long flags; struct rxe_queue *q = qp->sq.queue; unsigned int index = qp->req.wqe_index; unsigned int cons; @@ -124,25 +124,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) /* check to see if we are drained; * state_lock used by requester and completer */ - spin_lock_irqsave(&qp->state_lock, flags); + spin_lock_bh(&qp->state_lock); do { if (qp->req.state != QP_STATE_DRAIN) { /* comp just finished */ - spin_unlock_irqrestore(&qp->state_lock, - flags); + spin_unlock_bh(&qp->state_lock); break; } if (wqe && ((index != cons) || (wqe->state != wqe_state_posted))) { /* comp not done yet */ - spin_unlock_irqrestore(&qp->state_lock, - flags); + spin_unlock_bh(&qp->state_lock); break; } qp->req.state = QP_STATE_DRAINED; - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -166,16 +164,36 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) (wqe->state != wqe_state_processing))) return NULL; - if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) && - (index != cons))) { - qp->req.wait_fence = 1; - return NULL; - } - wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); return wqe; } +/** + * rxe_wqe_is_fenced - check if next wqe is fenced + * @qp: the queue pair + * @wqe: the next wqe + * + * Returns: 1 if wqe needs to wait + * 0 if wqe is ready to go + */ +static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe) +{ + /* Local invalidate fence (LIF) see IBA 10.6.5.1 + * Requires ALL previous operations on the send queue + * are complete. Make mandatory for the rxe driver. 
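For context, the call site of this helper is added further down in rxe_requester() in this same patch; a minimal sketch of that usage, showing what "needs to wait" means in practice:

	/* in rxe_requester(), after req_next_wqe() returns a wqe */
	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;	/* retried once prior work completes */
		goto exit;
	}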
+ */ + if (wqe->wr.opcode == IB_WR_LOCAL_INV) + return qp->req.wqe_index != queue_get_consumer(qp->sq.queue, + QUEUE_TYPE_FROM_CLIENT); + + /* Fence see IBA 10.8.3.3 + * Requires that all previous read and atomic operations + * are complete. + */ + return (wqe->wr.send_flags & IB_SEND_FENCE) && + atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic; +} + static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits) { switch (opcode) { @@ -311,7 +329,6 @@ static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, case IB_QPT_UC: return next_opcode_uc(qp, opcode, fits); - case IB_QPT_SMI: case IB_QPT_UD: case IB_QPT_GSI: switch (opcode) { @@ -361,38 +378,25 @@ static inline int get_mtu(struct rxe_qp *qp) } static struct sk_buff *init_req_packet(struct rxe_qp *qp, + struct rxe_av *av, struct rxe_send_wqe *wqe, - int opcode, int payload, + int opcode, u32 payload, struct rxe_pkt_info *pkt) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct sk_buff *skb; struct rxe_send_wr *ibwr = &wqe->wr; - struct rxe_av *av; int pad = (-payload) & 0x3; int paylen; int solicited; - u16 pkey; u32 qp_num; int ack_req; /* length from start of bth to end of icrc */ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE; - - /* pkt->hdr, port_num and mask are initialized in ifc layer */ - pkt->rxe = rxe; - pkt->opcode = opcode; - pkt->qp = qp; - pkt->psn = qp->req.psn; - pkt->mask = rxe_opcode[opcode].mask; - pkt->paylen = paylen; - pkt->wqe = wqe; + pkt->paylen = paylen; /* init skb */ - av = rxe_get_av(pkt); - if (!av) - return NULL; - skb = rxe_init_packet(rxe, av, paylen, pkt); if (unlikely(!skb)) return NULL; @@ -404,8 +408,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) == (RXE_WRITE_MASK | RXE_IMMDT_MASK)); - pkey = IB_DEFAULT_PKEY_FULL; - qp_num = (pkt->mask & RXE_DETH_MASK) ? 
ibwr->wr.ud.remote_qpn : qp->attr.dest_qp_num; @@ -414,7 +416,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, if (ack_req) qp->req.noack_pkts = 0; - bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num, + bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num, ack_req, pkt->psn); /* init optional headers */ @@ -432,8 +434,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, if (pkt->mask & RXE_ATMETH_MASK) { atmeth_set_va(pkt, wqe->iova); - if (opcode == IB_OPCODE_RC_COMPARE_SWAP || - opcode == IB_OPCODE_RD_COMPARE_SWAP) { + if (opcode == IB_OPCODE_RC_COMPARE_SWAP) { atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap); atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add); } else { @@ -453,13 +454,13 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, return skb; } -static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, - struct rxe_pkt_info *pkt, struct sk_buff *skb, - int paylen) +static int finish_packet(struct rxe_qp *qp, struct rxe_av *av, + struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, + struct sk_buff *skb, u32 payload) { int err; - err = rxe_prepare(pkt, skb); + err = rxe_prepare(av, pkt, skb); if (err) return err; @@ -467,19 +468,19 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (wqe->wr.send_flags & IB_SEND_INLINE) { u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset]; - memcpy(payload_addr(pkt), tmp, paylen); + memcpy(payload_addr(pkt), tmp, payload); - wqe->dma.resid -= paylen; - wqe->dma.sge_offset += paylen; + wqe->dma.resid -= payload; + wqe->dma.sge_offset += payload; } else { err = copy_data(qp->pd, 0, &wqe->dma, - payload_addr(pkt), paylen, + payload_addr(pkt), payload, RXE_FROM_MR_OBJ); if (err) return err; } if (bth_pad(pkt)) { - u8 *pad = payload_addr(pkt) + paylen; + u8 *pad = payload_addr(pkt) + payload; memset(pad, 0, bth_pad(pkt)); } @@ -503,7 +504,7 @@ static void update_wqe_state(struct rxe_qp *qp, static void update_wqe_psn(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, - int payload) + u32 payload) { /* number of packets left to send including current one */ int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu; @@ -545,8 +546,7 @@ static void rollback_state(struct rxe_send_wqe *wqe, qp->req.psn = rollback_psn; } -static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, - struct rxe_pkt_info *pkt, int payload) +static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt) { qp->req.opcode = pkt->opcode; @@ -604,9 +604,11 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) wqe->status = IB_WC_SUCCESS; qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); - if ((wqe->wr.send_flags & IB_SEND_SIGNALED) || - qp->sq_sig_type == IB_SIGNAL_ALL_WR) - rxe_run_task(&qp->comp.task, 1); + /* There is no ack coming for local work requests + * which can lead to a deadlock. So go ahead and complete + * it now. 
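A "local" work request never goes on the wire, so no ACK will ever arrive for it; that is why the change above always schedules the completer for it. As a hypothetical ULP-side illustration (standard ib_verbs API, not part of this patch), such a request is posted like any other send:

static int example_post_local_inv(struct ib_qp *qp, u32 rkey)
{
	struct ib_send_wr wr = {
		.opcode = IB_WR_LOCAL_INV,
		.send_flags = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = rkey,	/* rkey to invalidate locally */
	};
	const struct ib_send_wr *bad_wr;

	/* completion must be generated by the driver itself */
	return ib_post_send(qp, &wr, &bad_wr);
}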
+ */ + rxe_run_task(&qp->comp.task, 1); return 0; } @@ -614,24 +616,39 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) int rxe_requester(void *arg) { struct rxe_qp *qp = (struct rxe_qp *)arg; + struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct rxe_pkt_info pkt; struct sk_buff *skb; struct rxe_send_wqe *wqe; enum rxe_hdr_mask mask; - int payload; + u32 payload; int mtu; int opcode; + int err; int ret; struct rxe_send_wqe rollback_wqe; u32 rollback_psn; struct rxe_queue *q = qp->sq.queue; + struct rxe_ah *ah; + struct rxe_av *av; - rxe_add_ref(qp); + if (!rxe_get(qp)) + return -EAGAIN; -next_wqe: - if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) + if (unlikely(!qp->valid)) goto exit; + if (unlikely(qp->req.state == QP_STATE_ERROR)) { + wqe = req_next_wqe(qp); + if (wqe) + /* + * Generate an error completion for error qp state + */ + goto err; + else + goto exit; + } + if (unlikely(qp->req.state == QP_STATE_RESET)) { qp->req.wqe_index = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT); @@ -639,10 +656,17 @@ next_wqe: qp->req.need_rd_atomic = 0; qp->req.wait_psn = 0; qp->req.need_retry = 0; + qp->req.wait_for_rnr_timer = 0; goto exit; } - if (unlikely(qp->req.need_retry)) { + /* we come here if the retransmit timer has fired + * or if the rnr timer has fired. If the retransmit + * timer fires while we are processing an RNR NAK wait + * until the rnr timer has fired before starting the + * retry flow + */ + if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) { req_retry(qp); qp->req.need_retry = 0; } @@ -651,12 +675,17 @@ next_wqe: if (unlikely(!wqe)) goto exit; + if (rxe_wqe_is_fenced(qp, wqe)) { + qp->req.wait_fence = 1; + goto exit; + } + if (wqe->mask & WR_LOCAL_OP_MASK) { - ret = rxe_do_local_ops(qp, wqe); - if (unlikely(ret)) + err = rxe_do_local_ops(qp, wqe); + if (unlikely(err)) goto err; else - goto next_wqe; + goto done; } if (unlikely(qp_type(qp) == IB_QPT_RC && @@ -676,7 +705,7 @@ next_wqe: opcode = next_opcode(qp, wqe, wqe->wr.opcode); if (unlikely(opcode < 0)) { wqe->status = IB_WC_LOC_QP_OP_ERR; - goto exit; + goto err; } mask = rxe_opcode[opcode].mask; @@ -704,31 +733,51 @@ next_wqe: qp->req.wqe_index); wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; - __rxe_do_task(&qp->comp.task); - rxe_drop_ref(qp); - return 0; + rxe_run_task(&qp->comp.task, 0); + goto done; } payload = mtu; } - skb = init_req_packet(qp, wqe, opcode, payload, &pkt); + pkt.rxe = rxe; + pkt.opcode = opcode; + pkt.qp = qp; + pkt.psn = qp->req.psn; + pkt.mask = rxe_opcode[opcode].mask; + pkt.wqe = wqe; + + av = rxe_get_av(&pkt, &ah); + if (unlikely(!av)) { + pr_err("qp#%d Failed no address vector\n", qp_num(qp)); + wqe->status = IB_WC_LOC_QP_OP_ERR; + goto err; + } + + skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt); if (unlikely(!skb)) { pr_err("qp#%d Failed allocating skb\n", qp_num(qp)); wqe->status = IB_WC_LOC_QP_OP_ERR; + if (ah) + rxe_put(ah); goto err; } - ret = finish_packet(qp, wqe, &pkt, skb, payload); - if (unlikely(ret)) { + err = finish_packet(qp, av, wqe, &pkt, skb, payload); + if (unlikely(err)) { pr_debug("qp#%d Error during finish packet\n", qp_num(qp)); - if (ret == -EFAULT) + if (err == -EFAULT) wqe->status = IB_WC_LOC_PROT_ERR; else wqe->status = IB_WC_LOC_QP_OP_ERR; kfree_skb(skb); + if (ah) + rxe_put(ah); goto err; } + if (ah) + rxe_put(ah); + /* * To prevent a race on wqe access between requester and completer, * wqe members state and psn need to be set before calling @@ -738,13 +787,14 @@ next_wqe: save_state(wqe, 
qp, &rollback_wqe, &rollback_psn); update_wqe_state(qp, wqe, &pkt); update_wqe_psn(qp, wqe, &pkt, payload); - ret = rxe_xmit_packet(qp, &pkt, skb); - if (ret) { + + err = rxe_xmit_packet(qp, &pkt, skb); + if (err) { qp->need_req_skb = 1; rollback_state(wqe, qp, &rollback_wqe, rollback_psn); - if (ret == -EAGAIN) { + if (err == -EAGAIN) { rxe_run_task(&qp->req.task, 1); goto exit; } @@ -753,15 +803,25 @@ next_wqe: goto err; } - update_state(qp, wqe, &pkt, payload); - - goto next_wqe; + update_state(qp, &pkt); + /* A non-zero return value will cause rxe_do_task to + * exit its loop and end the tasklet. A zero return + * will continue looping and return to rxe_requester + */ +done: + ret = 0; + goto out; err: + /* update wqe_index for each wqe completion */ + qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); wqe->state = wqe_state_error; - __rxe_do_task(&qp->comp.task); - + qp->req.state = QP_STATE_ERROR; + rxe_run_task(&qp->comp.task, 0); exit: - rxe_drop_ref(qp); - return -EAGAIN; + ret = -EAGAIN; +out: + rxe_put(qp); + + return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index e8f435fa6e4d..693081e813ec 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -21,6 +21,7 @@ enum resp_states { RESPST_CHK_RKEY, RESPST_EXECUTE, RESPST_READ_REPLY, + RESPST_ATOMIC_REPLY, RESPST_COMPLETE, RESPST_ACKNOWLEDGE, RESPST_CLEANUP, @@ -55,6 +56,7 @@ static char *resp_state_name[] = { [RESPST_CHK_RKEY] = "CHK_RKEY", [RESPST_EXECUTE] = "EXECUTE", [RESPST_READ_REPLY] = "READ_REPLY", + [RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY", [RESPST_COMPLETE] = "COMPLETE", [RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE", [RESPST_CLEANUP] = "CLEANUP", @@ -99,7 +101,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp, if (qp->resp.state == QP_STATE_ERROR) { while ((skb = skb_dequeue(&qp->req_pkts))) { - rxe_drop_ref(qp); + rxe_put(qp); kfree_skb(skb); ib_device_put(qp->ibqp.device); } @@ -277,7 +279,6 @@ static enum resp_states check_op_valid(struct rxe_qp *qp, break; case IB_QPT_UD: - case IB_QPT_SMI: case IB_QPT_GSI: break; @@ -297,21 +298,22 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp) struct ib_event ev; unsigned int count; size_t size; + unsigned long flags; if (srq->error) return RESPST_ERR_RNR; - spin_lock_bh(&srq->rq.consumer_lock); + spin_lock_irqsave(&srq->rq.consumer_lock, flags); wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT); if (!wqe) { - spin_unlock_bh(&srq->rq.consumer_lock); + spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); return RESPST_ERR_RNR; } /* don't trust user space data */ if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) { - spin_unlock_bh(&srq->rq.consumer_lock); + spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); pr_warn("%s: invalid num_sge in SRQ entry\n", __func__); return RESPST_ERR_MALFORMED_WQE; } @@ -327,11 +329,11 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp) goto event; } - spin_unlock_bh(&srq->rq.consumer_lock); + spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); return RESPST_CHK_LENGTH; event: - spin_unlock_bh(&srq->rq.consumer_lock); + spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); ev.device = qp->ibqp.device; ev.element.srq = qp->ibqp.srq; ev.event = IB_EVENT_SRQ_LIMIT_REACHED; @@ -448,7 +450,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp, if (rkey_is_mw(rkey)) { mw = rxe_lookup_mw(qp, access, rkey); if (!mw) { - pr_err("%s: no MW matches rkey %#x\n", __func__, rkey); + pr_debug("%s: no MW matches rkey %#x\n", + 
__func__, rkey); state = RESPST_ERR_RKEY_VIOLATION; goto err; } @@ -463,12 +466,13 @@ static enum resp_states check_rkey(struct rxe_qp *qp, if (mw->access & IB_ZERO_BASED) qp->resp.offset = mw->addr; - rxe_drop_ref(mw); - rxe_add_ref(mr); + rxe_put(mw); + rxe_get(mr); } else { mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); if (!mr) { - pr_err("%s: no MR matches rkey %#x\n", __func__, rkey); + pr_debug("%s: no MR matches rkey %#x\n", + __func__, rkey); state = RESPST_ERR_RKEY_VIOLATION; goto err; } @@ -507,9 +511,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp, err: if (mr) - rxe_drop_ref(mr); + rxe_put(mr); if (mw) - rxe_drop_ref(mw); + rxe_put(mw); return state; } @@ -549,50 +553,106 @@ out: return rc; } +static struct resp_res *rxe_prepare_res(struct rxe_qp *qp, + struct rxe_pkt_info *pkt, + int type) +{ + struct resp_res *res; + u32 pkts; + + res = &qp->resp.resources[qp->resp.res_head]; + rxe_advance_resp_resource(qp); + free_rd_atomic_resource(res); + + res->type = type; + res->replay = 0; + + switch (type) { + case RXE_READ_MASK: + res->read.va = qp->resp.va + qp->resp.offset; + res->read.va_org = qp->resp.va + qp->resp.offset; + res->read.resid = qp->resp.resid; + res->read.length = qp->resp.resid; + res->read.rkey = qp->resp.rkey; + + pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1); + res->first_psn = pkt->psn; + res->cur_psn = pkt->psn; + res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK; + + res->state = rdatm_res_state_new; + break; + case RXE_ATOMIC_MASK: + res->first_psn = pkt->psn; + res->last_psn = pkt->psn; + res->cur_psn = pkt->psn; + break; + } + + return res; +} + /* Guarantee atomicity of atomic operations at the machine level. */ static DEFINE_SPINLOCK(atomic_ops_lock); -static enum resp_states process_atomic(struct rxe_qp *qp, - struct rxe_pkt_info *pkt) +static enum resp_states atomic_reply(struct rxe_qp *qp, + struct rxe_pkt_info *pkt) { u64 *vaddr; enum resp_states ret; struct rxe_mr *mr = qp->resp.mr; + struct resp_res *res = qp->resp.res; + u64 value; - if (mr->state != RXE_MR_STATE_VALID) { - ret = RESPST_ERR_RKEY_VIOLATION; - goto out; + if (!res) { + res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK); + qp->resp.res = res; } - vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64)); + if (!res->replay) { + if (mr->state != RXE_MR_STATE_VALID) { + ret = RESPST_ERR_RKEY_VIOLATION; + goto out; + } - /* check vaddr is 8 bytes aligned. */ - if (!vaddr || (uintptr_t)vaddr & 7) { - ret = RESPST_ERR_MISALIGNED_ATOMIC; - goto out; - } + vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, + sizeof(u64)); - spin_lock_bh(&atomic_ops_lock); + /* check vaddr is 8 bytes aligned. 
*/ + if (!vaddr || (uintptr_t)vaddr & 7) { + ret = RESPST_ERR_MISALIGNED_ATOMIC; + goto out; + } - qp->resp.atomic_orig = *vaddr; + spin_lock_bh(&atomic_ops_lock); + res->atomic.orig_val = value = *vaddr; - if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP || - pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) { - if (*vaddr == atmeth_comp(pkt)) - *vaddr = atmeth_swap_add(pkt); - } else { - *vaddr += atmeth_swap_add(pkt); - } + if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) { + if (value == atmeth_comp(pkt)) + value = atmeth_swap_add(pkt); + } else { + value += atmeth_swap_add(pkt); + } - spin_unlock_bh(&atomic_ops_lock); + *vaddr = value; + spin_unlock_bh(&atomic_ops_lock); - ret = RESPST_NONE; + qp->resp.msn++; + + /* next expected psn, read handles this separately */ + qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; + qp->resp.ack_psn = qp->resp.psn; + + qp->resp.opcode = pkt->opcode; + qp->resp.status = IB_WC_SUCCESS; + } + + ret = RESPST_ACKNOWLEDGE; out: return ret; } static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, - struct rxe_pkt_info *pkt, struct rxe_pkt_info *ack, int opcode, int payload, @@ -630,9 +690,9 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, } if (ack->mask & RXE_ATMACK_MASK) - atmack_set_orig(ack, qp->resp.atomic_orig); + atmack_set_orig(ack, qp->resp.res->atomic.orig_val); - err = rxe_prepare(ack, skb); + err = rxe_prepare(&qp->pri_av, ack, skb); if (err) { kfree_skb(skb); return NULL; @@ -641,6 +701,59 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, return skb; } +/** + * rxe_recheck_mr - revalidate MR from rkey and get a reference + * @qp: the qp + * @rkey: the rkey + * + * This code allows the MR to be invalidated or deregistered or + * the MW if one was used to be invalidated or deallocated. + * It is assumed that the access permissions if originally good + * are OK and the mappings to be unchanged. + * + * TODO: If someone reregisters an MR to change its size or + * access permissions during the processing of an RDMA read + * we should kill the responder resource and complete the + * operation with an error. + * + * Return: mr on success else NULL + */ +static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey) +{ + struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + struct rxe_mr *mr; + struct rxe_mw *mw; + + if (rkey_is_mw(rkey)) { + mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8); + if (!mw) + return NULL; + + mr = mw->mr; + if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID || + !mr || mr->state != RXE_MR_STATE_VALID) { + rxe_put(mw); + return NULL; + } + + rxe_get(mr); + rxe_put(mw); + + return mr; + } + + mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8); + if (!mr) + return NULL; + + if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) { + rxe_put(mr); + return NULL; + } + + return mr; +} + /* RDMA read response. If res is not NULL, then we have a current RDMA request * being processed or replayed. */ @@ -655,53 +768,32 @@ static enum resp_states read_reply(struct rxe_qp *qp, int opcode; int err; struct resp_res *res = qp->resp.res; + struct rxe_mr *mr; if (!res) { - /* This is the first time we process that request. 
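A userspace illustration (not driver code) of the two operations atomic_reply() emulates under atomic_ops_lock: compare-and-swap stores the swap value only when the original value matches the compare value, fetch-and-add stores unconditionally, and in both cases the original value is what is returned in the ATOMIC ACK (res->atomic.orig_val):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t cmp_swap(uint64_t *va, uint64_t compare, uint64_t swap)
{
	uint64_t orig = *va;

	if (orig == compare)
		*va = swap;
	return orig;
}

static uint64_t fetch_add(uint64_t *va, uint64_t add)
{
	uint64_t orig = *va;

	*va = orig + add;
	return orig;
}

int main(void)
{
	uint64_t x = 5;

	printf("%" PRIu64 "\n", cmp_swap(&x, 5, 9));	/* prints 5, x becomes 9 */
	printf("%" PRIu64 "\n", fetch_add(&x, 3));	/* prints 9, x becomes 12 */
	return 0;
}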
Get a - * resource - */ - res = &qp->resp.resources[qp->resp.res_head]; - - free_rd_atomic_resource(qp, res); - rxe_advance_resp_resource(qp); - - res->type = RXE_READ_MASK; - res->replay = 0; - - res->read.va = qp->resp.va + - qp->resp.offset; - res->read.va_org = qp->resp.va + - qp->resp.offset; - - res->first_psn = req_pkt->psn; + res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK); + qp->resp.res = res; + } - if (reth_len(req_pkt)) { - res->last_psn = (req_pkt->psn + - (reth_len(req_pkt) + mtu - 1) / - mtu - 1) & BTH_PSN_MASK; + if (res->state == rdatm_res_state_new) { + if (!res->replay) { + mr = qp->resp.mr; + qp->resp.mr = NULL; } else { - res->last_psn = res->first_psn; + mr = rxe_recheck_mr(qp, res->read.rkey); + if (!mr) + return RESPST_ERR_RKEY_VIOLATION; } - res->cur_psn = req_pkt->psn; - - res->read.resid = qp->resp.resid; - res->read.length = qp->resp.resid; - res->read.rkey = qp->resp.rkey; - /* note res inherits the reference to mr from qp */ - res->read.mr = qp->resp.mr; - qp->resp.mr = NULL; - - qp->resp.res = res; - res->state = rdatm_res_state_new; - } - - if (res->state == rdatm_res_state_new) { if (res->read.resid <= mtu) opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY; else opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST; } else { + mr = rxe_recheck_mr(qp, res->read.rkey); + if (!mr) + return RESPST_ERR_RKEY_VIOLATION; + if (res->read.resid > mtu) opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE; else @@ -712,15 +804,17 @@ static enum resp_states read_reply(struct rxe_qp *qp, payload = min_t(int, res->read.resid, mtu); - skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload, + skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload, res->cur_psn, AETH_ACK_UNLIMITED); - if (!skb) + if (!skb) { + rxe_put(mr); return RESPST_ERR_RNR; + } - err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt), - payload, RXE_FROM_MR_OBJ); - if (err) - pr_err("Failed copying memory\n"); + rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt), + payload, RXE_FROM_MR_OBJ); + if (mr) + rxe_put(mr); if (bth_pad(&ack_pkt)) { u8 *pad = payload_addr(&ack_pkt) + payload; @@ -729,10 +823,8 @@ static enum resp_states read_reply(struct rxe_qp *qp, } err = rxe_xmit_packet(qp, &ack_pkt, skb); - if (err) { - pr_err("Failed sending RDMA reply.\n"); + if (err) return RESPST_ERR_RNR; - } res->read.va += payload; res->read.resid -= payload; @@ -771,7 +863,6 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) if (pkt->mask & RXE_SEND_MASK) { if (qp_type(qp) == IB_QPT_UD || - qp_type(qp) == IB_QPT_SMI || qp_type(qp) == IB_QPT_GSI) { if (skb->protocol == htons(ETH_P_IP)) { memset(&hdr.reserved, 0, @@ -798,9 +889,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) qp->resp.msn++; return RESPST_READ_REPLY; } else if (pkt->mask & RXE_ATOMIC_MASK) { - err = process_atomic(qp, pkt); - if (err) - return err; + return RESPST_ATOMIC_REPLY; } else { /* Unreachable */ WARN_ON_ONCE(1); @@ -814,6 +903,10 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) return RESPST_ERR_INVALIDATE_RKEY; } + if (pkt->mask & RXE_END_MASK) + /* We successfully processed this new request. 
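A small userspace sketch (not driver code, and ignoring replay and zero-length reads) of how read_reply() above segments an RDMA READ response: the first packet is ONLY or FIRST depending on whether the residual fits in one MTU, later packets are MIDDLE until the residual drops to at most one MTU, which becomes LAST; the per-packet payload is min(resid, mtu) as in the driver:

#include <stdio.h>

static const char *read_rsp_opcode(unsigned int resid, unsigned int mtu, int first)
{
	if (first)
		return resid <= mtu ? "RDMA_READ_RESPONSE_ONLY"
				    : "RDMA_READ_RESPONSE_FIRST";
	return resid > mtu ? "RDMA_READ_RESPONSE_MIDDLE"
			   : "RDMA_READ_RESPONSE_LAST";
}

int main(void)
{
	unsigned int resid = 2500, mtu = 1024;
	int first = 1;

	while (resid) {
		unsigned int payload = resid < mtu ? resid : mtu;

		printf("%s (%u bytes)\n", read_rsp_opcode(resid, mtu, first), payload);
		resid -= payload;
		first = 0;
	}
	return 0;	/* prints FIRST 1024, MIDDLE 1024, LAST 452 */
}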
*/ + qp->resp.msn++; + /* next expected psn, read handles this separately */ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; qp->resp.ack_psn = qp->resp.psn; @@ -821,11 +914,9 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) qp->resp.opcode = pkt->opcode; qp->resp.status = IB_WC_SUCCESS; - if (pkt->mask & RXE_COMP_MASK) { - /* We successfully processed this new request. */ - qp->resp.msn++; + if (pkt->mask & RXE_COMP_MASK) return RESPST_COMPLETE; - } else if (qp_type(qp) == IB_QPT_RC) + else if (qp_type(qp) == IB_QPT_RC) return RESPST_ACKNOWLEDGE; else return RESPST_CLEANUP; @@ -935,62 +1026,41 @@ finish: return RESPST_CLEANUP; } -static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, - u8 syndrome, u32 psn) + +static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn, + int opcode, const char *msg) { - int err = 0; + int err; struct rxe_pkt_info ack_pkt; struct sk_buff *skb; - skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE, - 0, psn, syndrome); - if (!skb) { - err = -ENOMEM; - goto err1; - } + skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome); + if (!skb) + return -ENOMEM; err = rxe_xmit_packet(qp, &ack_pkt, skb); if (err) - pr_err_ratelimited("Failed sending ack\n"); + pr_err_ratelimited("Failed sending %s\n", msg); -err1: return err; } -static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, - u8 syndrome) +static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) { - int rc = 0; - struct rxe_pkt_info ack_pkt; - struct sk_buff *skb; - struct resp_res *res; - - skb = prepare_ack_packet(qp, pkt, &ack_pkt, - IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn, - syndrome); - if (!skb) { - rc = -ENOMEM; - goto out; - } + return send_common_ack(qp, syndrome, psn, + IB_OPCODE_RC_ACKNOWLEDGE, "ACK"); +} - res = &qp->resp.resources[qp->resp.res_head]; - free_rd_atomic_resource(qp, res); - rxe_advance_resp_resource(qp); +static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) +{ + int ret = send_common_ack(qp, syndrome, psn, + IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, "ATOMIC ACK"); - skb_get(skb); - res->type = RXE_ATOMIC_MASK; - res->atomic.skb = skb; - res->first_psn = ack_pkt.psn; - res->last_psn = ack_pkt.psn; - res->cur_psn = ack_pkt.psn; - - rc = rxe_xmit_packet(qp, &ack_pkt, skb); - if (rc) { - pr_err_ratelimited("Failed sending ack\n"); - rxe_drop_ref(qp); - } -out: - return rc; + /* have to clear this since it is used to trigger + * long read replies + */ + qp->resp.res = NULL; + return ret; } static enum resp_states acknowledge(struct rxe_qp *qp, @@ -1000,11 +1070,11 @@ static enum resp_states acknowledge(struct rxe_qp *qp, return RESPST_CLEANUP; if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED) - send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn); + send_ack(qp, qp->resp.aeth_syndrome, pkt->psn); else if (pkt->mask & RXE_ATOMIC_MASK) - send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED); + send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); else if (bth_ack(pkt)) - send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn); + send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); return RESPST_CLEANUP; } @@ -1016,13 +1086,13 @@ static enum resp_states cleanup(struct rxe_qp *qp, if (pkt) { skb = skb_dequeue(&qp->req_pkts); - rxe_drop_ref(qp); + rxe_put(qp); kfree_skb(skb); ib_device_put(qp->ibqp.device); } if (qp->resp.mr) { - rxe_drop_ref(qp->resp.mr); + rxe_put(qp->resp.mr); qp->resp.mr = NULL; } @@ -1057,7 +1127,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, if (pkt->mask & RXE_SEND_MASK || 
pkt->mask & RXE_WRITE_MASK) { /* SEND. Ack again and cleanup. C9-105. */ - send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn); + send_ack(qp, AETH_ACK_UNLIMITED, prev_psn); return RESPST_CLEANUP; } else if (pkt->mask & RXE_READ_MASK) { struct resp_res *res; @@ -1111,14 +1181,11 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, /* Find the operation in our list of responder resources. */ res = find_resource(qp, pkt->psn); if (res) { - skb_get(res->atomic.skb); - /* Resend the result. */ - rc = rxe_xmit_packet(qp, pkt, res->atomic.skb); - if (rc) { - pr_err("Failed resending result. This flow is not handled - skb ignored\n"); - rc = RESPST_CLEANUP; - goto out; - } + res->replay = 1; + res->cur_psn = pkt->psn; + qp->resp.res = res; + rc = RESPST_ATOMIC_REPLY; + goto out; } /* Resource not found. Class D error. Drop the request. */ @@ -1166,7 +1233,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp) } if (qp->resp.mr) { - rxe_drop_ref(qp->resp.mr); + rxe_put(qp->resp.mr); qp->resp.mr = NULL; } @@ -1180,7 +1247,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) struct rxe_queue *q = qp->rq.queue; while ((skb = skb_dequeue(&qp->req_pkts))) { - rxe_drop_ref(qp); + rxe_put(qp); kfree_skb(skb); ib_device_put(qp->ibqp.device); } @@ -1198,16 +1265,15 @@ int rxe_responder(void *arg) struct rxe_dev *rxe = to_rdev(qp->ibqp.device); enum resp_states state; struct rxe_pkt_info *pkt = NULL; - int ret = 0; + int ret; - rxe_add_ref(qp); + if (!rxe_get(qp)) + return -EAGAIN; qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; - if (!qp->valid) { - ret = -EINVAL; - goto done; - } + if (!qp->valid) + goto exit; switch (qp->resp.state) { case QP_STATE_RESET: @@ -1253,6 +1319,9 @@ int rxe_responder(void *arg) case RESPST_READ_REPLY: state = read_reply(qp, pkt); break; + case RESPST_ATOMIC_REPLY: + state = atomic_reply(qp, pkt); + break; case RESPST_ACKNOWLEDGE: state = acknowledge(qp, pkt); break; @@ -1264,7 +1333,7 @@ int rxe_responder(void *arg) break; case RESPST_ERR_PSN_OUT_OF_SEQ: /* RC only - Class B. Drop packet. */ - send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); + send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); state = RESPST_CLEANUP; break; @@ -1286,7 +1355,7 @@ int rxe_responder(void *arg) if (qp_type(qp) == IB_QPT_RC) { rxe_counter_inc(rxe, RXE_CNT_SND_RNR); /* RC - class B */ - send_ack(qp, pkt, AETH_RNR_NAK | + send_ack(qp, AETH_RNR_NAK | (~AETH_TYPE_MASK & qp->attr.min_rnr_timer), pkt->psn); @@ -1375,7 +1444,7 @@ int rxe_responder(void *arg) case RESPST_ERROR: qp->resp.goto_error = 0; - pr_warn("qp#%d moved to error state\n", qp_num(qp)); + pr_debug("qp#%d moved to error state\n", qp_num(qp)); rxe_qp_error(qp); goto exit; @@ -1384,9 +1453,16 @@ int rxe_responder(void *arg) } } + /* A non-zero return value will cause rxe_do_task to + * exit its loop and end the tasklet. 
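Duplicate atomic requests are now answered by replaying the saved result instead of retransmitting a cached skb. Restating the fragment above with the effect of each line spelled out (names from this patch; control flow condensed):

	/* duplicate_request(): mark the saved resource and re-enter ATOMIC_REPLY */
	res->replay = 1;		/* atomic_reply() skips the memory update */
	res->cur_psn = pkt->psn;
	qp->resp.res = res;		/* prepare_ack_packet() reuses res->atomic.orig_val */
	rc = RESPST_ATOMIC_REPLY;	/* ACK is rebuilt, then send_atomic_ack() clears qp->resp.res */
	goto out;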
A zero return + * will continue looping and return to rxe_responder + */ +done: + ret = 0; + goto out; exit: ret = -EAGAIN; -done: - rxe_drop_ref(qp); +out: + rxe_put(qp); return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index eb1c4c3b3a78..02b39498c370 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -6,64 +6,34 @@ #include <linux/vmalloc.h> #include "rxe.h" -#include "rxe_loc.h" #include "rxe_queue.h" -int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, - struct ib_srq_attr *attr, enum ib_srq_attr_mask mask) +int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init) { - if (srq && srq->error) { - pr_warn("srq in error state\n"); + struct ib_srq_attr *attr = &init->attr; + + if (attr->max_wr > rxe->attr.max_srq_wr) { + pr_warn("max_wr(%d) > max_srq_wr(%d)\n", + attr->max_wr, rxe->attr.max_srq_wr); goto err1; } - if (mask & IB_SRQ_MAX_WR) { - if (attr->max_wr > rxe->attr.max_srq_wr) { - pr_warn("max_wr(%d) > max_srq_wr(%d)\n", - attr->max_wr, rxe->attr.max_srq_wr); - goto err1; - } - - if (attr->max_wr <= 0) { - pr_warn("max_wr(%d) <= 0\n", attr->max_wr); - goto err1; - } - - if (srq && srq->limit && (attr->max_wr < srq->limit)) { - pr_warn("max_wr (%d) < srq->limit (%d)\n", - attr->max_wr, srq->limit); - goto err1; - } - - if (attr->max_wr < RXE_MIN_SRQ_WR) - attr->max_wr = RXE_MIN_SRQ_WR; + if (attr->max_wr <= 0) { + pr_warn("max_wr(%d) <= 0\n", attr->max_wr); + goto err1; } - if (mask & IB_SRQ_LIMIT) { - if (attr->srq_limit > rxe->attr.max_srq_wr) { - pr_warn("srq_limit(%d) > max_srq_wr(%d)\n", - attr->srq_limit, rxe->attr.max_srq_wr); - goto err1; - } + if (attr->max_wr < RXE_MIN_SRQ_WR) + attr->max_wr = RXE_MIN_SRQ_WR; - if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) { - pr_warn("srq_limit (%d) > cur limit(%d)\n", - attr->srq_limit, - srq->rq.queue->buf->index_mask); - goto err1; - } + if (attr->max_sge > rxe->attr.max_srq_sge) { + pr_warn("max_sge(%d) > max_srq_sge(%d)\n", + attr->max_sge, rxe->attr.max_srq_sge); + goto err1; } - if (mask == IB_SRQ_INIT_MASK) { - if (attr->max_sge > rxe->attr.max_srq_sge) { - pr_warn("max_sge(%d) > max_srq_sge(%d)\n", - attr->max_sge, rxe->attr.max_srq_sge); - goto err1; - } - - if (attr->max_sge < RXE_MIN_SRQ_SGE) - attr->max_sge = RXE_MIN_SRQ_SGE; - } + if (attr->max_sge < RXE_MIN_SRQ_SGE) + attr->max_sge = RXE_MIN_SRQ_SGE; return 0; @@ -83,7 +53,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, srq->ibsrq.event_handler = init->event_handler; srq->ibsrq.srq_context = init->srq_context; srq->limit = init->attr.srq_limit; - srq->srq_num = srq->pelem.index; + srq->srq_num = srq->elem.index; srq->rq.max_wr = init->attr.max_wr; srq->rq.max_sge = init->attr.max_sge; @@ -93,8 +63,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, spin_lock_init(&srq->rq.consumer_lock); type = QUEUE_TYPE_FROM_CLIENT; - q = rxe_queue_init(rxe, &srq->rq.max_wr, - srq_wqe_size, type); + q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type); if (!q) { pr_warn("unable to allocate queue for srq\n"); return -ENOMEM; @@ -121,6 +90,57 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, return 0; } +int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, + struct ib_srq_attr *attr, enum ib_srq_attr_mask mask) +{ + if (srq->error) { + pr_warn("srq in error state\n"); + goto err1; + } + + if (mask & IB_SRQ_MAX_WR) { + if (attr->max_wr > rxe->attr.max_srq_wr) { + pr_warn("max_wr(%d) > 
max_srq_wr(%d)\n", + attr->max_wr, rxe->attr.max_srq_wr); + goto err1; + } + + if (attr->max_wr <= 0) { + pr_warn("max_wr(%d) <= 0\n", attr->max_wr); + goto err1; + } + + if (srq->limit && (attr->max_wr < srq->limit)) { + pr_warn("max_wr (%d) < srq->limit (%d)\n", + attr->max_wr, srq->limit); + goto err1; + } + + if (attr->max_wr < RXE_MIN_SRQ_WR) + attr->max_wr = RXE_MIN_SRQ_WR; + } + + if (mask & IB_SRQ_LIMIT) { + if (attr->srq_limit > rxe->attr.max_srq_wr) { + pr_warn("srq_limit(%d) > max_srq_wr(%d)\n", + attr->srq_limit, rxe->attr.max_srq_wr); + goto err1; + } + + if (attr->srq_limit > srq->rq.queue->buf->index_mask) { + pr_warn("srq_limit (%d) > cur limit(%d)\n", + attr->srq_limit, + srq->rq.queue->buf->index_mask); + goto err1; + } + } + + return 0; + +err1: + return -EINVAL; +} + int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata) @@ -154,3 +174,14 @@ err2: srq->rq.queue = NULL; return err; } + +void rxe_srq_cleanup(struct rxe_pool_elem *elem) +{ + struct rxe_srq *srq = container_of(elem, typeof(*srq), elem); + + if (srq->pd) + rxe_put(srq->pd); + + if (srq->rq.queue) + rxe_queue_cleanup(srq->rq.queue); +} diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c deleted file mode 100644 index 666202ddff48..000000000000 --- a/drivers/infiniband/sw/rxe/rxe_sysfs.c +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* - * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. - */ - -#include "rxe.h" -#include "rxe_net.h" - -/* Copy argument and remove trailing CR. Return the new length. */ -static int sanitize_arg(const char *val, char *intf, int intf_len) -{ - int len; - - if (!val) - return 0; - - /* Remove newline. 
*/ - for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++) - intf[len] = val[len]; - intf[len] = 0; - - if (len == 0 || (val[len] != 0 && val[len] != '\n')) - return 0; - - return len; -} - -static int rxe_param_set_add(const char *val, const struct kernel_param *kp) -{ - int len; - int err = 0; - char intf[32]; - struct net_device *ndev; - struct rxe_dev *exists; - - if (!rxe_initialized) { - pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); - return -EAGAIN; - } - - len = sanitize_arg(val, intf, sizeof(intf)); - if (!len) { - pr_err("add: invalid interface name\n"); - return -EINVAL; - } - - ndev = dev_get_by_name(&init_net, intf); - if (!ndev) { - pr_err("interface %s not found\n", intf); - return -EINVAL; - } - - if (is_vlan_dev(ndev)) { - pr_err("rxe creation allowed on top of a real device only\n"); - err = -EPERM; - goto err; - } - - exists = rxe_get_dev_from_net(ndev); - if (exists) { - ib_device_put(&exists->ib_dev); - pr_err("already configured on %s\n", intf); - err = -EINVAL; - goto err; - } - - err = rxe_net_add("rxe%d", ndev); - if (err) { - pr_err("failed to add %s\n", intf); - goto err; - } - -err: - dev_put(ndev); - return err; -} - -static int rxe_param_set_remove(const char *val, const struct kernel_param *kp) -{ - int len; - char intf[32]; - struct ib_device *ib_dev; - - len = sanitize_arg(val, intf, sizeof(intf)); - if (!len) { - pr_err("add: invalid interface name\n"); - return -EINVAL; - } - - if (strncmp("all", intf, len) == 0) { - pr_info("rxe_sys: remove all"); - ib_unregister_driver(RDMA_DRIVER_RXE); - return 0; - } - - ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE); - if (!ib_dev) { - pr_err("not configured on %s\n", intf); - return -EINVAL; - } - - ib_unregister_device_and_put(ib_dev); - - return 0; -} - -static const struct kernel_param_ops rxe_add_ops = { - .set = rxe_param_set_add, -}; - -static const struct kernel_param_ops rxe_remove_ops = { - .set = rxe_param_set_remove, -}; - -module_param_cb(add, &rxe_add_ops, NULL, 0200); -MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface"); -module_param_cb(remove, &rxe_remove_ops, NULL, 0200); -MODULE_PARM_DESC(remove, "DEPRECATED. 
Remove RXE device over network interface"); diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 6951fdcb31bf..ec2b7de1c497 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -8,7 +8,7 @@ #include <linux/interrupt.h> #include <linux/hardirq.h> -#include "rxe_task.h" +#include "rxe.h" int __rxe_do_task(struct rxe_task *task) @@ -32,25 +32,25 @@ void rxe_do_task(struct tasklet_struct *t) { int cont; int ret; - unsigned long flags; struct rxe_task *task = from_tasklet(task, t, tasklet); + unsigned int iterations = RXE_MAX_ITERATIONS; - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); switch (task->state) { case TASK_STATE_START: task->state = TASK_STATE_BUSY; - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); break; case TASK_STATE_BUSY: task->state = TASK_STATE_ARMED; fallthrough; case TASK_STATE_ARMED: - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); return; default: - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); pr_warn("%s failed with bad state %d\n", __func__, task->state); return; } @@ -59,16 +59,23 @@ void rxe_do_task(struct tasklet_struct *t) cont = 0; ret = task->func(task->arg); - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); switch (task->state) { case TASK_STATE_BUSY: - if (ret) + if (ret) { task->state = TASK_STATE_START; - else + } else if (iterations--) { cont = 1; + } else { + /* reschedule the tasklet and exit + * the loop to give up the cpu + */ + tasklet_schedule(&task->tasklet); + task->state = TASK_STATE_START; + } break; - /* soneone tried to run the task since the last time we called + /* someone tried to run the task since the last time we called * func, so we will call one more time regardless of the * return value */ @@ -81,16 +88,15 @@ void rxe_do_task(struct tasklet_struct *t) pr_warn("%s failed with bad state %d\n", __func__, task->state); } - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); } while (cont); task->ret = ret; } -int rxe_init_task(void *obj, struct rxe_task *task, +int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *), char *name) { - task->obj = obj; task->arg = arg; task->func = func; snprintf(task->name, sizeof(task->name), "%s", name); @@ -106,7 +112,6 @@ int rxe_init_task(void *obj, struct rxe_task *task, void rxe_cleanup_task(struct rxe_task *task) { - unsigned long flags; bool idle; /* @@ -116,9 +121,9 @@ void rxe_cleanup_task(struct rxe_task *task) task->destroyed = true; do { - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); idle = (task->state == TASK_STATE_START); - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); } while (!idle); tasklet_kill(&task->tasklet); diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h index 11d183fd3338..7f612a1c68a7 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.h +++ b/drivers/infiniband/sw/rxe/rxe_task.h @@ -19,7 +19,6 @@ enum { * called again. 
*/ struct rxe_task { - void *obj; struct tasklet_struct tasklet; int state; spinlock_t state_lock; /* spinlock for task state */ @@ -35,7 +34,7 @@ struct rxe_task { * arg => parameter to pass to fcn * func => function to call until it returns != 0 */ -int rxe_init_task(void *obj, struct rxe_task *task, +int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *), char *name); /* cleanup task */ diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 0aa0d7e52773..88825edc7dce 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -7,8 +7,8 @@ #include <linux/dma-mapping.h> #include <net/addrconf.h> #include <rdma/uverbs_ioctl.h> + #include "rxe.h" -#include "rxe_loc.h" #include "rxe_queue.h" #include "rxe_hw_counters.h" @@ -115,7 +115,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc) { struct rxe_ucontext *uc = to_ruc(ibuc); - rxe_drop_ref(uc); + rxe_cleanup(uc); } static int rxe_port_immutable(struct ib_device *dev, u32 port_num, @@ -149,7 +149,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_pd *pd = to_rpd(ibpd); - rxe_drop_ref(pd); + rxe_cleanup(pd); return 0; } @@ -176,21 +176,20 @@ static int rxe_create_ah(struct ib_ah *ibah, if (err) return err; - err = rxe_add_to_pool(&rxe->ah_pool, ah); + err = rxe_add_to_pool_ah(&rxe->ah_pool, ah, + init_attr->flags & RDMA_CREATE_AH_SLEEPABLE); if (err) return err; /* create index > 0 */ - rxe_add_index(ah); - ah->ah_num = ah->pelem.index; + ah->ah_num = ah->elem.index; if (uresp) { /* only if new user provider */ err = copy_to_user(&uresp->ah_num, &ah->ah_num, sizeof(uresp->ah_num)); if (err) { - rxe_drop_index(ah); - rxe_drop_ref(ah); + rxe_cleanup(ah); return -EFAULT; } } else if (ah->is_user) { @@ -199,6 +198,8 @@ static int rxe_create_ah(struct ib_ah *ibah, } rxe_init_av(init_attr->ah_attr, &ah->av); + rxe_finalize(ah); + return 0; } @@ -230,8 +231,8 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) { struct rxe_ah *ah = to_rah(ibah); - rxe_drop_index(ah); - rxe_drop_ref(ah); + rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE); + return 0; } @@ -261,7 +262,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER); recv_wqe->wr_id = ibwr->wr_id; - recv_wqe->num_sge = num_sge; memcpy(recv_wqe->dma.sge, ibwr->sg_list, num_sge * sizeof(struct ib_sge)); @@ -289,36 +289,35 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, struct rxe_srq *srq = to_rsrq(ibsrq); struct rxe_create_srq_resp __user *uresp = NULL; - if (init->srq_type != IB_SRQT_BASIC) - return -EOPNOTSUPP; - if (udata) { if (udata->outlen < sizeof(*uresp)) return -EINVAL; uresp = udata->outbuf; } - err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK); + if (init->srq_type != IB_SRQT_BASIC) + return -EOPNOTSUPP; + + err = rxe_srq_chk_init(rxe, init); if (err) - goto err1; + return err; err = rxe_add_to_pool(&rxe->srq_pool, srq); if (err) - goto err1; + return err; - rxe_add_ref(pd); + rxe_get(pd); srq->pd = pd; err = rxe_srq_from_init(rxe, srq, init, udata, uresp); if (err) - goto err2; + goto err_cleanup; return 0; -err2: - rxe_drop_ref(pd); - rxe_drop_ref(srq); -err1: +err_cleanup: + rxe_cleanup(srq); + return err; } @@ -342,16 +341,12 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, err = rxe_srq_chk_attr(rxe, srq, attr, mask); if (err) - goto err1; + return err; err = 
rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); if (err) - goto err1; - + return err; return 0; - -err1: - return err; } static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) @@ -371,11 +366,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct rxe_srq *srq = to_rsrq(ibsrq); - if (srq->rq.queue) - rxe_queue_cleanup(srq->rq.queue); - - rxe_drop_ref(srq->pd); - rxe_drop_ref(srq); + rxe_cleanup(srq); return 0; } @@ -383,8 +374,8 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { int err = 0; - unsigned long flags; struct rxe_srq *srq = to_rsrq(ibsrq); + unsigned long flags; spin_lock_irqsave(&srq->rq.producer_lock, flags); @@ -438,16 +429,15 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, if (err) return err; - rxe_add_index(qp); err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata); if (err) goto qp_init; + rxe_finalize(qp); return 0; qp_init: - rxe_drop_index(qp); - rxe_drop_ref(qp); + rxe_cleanup(qp); return err; } @@ -469,6 +459,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (err) goto err1; + if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) + qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, + qp->ibqp.qp_num, + qp->attr.dest_qp_num); + return 0; err1: @@ -489,10 +484,13 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct rxe_qp *qp = to_rqp(ibqp); + int ret; + + ret = rxe_qp_chk_destroy(qp); + if (ret) + return ret; - rxe_qp_destroy(qp); - rxe_drop_index(qp); - rxe_drop_ref(qp); + rxe_cleanup(qp); return 0; } @@ -527,12 +525,10 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, const struct ib_send_wr *ibwr) { wr->wr_id = ibwr->wr_id; - wr->num_sge = ibwr->num_sge; wr->opcode = ibwr->opcode; wr->send_flags = ibwr->send_flags; if (qp_type(qp) == IB_QPT_UD || - qp_type(qp) == IB_QPT_SMI || qp_type(qp) == IB_QPT_GSI) { struct ib_ah *ibah = ud_wr(ibwr)->ah; @@ -803,9 +799,15 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct rxe_cq *cq = to_rcq(ibcq); + /* See IBA C11-17: The CI shall return an error if this Verb is + * invoked while a Work Queue is still associated with the CQ. 
+ */ + if (atomic_read(&cq->num_wq)) + return -EINVAL; + rxe_cq_disable(cq); - rxe_drop_ref(cq); + rxe_cleanup(cq); return 0; } @@ -870,9 +872,9 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt) static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct rxe_cq *cq = to_rcq(ibcq); - unsigned long irq_flags; int ret = 0; int empty; + unsigned long irq_flags; spin_lock_irqsave(&cq->cq_lock, irq_flags); if (cq->notify != IB_CQ_NEXT_COMP) @@ -898,9 +900,11 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) if (!mr) return ERR_PTR(-ENOMEM); - rxe_add_index(mr); - rxe_add_ref(pd); - rxe_mr_init_dma(pd, access, mr); + rxe_get(pd); + mr->ibmr.pd = ibpd; + + rxe_mr_init_dma(access, mr); + rxe_finalize(mr); return &mr->ibmr; } @@ -922,20 +926,20 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, goto err2; } - rxe_add_index(mr); - rxe_add_ref(pd); + rxe_get(pd); + mr->ibmr.pd = ibpd; - err = rxe_mr_init_user(pd, start, length, iova, access, mr); + err = rxe_mr_init_user(rxe, start, length, iova, access, mr); if (err) goto err3; + rxe_finalize(mr); + return &mr->ibmr; err3: - rxe_drop_ref(pd); - rxe_drop_index(mr); - rxe_drop_ref(mr); + rxe_cleanup(mr); err2: return ERR_PTR(err); } @@ -957,72 +961,57 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, goto err1; } - rxe_add_index(mr); + rxe_get(pd); + mr->ibmr.pd = ibpd; - rxe_add_ref(pd); - - err = rxe_mr_init_fast(pd, max_num_sg, mr); + err = rxe_mr_init_fast(max_num_sg, mr); if (err) goto err2; + rxe_finalize(mr); + return &mr->ibmr; err2: - rxe_drop_ref(pd); - rxe_drop_index(mr); - rxe_drop_ref(mr); + rxe_cleanup(mr); err1: return ERR_PTR(err); } -/* build next_map_set from scatterlist - * The IB_WR_REG_MR WR will swap map_sets - */ -static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, - int sg_nents, unsigned int *sg_offset) +static int rxe_set_page(struct ib_mr *ibmr, u64 addr) { struct rxe_mr *mr = to_rmr(ibmr); - struct rxe_map_set *set = mr->next_map_set; - int n; + struct rxe_map *map; + struct rxe_phys_buf *buf; - set->nbuf = 0; + if (unlikely(mr->nbuf == mr->num_buf)) + return -ENOMEM; - n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page); + map = mr->map[mr->nbuf / RXE_BUF_PER_MAP]; + buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP]; - set->va = ibmr->iova; - set->iova = ibmr->iova; - set->length = ibmr->length; - set->page_shift = ilog2(ibmr->page_size); - set->page_mask = ibmr->page_size - 1; - set->offset = set->iova & set->page_mask; + buf->addr = addr; + buf->size = ibmr->page_size; + mr->nbuf++; - return n; + return 0; } -static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid) +static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset) { - int err; - struct rxe_dev *rxe = to_rdev(ibqp->device); - struct rxe_qp *qp = to_rqp(ibqp); - struct rxe_mc_grp *grp; - - /* takes a ref on grp if successful */ - err = rxe_mcast_get_grp(rxe, mgid, &grp); - if (err) - return err; + struct rxe_mr *mr = to_rmr(ibmr); + int n; - err = rxe_mcast_add_grp_elem(rxe, qp, grp); + mr->nbuf = 0; - rxe_drop_ref(grp); - return err; -} + n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page); -static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid) -{ - struct rxe_dev *rxe = to_rdev(ibqp->device); - struct rxe_qp *qp = to_rqp(ibqp); + mr->page_shift = ilog2(ibmr->page_size); + mr->page_mask = ibmr->page_size - 1; + mr->offset = ibmr->iova 
& mr->page_mask; - return rxe_mcast_drop_grp_elem(rxe, qp, mgid); + return n; } static ssize_t parent_show(struct device *device, diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 35e041450090..5f5cbfcb3569 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -9,7 +9,6 @@ #include <linux/interrupt.h> #include <linux/workqueue.h> -#include <rdma/rdma_user_rxe.h> #include "rxe_pool.h" #include "rxe_task.h" #include "rxe_hw_counters.h" @@ -35,17 +34,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b) struct rxe_ucontext { struct ib_ucontext ibuc; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; }; struct rxe_pd { struct ib_pd ibpd; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; }; struct rxe_ah { struct ib_ah ibah; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_av av; bool is_user; int ah_num; @@ -60,13 +59,14 @@ struct rxe_cqe { struct rxe_cq { struct ib_cq ibcq; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_queue *queue; spinlock_t cq_lock; u8 notify; bool is_dying; bool is_user; struct tasklet_struct comp_task; + atomic_t num_wq; }; enum wqe_state { @@ -95,7 +95,7 @@ struct rxe_rq { struct rxe_srq { struct ib_srq ibsrq; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_pd *pd; struct rxe_rq rq; u32 srq_num; @@ -123,11 +123,13 @@ struct rxe_req_info { int need_rd_atomic; int wait_psn; int need_retry; + int wait_for_rnr_timer; int noack_pkts; struct rxe_task task; }; struct rxe_comp_info { + enum rxe_qp_state state; u32 psn; int opcode; int timeout; @@ -154,10 +156,9 @@ struct resp_res { union { struct { - struct sk_buff *skb; + u64 orig_val; } atomic; struct { - struct rxe_mr *mr; u64 va_org; u32 rkey; u32 length; @@ -189,7 +190,6 @@ struct rxe_resp_info { u32 resid; u32 rkey; u32 length; - u64 atomic_orig; /* SRQ only */ struct { @@ -209,7 +209,7 @@ struct rxe_resp_info { struct rxe_qp { struct ib_qp ibqp; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct ib_qp_attr attr; unsigned int valid; unsigned int mtu; @@ -232,9 +232,7 @@ struct rxe_qp { struct rxe_av pri_av; struct rxe_av alt_av; - /* list of mcast groups qp has joined (for cleanup) */ - struct list_head grp_list; - spinlock_t grp_lock; /* guard grp_list */ + atomic_t mcg_num; struct sk_buff_head req_pkts; struct sk_buff_head resp_pkts; @@ -290,17 +288,6 @@ struct rxe_map { struct rxe_phys_buf buf[RXE_BUF_PER_MAP]; }; -struct rxe_map_set { - struct rxe_map **map; - u64 va; - u64 iova; - size_t length; - u32 offset; - u32 nbuf; - int page_shift; - int page_mask; -}; - static inline int rkey_is_mw(u32 rkey) { u32 index = rkey >> 8; @@ -309,7 +296,7 @@ static inline int rkey_is_mw(u32 rkey) } struct rxe_mr { - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct ib_mr ibmr; struct ib_umem *umem; @@ -318,20 +305,23 @@ struct rxe_mr { u32 rkey; enum rxe_mr_state state; enum ib_mr_type type; + u32 offset; int access; + int page_shift; + int page_mask; int map_shift; int map_mask; u32 num_buf; + u32 nbuf; u32 max_buf; u32 num_map; atomic_t num_mw; - struct rxe_map_set *cur_map_set; - struct rxe_map_set *next_map_set; + struct rxe_map **map; }; enum rxe_mw_state { @@ -342,7 +332,7 @@ enum rxe_mw_state { struct rxe_mw { struct ib_mw ibmw; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; spinlock_t lock; enum rxe_mw_state state; struct rxe_qp *qp; /* Type 2 only */ @@ -353,23 +343,20 @@ struct rxe_mw { u64 length; 
}; -struct rxe_mc_grp { - struct rxe_pool_entry pelem; - spinlock_t mcg_lock; /* guard group */ +struct rxe_mcg { + struct rb_node node; + struct kref ref_cnt; struct rxe_dev *rxe; struct list_head qp_list; union ib_gid mgid; - int num_qp; + atomic_t qp_num; u32 qkey; u16 pkey; }; -struct rxe_mc_elem { - struct rxe_pool_entry pelem; +struct rxe_mca { struct list_head qp_list; - struct list_head grp_list; struct rxe_qp *qp; - struct rxe_mc_grp *grp; }; struct rxe_port { @@ -379,7 +366,6 @@ struct rxe_port { spinlock_t port_lock; /* guard port */ unsigned int mtu_cap; /* special QPs */ - u32 qp_smi_index; u32 qp_gsi_index; }; @@ -392,8 +378,6 @@ struct rxe_dev { struct net_device *ndev; - int xmit_errors; - struct rxe_pool uc_pool; struct rxe_pool pd_pool; struct rxe_pool ah_pool; @@ -402,8 +386,12 @@ struct rxe_dev { struct rxe_pool cq_pool; struct rxe_pool mr_pool; struct rxe_pool mw_pool; - struct rxe_pool mc_grp_pool; - struct rxe_pool mc_elem_pool; + + /* multicast support */ + spinlock_t mcg_lock; + struct rb_root mcg_tree; + atomic_t mcg_num; + atomic_t mcg_attach; spinlock_t pending_lock; /* guard pending_mmaps */ struct list_head pending_mmaps; @@ -484,6 +472,4 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw) int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name); -void rxe_mc_cleanup(struct rxe_pool_entry *arg); - #endif /* RXE_VERBS_H */ diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig index 1b5105cbabae..81b70a3eeb87 100644 --- a/drivers/infiniband/sw/siw/Kconfig +++ b/drivers/infiniband/sw/siw/Kconfig @@ -1,7 +1,10 @@ config RDMA_SIW tristate "Software RDMA over TCP/IP (iWARP) driver" - depends on INET && INFINIBAND && LIBCRC32C + depends on INET && INFINIBAND depends on INFINIBAND_VIRT_DMA + select LIBCRC32C + select CRYPTO + select CRYPTO_CRC32C help This driver implements the iWARP RDMA transport over the Linux TCP/IP network stack. 
It enables a system with a diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 368959ae9a8c..2f3a9cda3850 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -418,6 +418,7 @@ struct siw_qp { struct ib_qp base_qp; struct siw_device *sdev; struct kref ref; + struct completion qp_free; struct list_head devq; int tx_cpu; struct siw_qp_attrs attrs; @@ -644,14 +645,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp) return &qp->orq[qp->orq_get % qp->attrs.orq_size]; } -static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp) -{ - return &qp->orq[qp->orq_put % qp->attrs.orq_size]; -} - static inline struct siw_sqe *orq_get_free(struct siw_qp *qp) { - struct siw_sqe *orq_e = orq_get_tail(qp); + struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size]; if (READ_ONCE(orq_e->flags) == 0) return orq_e; diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 7acdd3c3a599..f88d2971c2c6 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -725,11 +725,11 @@ static int siw_proc_mpareply(struct siw_cep *cep) enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR; rv = siw_recv_mpa_rr(cep); - if (rv != -EAGAIN) - siw_cancel_mpatimer(cep); if (rv) goto out_err; + siw_cancel_mpatimer(cep); + rep = &cep->mpa.hdr; if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) { @@ -895,7 +895,8 @@ static int siw_proc_mpareply(struct siw_cep *cep) } out_err: - siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL); + if (rv != -EAGAIN) + siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL); return rv; } @@ -968,14 +969,15 @@ static void siw_accept_newconn(struct siw_cep *cep) siw_cep_set_inuse(new_cep); rv = siw_proc_mpareq(new_cep); - siw_cep_set_free(new_cep); - if (rv != -EAGAIN) { siw_cep_put(cep); new_cep->listen_cep = NULL; - if (rv) + if (rv) { + siw_cep_set_free(new_cep); goto error; + } } + siw_cep_set_free(new_cep); } return; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 9093e6a80b26..dacc174604bf 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -98,15 +98,14 @@ static int siw_create_tx_threads(void) continue; siw_tx_thread[cpu] = - kthread_create(siw_run_sq, (unsigned long *)(long)cpu, - "siw_tx/%d", cpu); + kthread_run_on_cpu(siw_run_sq, + (unsigned long *)(long)cpu, + cpu, "siw_tx/%u"); if (IS_ERR(siw_tx_thread[cpu])) { siw_tx_thread[cpu] = NULL; continue; } - kthread_bind(siw_tx_thread[cpu], cpu); - wake_up_process(siw_tx_thread[cpu]); assigned++; } return assigned; @@ -120,6 +119,7 @@ static int siw_dev_qualified(struct net_device *netdev) * <linux/if_arp.h> for type identifiers. 
*/ if (netdev->type == ARPHRD_ETHER || netdev->type == ARPHRD_IEEE802 || + netdev->type == ARPHRD_NONE || (netdev->type == ARPHRD_LOOPBACK && loopback_enabled)) return 1; @@ -316,12 +316,12 @@ static struct siw_device *siw_device_create(struct net_device *netdev) sdev->netdev = netdev; - if (netdev->type != ARPHRD_LOOPBACK) { + if (netdev->type != ARPHRD_LOOPBACK && netdev->type != ARPHRD_NONE) { addrconf_addr_eui48((unsigned char *)&base_dev->node_guid, netdev->dev_addr); } else { /* - * The loopback device does not have a HW address, + * This device does not have a HW address, * but connection mangagement lib expects gid != 0 */ size_t len = min_t(size_t, strlen(base_dev->name), 6); diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index 7e01f2438afc..e6f634971228 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -1342,6 +1342,6 @@ void siw_free_qp(struct kref *ref) vfree(qp->orq); siw_put_tx_cpu(qp->tx_cpu); - + complete(&qp->qp_free); atomic_dec(&sdev->num_qp); } diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 60116f20653c..fd721cc19682 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -961,27 +961,28 @@ out: static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx) { struct sk_buff *skb = srx->skb; + int avail = min(srx->skb_new, srx->fpdu_part_rem); u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad; __wsum crc_in, crc_own = 0; siw_dbg_qp(qp, "expected %d, available %d, pad %u\n", srx->fpdu_part_rem, srx->skb_new, srx->pad); - if (srx->skb_new < srx->fpdu_part_rem) - return -EAGAIN; - - skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem); + skb_copy_bits(skb, srx->skb_offset, tbuf, avail); - if (srx->mpa_crc_hd && srx->pad) - crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad); + srx->skb_new -= avail; + srx->skb_offset += avail; + srx->skb_copied += avail; + srx->fpdu_part_rem -= avail; - srx->skb_new -= srx->fpdu_part_rem; - srx->skb_offset += srx->fpdu_part_rem; - srx->skb_copied += srx->fpdu_part_rem; + if (srx->fpdu_part_rem) + return -EAGAIN; if (!srx->mpa_crc_hd) return 0; + if (srx->pad) + crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad); /* * CRC32 is computed, transmitted and received directly in NBO, * so there's never a reason to convert byte order. @@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx) * completely received. 
*/ if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) { - bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR; + int hdrlen = iwarp_pktinfo[opcode].hdr_len; - if (srx->skb_new < bytes) - return -EAGAIN; + bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new); skb_copy_bits(skb, srx->skb_offset, (char *)c_hdr + srx->fpdu_part_rcvd, bytes); @@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx) srx->skb_new -= bytes; srx->skb_offset += bytes; srx->skb_copied += bytes; + + if (srx->fpdu_part_rcvd < hdrlen) + return -EAGAIN; } /* @@ -1153,11 +1156,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) spin_lock_irqsave(&qp->orq_lock, flags); - rreq = orq_get_current(qp); - /* free current orq entry */ + rreq = orq_get_current(qp); WRITE_ONCE(rreq->flags, 0); + qp->orq_get++; + if (qp->tx_ctx.orq_fence) { if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) { pr_warn("siw: [QP %u]: fence resume: bad status %d\n", @@ -1165,10 +1169,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) rv = -EPROTO; goto out; } - /* resume SQ processing */ + /* resume SQ processing, if possible */ if (tx_waiting->sqe.opcode == SIW_OP_READ || tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) { - rreq = orq_get_tail(qp); + + /* SQ processing was stopped because of a full ORQ */ + rreq = orq_get_free(qp); if (unlikely(!rreq)) { pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp)); rv = -EPROTO; @@ -1181,15 +1187,14 @@ static int siw_check_tx_fence(struct siw_qp *qp) resume_tx = 1; } else if (siw_orq_empty(qp)) { + /* + * SQ processing was stopped by fenced work request. + * Resume since all previous Read's are now completed. + */ qp->tx_ctx.orq_fence = 0; resume_tx = 1; - } else { - pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n", - qp_id(qp), qp->orq_get, qp->orq_put); - rv = -EPROTO; } } - qp->orq_get++; out: spin_unlock_irqrestore(&qp->orq_lock, flags); diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 1f4e60257700..7d47b521070b 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) - return virt_to_page(paddr); + return virt_to_page((void *)paddr); return NULL; } @@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) kunmap_local(kaddr); } } else { - u64 va = sge->laddr + sge_off; + /* + * Cast to an uintptr_t to preserve all 64 bits + * in sge->laddr. + */ + uintptr_t va = (uintptr_t)(sge->laddr + sge_off); - page_array[seg] = virt_to_page(va & PAGE_MASK); + /* + * virt_to_page() takes a (void *) pointer + * so cast to a (void *) meaning it will be 64 + * bits on a 64 bit platform and 32 bits on a + * 32 bit platform. 
+ */ + page_array[seg] = virt_to_page((void *)(va & PAGE_MASK)); if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, - (void *)(uintptr_t)va, + (void *)va, plen); } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 1b36350601fa..3e814cfb298c 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -8,6 +8,7 @@ #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/xarray.h> +#include <net/addrconf.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> @@ -131,8 +132,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr, /* Revisit atomic caps if RFC 7306 gets supported */ attr->atomic_cap = 0; - attr->device_cap_flags = - IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG; + attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS; + attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG; attr->max_cq = sdev->attrs.max_cq; attr->max_cqe = sdev->attrs.max_cqe; attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL; @@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr, attr->vendor_id = SIW_VENDOR_ID; attr->vendor_part_id = sdev->vendor_part_id; - memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6); + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, + sdev->netdev->dev_addr); return 0; } @@ -311,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) { siw_dbg(base_dev, "too many QP's\n"); - return -ENOMEM; + rv = -ENOMEM; + goto err_atomic; } if (attrs->qp_type != IB_QPT_RC) { siw_dbg(base_dev, "only RC QP's supported\n"); @@ -477,6 +480,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, list_add_tail(&qp->devq, &sdev->qp_list); spin_unlock_irqrestore(&sdev->lock, flags); + init_completion(&qp->qp_free); + return 0; err_out_xa: @@ -621,6 +626,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata) qp->scq = qp->rcq = NULL; siw_qp_put(qp); + wait_for_completion(&qp->qp_free); return 0; } @@ -660,7 +666,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, kbuf += core_sge->length; core_sge++; } - sqe->sge[0].length = bytes > 0 ? bytes : 0; + sqe->sge[0].length = max(bytes, 0); sqe->num_sge = bytes > 0 ? 1 : 0; return bytes; @@ -1164,7 +1170,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr, err_out: siw_dbg(base_cq->device, "CQ creation failed: %d", rv); - if (cq && cq->queue) { + if (cq->queue) { struct siw_ucontext *ctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); |