Diffstat (limited to 'drivers/infiniband/hw/cxgb3')
 drivers/infiniband/hw/cxgb3/iwch_cm.c       | 10 ++++++++++
 drivers/infiniband/hw/cxgb3/iwch_ev.c       |  6 ++++++
 drivers/infiniband/hw/cxgb3/iwch_provider.c |  1 +
 drivers/infiniband/hw/cxgb3/iwch_provider.h |  1 +
 drivers/infiniband/hw/cxgb3/iwch_qp.c       | 14 ++++++++++++--
 5 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 6cd642aaa4de..de6d0774e609 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -753,6 +753,11 @@ static void connect_request_upcall(struct iwch_ep *ep)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
event.provider_data = ep;
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send max
+ * supported values
+ */
+ event.ird = event.ord = 8;
if (state_read(&ep->parent_ep->com) != DEAD) {
get_ep(&ep->com);
ep->parent_ep->com.cm_id->event_handler(
@@ -770,6 +775,11 @@ static void established_upcall(struct iwch_ep *ep)
PDBG("%s ep %p\n", __func__, ep);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send max
+ * supported values
+ */
+ event.ird = event.ord = 8;
if (ep->com.cm_id) {
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 71e0d845da3d..abcc9e76962b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
struct ib_event event;
struct iwch_qp_attributes attrs;
struct iwch_qp *qhp;
+ unsigned long flag;

spin_lock(&rnicp->lock);
qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
@@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);

if (atomic_dec_and_test(&qhp->refcnt))
wake_up(&qhp->wait);
@@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
struct iwch_cq *chp;
struct iwch_qp *qhp;
u32 cqid = RSPQ_CQID(rsp_msg);
+ unsigned long flag;

rnicp = (struct iwch_dev *) rdev_p->ulp;
spin_lock(&rnicp->lock);
@@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
*/
if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
dst_confirm(qhp->ep->dst);
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
break;

case TPT_ERR_STAG:
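
(Illustrative, not part of the patch.) The two iwch_ev.c hunks above both wrap the CQ completion upcall in the new comp_handler_lock with interrupts disabled. Using only the struct iwch_cq fields added in the iwch_provider.h hunk further down, the pattern boils down to a helper like this (notify_comp_handler is a made-up name):

static void notify_comp_handler(struct iwch_cq *chp)
{
	unsigned long flag;

	/* Serialize the CQ completion upcall under comp_handler_lock,
	 * with IRQs disabled, so it cannot race with the flush paths
	 * in iwch_qp.c. */
	spin_lock_irqsave(&chp->comp_handler_lock, flag);
	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}
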
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index c7d9411f2954..37c224fc3ad9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -190,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
chp->rhp = rhp;
chp->ibcq.cqe = 1 << chp->cq.size_log2;
spin_lock_init(&chp->lock);
+ spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 9a342c9b220d..87c14b0c5ac0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -103,6 +103,7 @@ struct iwch_cq {
struct iwch_dev *rhp;
struct t3_cq cq;
spinlock_t lock;
+ spinlock_t comp_handler_lock;
atomic_t refcnt;
wait_queue_head_t wait;
u32 __user *user_rptr_addr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ecd313f359a4..bea5839d89ee 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -822,8 +822,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, *flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+ }

/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, *flag);
@@ -833,8 +836,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, *flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&schp->comp_handler_lock, *flag);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+ }

/* deref */
if (atomic_dec_and_test(&qhp->refcnt))
@@ -853,11 +859,15 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
if (qhp->ibqp.uobject) {
cxio_set_wq_in_error(&qhp->wq);
cxio_set_cq_in_error(&rchp->cq);
+ spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
if (schp != rchp) {
cxio_set_cq_in_error(&schp->cq);
+ spin_lock_irqsave(&schp->comp_handler_lock, *flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
}
return;
}
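
(Illustrative, not part of the patch.) In __flush_qp() the comp_handler_lock is taken only after the CQ and QP locks have been released, so the documented ordering (CQ lock first, then QP lock) is unchanged and the completion upcall never runs under those locks. A condensed sketch of the receive-side half, with the cxio_* flush helpers as used in the surrounding driver code and a made-up function name:

/* Condensed sketch of the receive-side flush ordering after this
 * change; not a verbatim copy of __flush_qp(). */
static void flush_rq_and_notify(struct iwch_qp *qhp, struct iwch_cq *rchp,
				unsigned long *flag)
{
	int count, flushed;

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);

	/* The upcall runs outside the cq/qp locks, guarded only by the
	 * new comp_handler_lock. */
	if (flushed) {
		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
	}
}
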