author    Brian Welty <brian.welty@intel.com>	2018-05-02 06:44:03 -0700
committer Doug Ledford <dledford@redhat.com>	2018-05-09 15:53:30 -0400
commit    832369fa6410c93547264ad449ebbf16567bbccd (patch)
tree      6c82426a405a6cb2ceb439ad254461970732f4ee
parent    IB/{hfi1, rdmavt, qib}: Implement CQ completion vector support (diff)
IB/{hfi1, qib, rdmavt}: Move logic to allocate receive WQE into rdmavt
Moving receive-side WQE allocation logic into rdmavt will allow
further code reuse between qib and hfi1 drivers.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
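The per-caller change is mechanical: the driver-local helper is replaced by
the shared rdmavt export, and the int flag becomes a bool. A condensed sketch
of the calling convention shared by every converted call site follows
(handle_send_first() and its labels are illustrative stand-ins for each
driver's existing NAK/drop handling, not code from the patch):

static int handle_send_first(struct rvt_qp *qp)
{
	int ret = rvt_get_rwqe(qp, false); /* was hfi1_rvt_get_rwqe(qp, 0) */

	if (ret < 0)	/* -1: local error (e.g. LKEY validation failed) */
		goto nack_op_err;
	if (!ret)	/*  0: no receive WQE has been posted yet */
		goto no_rwqe;
	return 0;	/*  1: qp->r_wr_id (and qp->r_sge) are set up */

nack_op_err:
	return -EIO;	/* caller NAKs the request */
no_rwqe:
	return -ENOBUFS; /* caller RNR NAKs or drops, per the opcode */
}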
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c       |   8
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c      | 154
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c       |   4
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c       |   4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.h    |   2
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c    |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c   | 154
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c    |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c    |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h |   2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c     | 149
-rw-r--r--  include/rdma/rdmavt_qp.h              |   1
12 files changed, 170 insertions(+), 324 deletions(-)
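Most of the moved code is receive-ring bookkeeping that both drivers had
duplicated verbatim. The head/tail arithmetic used for the SRQ limit check is
easy to model in isolation; below is a standalone user-space sketch of just
that accounting (for illustration only, mirroring the logic in the
rvt_get_rwqe() body added to rdmavt/qp.c):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Model of the "remaining WQEs" computation used for the SRQ limit
 * check: head and tail index a ring of 'size' entries, and head is
 * user-writable, so it is clamped before use.
 */
static uint32_t remaining_wqes(uint32_t head, uint32_t tail, uint32_t size)
{
	uint32_t n = head;

	if (n >= size)		/* validate user-writable head */
		n = 0;
	if (n < tail)		/* producer wrapped past the end */
		n += size - tail;
	else
		n -= tail;
	return n;
}

int main(void)
{
	/* 8-entry ring, consumer at 6, producer wrapped to 2: 4 left */
	assert(remaining_wqes(2, 6, 8) == 4);
	/* producer ahead of consumer without wrapping: 3 left */
	assert(remaining_wqes(5, 2, 8) == 3);
	/* out-of-range head is treated as 0 */
	assert(remaining_wqes(9, 2, 8) == 6);
	printf("ring accounting checks passed\n");
	return 0;
}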
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index da58046a02ea..79ee2b9e28c6 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2123,7 +2123,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2149,7 +2149,7 @@ send_middle:
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
/* consume RWQE */
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2159,7 +2159,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
case OP(SEND_ONLY_WITH_INVALIDATE):
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2271,7 +2271,7 @@ send_last:
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
goto no_immediate_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret) {
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index c0071ca4147a..ef4c566e206f 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -53,156 +53,6 @@
#include "verbs_txreq.h"
#include "trace.h"
-/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct rvt_lkey_table *rkt;
- struct rvt_pd *pd;
- struct rvt_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
- pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- NULL, &wqe->sg_list[i],
- IB_ACCESS_LOCAL_WRITE);
- if (unlikely(ret <= 0))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- rvt_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct rvt_rq *rq;
- struct rvt_rwq *wq;
- struct rvt_srq *srq;
- struct rvt_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = rvt_get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
return (gid->global.interface_id == id &&
@@ -423,7 +273,7 @@ again:
/* FALLTHROUGH */
case IB_WR_SEND:
send:
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -435,7 +285,7 @@ send:
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 9d7a3110c14c..b7b671017e59 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -397,7 +397,7 @@ send_first:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
qp->r_sge = qp->s_rdma_read_sge;
} else {
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -542,7 +542,7 @@ rdma_last_imm:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
rvt_put_ss(&qp->s_rdma_read_sge);
} else {
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 69c17a5ef038..6ad203f6da88 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -163,7 +163,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
} else {
int ret;
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
@@ -974,7 +974,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
} else {
int ret;
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 081ca52e6621..a16fe5d3f7c4 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -328,8 +328,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet);
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-
void hfi1_migrate_qp(struct rvt_qp *qp);
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index c9955d48c50f..f35fdeb14347 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1828,7 +1828,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1849,7 +1849,7 @@ send_middle:
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
/* consume RWQE */
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1858,7 +1858,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1949,7 +1949,7 @@ send_last:
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
goto no_immediate_data;
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret) {
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 4662cc7bde92..f8a7de795beb 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -38,156 +38,6 @@
#include "qib_mad.h"
/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct rvt_lkey_table *rkt;
- struct rvt_pd *pd;
- struct rvt_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
- pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- NULL, &wqe->sg_list[i],
- IB_ACCESS_LOCAL_WRITE);
- if (unlikely(ret <= 0))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- rvt_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * qib_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct rvt_rq *rq;
- struct rvt_rwq *wq;
- struct rvt_srq *srq;
- struct rvt_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = rvt_get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !qib_init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
-/*
* Switch to alternate path.
* The QP s_lock should be held and interrupts disabled.
*/
@@ -419,7 +269,7 @@ again:
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -431,7 +281,7 @@ again:
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 840eec6ebc33..3e54bc11e0ae 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -335,7 +335,7 @@ send_first:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge;
else {
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -471,7 +471,7 @@ rdma_last_imm:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
rvt_put_ss(&qp->s_rdma_read_sge);
else {
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 3e4ff77260c2..f8d029a2390f 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -139,7 +139,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
else {
int ret;
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
@@ -534,7 +534,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
else {
int ret;
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index f887737ac142..f9a46768a19a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -321,8 +321,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
void mr_rcu_callback(struct rcu_head *list);
-int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-
void qib_migrate_qp(struct rvt_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index c82e6bb3d77c..6e9a351f45fb 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1987,6 +1987,155 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
return 0;
}
+/*
+ * Validate a RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_sge_state *ss;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ rkt = &rdi->lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ NULL, &wqe->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE);
+ if (unlikely(ret <= 0))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ return 1;
+
+bad_lkey:
+ while (j) {
+ struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ rvt_put_mr(sge->mr);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ return 0;
+}
+
+/**
+ * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+{
+ unsigned long flags;
+ struct rvt_rq *rq;
+ struct rvt_rwq *wq;
+ struct rvt_srq *srq;
+ struct rvt_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
+
+ if (qp->ibqp.srq) {
+ srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ if (unlikely(tail == wq->head)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
+ wqe = rvt_get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ wq->tail = tail;
+ if (!wr_id_only && !init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ ret = 1;
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ u32 n;
+
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
+ else
+ n -= tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rvt_get_rwqe);
+
/**
* qp_comm_est - handle trap with QP established
* @qp: the QP
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 89ab88c342b6..1145a4c154b2 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -663,6 +663,7 @@ static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
+int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
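With the prototype exported here, the bool argument documents the two
consumption modes visible in the hunks above: false builds qp->r_sge for an
inbound send payload, while true (used on the RDMA-write-with-immediate
paths, where the RETH already placed the data) only latches the wr_id for the
eventual completion. A minimal sketch of a hypothetical wrapper, not part of
the patch:

static int consume_rwqe(struct rvt_qp *qp, bool payload_needs_sge)
{
	int ret = rvt_get_rwqe(qp, !payload_needs_sge);

	if (ret < 0)
		return ret;	 /* local error: caller NAKs the request */
	if (!ret)
		return -ENOBUFS; /* ring empty: caller RNR NAKs or drops */
	return 0;
}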