author     Chuck Lever <chuck.lever@oracle.com>   2020-04-30 13:47:07 -0400
committer  Chuck Lever <chuck.lever@oracle.com>   2020-07-13 17:28:24 -0400
commit     3ac56c2fb166fea25974d8c48bb4a72ee298361b (patch)
tree       342497ed21c3dbf0a5db7afae9679b19c177e74b
parent     svcrdma: Record Receive completion ID in svc_rdma_decode_rqst (diff)
svcrdma: Introduce Send completion IDs
Set up a completion ID in each svc_rdma_send_ctxt. The ID is used to
match an incoming Send completion to a transport and to a previous
ib_post_send().

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
-rw-r--r--   include/linux/sunrpc/svc_rdma.h         2
-rw-r--r--   include/trace/events/rpcrdma.h          2
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_sendto.c  15
3 files changed, 15 insertions(+), 4 deletions(-)
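Note: the completion ID added here pairs the Send CQ's resource ID with a value drawn from a per-transport counter, so a completion observed in the trace log can be matched back to its CQ and to the ib_post_send() that queued the work request. A minimal sketch of the rpc_rdma_cid structure this patch relies on is shown below; the field names come from the hunks that follow, while the u32 types and the comments are assumptions for illustration, not the kernel's verbatim definition.

/* Sketch only: assumed layout of the completion ID carried in each
 * svc_rdma_send_ctxt (struct rpc_rdma_cid in the SUNRPC headers).
 */
struct rpc_rdma_cid {
        u32 ci_queue_id;        /* set from rdma->sc_sq_cq->res.id */
        u32 ci_completion_id;   /* from atomic_inc_return(&rdma->sc_completion_ids) */
};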
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index c3c1e46f510f..c91e00bc937e 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -151,6 +151,8 @@ struct svc_rdma_recv_ctxt {
 
 struct svc_rdma_send_ctxt {
         struct list_head        sc_list;
+        struct rpc_rdma_cid     sc_cid;
+
         struct ib_send_wr       sc_send_wr;
         struct ib_cqe           sc_cqe;
         struct xdr_buf          sc_hdrbuf;
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index df49ae5d447b..782a4d826a4b 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1863,7 +1863,7 @@ TRACE_EVENT(svcrdma_post_send,
         )
 );
 
-DEFINE_SENDCOMP_EVENT(send);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
 
 TRACE_EVENT(svcrdma_post_recv,
         TP_PROTO(
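The one-line change above switches the Send completion tracepoint from the old send-completion event to a completion event class that also records the new completion ID. The actual class lives in include/trace/events/rpcrdma.h; the fragment below is only an illustrative sketch of such a class, assuming it takes the ib_wc and the rpc_rdma_cid and emits the CQ ID and completion ID. The name example_completion_class and the exact recorded fields are invented for illustration.

/* Illustrative sketch only, not the kernel's definition: an event
 * class that records a completion ID alongside the work completion
 * status, roughly what DEFINE_COMPLETION_EVENT(svcrdma_wc_send)
 * provides behind trace_svcrdma_wc_send(wc, &ctxt->sc_cid).
 */
DECLARE_EVENT_CLASS(example_completion_class,
        TP_PROTO(
                const struct ib_wc *wc,
                const struct rpc_rdma_cid *cid
        ),

        TP_ARGS(wc, cid),

        TP_STRUCT__entry(
                __field(u32, cq_id)
                __field(int, completion_id)
                __field(unsigned long, status)
        ),

        TP_fast_assign(
                __entry->cq_id = cid->ci_queue_id;
                __entry->completion_id = cid->ci_completion_id;
                __entry->status = wc->status;
        ),

        TP_printk("cq.id=%u cid=%d status=%lu",
                __entry->cq_id, __entry->completion_id, __entry->status
        )
);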
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 38d8f0ee35ec..c720dcf56231 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -122,6 +122,13 @@ svc_rdma_next_send_ctxt(struct list_head *list)
                                         sc_list);
 }
 
+static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
+                                   struct rpc_rdma_cid *cid)
+{
+        cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+        cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
 static struct svc_rdma_send_ctxt *
 svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
@@ -144,6 +151,8 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
         if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                 goto fail2;
 
+        svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
+
         ctxt->sc_send_wr.next = NULL;
         ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
         ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
@@ -268,14 +277,14 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 {
         struct svcxprt_rdma *rdma = cq->cq_context;
         struct ib_cqe *cqe = wc->wr_cqe;
-        struct svc_rdma_send_ctxt *ctxt;
+        struct svc_rdma_send_ctxt *ctxt =
+                container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
 
-        trace_svcrdma_wc_send(wc);
+        trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
 
         atomic_inc(&rdma->sc_sq_avail);
         wake_up(&rdma->sc_send_wait);
 
-        ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
         svc_rdma_send_ctxt_put(rdma, ctxt);
 
         if (unlikely(wc->status != IB_WC_SUCCESS)) {