 include/linux/sunrpc/svc_rdma.h       |  1 +
 include/trace/events/rpcrdma.h        | 18 ++++++++++++++++++
 net/sunrpc/xprtrdma/svc_rdma_sendto.c | 13 +++++++++++++-
 3 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index a3fa5b4fa2e4..78fe2ac6dc6c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -52,6 +52,7 @@
 
 /* Default and maximum inline threshold sizes */
 enum {
+	RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1,
 	RPCRDMA_DEF_INLINE_THRESH = 4096,
 	RPCRDMA_MAX_INLINE_THRESH = 65536
 };
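
The new constant sets the pull-up cutoff at half the RPC-over-RDMA version 1 default inline size. As a minimal userspace sketch of the arithmetic, assuming RPCRDMA_V1_DEF_INLINE_SIZE is 4096 (its value in include/linux/sunrpc/rpc_rdma.h):

	#include <stdio.h>

	enum {
		RPCRDMA_V1_DEF_INLINE_SIZE = 4096,	/* assumed, per rpc_rdma.h */
		RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1,
	};

	int main(void)
	{
		/* Right-shifting by one halves the inline size: 4096 >> 1 == 2048. */
		printf("pull-up threshold: %d bytes\n", RPCRDMA_PULLUP_THRESH);
		return 0;
	}

So with the default inline size, Sends totaling less than 2 KB (transport header plus RPC message) are copied rather than DMA-mapped.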
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 74b68547eefb..9238d233f8cf 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1639,6 +1639,24 @@ TRACE_EVENT(svcrdma_dma_map_rwctx,
 	)
 );
 
+TRACE_EVENT(svcrdma_send_pullup,
+	TP_PROTO(
+		unsigned int len
+	),
+
+	TP_ARGS(len),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, len)
+	),
+
+	TP_fast_assign(
+		__entry->len = len;
+	),
+
+	TP_printk("len=%u", __entry->len)
+);
+
 TRACE_EVENT(svcrdma_send_failed,
 	TP_PROTO(
 		const struct svc_rqst *rqst,
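
Like the other events in this file, the new tracepoint joins the rpcrdma trace subsystem: TRACE_EVENT() generates a trace_svcrdma_send_pullup() helper for call sites (invoked from svc_rdma_sendto.c below), and each firing renders as "len=<bytes>" per the TP_printk() format string. As a hedged userspace sketch of observing it, assuming a kernel with this patch, root privileges, and tracefs mounted at the standard /sys/kernel/tracing location:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *enable =
			"/sys/kernel/tracing/events/rpcrdma/svcrdma_send_pullup/enable";
		FILE *f = fopen(enable, "w");

		if (!f) {
			perror(enable);
			return EXIT_FAILURE;
		}
		fputs("1\n", f);	/* turn the event on */
		fclose(f);

		/* Records can then be read from /sys/kernel/tracing/trace_pipe,
		 * with the message portion formatted as "len=<bytes>". */
		return EXIT_SUCCESS;
	}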
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 7b9853214769..90cba3058f04 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -541,6 +541,7 @@ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
 /**
  * svc_rdma_pull_up_needed - Determine whether to use pull-up
  * @rdma: controlling transport
+ * @sctxt: send_ctxt for the Send WR
  * @rctxt: Write and Reply chunks provided by client
  * @xdr: xdr_buf containing RPC message to transmit
  *
@@ -549,11 +550,20 @@ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
  *	%false otherwise
  */
 static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+				    struct svc_rdma_send_ctxt *sctxt,
 				    const struct svc_rdma_recv_ctxt *rctxt,
 				    struct xdr_buf *xdr)
 {
 	int elements;
 
+	/* For small messages, copying bytes is cheaper than DMA mapping.
+	 */
+	if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
+		return true;
+
+	/* Check whether the xdr_buf has more elements than can
+	 * fit in a single RDMA Send.
+	 */
 	/* xdr->head */
 	elements = 1;
 
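
The new test short-circuits the SGE-counting logic that follows it: when the already-built transport header (sctxt->sc_hdrbuf.len) plus the whole RPC message (xdr->len) fit under RPCRDMA_PULLUP_THRESH, the function reports that pull-up is worthwhile without examining the shape of the xdr_buf at all. A simplified, hedged sketch of the decision, with the kernel structures reduced to plain integers:

	#include <stdbool.h>

	enum { RPCRDMA_PULLUP_THRESH = 4096 >> 1 };	/* assumed value; see above */

	/* Sketch: hdr_len stands in for sctxt->sc_hdrbuf.len, msg_len for
	 * xdr->len. Returns true when copying beats DMA mapping. */
	static bool pull_up_needed(unsigned int hdr_len, unsigned int msg_len)
	{
		/* For small messages, copying bytes is cheaper than DMA mapping. */
		if (hdr_len + msg_len < RPCRDMA_PULLUP_THRESH)
			return true;

		/* Otherwise the real function counts xdr_buf elements against
		 * the device's SGE limit (elided here). */
		return false;
	}

	int main(void)
	{
		/* e.g. a 104-byte header plus a 512-byte reply is pulled up */
		return pull_up_needed(104, 512) ? 0 : 1;
	}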
@@ -636,6 +646,7 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
 		memcpy(dst, tailbase, taillen);
 
 	sctxt->sc_sges[0].length += xdr->len;
+	trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
 	return 0;
 }
 
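Worth noting: sc_sges[0] covers the transport header, so after the "+= xdr->len" the value handed to trace_svcrdma_send_pullup() is the total length of the single Send SGE (header plus pulled-up message), the same sum that svc_rdma_pull_up_needed() weighed against RPCRDMA_PULLUP_THRESH.
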
@@ -675,7 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	/* For pull-up, svc_rdma_send() will sync the transport header.
 	 * No additional DMA mapping is necessary.
 	 */
-	if (svc_rdma_pull_up_needed(rdma, rctxt, xdr))
+	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
 		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);
 
 	++sctxt->sc_cur_sge_no;