From 9b6e27d01adcec58e046c624874f8a124e8b07ec Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 14 Sep 2021 12:30:32 -0400 Subject: nfsd: don't alloc under spinlock in rpc_parse_scope_id Dan Carpenter says: The patch d20c11d86d8f: "nfsd: Protect session creation and client confirm using client_lock" from Jul 30, 2014, leads to the following Smatch static checker warning: net/sunrpc/addr.c:178 rpc_parse_scope_id() warn: sleeping in atomic context Reported-by: Dan Carpenter Fixes: d20c11d86d8f ("nfsd: Protect session creation and client...") Signed-off-by: J. Bruce Fields --- net/sunrpc/addr.c | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) (limited to 'net/sunrpc') diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index 6e4dbd577a39..d435bffc6199 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -162,8 +162,10 @@ static int rpc_parse_scope_id(struct net *net, const char *buf, const size_t buflen, const char *delim, struct sockaddr_in6 *sin6) { - char *p; + char p[IPV6_SCOPE_ID_LEN + 1]; size_t len; + u32 scope_id = 0; + struct net_device *dev; if ((buf + buflen) == delim) return 1; @@ -175,29 +177,23 @@ static int rpc_parse_scope_id(struct net *net, const char *buf, return 0; len = (buf + buflen) - delim - 1; - p = kmemdup_nul(delim + 1, len, GFP_KERNEL); - if (p) { - u32 scope_id = 0; - struct net_device *dev; - - dev = dev_get_by_name(net, p); - if (dev != NULL) { - scope_id = dev->ifindex; - dev_put(dev); - } else { - if (kstrtou32(p, 10, &scope_id) != 0) { - kfree(p); - return 0; - } - } - - kfree(p); - - sin6->sin6_scope_id = scope_id; - return 1; + if (len > IPV6_SCOPE_ID_LEN) + return 0; + + memcpy(p, delim + 1, len); + p[len] = 0; + + dev = dev_get_by_name(net, p); + if (dev != NULL) { + scope_id = dev->ifindex; + dev_put(dev); + } else { + if (kstrtou32(p, 10, &scope_id) != 0) + return 0; } - return 0; + sin6->sin6_scope_id = scope_id; + return 1; } static size_t rpc_pton6(struct net *net, const char *buf, const size_t buflen, -- cgit v1.2.3-59-g8ed1b From 458032fcfa91c8714859b1f01b9ac7dccea5d6cd Mon Sep 17 00:00:00 2001 From: Yang Li Date: Fri, 10 Sep 2021 17:33:24 +0800 Subject: UNRPC: Return specific error code on kmalloc failure Although the callers of this function only care about whether the return value is null or not, we should still give a rigorous error code. Smatch tool warning: net/sunrpc/auth_gss/svcauth_gss.c:784 gss_write_verf() warn: returning -1 instead of -ENOMEM is sloppy No functional change, just more standardized. Reported-by: Abaci Robot Signed-off-by: Yang Li Signed-off-by: J. 
Bruce Fields --- net/sunrpc/auth_gss/svcauth_gss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/sunrpc') diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 3e776e3dff91..7dba6a9c213a 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -781,7 +781,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS); xdr_seq = kmalloc(4, GFP_KERNEL); if (!xdr_seq) - return -1; + return -ENOMEM; *xdr_seq = htonl(seq); iov.iov_base = xdr_seq; -- cgit v1.2.3-59-g8ed1b From f49b68ddc9d7dddf1530312108a648dd815a2f30 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 30 Sep 2021 17:06:15 -0400 Subject: SUNRPC: xdr_stream_subsegment() must handle non-zero page_bases xdr_stream_subsegment() was introduced in commit c1346a1216ab ("NFSD: Replace the internals of the READ_BUF() macro"). There are two call sites for xdr_stream_subsegment(). One is nfsd4_decode_write(), and the other is nfsd4_decode_setxattr(). Currently neither of these call sites calls this API when xdr_buf::page_base is a non-zero value. However, I'm about to add a case where page_base will sometimes not be zero when nfsd4_decode_write() invokes this API. Replace the logic in xdr_stream_subsegment() that advances to the next data item in the xdr_stream with something more generic in order to handle this new use case. Signed-off-by: Chuck Lever Signed-off-by: J. Bruce Fields --- net/sunrpc/xdr.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) (limited to 'net/sunrpc') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index ca10ba2626f2..df194cc07035 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1633,7 +1633,7 @@ EXPORT_SYMBOL_GPL(xdr_buf_subsegment); * Sets up @subbuf to represent a portion of @xdr. The portion * starts at the current offset in @xdr, and extends for a length * of @nbytes. If this is successful, @xdr is advanced to the next - * position following that portion. + * XDR data item following that portion. * * Return values: * %true: @subbuf has been initialized, and @xdr has been advanced. 
@@ -1642,29 +1642,31 @@ EXPORT_SYMBOL_GPL(xdr_buf_subsegment); bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, unsigned int nbytes) { - unsigned int remaining, offset, len; + unsigned int start = xdr_stream_pos(xdr); + unsigned int remaining, len; - if (xdr_buf_subsegment(xdr->buf, subbuf, xdr_stream_pos(xdr), nbytes)) + /* Extract @subbuf and bounds-check the fn arguments */ + if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes)) return false; - if (subbuf->head[0].iov_len) - if (!__xdr_inline_decode(xdr, subbuf->head[0].iov_len)) - return false; - - remaining = subbuf->page_len; - offset = subbuf->page_base; - while (remaining) { - len = min_t(unsigned int, remaining, PAGE_SIZE) - offset; - + /* Advance @xdr by @nbytes */ + for (remaining = nbytes; remaining;) { if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) return false; - if (!__xdr_inline_decode(xdr, len)) - return false; + len = (char *)xdr->end - (char *)xdr->p; + if (remaining <= len) { + xdr->p = (__be32 *)((char *)xdr->p + + (remaining + xdr_pad_size(nbytes))); + break; + } + + xdr->p = (__be32 *)((char *)xdr->p + len); + xdr->end = xdr->p; remaining -= len; - offset = 0; } + xdr_stream_set_pos(xdr, start + nbytes); return true; } EXPORT_SYMBOL_GPL(xdr_stream_subsegment); -- cgit v1.2.3-59-g8ed1b From dae9a6cab8009e526570e7477ce858dcdfeb256e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 30 Sep 2021 17:06:21 -0400 Subject: NFSD: Have legacy NFSD WRITE decoders use xdr_stream_subsegment() Refactor. Now that the NFSv2 and NFSv3 XDR decoders have been converted to use xdr_streams, the WRITE decoder functions can use xdr_stream_subsegment() to extract the WRITE payload into its own xdr_buf, just as the NFSv4 WRITE XDR decoder currently does. That makes it possible to pass the first kvec, pages array + length, page_base, and total payload length via a single function parameter. The payload's page_base is not yet assigned or used, but will be in subsequent patches. Signed-off-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs3proc.c | 3 +-- fs/nfsd/nfs3xdr.c | 12 ++---------- fs/nfsd/nfs4proc.c | 3 +-- fs/nfsd/nfsproc.c | 3 +-- fs/nfsd/nfsxdr.c | 9 +-------- fs/nfsd/xdr.h | 2 +- fs/nfsd/xdr3.h | 2 +- include/linux/sunrpc/svc.h | 3 +-- net/sunrpc/svc.c | 11 ++++++----- 9 files changed, 15 insertions(+), 33 deletions(-) (limited to 'net/sunrpc') diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 17715a6c7a40..4418517f6f12 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -201,8 +201,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp) fh_copy(&resp->fh, &argp->fh); resp->committed = argp->stable; - nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages, - &argp->first, cnt); + nvecs = svc_fill_write_vector(rqstp, &argp->payload); if (!nvecs) { resp->status = nfserr_io; goto out; diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 3d37923afb06..267e56f218af 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -621,9 +621,6 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p) struct xdr_stream *xdr = &rqstp->rq_arg_stream; struct nfsd3_writeargs *args = rqstp->rq_argp; u32 max_blocksize = svc_max_payload(rqstp); - struct kvec *head = rqstp->rq_arg.head; - struct kvec *tail = rqstp->rq_arg.tail; - size_t remaining; if (!svcxdr_decode_nfs_fh3(xdr, &args->fh)) return 0; @@ -641,17 +638,12 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p) /* request sanity */ if (args->count != args->len) return 0; - remaining = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len; - remaining -= xdr_stream_pos(xdr); - if (remaining < xdr_align_size(args->len)) - return 0; if (args->count > max_blocksize) { args->count = max_blocksize; args->len = max_blocksize; } - - args->first.iov_base = xdr->p; - args->first.iov_len = head->iov_len - xdr_stream_pos(xdr); + if (!xdr_stream_subsegment(xdr, &args->payload, args->count)) + return 0; return 1; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 3dc40c1d32bc..5895bbeba373 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1033,8 +1033,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, write->wr_how_written = write->wr_stable_how; - nvecs = svc_fill_write_vector(rqstp, write->wr_payload.pages, - write->wr_payload.head, write->wr_buflen); + nvecs = svc_fill_write_vector(rqstp, &write->wr_payload); WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec)); status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf, diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 90fcd6178823..eea5b59b6a6c 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -234,8 +234,7 @@ nfsd_proc_write(struct svc_rqst *rqstp) SVCFH_fmt(&argp->fh), argp->len, argp->offset); - nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages, - &argp->first, cnt); + nvecs = svc_fill_write_vector(rqstp, &argp->payload); if (!nvecs) { resp->status = nfserr_io; goto out; diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 082449c7d0db..ddcc18adfeb1 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -325,10 +325,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p) { struct xdr_stream *xdr = &rqstp->rq_arg_stream; struct nfsd_writeargs *args = rqstp->rq_argp; - struct kvec *head = rqstp->rq_arg.head; - struct kvec *tail = rqstp->rq_arg.tail; u32 beginoffset, totalcount; - size_t remaining; if (!svcxdr_decode_fhandle(xdr, &args->fh)) return 0; @@ -346,12 +343,8 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p) return 0; if (args->len > NFSSVC_MAXBLKSIZE_V2) return 0; - remaining = 
head->iov_len + rqstp->rq_arg.page_len + tail->iov_len; - remaining -= xdr_stream_pos(xdr); - if (remaining < xdr_align_size(args->len)) + if (!xdr_stream_subsegment(xdr, &args->payload, args->len)) return 0; - args->first.iov_base = xdr->p; - args->first.iov_len = head->iov_len - xdr_stream_pos(xdr); return 1; } diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h index f45b4bc93f52..80fd6d7f3404 100644 --- a/fs/nfsd/xdr.h +++ b/fs/nfsd/xdr.h @@ -33,7 +33,7 @@ struct nfsd_writeargs { svc_fh fh; __u32 offset; int len; - struct kvec first; + struct xdr_buf payload; }; struct nfsd_createargs { diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h index 933008382bbe..712c117300cb 100644 --- a/fs/nfsd/xdr3.h +++ b/fs/nfsd/xdr3.h @@ -40,7 +40,7 @@ struct nfsd3_writeargs { __u32 count; int stable; __u32 len; - struct kvec first; + struct xdr_buf payload; }; struct nfsd3_createargs { diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 064c96157d1f..6263410c948a 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -532,8 +532,7 @@ int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, - struct page **pages, - struct kvec *first, size_t total); + struct xdr_buf *payload); char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index a3bbe5ce4570..08ca797bb8a4 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1676,16 +1676,17 @@ EXPORT_SYMBOL_GPL(svc_encode_result_payload); /** * svc_fill_write_vector - Construct data argument for VFS write call * @rqstp: svc_rqst to operate on - * @pages: list of pages containing data payload - * @first: buffer containing first section of write payload - * @total: total number of bytes of write payload + * @payload: xdr_buf containing only the write data payload * * Fills in rqstp::rq_vec, and returns the number of elements. */ -unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, - struct kvec *first, size_t total) +unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, + struct xdr_buf *payload) { + struct page **pages = payload->pages; + struct kvec *first = payload->head; struct kvec *vec = rqstp->rq_vec; + size_t total = payload->len; unsigned int i; /* Some types of transport can present the write payload -- cgit v1.2.3-59-g8ed1b From 8dcc5721da7888685dde82be765018c8a379298c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 4 Oct 2021 10:16:08 -0400 Subject: svcrdma: Split the svcrdma_wc_receive() tracepoint There are currently three separate purposes being served by a single tracepoint here. They need to be split up. svcrdma_wc_recv: - status is always zero, so there's no value in recording it. - vendor_err is meaningless unless status is not zero, so there's no value in recording it. - This tracepoint is needed only when developing modifications, so it should be left disabled most of the time. svcrdma_wc_recv_flush: - As above, needed only rarely, and not an error. svcrdma_wc_recv_err: - received is always zero, so there's no value in recording it. - This tracepoint can be left enabled because completion errors are run-time problems (except for FLUSHED_ERR). - Tracepoint name now ends in _err to reflect its purpose. Signed-off-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- include/trace/events/rpcrdma.h | 75 ++++++++++++++++++++++++++++++++- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 9 +++- 2 files changed, 81 insertions(+), 3 deletions(-) (limited to 'net/sunrpc') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index de4195499592..342d6d7b5cd9 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -145,6 +145,77 @@ DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class, ), \ TP_ARGS(wc, cid)) +DECLARE_EVENT_CLASS(rpcrdma_receive_success_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(u32, received) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->received = wc->byte_len; + ), + + TP_printk("cq.id=%u cid=%d received=%u", + __entry->cq_id, __entry->completion_id, + __entry->received + ) +); + +#define DEFINE_RECEIVE_SUCCESS_EVENT(name) \ + DEFINE_EVENT(rpcrdma_receive_success_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + +DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + __entry->vendor_err = wc->vendor_err; + ), + + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +#define DEFINE_RECEIVE_FLUSH_EVENT(name) \ + DEFINE_EVENT(rpcrdma_receive_flush_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + DECLARE_EVENT_CLASS(xprtrdma_reply_class, TP_PROTO( const struct rpcrdma_rep *rep @@ -1892,7 +1963,9 @@ TRACE_EVENT(svcrdma_post_recv, ) ); -DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive); +DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv); +DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush); +DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err); TRACE_EVENT(svcrdma_rq_post_err, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 6be23ce7a93d..cf76a6ad127b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -330,9 +330,9 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) /* WARNING: Only wc->wr_cqe and wc->status are reliable */ ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe); - trace_svcrdma_wc_receive(wc, &ctxt->rc_cid); if (wc->status != IB_WC_SUCCESS) goto flushed; + trace_svcrdma_wc_recv(wc, &ctxt->rc_cid); /* If receive posting fails, the connection is about to be * lost anyway. 
The server will not be able to send a reply @@ -345,7 +345,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) */ if (rdma->sc_pending_recvs < rdma->sc_max_requests) if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false)) - goto flushed; + goto dropped; /* All wc fields are now known to be valid */ ctxt->rc_byte_len = wc->byte_len; @@ -360,6 +360,11 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) return; flushed: + if (wc->status == IB_WC_WR_FLUSH_ERR) + trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid); + else + trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid); +dropped: svc_rdma_recv_ctxt_put(rdma, ctxt); svc_xprt_deferred_close(&rdma->sc_xprt); } -- cgit v1.2.3-59-g8ed1b From eef2d8d47c33aba5f430fc5f91a17e360f99a591 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 4 Oct 2021 10:16:14 -0400 Subject: svcrdma: Split the svcrdma_wc_send() tracepoint There are currently three separate purposes being served by a single tracepoint here. They need to be split up. svcrdma_wc_send: - status is always zero, so there's no value in recording it. - vendor_err is meaningless unless status is not zero, so there's no value in recording it. - This tracepoint is needed only when developing modifications, so it should be left disabled most of the time. svcrdma_wc_send_flush: - As above, needed only rarely, and not an error. svcrdma_wc_send_err: - This tracepoint can be left persistently enabled because completion errors are run-time problems (except for FLUSHED_ERR). - Tracepoint name now ends in _err to reflect its purpose. Signed-off-by: Chuck Lever Signed-off-by: J. Bruce Fields --- include/trace/events/rpcrdma.h | 72 ++++++++++++++++++++++++++++++++++- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 14 +++++-- 2 files changed, 82 insertions(+), 4 deletions(-) (limited to 'net/sunrpc') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 342d6d7b5cd9..1d7c12f65f87 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -60,6 +60,74 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) +DECLARE_EVENT_CLASS(rpcrdma_send_completion_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + ), + + TP_printk("cq.id=%u cid=%d", + __entry->cq_id, __entry->completion_id + ) +); + +#define DEFINE_SEND_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_send_completion_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + +DECLARE_EVENT_CLASS(rpcrdma_send_flush_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + __entry->vendor_err = wc->vendor_err; + ), + + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +#define DEFINE_SEND_FLUSH_EVENT(name) \ + DEFINE_EVENT(rpcrdma_send_flush_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + 
const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class, TP_PROTO( const struct ib_wc *wc, @@ -1939,7 +2007,9 @@ TRACE_EVENT(svcrdma_post_send, ) ); -DEFINE_COMPLETION_EVENT(svcrdma_wc_send); +DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send); +DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush); +DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err); TRACE_EVENT(svcrdma_post_recv, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 599021b2391d..22a871e6fe4d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -280,13 +280,21 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) struct svc_rdma_send_ctxt *ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe); - trace_svcrdma_wc_send(wc, &ctxt->sc_cid); - svc_rdma_wake_send_waiters(rdma, 1); complete(&ctxt->sc_done); if (unlikely(wc->status != IB_WC_SUCCESS)) - svc_xprt_deferred_close(&rdma->sc_xprt); + goto flushed; + + trace_svcrdma_wc_send(wc, &ctxt->sc_cid); + return; + +flushed: + if (wc->status != IB_WC_WR_FLUSH_ERR) + trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid); + else + trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid); + svc_xprt_deferred_close(&rdma->sc_xprt); } /** -- cgit v1.2.3-59-g8ed1b From 45f135846815ef787b41767ad3823194de5ccfdf Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 4 Oct 2021 10:16:20 -0400 Subject: svcrdma: Split svcrmda_wc_{read,write} tracepoints There are currently three separate purposes being served by single tracepoints. Split them up, as was done with wc_send. Signed-off-by: Chuck Lever Signed-off-by: J. Bruce Fields --- include/trace/events/rpcrdma.h | 38 ++++++++++++++++++++++++++++++++++++-- net/sunrpc/xprtrdma/svc_rdma_rw.c | 30 ++++++++++++++++++++++++++---- 2 files changed, 62 insertions(+), 6 deletions(-) (limited to 'net/sunrpc') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 1d7c12f65f87..b5a1388e51a4 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -2099,8 +2099,42 @@ DEFINE_POST_CHUNK_EVENT(read); DEFINE_POST_CHUNK_EVENT(write); DEFINE_POST_CHUNK_EVENT(reply); -DEFINE_COMPLETION_EVENT(svcrdma_wc_read); -DEFINE_COMPLETION_EVENT(svcrdma_wc_write); +TRACE_EVENT(svcrdma_wc_read, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid, + unsigned int totalbytes, + const ktime_t posttime + ), + + TP_ARGS(wc, cid, totalbytes, posttime), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(s64, read_latency) + __field(unsigned int, totalbytes) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->totalbytes = totalbytes; + __entry->read_latency = ktime_us_delta(ktime_get(), posttime); + ), + + TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld", + __entry->cq_id, __entry->completion_id, + __entry->totalbytes, __entry->read_latency + ) +); + +DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush); +DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err); + +DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write); +DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush); +DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err); TRACE_EVENT(svcrdma_qp_error, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index e27433f08ca7..5f0155fdefc7 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -155,6 +155,7 @@ struct 
svc_rdma_chunk_ctxt { struct ib_cqe cc_cqe; struct svcxprt_rdma *cc_rdma; struct list_head cc_rwctxts; + ktime_t cc_posttime; int cc_sqecount; enum ib_wc_status cc_status; struct completion cc_done; @@ -267,7 +268,16 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) struct svc_rdma_write_info *info = container_of(cc, struct svc_rdma_write_info, wi_cc); - trace_svcrdma_wc_write(wc, &cc->cc_cid); + switch (wc->status) { + case IB_WC_SUCCESS: + trace_svcrdma_wc_write(wc, &cc->cc_cid); + break; + case IB_WC_WR_FLUSH_ERR: + trace_svcrdma_wc_write_flush(wc, &cc->cc_cid); + break; + default: + trace_svcrdma_wc_write_err(wc, &cc->cc_cid); + } svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); @@ -320,11 +330,22 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc) struct ib_cqe *cqe = wc->wr_cqe; struct svc_rdma_chunk_ctxt *cc = container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe); - struct svcxprt_rdma *rdma = cc->cc_rdma; + struct svc_rdma_read_info *info; - trace_svcrdma_wc_read(wc, &cc->cc_cid); + switch (wc->status) { + case IB_WC_SUCCESS: + info = container_of(cc, struct svc_rdma_read_info, ri_cc); + trace_svcrdma_wc_read(wc, &cc->cc_cid, info->ri_totalbytes, + cc->cc_posttime); + break; + case IB_WC_WR_FLUSH_ERR: + trace_svcrdma_wc_read_flush(wc, &cc->cc_cid); + break; + default: + trace_svcrdma_wc_read_err(wc, &cc->cc_cid); + } - svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); + svc_rdma_wake_send_waiters(cc->cc_rdma, cc->cc_sqecount); cc->cc_status = wc->status; complete(&cc->cc_done); return; @@ -363,6 +384,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) do { if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) { + cc->cc_posttime = ktime_get(); ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); if (ret) break; -- cgit v1.2.3-59-g8ed1b From 22a027e8c03f9a7794d16daa3b4b117ac6d340c3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 4 Oct 2021 10:16:26 -0400 Subject: SUNRPC: Add trace event when alloc_pages_bulk() makes no progress This is an operational low memory situation that needs to be flagged. The new tracepoint records a timestamp and the nfsd thread that failed to allocate pages. Signed-off-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- include/trace/events/sunrpc.h | 18 ++++++++++++++++++ net/sunrpc/svc_xprt.c | 1 + 2 files changed, 19 insertions(+) (limited to 'net/sunrpc') diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 2d04eb96d418..fb016308c185 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1859,6 +1859,24 @@ TRACE_EVENT(svc_wake_up, TP_printk("pid=%d", __entry->pid) ); +TRACE_EVENT(svc_alloc_arg_err, + TP_PROTO( + unsigned int pages + ), + + TP_ARGS(pages), + + TP_STRUCT__entry( + __field(unsigned int, pages) + ), + + TP_fast_assign( + __entry->pages = pages; + ), + + TP_printk("pages=%u", __entry->pages) +); + TRACE_EVENT(svc_handle_xprt, TP_PROTO(struct svc_xprt *xprt, int len), diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 6316bd2b8f37..1e99ba1b9d72 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -687,6 +687,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp) set_current_state(TASK_RUNNING); return -EINTR; } + trace_svc_alloc_arg_err(pages); schedule_timeout(msecs_to_jiffies(500)); } rqstp->rq_page_end = &rqstp->rq_pages[pages]; -- cgit v1.2.3-59-g8ed1b From 0ae93b99beb283438aa571a6add4eab0c077d576 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 7 Oct 2021 16:17:24 -0400 Subject: SUNRPC: Simplify the SVC dispatch code path Micro-optimization: The last user of the generic SVC dispatch code path has been removed, so svc_process_common() can be simplified. This declutters the hot path so that the by-far most common case (a dispatch function exists) is made the /only/ path. Signed-off-by: Chuck Lever Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc.h | 5 +---- net/sunrpc/svc.c | 51 ++-------------------------------------------- 2 files changed, 3 insertions(+), 53 deletions(-) (limited to 'net/sunrpc') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 6263410c948a..4205a6ef4770 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -443,10 +443,7 @@ struct svc_version { /* Need xprt with congestion control */ bool vs_need_cong_ctrl; - /* Override dispatch function (e.g. when caching replies). - * A return value of 0 means drop the request. - * vs_dispatch == NULL means use default dispatcher. - */ + /* Dispatch function */ int (*vs_dispatch)(struct svc_rqst *, __be32 *); }; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 08ca797bb8a4..e0dd6e6a4602 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1186,45 +1186,6 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} #endif -static int -svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp) -{ - struct kvec *argv = &rqstp->rq_arg.head[0]; - struct kvec *resv = &rqstp->rq_res.head[0]; - const struct svc_procedure *procp = rqstp->rq_procinfo; - - /* - * Decode arguments - * XXX: why do we ignore the return value? 
- */ - if (procp->pc_decode && - !procp->pc_decode(rqstp, argv->iov_base)) { - *statp = rpc_garbage_args; - return 1; - } - - *statp = procp->pc_func(rqstp); - - if (*statp == rpc_drop_reply || - test_bit(RQ_DROPME, &rqstp->rq_flags)) - return 0; - - if (rqstp->rq_auth_stat != rpc_auth_ok) - return 1; - - if (*statp != rpc_success) - return 1; - - /* Encode reply */ - if (procp->pc_encode && - !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) { - dprintk("svc: failed to encode reply\n"); - /* serv->sv_stats->rpcsystemerr++; */ - *statp = rpc_system_err; - } - return 1; -} - __be32 svc_generic_init_request(struct svc_rqst *rqstp, const struct svc_program *progp, @@ -1392,16 +1353,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); /* Call the function that processes the request. */ - if (!process.dispatch) { - if (!svc_generic_dispatch(rqstp, statp)) - goto release_dropit; - if (*statp == rpc_garbage_args) - goto err_garbage; - } else { - dprintk("svc: calling dispatcher\n"); - if (!process.dispatch(rqstp, statp)) - goto release_dropit; /* Release reply info */ - } + if (!process.dispatch(rqstp, statp)) + goto release_dropit; if (rqstp->rq_auth_stat != rpc_auth_ok) goto err_release_bad_auth; -- cgit v1.2.3-59-g8ed1b From 5b747a594b19708e3933eb3ecf447739e930790e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 7 Oct 2021 16:17:31 -0400 Subject: SUNRPC: De-duplicate .pc_release() call sites There was some spaghetti in svc_process_common() that had evolved over time such that there was still one case that needed a call to .pc_release() but never made it. That issue was removed in the previous patch. As additional insurance against missing this important callout, ensure that the .pc_release() method is always called, no matter what the reply_stat is. Signed-off-by: Chuck Lever Signed-off-by: J. Bruce Fields --- net/sunrpc/svc.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) (limited to 'net/sunrpc') diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index e0dd6e6a4602..4292278a9552 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1252,7 +1252,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) __be32 *statp; u32 prog, vers; __be32 rpc_stat; - int auth_res; + int auth_res, rc; __be32 *reply_statp; rpc_stat = rpc_success; @@ -1353,20 +1353,18 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); /* Call the function that processes the request. 
*/ - if (!process.dispatch(rqstp, statp)) - goto release_dropit; - + rc = process.dispatch(rqstp, statp); + if (procp->pc_release) + procp->pc_release(rqstp); + if (!rc) + goto dropit; if (rqstp->rq_auth_stat != rpc_auth_ok) - goto err_release_bad_auth; + goto err_bad_auth; /* Check RPC status result */ if (*statp != rpc_success) resv->iov_len = ((void*)statp) - resv->iov_base + 4; - /* Release reply info */ - if (procp->pc_release) - procp->pc_release(rqstp); - if (procp->pc_encode == NULL) goto dropit; @@ -1375,9 +1373,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) goto close_xprt; return 1; /* Caller can now send it */ -release_dropit: - if (procp->pc_release) - procp->pc_release(rqstp); dropit: svc_authorise(rqstp); /* doesn't hurt to call this twice */ dprintk("svc: svc_process dropit\n"); @@ -1404,9 +1399,6 @@ err_bad_rpc: svc_putnl(resv, 2); goto sendit; -err_release_bad_auth: - if (procp->pc_release) - procp->pc_release(rqstp); err_bad_auth: dprintk("svc: authentication failed (%d)\n", be32_to_cpu(rqstp->rq_auth_stat)); -- cgit v1.2.3-59-g8ed1b
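
For reference, the WRITE-path conversion in the xdr_stream_subsegment() and svc_fill_write_vector() patches above boils down to a single pattern. The condensed C sketch below restates it; decode_write_payload() and do_write() are illustrative names only (not functions in the tree), and error handling and the actual VFS write call are trimmed:

    /* Decoder side: carve the opaque write data out of the receive
     * buffer into its own xdr_buf and advance the stream past it. */
    static bool decode_write_payload(struct xdr_stream *xdr,
                                     struct nfsd3_writeargs *args)
    {
            /* Bounds-checks the arguments and copes with payloads that
             * span the head kvec and the page array, including a
             * non-zero page_base. */
            return xdr_stream_subsegment(xdr, &args->payload, args->count);
    }

    /* Proc side: hand the payload xdr_buf straight to
     * svc_fill_write_vector(), which now takes the pages, head kvec,
     * and total length from the xdr_buf itself. */
    static __be32 do_write(struct svc_rqst *rqstp,
                           struct nfsd3_writeargs *argp)
    {
            unsigned int nvecs;

            nvecs = svc_fill_write_vector(rqstp, &argp->payload);
            if (!nvecs)
                    return nfserr_io;
            /* ... nfsd_write(rqstp, ..., rqstp->rq_vec, nvecs, ...) ... */
            return nfs_ok;
    }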
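
The tracepoint-splitting patches likewise converge on one three-way shape in each completion handler: a success event that stays disabled in normal operation, a flush event for connection teardown, and an _err event that can stay enabled in production. The Write completion handler's version, excerpted from the diff above with comments added here:

    switch (wc->status) {
    case IB_WC_SUCCESS:
            trace_svcrdma_wc_write(wc, &cc->cc_cid);
            break;
    case IB_WC_WR_FLUSH_ERR:
            /* Flushes accompany every teardown; not a run-time error,
             * so reported separately from real completion errors. */
            trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
            break;
    default:
            /* Genuine completion errors are run-time problems worth
             * leaving enabled in the field. */
            trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
    }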