author     Linus Torvalds <torvalds@linux-foundation.org>  2021-11-10 16:45:54 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-11-10 16:45:54 -0800
commit     38764c734028bf0ae4cf262f3eb7d965c86298bd
tree       de5f7c46f71e34d14bea89aaa95ecf3032285ce3
parent     Merge tag 'nfs-for-5.16-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
parent     nfsd4: remove obselete comment
Merge tag 'nfsd-5.16' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
 "A slow cycle for nfsd: mainly cleanup, including Neil's patch dropping
  support for a filehandle format deprecated 20 years ago, and further
  xdr-related cleanup from Chuck"

* tag 'nfsd-5.16' of git://linux-nfs.org/~bfields/linux: (26 commits)
  nfsd4: remove obselete comment
  nfsd: document server-to-server-copy parameters
  NFSD:fix boolreturn.cocci warning
  nfsd: update create verifier comment
  SUNRPC: Change return value type of .pc_encode
  SUNRPC: Replace the "__be32 *p" parameter to .pc_encode
  NFSD: Save location of NFSv4 COMPOUND status
  SUNRPC: Change return value type of .pc_decode
  SUNRPC: Replace the "__be32 *p" parameter to .pc_decode
  SUNRPC: De-duplicate .pc_release() call sites
  SUNRPC: Simplify the SVC dispatch code path
  SUNRPC: Capture value of xdr_buf::page_base
  SUNRPC: Add trace event when alloc_pages_bulk() makes no progress
  svcrdma: Split svcrmda_wc_{read,write} tracepoints
  svcrdma: Split the svcrdma_wc_send() tracepoint
  svcrdma: Split the svcrdma_wc_receive() tracepoint
  NFSD: Have legacy NFSD WRITE decoders use xdr_stream_subsegment()
  SUNRPC: xdr_stream_subsegment() must handle non-zero page_bases
  NFSD: Initialize pointer ni with NULL and not plain integer 0
  NFSD: simplify struct nfsfh
  ...
Diffstat (limited to 'net/sunrpc')
 -rw-r--r--  net/sunrpc/addr.c                        |  40
 -rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c        |   2
 -rw-r--r--  net/sunrpc/svc.c                         |  80
 -rw-r--r--  net/sunrpc/svc_xprt.c                    |   1
 -rw-r--r--  net/sunrpc/xdr.c                         |  32
 -rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  |   9
 -rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_rw.c        |  30
 -rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c    |  14
 8 files changed, 94 insertions, 114 deletions
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 6e4dbd577a39..d435bffc6199 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -162,8 +162,10 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
const size_t buflen, const char *delim,
struct sockaddr_in6 *sin6)
{
- char *p;
+ char p[IPV6_SCOPE_ID_LEN + 1];
size_t len;
+ u32 scope_id = 0;
+ struct net_device *dev;
if ((buf + buflen) == delim)
return 1;
@@ -175,29 +177,23 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
return 0;
len = (buf + buflen) - delim - 1;
- p = kmemdup_nul(delim + 1, len, GFP_KERNEL);
- if (p) {
- u32 scope_id = 0;
- struct net_device *dev;
-
- dev = dev_get_by_name(net, p);
- if (dev != NULL) {
- scope_id = dev->ifindex;
- dev_put(dev);
- } else {
- if (kstrtou32(p, 10, &scope_id) != 0) {
- kfree(p);
- return 0;
- }
- }
-
- kfree(p);
-
- sin6->sin6_scope_id = scope_id;
- return 1;
+ if (len > IPV6_SCOPE_ID_LEN)
+ return 0;
+
+ memcpy(p, delim + 1, len);
+ p[len] = 0;
+
+ dev = dev_get_by_name(net, p);
+ if (dev != NULL) {
+ scope_id = dev->ifindex;
+ dev_put(dev);
+ } else {
+ if (kstrtou32(p, 10, &scope_id) != 0)
+ return 0;
}
- return 0;
+ sin6->sin6_scope_id = scope_id;
+ return 1;
}
static size_t rpc_pton6(struct net *net, const char *buf, const size_t buflen,
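
The rewritten rpc_parse_scope_id() above copies the scope suffix into a small on-stack buffer bounded by IPV6_SCOPE_ID_LEN instead of allocating it with kmemdup_nul(), then resolves it either as an interface name or as a numeric index. Below is a minimal user-space sketch of the same bounded-parse idea; the IPV6_SCOPE_ID_MAX constant, the parse_scope_id() helper, and the if_nametoindex()/strtoul() fallback are illustrative assumptions standing in for the kernel's IPV6_SCOPE_ID_LEN, dev_get_by_name() and kstrtou32(), not code from this patch.

#include <net/if.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IPV6_SCOPE_ID_MAX 32    /* assumed bound, analogous to IPV6_SCOPE_ID_LEN */

/* Parse the text after the '%' in "fe80::1%eth0" into a scope id. */
static int parse_scope_id(const char *suffix, size_t len, unsigned int *scope_id)
{
        char p[IPV6_SCOPE_ID_MAX + 1];
        char *end;

        if (len == 0 || len > IPV6_SCOPE_ID_MAX)
                return 0;
        memcpy(p, suffix, len);
        p[len] = '\0';

        /* Prefer an interface name; fall back to a numeric index. */
        *scope_id = if_nametoindex(p);
        if (*scope_id != 0)
                return 1;
        *scope_id = (unsigned int)strtoul(p, &end, 10);
        return *end == '\0';
}

int main(void)
{
        const char *addr = "fe80::1%lo";
        const char *delim = strchr(addr, '%');
        unsigned int id;

        if (delim && parse_scope_id(delim + 1, strlen(delim + 1), &id))
                printf("scope id: %u\n", id);
        return 0;
}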
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1f2817195549..b87565b64928 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -781,7 +781,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
xdr_seq = kmalloc(4, GFP_KERNEL);
if (!xdr_seq)
- return -1;
+ return -ENOMEM;
*xdr_seq = htonl(seq);
iov.iov_base = xdr_seq;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a3bbe5ce4570..4292278a9552 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1186,45 +1186,6 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif
-static int
-svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
-{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
- const struct svc_procedure *procp = rqstp->rq_procinfo;
-
- /*
- * Decode arguments
- * XXX: why do we ignore the return value?
- */
- if (procp->pc_decode &&
- !procp->pc_decode(rqstp, argv->iov_base)) {
- *statp = rpc_garbage_args;
- return 1;
- }
-
- *statp = procp->pc_func(rqstp);
-
- if (*statp == rpc_drop_reply ||
- test_bit(RQ_DROPME, &rqstp->rq_flags))
- return 0;
-
- if (rqstp->rq_auth_stat != rpc_auth_ok)
- return 1;
-
- if (*statp != rpc_success)
- return 1;
-
- /* Encode reply */
- if (procp->pc_encode &&
- !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
- dprintk("svc: failed to encode reply\n");
- /* serv->sv_stats->rpcsystemerr++; */
- *statp = rpc_system_err;
- }
- return 1;
-}
-
__be32
svc_generic_init_request(struct svc_rqst *rqstp,
const struct svc_program *progp,
@@ -1291,7 +1252,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
__be32 *statp;
u32 prog, vers;
__be32 rpc_stat;
- int auth_res;
+ int auth_res, rc;
__be32 *reply_statp;
rpc_stat = rpc_success;
@@ -1392,28 +1353,18 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
/* Call the function that processes the request. */
- if (!process.dispatch) {
- if (!svc_generic_dispatch(rqstp, statp))
- goto release_dropit;
- if (*statp == rpc_garbage_args)
- goto err_garbage;
- } else {
- dprintk("svc: calling dispatcher\n");
- if (!process.dispatch(rqstp, statp))
- goto release_dropit; /* Release reply info */
- }
-
+ rc = process.dispatch(rqstp, statp);
+ if (procp->pc_release)
+ procp->pc_release(rqstp);
+ if (!rc)
+ goto dropit;
if (rqstp->rq_auth_stat != rpc_auth_ok)
- goto err_release_bad_auth;
+ goto err_bad_auth;
/* Check RPC status result */
if (*statp != rpc_success)
resv->iov_len = ((void*)statp) - resv->iov_base + 4;
- /* Release reply info */
- if (procp->pc_release)
- procp->pc_release(rqstp);
-
if (procp->pc_encode == NULL)
goto dropit;
@@ -1422,9 +1373,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
goto close_xprt;
return 1; /* Caller can now send it */
-release_dropit:
- if (procp->pc_release)
- procp->pc_release(rqstp);
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
dprintk("svc: svc_process dropit\n");
@@ -1451,9 +1399,6 @@ err_bad_rpc:
svc_putnl(resv, 2);
goto sendit;
-err_release_bad_auth:
- if (procp->pc_release)
- procp->pc_release(rqstp);
err_bad_auth:
dprintk("svc: authentication failed (%d)\n",
be32_to_cpu(rqstp->rq_auth_stat));
@@ -1676,16 +1621,17 @@ EXPORT_SYMBOL_GPL(svc_encode_result_payload);
/**
* svc_fill_write_vector - Construct data argument for VFS write call
* @rqstp: svc_rqst to operate on
- * @pages: list of pages containing data payload
- * @first: buffer containing first section of write payload
- * @total: total number of bytes of write payload
+ * @payload: xdr_buf containing only the write data payload
*
* Fills in rqstp::rq_vec, and returns the number of elements.
*/
-unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
- struct kvec *first, size_t total)
+unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
+ struct xdr_buf *payload)
{
+ struct page **pages = payload->pages;
+ struct kvec *first = payload->head;
struct kvec *vec = rqstp->rq_vec;
+ size_t total = payload->len;
unsigned int i;
/* Some types of transport can present the write payload
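
svc_fill_write_vector() now takes a single xdr_buf describing only the WRITE data, rather than separate pages/first/total arguments. A rough sketch of how a caller adapts is shown here; nfsd_write_sketch() and do_vfs_write() are hypothetical names used only to illustrate the before/after call, not functions from this series.

/*
 * Hypothetical caller: @payload is assumed to have been carved out of the
 * receive buffer by the decoder (e.g. with xdr_stream_subsegment()), so its
 * head, pages and len already cover just the WRITE data.
 */
static __be32 nfsd_write_sketch(struct svc_rqst *rqstp, struct xdr_buf *payload)
{
        unsigned int nvecs;

        /* Before this series:
         *   nvecs = svc_fill_write_vector(rqstp, payload->pages,
         *                                 payload->head, payload->len);
         */
        nvecs = svc_fill_write_vector(rqstp, payload);

        /* rqstp->rq_vec[0 .. nvecs-1] now maps the payload for the VFS. */
        return do_vfs_write(rqstp, rqstp->rq_vec, nvecs, payload->len); /* assumed helper */
}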
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 6316bd2b8f37..1e99ba1b9d72 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -687,6 +687,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
set_current_state(TASK_RUNNING);
return -EINTR;
}
+ trace_svc_alloc_arg_err(pages);
schedule_timeout(msecs_to_jiffies(500));
}
rqstp->rq_page_end = &rqstp->rq_pages[pages];
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index ca10ba2626f2..df194cc07035 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1633,7 +1633,7 @@ EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
* Sets up @subbuf to represent a portion of @xdr. The portion
* starts at the current offset in @xdr, and extends for a length
* of @nbytes. If this is successful, @xdr is advanced to the next
- * position following that portion.
+ * XDR data item following that portion.
*
* Return values:
* %true: @subbuf has been initialized, and @xdr has been advanced.
@@ -1642,29 +1642,31 @@ EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
unsigned int nbytes)
{
- unsigned int remaining, offset, len;
+ unsigned int start = xdr_stream_pos(xdr);
+ unsigned int remaining, len;
- if (xdr_buf_subsegment(xdr->buf, subbuf, xdr_stream_pos(xdr), nbytes))
+ /* Extract @subbuf and bounds-check the fn arguments */
+ if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
return false;
- if (subbuf->head[0].iov_len)
- if (!__xdr_inline_decode(xdr, subbuf->head[0].iov_len))
- return false;
-
- remaining = subbuf->page_len;
- offset = subbuf->page_base;
- while (remaining) {
- len = min_t(unsigned int, remaining, PAGE_SIZE) - offset;
-
+ /* Advance @xdr by @nbytes */
+ for (remaining = nbytes; remaining;) {
if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
return false;
- if (!__xdr_inline_decode(xdr, len))
- return false;
+ len = (char *)xdr->end - (char *)xdr->p;
+ if (remaining <= len) {
+ xdr->p = (__be32 *)((char *)xdr->p +
+ (remaining + xdr_pad_size(nbytes)));
+ break;
+ }
+
+ xdr->p = (__be32 *)((char *)xdr->p + len);
+ xdr->end = xdr->p;
remaining -= len;
- offset = 0;
}
+ xdr_stream_set_pos(xdr, start + nbytes);
return true;
}
EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
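
After this rework, xdr_stream_subsegment() both describes the next @nbytes of the stream in @subbuf and advances @xdr past that data item, including its XDR padding, even when the underlying pages have a non-zero page_base. A minimal caller sketch follows; decode_write_payload() is a hypothetical helper assuming a length-prefixed opaque payload, not a function added by this patch.

/*
 * Hypothetical decoder: read the 4-byte opaque length, then let
 * xdr_stream_subsegment() map the payload in place (no data copy)
 * and advance the stream past it.
 */
static bool decode_write_payload(struct xdr_stream *xdr, struct xdr_buf *payload)
{
        u32 len;

        if (xdr_stream_decode_u32(xdr, &len) < 0)
                return false;
        return xdr_stream_subsegment(xdr, payload, len);
}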
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 6be23ce7a93d..cf76a6ad127b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -330,9 +330,9 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wc->wr_cqe and wc->status are reliable */
ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
- trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
if (wc->status != IB_WC_SUCCESS)
goto flushed;
+ trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);
/* If receive posting fails, the connection is about to be
* lost anyway. The server will not be able to send a reply
@@ -345,7 +345,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
*/
if (rdma->sc_pending_recvs < rdma->sc_max_requests)
if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false))
- goto flushed;
+ goto dropped;
/* All wc fields are now known to be valid */
ctxt->rc_byte_len = wc->byte_len;
@@ -360,6 +360,11 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
return;
flushed:
+ if (wc->status == IB_WC_WR_FLUSH_ERR)
+ trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
+ else
+ trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
+dropped:
svc_rdma_recv_ctxt_put(rdma, ctxt);
svc_xprt_deferred_close(&rdma->sc_xprt);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index e27433f08ca7..5f0155fdefc7 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -155,6 +155,7 @@ struct svc_rdma_chunk_ctxt {
struct ib_cqe cc_cqe;
struct svcxprt_rdma *cc_rdma;
struct list_head cc_rwctxts;
+ ktime_t cc_posttime;
int cc_sqecount;
enum ib_wc_status cc_status;
struct completion cc_done;
@@ -267,7 +268,16 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
struct svc_rdma_write_info *info =
container_of(cc, struct svc_rdma_write_info, wi_cc);
- trace_svcrdma_wc_write(wc, &cc->cc_cid);
+ switch (wc->status) {
+ case IB_WC_SUCCESS:
+ trace_svcrdma_wc_write(wc, &cc->cc_cid);
+ break;
+ case IB_WC_WR_FLUSH_ERR:
+ trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
+ break;
+ default:
+ trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
+ }
svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
@@ -320,11 +330,22 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_chunk_ctxt *cc =
container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
- struct svcxprt_rdma *rdma = cc->cc_rdma;
+ struct svc_rdma_read_info *info;
- trace_svcrdma_wc_read(wc, &cc->cc_cid);
+ switch (wc->status) {
+ case IB_WC_SUCCESS:
+ info = container_of(cc, struct svc_rdma_read_info, ri_cc);
+ trace_svcrdma_wc_read(wc, &cc->cc_cid, info->ri_totalbytes,
+ cc->cc_posttime);
+ break;
+ case IB_WC_WR_FLUSH_ERR:
+ trace_svcrdma_wc_read_flush(wc, &cc->cc_cid);
+ break;
+ default:
+ trace_svcrdma_wc_read_err(wc, &cc->cc_cid);
+ }
- svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
+ svc_rdma_wake_send_waiters(cc->cc_rdma, cc->cc_sqecount);
cc->cc_status = wc->status;
complete(&cc->cc_done);
return;
@@ -363,6 +384,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
do {
if (atomic_sub_return(cc->cc_sqecount,
&rdma->sc_sq_avail) > 0) {
+ cc->cc_posttime = ktime_get();
ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
if (ret)
break;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 599021b2391d..22a871e6fe4d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -280,13 +280,21 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
struct svc_rdma_send_ctxt *ctxt =
container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
- trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
-
svc_rdma_wake_send_waiters(rdma, 1);
complete(&ctxt->sc_done);
if (unlikely(wc->status != IB_WC_SUCCESS))
- svc_xprt_deferred_close(&rdma->sc_xprt);
+ goto flushed;
+
+ trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
+ return;
+
+flushed:
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
+ else
+ trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
+ svc_xprt_deferred_close(&rdma->sc_xprt);
}
/**