author     Trond Myklebust <trond.myklebust@hammerspace.com>  2019-03-02 13:12:22 -0500
committer  Trond Myklebust <trond.myklebust@hammerspace.com>  2019-03-02 16:25:26 -0500
commit     0d1bf3407c4ae887a464d136aaa5e9ef609834f0 (patch)
tree       51915396c008d6733f9dd114291d415d9cd69d0a /net
parent     NFSv4.1: Bump the default callback session slot count to 16 (diff)
SUNRPC: Allow dynamic allocation of back channel slots
Now that the reads happen in a process context rather than a softirq, it is
safe to allocate back channel slots using a reclaiming allocation.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
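For context: a "reclaiming allocation" here means GFP_KERNEL, which is allowed
to sleep while the VM reclaims memory and is therefore legal only in process
context; softirq code must use GFP_ATOMIC, which never sleeps but fails readily
under memory pressure. A minimal sketch of that rule, assuming a hypothetical
helper alloc_bc_slot() (kzalloc(), in_softirq() and the GFP flags are the real
kernel APIs; this function is not part of the patch):

#include <linux/preempt.h>
#include <linux/slab.h>

/*
 * Hypothetical illustration of the allocation-context rule the commit
 * message relies on: GFP_KERNEL may block while pages are reclaimed,
 * so it is only usable in process context, while GFP_ATOMIC never
 * sleeps but is far more likely to return NULL under pressure.
 */
static void *alloc_bc_slot(size_t len)
{
	gfp_t gfp = in_softirq() ? GFP_ATOMIC : GFP_KERNEL;

	return kzalloc(len, gfp);
}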
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/backchannel_rqst.c  41
1 file changed, 25 insertions, 16 deletions
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index b9313c15ee3a..c47d82622fd1 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -235,7 +235,8 @@ out:
 		list_empty(&xprt->bc_pa_list) ? "true" : "false");
 }
 
-static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
+static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
+		struct rpc_rqst *new)
 {
 	struct rpc_rqst *req = NULL;
 
@@ -243,10 +244,9 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 	if (atomic_read(&xprt->bc_free_slots) <= 0)
 		goto not_found;
 	if (list_empty(&xprt->bc_pa_list)) {
-		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
-		if (!req)
+		if (!new)
 			goto not_found;
-		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
 		xprt->bc_alloc_count++;
 	}
 	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
@@ -256,8 +256,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 			sizeof(req->rq_private_buf));
 	req->rq_xid = xid;
 	req->rq_connect_cookie = xprt->connect_cookie;
-not_found:
 	dprintk("RPC:       backchannel req=%p\n", req);
+not_found:
 	return req;
 }
 
@@ -320,18 +320,27 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
  */
 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
 {
-	struct rpc_rqst *req;
-
-	spin_lock(&xprt->bc_pa_lock);
-	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
-		if (req->rq_connect_cookie != xprt->connect_cookie)
-			continue;
-		if (req->rq_xid == xid)
-			goto found;
-	}
-	req = xprt_alloc_bc_request(xprt, xid);
+	struct rpc_rqst *req, *new = NULL;
+
+	do {
+		spin_lock(&xprt->bc_pa_lock);
+		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
+			if (req->rq_connect_cookie != xprt->connect_cookie)
+				continue;
+			if (req->rq_xid == xid)
+				goto found;
+		}
+		req = xprt_get_bc_request(xprt, xid, new);
 found:
-	spin_unlock(&xprt->bc_pa_lock);
+		spin_unlock(&xprt->bc_pa_lock);
+		if (new) {
+			if (req != new)
+				xprt_free_bc_rqst(new);
+			break;
+		} else if (req)
+			break;
+		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
+	} while (new);
 	return req;
 }
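The reworked xprt_lookup_bc_request() follows a common kernel pattern: a
sleeping allocation cannot be made under a spinlock, so the GFP_KERNEL
allocation is hoisted out of the critical section and the lookup is retried;
if another task installed a matching request while the lock was dropped, the
spare allocation is released (here via xprt_free_bc_rqst()). A minimal
self-contained sketch of the pattern, with hypothetical names (struct table,
struct item and lookup_or_create() are illustrations, not part of this patch):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
	struct list_head node;
	u32 key;
};

struct table {
	spinlock_t lock;		/* plays the role of xprt->bc_pa_lock */
	struct list_head entries;	/* plays the role of xprt->bc_pa_list */
};

static struct item *find_locked(struct table *t, u32 key)
{
	struct item *it;

	list_for_each_entry(it, &t->entries, node)
		if (it->key == key)
			return it;
	return NULL;
}

static struct item *lookup_or_create(struct table *t, u32 key)
{
	struct item *found, *new = NULL;

	do {
		spin_lock(&t->lock);
		found = find_locked(t, key);
		if (!found && new) {
			/* Nobody raced us: install the fresh item. */
			new->key = key;
			list_add_tail(&new->node, &t->entries);
			found = new;
			new = NULL;
		}
		spin_unlock(&t->lock);
		if (found || new)
			break;
		/*
		 * GFP_KERNEL may sleep to reclaim memory, so allocate
		 * with the spinlock dropped, then loop to re-check:
		 * another task may have installed a match meanwhile.
		 */
		new = kzalloc(sizeof(*new), GFP_KERNEL);
	} while (new);

	kfree(new);	/* lost the race; discard the spare allocation */
	return found;
}

Note also that the dprintk() in xprt_get_bc_request() now sits before the
not_found: label rather than after it, so only successfully set-up requests
are logged.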