Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r-- | net/sunrpc/xprtrdma/fmr_ops.c            |  4
-rw-r--r-- | net/sunrpc/xprtrdma/frwr_ops.c           |  7
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma.c           |  2
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  | 12
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_rw.c        | 35
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_sendto.c    |  7
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c |  5
-rw-r--r-- | net/sunrpc/xprtrdma/verbs.c              | 10
8 files changed, 38 insertions, 44 deletions
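Most of the hunks below adapt callers to the const-qualified ib_post_send()/ib_post_recv() signatures, under which the bad-WR out-pointer may be NULL when the caller does not care which WR failed. The following sketch (not part of the patch; example_post_one() and example_post_chain() are hypothetical names) illustrates the convention, assuming the 4.19-era ib_verbs.h inline wrappers that substitute a dummy pointer when bad_wr is NULL:

#include <rdma/ib_verbs.h>

/* Fire-and-forget posting: the failed-WR out-pointer may be NULL. */
static int example_post_one(struct ib_qp *qp, const struct ib_send_wr *wr)
{
	return ib_post_send(qp, wr, NULL);
}

/* When the failing WR matters (e.g. to unwind a chain), the
 * out-pointer must now be const-qualified.
 */
static int example_post_chain(struct ib_qp *qp, const struct ib_send_wr *first)
{
	const struct ib_send_wr *bad_wr;
	int rc;

	rc = ib_post_send(qp, first, &bad_wr);
	if (rc)
		pr_err("example: post failed at WR %p: %d\n", bad_wr, rc);
	return rc;
}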
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 17fb1e025654..0f7c465d9a5a 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -279,9 +279,7 @@ out_maperr:
 static int
 fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-	struct ib_send_wr *bad_wr;
-
-	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
+	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL);
 }
 
 /* Invalidate all memory regions that were registered for "req".
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index c040de196e13..1bb00dd6ccdb 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -464,7 +464,7 @@ out_mapmr_err:
 static int
 frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-	struct ib_send_wr *post_wr, *bad_wr;
+	struct ib_send_wr *post_wr;
 	struct rpcrdma_mr *mr;
 
 	post_wr = &req->rl_sendctx->sc_wr;
@@ -486,7 +486,7 @@ frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 	/* If ib_post_send fails, the next ->send_request for
 	 * @req will queue these MWs for recovery.
 	 */
-	return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
+	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
 }
 
 /* Handle a remotely invalidated mr on the @mrs list
@@ -517,7 +517,8 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
 static void
 frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
 {
-	struct ib_send_wr *first, **prev, *last, *bad_wr;
+	struct ib_send_wr *first, **prev, *last;
+	const struct ib_send_wr *bad_wr;
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_frwr *frwr;
 	struct rpcrdma_mr *mr;
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 357ba90c382d..134bef6a451e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -94,7 +94,6 @@ static int read_reset_stat(struct ctl_table *table, int write,
 		atomic_set(stat, 0);
 	else {
 		char str_buf[32];
-		char *data;
 		int len = snprintf(str_buf, 32, "%d\n", atomic_read(stat));
 		if (len >= 32)
 			return -EFAULT;
@@ -103,7 +102,6 @@ static int read_reset_stat(struct ctl_table *table, int write,
 			*lenp = 0;
 			return 0;
 		}
-		data = &str_buf[*ppos];
 		len -= *ppos;
 		if (len > *lenp)
 			len = *lenp;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 841fca143804..b24d5b8f2fee 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -229,11 +229,10 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
 				struct svc_rdma_recv_ctxt *ctxt)
 {
-	struct ib_recv_wr *bad_recv_wr;
 	int ret;
 
 	svc_xprt_get(&rdma->sc_xprt);
-	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
+	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
 	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
 	if (ret)
 		goto err_post;
@@ -366,9 +365,6 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
 	arg->page_base = 0;
 	arg->buflen = ctxt->rc_byte_len;
 	arg->len = ctxt->rc_byte_len;
-
-	rqstp->rq_respages = &rqstp->rq_pages[0];
-	rqstp->rq_next_page = rqstp->rq_respages + 1;
 }
 
 /* This accommodates the largest possible Write chunk,
@@ -730,6 +726,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 
 	svc_rdma_build_arg_xdr(rqstp, ctxt);
 
+	/* Prevent svc_xprt_release from releasing pages in rq_pages
+	 * if we return 0 or an error.
+	 */
+	rqstp->rq_respages = rqstp->rq_pages;
+	rqstp->rq_next_page = rqstp->rq_respages;
+
 	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
 	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
 	if (ret < 0)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index ce3ea8419704..dc1951759a8e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -307,7 +307,8 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
 {
 	struct svcxprt_rdma *rdma = cc->cc_rdma;
 	struct svc_xprt *xprt = &rdma->sc_xprt;
-	struct ib_send_wr *first_wr, *bad_wr;
+	struct ib_send_wr *first_wr;
+	const struct ib_send_wr *bad_wr;
 	struct list_head *tmp;
 	struct ib_cqe *cqe;
 	int ret;
@@ -679,6 +680,7 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
 				     struct svc_rdma_read_info *info,
 				     __be32 *p)
 {
+	unsigned int i;
 	int ret;
 
 	ret = -EINVAL;
@@ -701,6 +703,12 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
 		info->ri_chunklen += rs_length;
 	}
 
+	/* Pages under I/O have been copied to head->rc_pages.
+	 * Prevent their premature release by svc_xprt_release() .
+	 */
+	for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
+		rqstp->rq_pages[i] = NULL;
+
 	return ret;
 }
 
@@ -816,7 +824,6 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
 			     struct svc_rdma_recv_ctxt *head, __be32 *p)
 {
 	struct svc_rdma_read_info *info;
-	struct page **page;
 	int ret;
 
 	/* The request (with page list) is constructed in
@@ -843,27 +850,15 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
 		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
 	else
 		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
-
-	/* Mark the start of the pages that can be used for the reply */
-	if (info->ri_pageoff > 0)
-		info->ri_pageno++;
-	rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
-	rqstp->rq_next_page = rqstp->rq_respages + 1;
-
 	if (ret < 0)
-		goto out;
+		goto out_err;
 
 	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
-
-out:
-	/* Read sink pages have been moved from rqstp->rq_pages to
-	 * head->rc_arg.pages. Force svc_recv to refill those slots
-	 * in rq_pages.
-	 */
-	for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
-		*page = NULL;
-
 	if (ret < 0)
-		svc_rdma_read_info_free(info);
+		goto out_err;
+	return 0;
+
+out_err:
+	svc_rdma_read_info_free(info);
 	return ret;
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 4a3efaea277c..8602a5f1b515 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -291,7 +291,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
  */
 int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 {
-	struct ib_send_wr *bad_wr;
 	int ret;
 
 	might_sleep();
@@ -311,7 +310,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 	}
 
 	svc_xprt_get(&rdma->sc_xprt);
-	ret = ib_post_send(rdma->sc_qp, wr, &bad_wr);
+	ret = ib_post_send(rdma->sc_qp, wr, NULL);
 	trace_svcrdma_post_send(wr, ret);
 	if (ret) {
 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
@@ -657,7 +656,9 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
 		ctxt->sc_pages[i] = rqstp->rq_respages[i];
 		rqstp->rq_respages[i] = NULL;
 	}
-	rqstp->rq_next_page = rqstp->rq_respages + 1;
+
+	/* Prevent svc_xprt_release from releasing pages in rq_pages */
+	rqstp->rq_next_page = rqstp->rq_respages;
 }
 
 /* Prepare the portion of the RPC Reply that will be transmitted
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index e9535a66bab0..2848cafd4a17 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -296,7 +296,6 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 			       struct rdma_cm_event *event)
 {
 	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;
-	int ret = 0;
 
 	trace_svcrdma_cm_event(event, sap);
 
@@ -315,7 +314,7 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 		break;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int rdma_cma_handler(struct rdma_cm_id *cma_id,
@@ -476,7 +475,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	/* Qualify the transport resource defaults with the
 	 * capabilities of this particular device */
-	newxprt->sc_max_send_sges = dev->attrs.max_sge;
+	newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
 	/* transport hdr, head iovec, one page list entry, tail iovec */
 	if (newxprt->sc_max_send_sges < 4) {
 		pr_err("svcrdma: too few Send SGEs available (%d)\n",
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 16161a36dc73..956a5ea47b58 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -280,7 +280,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 		++xprt->rx_xprt.connect_cookie;
 		connstate = -ECONNABORTED;
 connected:
-		xprt->rx_buf.rb_credits = 1;
 		ep->rep_connected = connstate;
 		rpcrdma_conn_func(ep);
 		wake_up_all(&ep->rep_connect_wait);
@@ -508,7 +507,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	unsigned int max_sge;
 	int rc;
 
-	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
 			RPCRDMA_MAX_SEND_SGES);
 	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
 		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
@@ -755,6 +754,7 @@ retry:
 	}
 
 	ep->rep_connected = 0;
+	rpcrdma_post_recvs(r_xprt, true);
 
 	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
 	if (rc) {
@@ -773,8 +773,6 @@ retry:
 
 	dprintk("RPC:       %s: connected\n", __func__);
 
-	rpcrdma_post_recvs(r_xprt, true);
-
 out:
 	if (rc)
 		ep->rep_connected = rc;
@@ -1171,6 +1169,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 		list_add(&req->rl_list, &buf->rb_send_bufs);
 	}
 
+	buf->rb_credits = 1;
 	buf->rb_posted_receives = 0;
 	INIT_LIST_HEAD(&buf->rb_recv_bufs);
 
@@ -1559,7 +1558,8 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 	if (!count)
 		return;
 
-	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, &bad_wr);
+	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
+			  (const struct ib_recv_wr **)&bad_wr);
 	if (rc) {
 		for (wr = bad_wr; wr; wr = wr->next) {
 			struct rpcrdma_rep *rep;
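The svc_rdma hunks above also rework how receive pages are shielded from svc_xprt_release(): rather than pointing rq_respages past the Read sink pages, the releasable window is made empty up front, and pages handed off for RDMA Read are NULLed out of rq_pages so svc_recv refills those slots. A minimal sketch of that invariant, assuming the 4.19-era svc_rqst layout in which svc_xprt_release() puts every page in [rq_respages, rq_next_page); example_protect_pages() is a hypothetical helper, not part of the patch:

#include <linux/sunrpc/svc.h>

/* Hypothetical helper: shrink the [rq_respages, rq_next_page) window
 * to zero so svc_xprt_release() releases nothing, leaving pages that
 * are still under RDMA I/O safely attached to rq_pages.
 */
static void example_protect_pages(struct svc_rqst *rqstp)
{
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;	/* empty window */
}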