path: root/drivers/infiniband/hw/hfi1/rc.c
author	Kaike Wan <kaike.wan@intel.com>	2019-03-18 09:58:30 -0700
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-04-03 15:27:30 -0300
commit	f6f3f532556e4fcaa2d259fd04a800bfb4f9670d (patch)
tree	6ee8e59d3dcac951660fb12a8aeeb9d90395d7de /drivers/infiniband/hw/hfi1/rc.c
parent	IB/mlx5: Reset access mask when looping inside page fault handler (diff)
IB/hfi1: Delay the release of destination mr for TID RDMA WRITE DATA
The reference to the destination memory region is first obtained when a TID
RDMA WRITE request is received on the responder side. This reference is
released once all TID RDMA WRITE RESP packets have been sent to the requester
side, even though not all TID RDMA WRITE DATA packets may have been received.
This early release is particularly undesirable if the software needs to access
the destination memory before the last data packet arrives.

This patch delays the release of the MR until all TID RDMA WRITE DATA packets
have been received. A helper function to release the reference is also created
to simplify the code.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
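For orientation, a minimal sketch of what the new release_rdma_sge_mr() helper
could look like. Its definition is not part of the rc.c hunks shown below; the
opcode check is an assumption inferred from the commit message (skip the put
for TID RDMA WRITE requests so the MR reference is held until all WRITE DATA
packets have arrived), while the put-and-clear body matches the pattern the
patch removes:

	static void release_rdma_sge_mr(struct rvt_ack_entry *e)
	{
		/* Assumed: do not drop the reference for TID RDMA WRITE
		 * requests; their MR must stay valid until the last TID RDMA
		 * WRITE DATA packet has been received.
		 */
		if (e->opcode != TID_OP(WRITE_REQ) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}

The call sites in the diff then replace the open-coded put-and-clear blocks
with a single call to this helper.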
Diffstat (limited to 'drivers/infiniband/hw/hfi1/rc.c')
-rw-r--r--	drivers/infiniband/hw/hfi1/rc.c	23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 5991211d72bd..5ba39a9f65ad 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -140,10 +140,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
case OP(RDMA_READ_RESPONSE_LAST):
case OP(RDMA_READ_RESPONSE_ONLY):
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
/* FALLTHROUGH */
case OP(ATOMIC_ACKNOWLEDGE):
/*
@@ -343,7 +340,8 @@ write_resp:
break;
e->sent = 1;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ /* Do not free e->rdma_sge until all data are received */
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
break;
case TID_OP(READ_RESP):
@@ -2643,10 +2641,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
len = be32_to_cpu(reth->length);
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
if (len != 0) {
u32 rkey = be32_to_cpu(reth->rkey);
u64 vaddr = get_ib_reth_vaddr(reth);
@@ -3088,10 +3083,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
reth = &ohdr->u.rc.reth;
len = be32_to_cpu(reth->length);
if (len) {
@@ -3166,10 +3158,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
+ release_rdma_sge_mr(e);
/* Process OPFN special virtual address */
if (opfn) {
opfn_conn_response(qp, e, ateth);