diff options
author | Bob Pearson <rpearsonhpe@gmail.com> | 2021-09-14 11:42:06 -0500 |
---|---|---|
committer | Jason Gunthorpe <jgg@nvidia.com> | 2021-09-24 10:15:00 -0300 |
commit | 647bf13ce944f20f7402f281578423a952274e4a (patch) | |
tree | 47fc68062be3ed0d537d0fdb415bffe6a7f3131d /drivers/infiniband/sw/rxe/rxe_verbs.c | |
parent | RDMA/rxe: Separate HW and SW l/rkeys (diff) | |
download | linux-dev-647bf13ce944f20f7402f281578423a952274e4a.tar.xz linux-dev-647bf13ce944f20f7402f281578423a952274e4a.zip |
RDMA/rxe: Create duplicate mapping tables for FMRs
For fast memory regions create duplicate mapping tables so ib_map_mr_sg()
can build a new mapping table which is then swapped into place
synchronously with the execution of an IB_WR_REG_MR work request.
Currently the rxe driver uses the same table for receiving RDMA operations
and for building new tables in preparation for reusing the MR. This
exposes users to potentially incorrect results.
Link: https://lore.kernel.org/r/20210914164206.19768-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.c')
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_verbs.c | 39 |
1 file changed, 12 insertions, 27 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 571cbbeed5d6..9d0bb9aa7514 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -951,41 +951,26 @@ err1: return ERR_PTR(err); } -static int rxe_set_page(struct ib_mr *ibmr, u64 addr) -{ - struct rxe_mr *mr = to_rmr(ibmr); - struct rxe_map *map; - struct rxe_phys_buf *buf; - - if (unlikely(mr->nbuf == mr->num_buf)) - return -ENOMEM; - - map = mr->map[mr->nbuf / RXE_BUF_PER_MAP]; - buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP]; - - buf->addr = addr; - buf->size = ibmr->page_size; - mr->nbuf++; - - return 0; -} - +/* build next_map_set from scatterlist + * The IB_WR_REG_MR WR will swap map_sets + */ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct rxe_mr *mr = to_rmr(ibmr); + struct rxe_map_set *set = mr->next_map_set; int n; - mr->nbuf = 0; + set->nbuf = 0; - n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page); + n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page); - mr->va = ibmr->iova; - mr->iova = ibmr->iova; - mr->length = ibmr->length; - mr->page_shift = ilog2(ibmr->page_size); - mr->page_mask = ibmr->page_size - 1; - mr->offset = mr->iova & mr->page_mask; + set->va = ibmr->iova; + set->iova = ibmr->iova; + set->length = ibmr->length; + set->page_shift = ilog2(ibmr->page_size); + set->page_mask = ibmr->page_size - 1; + set->offset = set->iova & set->page_mask; return n; } |