author    Shiraz Saleem <shiraz.saleem@intel.com>    2019-05-06 08:53:34 -0500
committer Jason Gunthorpe <jgg@mellanox.com>         2019-05-06 13:08:11 -0300
commit    eb52c0333f06b88bca5bac0dc0aeca729de6eb11 (patch)
tree      c4056004b8d1dc3d51af41251ea07280f028751a /drivers/infiniband/hw/i40iw/i40iw_verbs.c
parent    RDMA/verbs: Add a DMA iterator to return aligned contiguous memory blocks (diff)
RDMA/i40iw: Use core helpers to get aligned DMA address within a supported page size
Call the core helpers to retrieve the HW aligned address to use for the
MR, within a supported i40iw page size.

Remove the code in i40iw that determined when an MR is backed by 2M huge
pages, which involved checking the umem->hugetlb flag and VMA inspection.
The new DMA block iterator will return the 2M aligned address if the MR
is backed by 2M pages.

Fixes: f26c7c83395b ("i40iw: Add 2MB page support")
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
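For context, here is a minimal sketch (not taken from the patch) of the core-helper pattern this commit adopts: ib_umem_find_best_pgsz() selects the largest HW-supported page size that can describe the umem, and rdma_for_each_block() with rdma_block_iter_dma_address() then walks the SG list in aligned blocks of that size. The fill_pbl_entry() callback is hypothetical, standing in for the driver-specific PBL fill; the sg_head/nmap fields match the 5.2-era struct ib_umem this patch was written against.

    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper illustrating the core-helper pattern; the HW is
     * assumed to support 4K and 2M pages, as i40iw does. */
    static int example_build_pbl(struct ib_umem *umem, unsigned long virt,
                                 void (*fill_pbl_entry)(dma_addr_t addr))
    {
            struct ib_block_iter biter;
            unsigned long pg_sz;

            /* Largest supported page size the umem can be expressed in;
             * 0 means no size in the bitmap fits this umem. */
            pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt);
            if (!pg_sz)
                    return -EOPNOTSUPP;

            /* Each iteration yields the DMA address of one aligned
             * pg_sz-sized block, regardless of SG entry boundaries. */
            rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz)
                    fill_pbl_entry(rdma_block_iter_dma_address(&biter));

            return 0;
    }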
Diffstat (limited to 'drivers/infiniband/hw/i40iw/i40iw_verbs.c')
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c  46
1 file changed, 7 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index b8a1412253ae..5689d742bafb 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1338,53 +1338,22 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
-	struct sg_dma_page_iter sg_iter;
-	u64 pg_addr = 0;
+	struct ib_block_iter biter;
 	u32 idx = 0;
-	bool first_pg = true;
 
 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
 	if (iwmr->type == IW_MEMREG_TYPE_QP)
 		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-	for_each_sg_dma_page (region->sg_head.sgl, &sg_iter, region->nmap, 0) {
-		pg_addr = sg_page_iter_dma_address(&sg_iter);
-		if (first_pg)
-			*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
-		else if (!(pg_addr & ~iwmr->page_msk))
-			*pbl = cpu_to_le64(pg_addr);
-		else
-			continue;
-
-		first_pg = false;
+	rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
+			    iwmr->page_size) {
+		*pbl = rdma_block_iter_dma_address(&biter);
 		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 	}
 }
 
 /**
- * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values.
- * @addr: virtual address
- * @iwmr: mr pointer for this memory registration
- */
-static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
-{
-	struct vm_area_struct *vma;
-	struct hstate *h;
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, addr);
-	if (vma && is_vm_hugetlb_page(vma)) {
-		h = hstate_vma(vma);
-		if (huge_page_size(h) == 0x200000) {
-			iwmr->page_size = huge_page_size(h);
-			iwmr->page_msk = huge_page_mask(h);
-		}
-	}
-	up_read(&current->mm->mmap_sem);
-}
-
-/**
  * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
  * @arr: lvl1 pbl array
  * @npages: page count
@@ -1839,10 +1808,9 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	iwmr->ibmr.device = pd->device;
 
 	iwmr->page_size = PAGE_SIZE;
-	iwmr->page_msk = PAGE_MASK;
-
-	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
-		i40iw_set_hugetlb_values(start, iwmr);
+	if (req.reg_type == IW_MEMREG_TYPE_MEM)
+		iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
+							 virt);
 
 	region_length = region->length + (start & (iwmr->page_size - 1));
 	pg_shift = ffs(iwmr->page_size) - 1;
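As a worked illustration of the last two context lines above (hypothetical numbers, not from the patch): for a registration that starts 0x1000 bytes into a 2M page, the driver grows the region length to cover the partial first page and derives the page shift from the chosen page size.

    /* Hypothetical values: a 4 MB MR whose start is offset 0x1000 into
     * a 2 MB page that ib_umem_find_best_pgsz() selected. */
    u64 start = 0x7f0000001000ULL;                  /* user VA of the MR */
    u64 length = 0x400000;                          /* 4 MB registration */
    u32 page_size = SZ_2M;

    u64 region_length = length + (start & (page_size - 1)); /* 0x401000 */
    u32 pg_shift = ffs(page_size) - 1;                       /* 21       */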