author	Jason Gunthorpe <jgg@mellanox.com>	2019-08-19 14:17:01 +0300
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 14:08:42 -0300
commit	fd7dbf035edcfb035977423e2a5102832c1427f4 (patch)
tree	6b3db77ec053e94fed95479569088d38e2e64bc8 /drivers/infiniband/core/umem_odp.c
parent	RDMA/odp: Iterate over the whole rbtree directly (diff)
RDMA/odp: Make it clearer when a umem is an implicit ODP umem
Implicit ODP umems are special: they don't have any page lists, they don't exist in the interval tree, and they are never DMA mapped. Instead of trying to guess this based on a zero length, use an explicit flag. Further, do not allow non-implicit umems to be 0 size.

Link: https://lore.kernel.org/r/20190819111710.18440-4-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
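To illustrate the rule this commit introduces, here is a minimal stand-alone C sketch (user-space only, not the kernel code; the names fake_umem and fake_umem_odp_get are made up for illustration): the implicit case is detected once at creation time, when both address and length are zero, and is recorded in an explicit flag, while a zero-sized non-implicit umem is rejected with -EINVAL instead of being silently treated as implicit.

/* Minimal user-space sketch (not the kernel code): models the rule this
 * patch introduces -- an ODP umem is "implicit" only when it is created
 * with address == 0 and length == 0, and that fact is recorded in an
 * explicit flag instead of being re-derived from the length later.
 * The struct and function names below are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct fake_umem {
	unsigned long address;
	size_t length;
	bool is_implicit_odp;	/* mirrors the new flag added by the patch */
};

static int fake_umem_odp_get(struct fake_umem *u)
{
	/* Zero address and zero length mark the special implicit umem. */
	if (u->address == 0 && u->length == 0) {
		u->is_implicit_odp = true;
		return 0;	/* no page list, no interval tree entry */
	}

	/* A normal (non-implicit) umem must not be zero-sized. */
	if (u->length == 0)
		return -EINVAL;

	u->is_implicit_odp = false;
	return 0;		/* would allocate page_list/dma_list here */
}

int main(void)
{
	struct fake_umem implicit = { 0, 0, false };
	struct fake_umem bogus = { 0x1000, 0, false };

	printf("implicit: ret=%d flag=%d\n",
	       fake_umem_odp_get(&implicit), implicit.is_implicit_odp);
	printf("bogus:    ret=%d\n", fake_umem_odp_get(&bogus));
	return 0;
}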
Diffstat (limited to 'drivers/infiniband/core/umem_odp.c')
-rw-r--r--	drivers/infiniband/core/umem_odp.c	54
1 file changed, 30 insertions, 24 deletions
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 77adf405e23c..7300d0a10d1e 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -176,18 +176,15 @@ static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
 	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
 	down_write(&per_mm->umem_rwsem);
-	if (likely(ib_umem_start(umem_odp) != ib_umem_end(umem_odp))) {
-		/*
-		 * Note that the representation of the intervals in the
-		 * interval tree considers the ending point as contained in
-		 * the interval, while the function ib_umem_end returns the
-		 * first address which is not contained in the umem.
-		 */
-		umem_odp->interval_tree.start = ib_umem_start(umem_odp);
-		umem_odp->interval_tree.last = ib_umem_end(umem_odp) - 1;
-		interval_tree_insert(&umem_odp->interval_tree,
-				     &per_mm->umem_tree);
-	}
+	/*
+	 * Note that the representation of the intervals in the interval tree
+	 * considers the ending point as contained in the interval, while the
+	 * function ib_umem_end returns the first address which is not
+	 * contained in the umem.
+	 */
+	umem_odp->interval_tree.start = ib_umem_start(umem_odp);
+	umem_odp->interval_tree.last = ib_umem_end(umem_odp) - 1;
+	interval_tree_insert(&umem_odp->interval_tree, &per_mm->umem_tree);
 	up_write(&per_mm->umem_rwsem);
 }
@@ -196,11 +193,8 @@ static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
 	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
 	down_write(&per_mm->umem_rwsem);
-	if (likely(ib_umem_start(umem_odp) != ib_umem_end(umem_odp)))
-		interval_tree_remove(&umem_odp->interval_tree,
-				     &per_mm->umem_tree);
+	interval_tree_remove(&umem_odp->interval_tree, &per_mm->umem_tree);
 	complete_all(&umem_odp->notifier_completion);
-
 	up_write(&per_mm->umem_rwsem);
 }
@@ -320,6 +314,9 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root,
 	int pages = size >> PAGE_SHIFT;
 	int ret;
+	if (!size)
+		return ERR_PTR(-EINVAL);
+
 	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
 	if (!odp_data)
 		return ERR_PTR(-ENOMEM);
@@ -381,6 +378,9 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
 	struct mm_struct *mm = umem->owning_mm;
 	int ret_val;
+	if (umem_odp->umem.address == 0 && umem_odp->umem.length == 0)
+		umem_odp->is_implicit_odp = 1;
+
 	umem_odp->page_shift = PAGE_SHIFT;
 	if (access & IB_ACCESS_HUGETLB) {
 		struct vm_area_struct *vma;
@@ -401,7 +401,10 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
 	init_completion(&umem_odp->notifier_completion);
-	if (ib_umem_odp_num_pages(umem_odp)) {
+	if (!umem_odp->is_implicit_odp) {
+		if (!ib_umem_odp_num_pages(umem_odp))
+			return -EINVAL;
+
 		umem_odp->page_list =
 			vzalloc(array_size(sizeof(*umem_odp->page_list),
 					   ib_umem_odp_num_pages(umem_odp)));
@@ -420,7 +423,9 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
 	ret_val = get_per_mm(umem_odp);
 	if (ret_val)
 		goto out_dma_list;
-	add_umem_to_per_mm(umem_odp);
+
+	if (!umem_odp->is_implicit_odp)
+		add_umem_to_per_mm(umem_odp);
 	return 0;
@@ -439,13 +444,14 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
 	 * It is the driver's responsibility to ensure, before calling us,
 	 * that the hardware will not attempt to access the MR any more.
 	 */
-	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-				    ib_umem_end(umem_odp));
-
-	remove_umem_from_per_mm(umem_odp);
+	if (!umem_odp->is_implicit_odp) {
+		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+					    ib_umem_end(umem_odp));
+		remove_umem_from_per_mm(umem_odp);
+		vfree(umem_odp->dma_list);
+		vfree(umem_odp->page_list);
+	}
 	put_per_mm(umem_odp);
-	vfree(umem_odp->dma_list);
-	vfree(umem_odp->page_list);
 }
 /*