Diffstat (limited to 'drivers/infiniband/core/umem.c')
-rw-r--r--  drivers/infiniband/core/umem.c  60
1 file changed, 16 insertions(+), 44 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c6144df47ea4..fe5551562dbc 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -72,15 +72,16 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
* If access flags indicate ODP memory, avoid pinning. Instead, stores
* the mm for future page fault handling in conjunction with MMU notifiers.
*
- * @context: userspace context to pin memory for
+ * @udata: userspace context to pin memory for
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
* @dmasync: flush in-flight DMA when the memory region is written
*/
-struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
size_t size, int access, int dmasync)
{
+ struct ib_ucontext *context;
struct ib_umem *umem;
struct page **page_list;
struct vm_area_struct **vma_list;
@@ -95,6 +96,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct scatterlist *sg, *sg_list_start;
unsigned int gup_flags = FOLL_WRITE;
+ if (!udata)
+ return ERR_PTR(-EIO);
+
+ context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
+ if (!context)
+ return ERR_PTR(-EIO);
+
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
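
The two hunks above change how ib_umem_get() finds its ib_ucontext: instead of taking it as a parameter, it walks back from the ib_udata, relying on the fact that the udata passed in is always the driver_udata member embedded in a struct uverbs_attr_bundle. This is the classic container_of() pattern. A minimal user-space sketch of the pointer arithmetic involved (the struct and member names below are illustrative stand-ins, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* container_of(): recover the enclosing struct from a pointer to one
     * of its members by subtracting the member's offset. Same idea as
     * the kernel macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bundle {               /* stand-in for struct uverbs_attr_bundle */
            int context;          /* stand-in for bundle->context */
            int driver_udata;     /* stand-in for bundle->driver_udata */
    };

    int main(void)
    {
            struct bundle b = { .context = 42 };
            int *udata = &b.driver_udata;  /* only the member pointer is handed out */

            /* Walk back from the member to its enclosing bundle. */
            struct bundle *back = container_of(udata, struct bundle, driver_udata);
            printf("context = %d\n", back->context);   /* prints 42 */
            return 0;
    }

The pattern is only safe because every udata that reaches ib_umem_get() really is embedded in such a bundle; the !udata and !context checks catch callers that cannot supply one and fail with -EIO wrapped in an ERR_PTR.
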
@@ -160,15 +169,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- down_write(&mm->mmap_sem);
- if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
- (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
- up_write(&mm->mmap_sem);
+ new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ atomic64_sub(npages, &mm->pinned_vm);
ret = -ENOMEM;
goto out;
}
- mm->pinned_vm = new_pinned;
- up_write(&mm->mmap_sem);
cur_base = addr & PAGE_MASK;
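
The hunk above replaces the mmap_sem-protected read-modify-write of mm->pinned_vm with an optimistic atomic update: charge the pages first with atomic64_add_return(), then undo the charge if the new total exceeds RLIMIT_MEMLOCK and the task lacks CAP_IPC_LOCK. A user-space sketch of the same charge-then-roll-back pattern using C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic int64_t pinned_vm;  /* stand-in for mm->pinned_vm */

    /* Optimistically charge npages, rolling back on failure. Mirrors the
     * atomic64_add_return()/atomic64_sub() pairing in the patch. */
    static bool charge_pinned(int64_t npages, int64_t lock_limit,
                              bool cap_ipc_lock)
    {
            /* atomic_fetch_add() returns the old value, so add npages to
             * get the new total, as atomic64_add_return() does. */
            int64_t new_pinned = atomic_fetch_add(&pinned_vm, npages) + npages;

            if (new_pinned > lock_limit && !cap_ipc_lock) {
                    atomic_fetch_sub(&pinned_vm, npages);  /* undo the charge */
                    return false;
            }
            return true;
    }

Two callers racing past the limit may both fail where one alone would have succeeded, but the counter never ends up wrong, because each loser subtracts exactly what it added. The explicit check_add_overflow() is gone as well, presumably because pinned_vm is now a 64-bit atomic on all architectures and cannot realistically be overflowed by page counts. The error-unwind hunk below applies the same atomic64_sub() rollback, so the failure path no longer needs mmap_sem either.
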
@@ -228,9 +234,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem_release:
__ib_umem_release(context->device, umem, 0);
vma:
- down_write(&mm->mmap_sem);
- mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&mm->mmap_sem);
+ atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
if (vma_list)
free_page((unsigned long) vma_list);
@@ -253,25 +257,12 @@ static void __ib_umem_release_tail(struct ib_umem *umem)
kfree(umem);
}
-static void ib_umem_release_defer(struct work_struct *work)
-{
- struct ib_umem *umem = container_of(work, struct ib_umem, work);
-
- down_write(&umem->owning_mm->mmap_sem);
- umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&umem->owning_mm->mmap_sem);
-
- __ib_umem_release_tail(umem);
-}
-
/**
* ib_umem_release - release memory pinned with ib_umem_get
* @umem: umem struct to release
*/
void ib_umem_release(struct ib_umem *umem)
{
- struct ib_ucontext *context = umem->context;
-
if (umem->is_odp) {
ib_umem_odp_release(to_ib_umem_odp(umem));
__ib_umem_release_tail(umem);
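
ib_umem_release_defer(), deleted above, was one half of a trylock-or-defer pattern; the other half, visible in the next hunk, queued it with queue_work() whenever down_write_trylock() on mmap_sem failed. The shape of that idiom, as a compact user-space sketch with POSIX threads (function names here are illustrative):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int64_t counter;  /* stand-in for mm->pinned_vm */

    /* A caller that may already hold `lock` cannot block on it without
     * deadlocking, so it either takes the lock now or hands the update
     * off to be done later (the kernel used a workqueue item for this). */
    static int uncharge_or_defer(int64_t n, void (*defer_fn)(int64_t))
    {
            if (pthread_mutex_trylock(&lock) != 0) {
                    defer_fn(n);  /* stand-in for INIT_WORK() + queue_work() */
                    return -1;    /* accounting happens asynchronously */
            }
            counter -= n;
            pthread_mutex_unlock(&lock);
            return 0;
    }
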
@@ -280,26 +271,7 @@ void ib_umem_release(struct ib_umem *umem)
__ib_umem_release(umem->context->device, umem, 1);
- /*
- * We may be called with the mm's mmap_sem already held. This
- * can happen when a userspace munmap() is the call that drops
- * the last reference to our file and calls our release
- * method. If there are memory regions to destroy, we'll end
- * up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting a workqueue.
- */
- if (context->closing) {
- if (!down_write_trylock(&umem->owning_mm->mmap_sem)) {
- INIT_WORK(&umem->work, ib_umem_release_defer);
- queue_work(ib_wq, &umem->work);
- return;
- }
- } else {
- down_write(&umem->owning_mm->mmap_sem);
- }
- umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&umem->owning_mm->mmap_sem);
-
+ atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
__ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);
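
As the removed comment explains, the defer path existed because ib_umem_release() can be reached from a userspace munmap() with mmap_sem already held, making a blocking down_write() a deadlock. Once pinned_vm is an atomic64, the subtraction is safe from any context, so the trylock, the workqueue bounce, and the context->closing special case all collapse into the single atomic64_sub() line.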