author    Michel Lespinasse <walken@google.com>    2020-06-08 21:33:25 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-06-09 09:39:14 -0700
commit    d8ed45c5dcd455fc5848d47f86883a1b872ac0d0
tree      f9270b32da5f3f7be73b086c99d3dfc29a13161a /drivers
parent    DMA reservations: use the new mmap locking API
mmap locking API: use coccinelle to convert mmap_sem rwsem call sites
This change converts the existing mmap_sem rwsem calls to use the new
mmap locking API instead.

The change is generated using coccinelle with the following rule:

// spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir .

@@
expression mm;
@@
(
-init_rwsem
+mmap_init_lock
|
-down_write
+mmap_write_lock
|
-down_write_killable
+mmap_write_lock_killable
|
-down_write_trylock
+mmap_write_trylock
|
-up_write
+mmap_write_unlock
|
-downgrade_write
+mmap_write_downgrade
|
-down_read
+mmap_read_lock
|
-down_read_killable
+mmap_read_lock_killable
|
-down_read_trylock
+mmap_read_trylock
|
-up_read
+mmap_read_unlock
)
-(&mm->mmap_sem)
+(mm)

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
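For reference, each new name maps one-to-one onto the old rwsem primitive it
replaces. A minimal, abridged sketch of the wrappers, assuming the
include/linux/mmap_lock.h header added by the earlier patch in this series
("mmap locking API: initial implementation as rwsem wrappers"); see that
header for the full set:

/* Abridged sketch of the mmap locking API wrappers. At this point in the
 * series they are straight pass-throughs to the mmap_sem rwsem; the value
 * of the conversion is that callers no longer name the rwsem directly.
 */
static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_sem);
}

static inline void mmap_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return down_write_killable(&mm->mmap_sem);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_sem);
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	downgrade_write(&mm->mmap_sem);
}

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}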
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/android/binder_alloc.c                     10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c    4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c            10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c             4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c            4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c         8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c              22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c                  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c                 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c                     4
-rw-r--r--  drivers/infiniband/core/umem_odp.c                  4
-rw-r--r--  drivers/infiniband/core/uverbs_main.c               4
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c                     4
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c          6
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c            4
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c                 4
-rw-r--r--  drivers/iommu/amd_iommu_v2.c                        4
-rw-r--r--  drivers/iommu/intel-svm.c                           4
-rw-r--r--  drivers/media/v4l2-core/videobuf-core.c             4
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-contig.c       4
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-sg.c           4
-rw-r--r--  drivers/misc/cxl/cxllib.c                           4
-rw-r--r--  drivers/misc/cxl/fault.c                            4
-rw-r--r--  drivers/misc/sgi-gru/grufault.c                    16
-rw-r--r--  drivers/misc/sgi-gru/grufile.c                      4
-rw-r--r--  drivers/oprofile/buffer_sync.c                     10
-rw-r--r--  drivers/staging/kpc2000/kpc_dma/fileops.c           4
-rw-r--r--  drivers/tee/optee/call.c                            4
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c                     8
-rw-r--r--  drivers/vhost/vdpa.c                                4
-rw-r--r--  drivers/xen/gntdev.c                                4
-rw-r--r--  drivers/xen/privcmd.c                              14
32 files changed, 99 insertions(+), 99 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2d8b9b91dee0..cbdc43ed0f9f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = alloc->vma;
}
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return 0;
@@ -303,7 +303,7 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
@@ -932,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_read_trylock(&mm->mmap_sem))
+ if (!mmap_read_trylock(mm))
goto err_down_read_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
@@ -946,7 +946,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 68e6e1bc8f3a..b91b5171270f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1393,9 +1393,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* concurrently and the queues are actually stopped
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
is_invalid_userptr = atomic_read(&mem->invalid);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
mutex_lock(&mem->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9cbecd5ba814..e59c01a83dac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -910,7 +910,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
goto out_free_ranges;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
@@ -921,15 +921,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
r = -EPERM;
goto out_unlock;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
r = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
@@ -954,7 +954,7 @@ retry:
return 0;
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_free_pfns:
kvfree(range->hmm_pfns);
out_free_ranges:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 15476fca8fa6..a9583b95fcc1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -901,7 +901,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
memory_exception_data.gpu_id = dev->id;
@@ -924,7 +924,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NoExecute = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
pr_debug("notpresent %d, noexecute %d, readonly %d\n",
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 70f5f82da288..fe45bd4d63a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -93,7 +93,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
addr = -EINTR;
goto err;
}
@@ -103,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (IS_ERR_VALUE(addr))
goto err;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 33776b3f3fa5..81f7656f41c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -200,7 +200,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (IS_ERR(mn))
err = PTR_ERR(mn);
- down_write(&mm->mm->mmap_sem);
+ mmap_write_lock(mm->mm);
mutex_lock(&mm->i915->mm_lock);
if (mm->mn == NULL && !err) {
/* Protected by mmap_sem (write-lock) */
@@ -217,7 +217,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
- up_write(&mm->mm->mmap_sem);
+ mmap_write_unlock(mm->mm);
if (mn && !IS_ERR(mn))
kfree(mn);
@@ -468,7 +468,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (mmget_not_zero(mm)) {
while (pinned < npages) {
if (!locked) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = 1;
}
ret = pin_user_pages_remote
@@ -483,7 +483,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pinned += ret;
}
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 22f054f7ee3e..ba9f9359c30e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -175,10 +175,10 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
mm = get_task_mm(current);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!cli->svm.svmm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return -EINVAL;
}
@@ -205,7 +205,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
args->result = 0;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return 0;
@@ -355,7 +355,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
if (ret)
goto out_free;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
svmm->notifier.ops = &nouveau_mn_ops;
ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
@@ -364,12 +364,12 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
cli->svm.svmm = svmm;
cli->svm.cli = cli;
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
mutex_unlock(&cli->mutex);
return 0;
out_mm_unlock:
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
out_free:
mutex_unlock(&cli->mutex);
kfree(svmm);
@@ -571,9 +571,9 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
return -EBUSY;
range.notifier_seq = mmu_interval_read_begin(range.notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(&range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
/*
* FIXME: the input PFN_REQ flags are destroyed on
@@ -705,18 +705,18 @@ nouveau_svm_fault(struct nvif_notify *notify)
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0d0ab8e0ff3b..cc31d187042e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -196,12 +196,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 068c3e5da173..3c8f570a20ee 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -342,17 +342,17 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_reserve(bo, true);
if (r) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto release_object;
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (r)
goto release_object;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ad30b112982..63f55685c3d8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -68,7 +68,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
(void) dma_fence_wait(bo->moving, true);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
@@ -144,7 +144,7 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 3b1e627d9a8d..ccd28405451c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -429,7 +429,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
PAGE_SIZE / sizeof(struct page *));
- down_read(&owning_mm->mmap_sem);
+ mmap_read_lock(owning_mm);
/*
* Note: this might result in redundent page getting. We can
* avoid this by checking dma_list to be 0 before calling
@@ -440,7 +440,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
flags, local_page_list, NULL, NULL);
- up_read(&owning_mm->mmap_sem);
+ mmap_read_unlock(owning_mm);
if (npages < 0) {
if (npages != -EAGAIN)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 47794c85e9af..3c7367312398 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -840,7 +840,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
* at a time to get the lock ordering right. Typically there
* will only be one mm, so no big deal.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!mmget_still_valid(mm))
goto skip_mm;
mutex_lock(&ufile->umap_lock);
@@ -862,7 +862,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e2fb71b23c80..7e0b205c05eb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -380,7 +380,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
unsigned long untagged_start = untagged_addr(start);
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* FIXME: Ideally this would iterate over all the vmas that
* cover the memory, but for now it requires a single vma to
@@ -395,7 +395,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
access_flags |= IB_ACCESS_LOCAL_WRITE;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
return ib_umem_get(device, start, length, access_flags);
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 342e3172ca40..4c24e83f3175 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -106,18 +106,18 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
goto bail;
}
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
for (got = 0; got < num_pages; got += ret) {
ret = pin_user_pages(start_page + got * PAGE_SIZE,
num_pages - got,
FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto bail_release;
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return 0;
bail_release:
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index bd9f944b68fc..760b254ba42d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -123,7 +123,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
uiomr->owning_mm = mm = current->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = atomic64_add_return(npages, &current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -187,7 +187,7 @@ out:
} else
mmgrab(uiomr->owning_mm);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
free_page((unsigned long) page_list);
return ret;
}
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 87117781d637..34a910cf0edb 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -394,7 +394,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
if (!writable)
foll_flags |= FOLL_FORCE;
- down_read(&mm_s->mmap_sem);
+ mmap_read_lock(mm_s);
mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -438,7 +438,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
num_pages -= got;
}
out_sem_up:
- up_read(&mm_s->mmap_sem);
+ mmap_read_unlock(mm_s);
if (rv > 0)
return umem;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index c8a7b6b39222..e4b025c5637c 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -485,7 +485,7 @@ static void do_fault(struct work_struct *work)
flags |= FAULT_FLAG_WRITE;
flags |= FAULT_FLAG_REMOTE;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
/* failed to get a vma in the right range */
@@ -497,7 +497,7 @@ static void do_fault(struct work_struct *work)
ret = handle_mm_fault(vma, address, flags);
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret & VM_FAULT_ERROR)
/* failed to service fault */
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index a035ef911fba..6c87c807a0ab 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -863,7 +863,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!mmget_not_zero(svm->mm))
goto bad_req;
- down_read(&svm->mm->mmap_sem);
+ mmap_read_lock(svm->mm);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
goto invalid;
@@ -878,7 +878,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
result = QI_RESP_SUCCESS;
invalid:
- up_read(&svm->mm->mmap_sem);
+ mmap_read_unlock(svm->mm);
mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 2686f03b322e..5c91fc3e65b5 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -535,7 +535,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
if (b->memory == V4L2_MEMORY_MMAP)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
videobuf_queue_lock(q);
retval = -EBUSY;
@@ -622,7 +622,7 @@ done:
videobuf_queue_unlock(q);
if (b->memory == V4L2_MEMORY_MMAP)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return retval;
}
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index aeb2f497c683..52312ce2ba05 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -169,7 +169,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
mem->size = PAGE_ALIGN(vb->size + offset);
ret = -EINVAL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, untagged_baddr);
if (!vma)
@@ -201,7 +201,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
}
out_up:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 5999aad17ed0..672963808ab9 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -200,9 +200,9 @@ static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
{
int ret;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
ret = videobuf_dma_init_user_locked(dma, direction, data, size);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 258c43a95ac3..a2c0fbb5eb28 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -207,7 +207,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
struct vm_area_struct *vma = NULL;
int rc = 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (!vma) {
@@ -218,7 +218,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
*vma_start = vma->vm_start;
*vma_end = vma->vm_end;
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return rc;
}
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 2297e6fc1544..01153b74334a 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -321,7 +321,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
return;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) {
@@ -336,7 +336,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
last_esid = slb.esid;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index ddd7312e7c0c..8f4e2895b565 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -68,14 +68,14 @@ static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
struct vm_area_struct *vma;
struct gru_thread_state *gts = NULL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = gru_find_vma(vaddr);
if (vma)
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
if (gts)
mutex_lock(&gts->ts_ctxlock);
else
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return gts;
}
@@ -85,7 +85,7 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
struct vm_area_struct *vma;
struct gru_thread_state *gts = ERR_PTR(-EINVAL);
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vma = gru_find_vma(vaddr);
if (!vma)
goto err;
@@ -94,11 +94,11 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
if (IS_ERR(gts))
goto err;
mutex_lock(&gts->ts_ctxlock);
- downgrade_write(&mm->mmap_sem);
+ mmap_write_downgrade(mm);
return gts;
err:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return gts;
}
@@ -108,7 +108,7 @@ err:
static void gru_unlock_gts(struct gru_thread_state *gts)
{
mutex_unlock(&gts->ts_ctxlock);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
/*
@@ -574,9 +574,9 @@ static irqreturn_t gru_intr(int chiplet, int blade)
*/
gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
- down_read_trylock(&gts->ts_mm->mmap_sem)) {
+ mmap_read_trylock(gts->ts_mm)) {
gru_try_dropin(gru, gts, tfh, NULL);
- up_read(&gts->ts_mm->mmap_sem);
+ mmap_read_unlock(gts->ts_mm);
} else {
tfh_user_polling_mode(tfh);
STAT(intr_mm_lock_failed);
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 9d042310214f..93bb49ddda1f 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -135,7 +135,7 @@ static int gru_create_new_context(unsigned long arg)
if (!(req.options & GRU_OPT_MISS_MASK))
req.options |= GRU_OPT_MISS_FMM_INTR;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
vma = gru_find_vma(req.gseg);
if (vma) {
vdata = vma->vm_private_data;
@@ -146,7 +146,7 @@ static int gru_create_new_context(unsigned long arg)
vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index ac27f3d3fbb4..d3b017af7758 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -91,11 +91,11 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
struct mm_struct *mm = current->mm;
struct vm_area_struct *mpnt;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
mpnt = find_vma(mm, addr);
if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
@@ -103,7 +103,7 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
return 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return 0;
}
@@ -256,7 +256,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
if (addr < vma->vm_start || addr >= vma->vm_end)
@@ -276,7 +276,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
if (!vma)
cookie = INVALID_COOKIE;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return cookie;
}
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index 7caabdd77bbf..89753463e926 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -75,9 +75,9 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
}
// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
- down_read(&current->mm->mmap_sem); /* get memory map semaphore */
+ mmap_read_lock(current->mm); /* get memory map semaphore */
rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
- up_read(&current->mm->mmap_sem); /* release the semaphore */
+ mmap_read_unlock(current->mm); /* release the semaphore */
if (rv != acd->page_count) {
dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
goto err_get_user_pages;
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index dbed3f480dc0..20b6fd7383c5 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -565,10 +565,10 @@ static int check_mem_type(unsigned long start, size_t num_pages)
if (virt_addr_valid(start))
return 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
rc = __check_mem_type(find_vma(mm, start),
start + num_pages * PAGE_SIZE);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return rc;
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 391fafe82c5c..186acd8675ff 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -376,11 +376,11 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
if (!mm)
return -ESRCH; /* process exited */
- ret = down_write_killable(&mm->mmap_sem);
+ ret = mmap_write_lock_killable(mm);
if (!ret) {
ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
dma->lock_cap);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
if (async)
@@ -452,7 +452,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
if (prot & IOMMU_WRITE)
flags |= FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
page, NULL, NULL);
if (ret == 1) {
@@ -475,7 +475,7 @@ retry:
ret = -EFAULT;
}
done:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 0968361e3b77..40e2a5747c98 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -527,7 +527,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (!npages)
return -EINVAL;
- down_read(&dev->mm->mmap_sem);
+ mmap_read_lock(dev->mm);
locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -580,7 +580,7 @@ out:
vhost_vdpa_unmap(v, msg->iova, msg->size);
atomic64_sub(npages, &dev->mm->pinned_vm);
}
- up_read(&dev->mm->mmap_sem);
+ mmap_read_unlock(dev->mm);
free_page((unsigned long)page_list);
return ret;
}
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 50651e566564..3a939e358284 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -625,7 +625,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, op.vaddr);
if (!vma || vma->vm_ops != &gntdev_vmops)
goto out_unlock;
@@ -639,7 +639,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
rv = 0;
out_unlock:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 989ae9bd02c0..a250d118144a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -276,7 +276,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
if (rc || list_empty(&pagelist))
goto out;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
{
struct page *page = list_first_entry(&pagelist,
@@ -301,7 +301,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
out_up:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
out:
free_page_list(&pagelist);
@@ -497,7 +497,7 @@ static long privcmd_ioctl_mmap_batch(
}
}
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vma = find_vma(mm, m.addr);
if (!vma ||
@@ -553,7 +553,7 @@ static long privcmd_ioctl_mmap_batch(
BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_batch_fn, &state));
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (state.global_error) {
/* Write back errors in second pass. */
@@ -574,7 +574,7 @@ out:
return ret;
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
goto out;
}
@@ -739,7 +739,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vma = find_vma(mm, kdata.addr);
if (!vma || vma->vm_ops != &privcmd_vm_ops) {
@@ -818,7 +818,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
}
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
kfree(pfns);
return rc;