author     Mike Kravetz <mike.kravetz@oracle.com>        2022-10-04 18:17:06 -0700
committer  Andrew Morton <akpm@linux-foundation.org>     2022-10-07 14:28:40 -0700
commit     ecfbd733878da48ed03a5b8a9c301366a03e3cca (patch)
tree       fd047a5496b89d089bc1fd721ea3536901a98ef9 /mm/hugetlb.c
parent     hugetlb: fix vma lock handling during split vma and range unmapping (diff)
hugetlb: take hugetlb vma_lock when clearing vma_lock->vma pointer
hugetlb file truncation/hole punch code may need to back out and take locks in order in the routine hugetlb_unmap_file_folio().  This code could race with vma freeing as pointed out in [1] and result in accessing a stale vma pointer.  To address this, take the vma_lock when clearing the vma_lock->vma pointer.

[1] https://lore.kernel.org/linux-mm/01f10195-7088-4462-6def-909549c75ef4@huawei.com/

[mike.kravetz@oracle.com: address build issues]
  Link: https://lkml.kernel.org/r/Yz5L1uxQYR1VqFtJ@monkey
Link: https://lkml.kernel.org/r/20221005011707.514612-3-mike.kravetz@oracle.com
Fixes: "hugetlb: use new vma_lock for pmd sharing synchronization"
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: James Houghton <jthoughton@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
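To make the synchronization rule concrete, below is a minimal, illustrative userspace model of the pattern this patch establishes.  It is a sketch only, not kernel code, and every name in it (model_vma, model_vma_lock, lock_put, unlock_write_put, free_side, truncate_side) is invented for the example.  The point it models: the side that frees/detaches the lock clears the vma back pointer while holding the write side of the semaphore, so a truncation/hole punch path that backed out and later reacquires the semaphore can detect the detachment by checking that pointer under the lock instead of touching a stale vma.

/*
 * Illustrative userspace model only; not kernel code.  All names here are
 * invented for this sketch.  A pthread_rwlock_t stands in for rw_sema and
 * an atomic counter stands in for the kref.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct model_vma;

struct model_vma_lock {
	pthread_rwlock_t rw_sema;	/* stands in for vma_lock->rw_sema */
	atomic_int refs;		/* stands in for vma_lock->refs (kref) */
	struct model_vma *vma;		/* NULL once detached from the vma */
};

struct model_vma {
	struct model_vma_lock *lock;	/* stands in for vm_private_data */
};

static void lock_put(struct model_vma_lock *lock)
{
	/* Last reference frees the structure, like hugetlb_vma_lock_release(). */
	if (atomic_fetch_sub(&lock->refs, 1) == 1) {
		pthread_rwlock_destroy(&lock->rw_sema);
		free(lock);
	}
}

/*
 * Models the idea of __hugetlb_vma_unlock_write_put(): the caller holds the
 * write lock, and the vma back pointer is cleared before that lock is dropped.
 */
static void unlock_write_put(struct model_vma_lock *lock)
{
	struct model_vma *vma = lock->vma;

	lock->vma = NULL;		/* detach while the write lock is held */
	vma->lock = NULL;
	pthread_rwlock_unlock(&lock->rw_sema);
	lock_put(lock);
}

/* Free side, analogous to hugetlb_vma_lock_free() after this patch. */
static void free_side(struct model_vma *vma)
{
	struct model_vma_lock *lock = vma->lock;

	if (!lock)
		return;
	pthread_rwlock_wrlock(&lock->rw_sema);	/* the write-lock step the patch adds */
	unlock_write_put(lock);
}

/*
 * Truncate/hole punch side: it backed out earlier while holding only a
 * reference on the lock, and now retakes the lock and rechecks ->vma.
 */
static void truncate_side(struct model_vma_lock *lock)
{
	pthread_rwlock_wrlock(&lock->rw_sema);
	if (lock->vma == NULL) {
		/* vma went away while we were backed out: nothing to unmap */
		pthread_rwlock_unlock(&lock->rw_sema);
		lock_put(lock);
		return;
	}
	/* ... safe to use lock->vma here ... */
	pthread_rwlock_unlock(&lock->rw_sema);
	lock_put(lock);
}

int main(void)
{
	struct model_vma vma = { 0 };
	struct model_vma_lock *lock = calloc(1, sizeof(*lock));

	if (!lock)
		return 1;
	pthread_rwlock_init(&lock->rw_sema, NULL);
	atomic_init(&lock->refs, 2);	/* one ref held by the vma, one by the truncate side */
	lock->vma = &vma;
	vma.lock = lock;

	free_side(&vma);	/* detaches and drops the vma's reference */
	truncate_side(lock);	/* sees ->vma == NULL, drops the last reference */
	puts("stale vma access avoided");
	return 0;
}

The step the patch adds is the write-lock acquisition on the free side (the down_write() now taken in hugetlb_vma_lock_free()); without it, the check on the other side could race with the pointer being cleared and the vma being freed.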
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 42 ++++++++++++++++++++++++++++++++++----------
1 file changed, 32 insertions(+), 10 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 45e305d182f6..01f3e36caa6c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -93,6 +93,7 @@ struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -5188,8 +5189,7 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 	 * be asynchrously deleted.  If the page tables are shared, there
 	 * will be issues when accessed by someone else.
 	 */
-	hugetlb_vma_unlock_write(vma);
-	hugetlb_vma_lock_free(vma);
+	__hugetlb_vma_unlock_write_free(vma);
 
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
@@ -6828,6 +6828,30 @@ void hugetlb_vma_lock_release(struct kref *kref)
 	kfree(vma_lock);
 }
 
+void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+{
+	struct vm_area_struct *vma = vma_lock->vma;
+
+	/*
+	 * vma_lock structure may or not be released as a result of put,
+	 * it certainly will no longer be attached to vma so clear pointer.
+	 * Semaphore synchronizes access to vma_lock->vma field.
+	 */
+	vma_lock->vma = NULL;
+	vma->vm_private_data = NULL;
+	up_write(&vma_lock->rw_sema);
+	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_flags_pmd(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
 {
 	/*
@@ -6839,14 +6863,8 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
 	if (vma->vm_private_data) {
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
-		/*
-		 * vma_lock structure may or not be released, but it
-		 * certainly will no longer be attached to vma so clear
-		 * pointer.
-		 */
-		vma_lock->vma = NULL;
-		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
-		vma->vm_private_data = NULL;
+		down_write(&vma_lock->rw_sema);
+		__hugetlb_vma_unlock_write_put(vma_lock);
 	}
 }
 
@@ -6997,6 +7015,10 @@ void hugetlb_vma_lock_release(struct kref *kref)
 {
 }
 
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+}
+
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
 {
 }