Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..0ce968b00b7c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -109,7 +109,7 @@ int pgd_huge(pgd_t pgd)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
/* Only called for hugetlbfs pages, hence can ignore THP */
- return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
+ return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
+ mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
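A side note on the mm_dec_nr_pmds() hunk above: pmd_free_tlb() only hands the PMD page to the mmu_gather batch, while the per-mm page-table counter that generic code bumps when the PMD was allocated is left alone, so the hugetlb teardown path has to drop it by hand or the mm exits with an unbalanced count. A minimal sketch of that pairing follows; the accounting helpers are the real kernel ones, but the function itself is a simplified, illustrative stand-in rather than code from this patch.

#include <linux/mm.h>		/* mm_inc_nr_pmds()/mm_dec_nr_pmds() */
#include <asm/pgalloc.h>	/* pmd_free_tlb() */

/* Illustrative teardown helper (not part of this patch). */
static void example_free_pmd_page(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, start);

	pud_clear(pud);			/* unhook the PMD page from the PUD */
	pmd_free_tlb(tlb, pmd, start);	/* queue the page for freeing */
	mm_dec_nr_pmds(tlb->mm);	/* undo the mm_inc_nr_pmds() done at
					 * allocation time, keeping the per-mm
					 * page-table accounting balanced */
}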
@@ -681,28 +682,35 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
} while (addr = next, addr != end);
}
+/*
+ * We are holding mmap_sem, so a parallel huge page collapse cannot run.
+ * To prevent hugepage split, disable irq.
+ */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
pte_t *ptep;
struct page *page;
unsigned shift;
- unsigned long mask;
+ unsigned long mask, flags;
/*
* Transparent hugepages are handled by generic code. We can skip them
* here.
*/
+ local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
/* Verify it is a huge page else bail. */
- if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
+ if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
+ local_irq_restore(flags);
return ERR_PTR(-EINVAL);
-
+ }
mask = (1UL << shift) - 1;
page = pte_page(*ptep);
if (page)
page += (address & mask) / PAGE_SIZE;
+ local_irq_restore(flags);
return page;
}
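The follow_huge_addr() hunk above establishes the pattern every lockless walker now has to follow: a parallel THP split or collapse flushes out such walkers (via an IPI or an equivalent grace period) before it frees or rewrites the page tables, so both the walk and every dereference of the returned pte must sit inside a single local_irq_save()/local_irq_restore() window. Below is a hedged sketch of a caller honouring that contract; example_huge_pfn() is a hypothetical helper, only the calls it makes match the ones used in the hunk.

/* Hypothetical caller: walk and consume the pte within one irq-off window. */
static unsigned long example_huge_pfn(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags, pfn = 0;
	unsigned shift;
	pte_t *ptep;

	local_irq_save(flags);		/* holds off THP split/collapse */
	ptep = __find_linux_pte_or_hugepte(mm->pgd, addr, &shift);
	if (ptep && shift)
		pfn = pte_pfn(*ptep);	/* dereference while irqs are off */
	local_irq_restore(flags);	/* the pte may be stale from here on */

	return pfn;
}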
@@ -949,9 +957,12 @@ void flush_dcache_icache_hugepage(struct page *page)
*
* So long as we atomically load page table pointers we are safe against teardown,
* we can follow the address down to the page and take a ref on it.
+ * This function needs to be called with interrupts disabled. We use this
+ * variant when we have MSR[EE] = 0 but paca->soft_enabled = 1.
*/
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+ unsigned *shift)
{
pgd_t pgd, *pgdp;
pud_t pud, *pudp;
@@ -964,7 +975,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*shift = 0;
pgdp = pgdir + pgd_index(ea);
- pgd = ACCESS_ONCE(*pgdp);
+ pgd = READ_ONCE(*pgdp);
/*
* Always operate on the local stack value. This makes sure the
* value doesn't get updated by a parallel THP split/collapse,
@@ -1003,12 +1014,11 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
* A hugepage collapse is captured by pmd_none, because
* it marks the pmd none and does an hpte invalidate.
*
- * A hugepage split is captured by pmd_trans_splitting
- * because we mark the pmd trans splitting and do a
- * hpte invalidate
- *
+ * We don't worry about pmd_trans_splitting here; the
+ * caller should check for that itself if it needs to
+ * handle the splitting case.
*/
- if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ if (pmd_none(pmd))
return NULL;
if (pmd_huge(pmd) || pmd_large(pmd)) {
@@ -1030,7 +1040,7 @@ out:
*shift = pdshift;
return ret_pte;
}
-EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
+EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
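Only the double-underscore variant is renamed and exported in this file; the hunks here never show how ordinary callers are meant to reach it. Presumably (this is an assumption, since any such wrapper would live in a header outside this diff) the old name survives as a thin checked entry point, so callers that are not already hard-irq-disabled get warned rather than silently racing. A sketch of what such a wrapper could look like:

/* Assumed companion wrapper (not shown in this file). */
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	WARN_ON_ONCE(!arch_irqs_disabled());	/* contract from the comment
						 * above: interrupts must be off */
	return __find_linux_pte_or_hugepte(pgdir, ea, shift);
}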
@@ -1045,7 +1055,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
if (pte_end < end)
end = pte_end;
- pte = ACCESS_ONCE(*ptep);
+ pte = READ_ONCE(*ptep);
mask = _PAGE_PRESENT | _PAGE_USER;
if (write)
mask |= _PAGE_RW;
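The ACCESS_ONCE() to READ_ONCE() conversions in the last two hunks are not purely cosmetic: ACCESS_ONCE() is only guaranteed to work on scalar types, and a pte_t can be a structure (for example with STRICT_MM_TYPECHECKS), while READ_ONCE() also handles aggregates. Either way the point is the snapshot-then-validate idiom already described in the comment above pgdp: read the entry once into a local, run every check against that local copy, and only then use it. A small hedged sketch of the idiom, using an illustrative helper that is not part of the patch:

/* Illustrative helper: one READ_ONCE() snapshot, all checks against the
 * local copy, never re-reading *ptep in between. */
static int example_pte_allows_gup_write(pte_t *ptep)
{
	pte_t pte = READ_ONCE(*ptep);	/* single consistent snapshot */

	if (!pte_present(pte))		/* every test uses the local copy */
		return 0;
	return pte_write(pte) != 0;	/* decide from the same snapshot */
}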