author     Christophe Leroy <christophe.leroy@c-s.fr>  2019-08-16 05:41:41 +0000
committer  Michael Ellerman <mpe@ellerman.id.au>  2019-08-20 21:22:14 +1000
commit     4c1616ef036ffaaea95a29d7b6abf9d3e8eb9d92 (patch)
tree       10415f5ec7202569616b2c663913d930982fa34f /arch/powerpc/mm/nohash/book3e_hugetlbpage.c
parent     powerpc/mm: define empty update_mmu_cache() as static inline (diff)
powerpc/mm: move FSL_BOOK3E version of update_mmu_cache()
Move the FSL_BOOK3E version of update_mmu_cache() to the same place as book3e_hugetlb_preload(), as update_mmu_cache() is the only user of book3e_hugetlb_preload().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/4d69fdc86df9c74adc71a60331a86f6afb8b5e9e.1565933217.git.christophe.leroy@c-s.fr
Diffstat (limited to 'arch/powerpc/mm/nohash/book3e_hugetlbpage.c')
-rw-r--r--  arch/powerpc/mm/nohash/book3e_hugetlbpage.c  16
1 file changed, 14 insertions, 2 deletions
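
For orientation before reading the diff, this is a rough sketch of how the two functions sit in book3e_hugetlbpage.c after the patch. It is not part of the patch page itself; the preload body is elided and only the call relationship shown in the diff below is reproduced:

/*
 * Sketch of the resulting layout (simplified; see the full diff below).
 */
static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
	/* ... program a TLB entry covering the huge page mapping ... */
}

/*
 * Called at the end of handling a user page fault, once the fault has been
 * handled by updating a PTE in the Linux page tables; the pte lock is held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}

Making book3e_hugetlb_preload() static is what allows it to stay file-local once its only caller, update_mmu_cache(), lives in the same file.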
diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
index 61915f4d3c7f..8b88be91b622 100644
--- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
+++ b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
@@ -122,8 +122,8 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
return found;
}
-void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
- pte_t pte)
+static void
+book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
unsigned long mas1, mas2;
u64 mas7_3;
@@ -183,6 +183,18 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
local_irq_restore(flags);
}
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ *
+ * This must always be called with the pte lock held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ if (is_vm_hugetlb_page(vma))
+ book3e_hugetlb_preload(vma, address, *ptep);
+}
+
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
struct hstate *hstate = hstate_file(vma->vm_file);