Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/asm-prototypes.h | 14
-rw-r--r--  arch/powerpc/include/asm/barrier.h | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h | 22
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h | 6
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-4k.h | 10
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 9
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 42
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 4
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush.h | 56
-rw-r--r--  arch/powerpc/include/asm/book3s/pgtable.h | 15
-rw-r--r--  arch/powerpc/include/asm/cpu_setup.h | 49
-rw-r--r--  arch/powerpc/include/asm/cpu_setup_power.h | 12
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 8
-rw-r--r--  arch/powerpc/include/asm/cputime.h | 2
-rw-r--r--  arch/powerpc/include/asm/dtl.h | 8
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 4
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 50
-rw-r--r--  arch/powerpc/include/asm/interrupt.h | 41
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 2
-rw-r--r--  arch/powerpc/include/asm/kfence.h | 15
-rw-r--r--  arch/powerpc/include/asm/kgdb.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 4
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 10
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 1
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 11
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 2
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h | 8
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pte-85xx.h (renamed from arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h) | 6
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/asm/nohash/hugetlb-e500.h (renamed from arch/powerpc/include/asm/nohash/hugetlb-book3e.h) | 8
-rw-r--r--  arch/powerpc/include/asm/nohash/mmu-e500.h (renamed from arch/powerpc/include/asm/nohash/mmu-book3e.h) | 0
-rw-r--r--  arch/powerpc/include/asm/nohash/mmu.h | 4
-rw-r--r--  arch/powerpc/include/asm/nohash/pgalloc.h | 2
-rw-r--r--  arch/powerpc/include/asm/nohash/pgtable.h | 30
-rw-r--r--  arch/powerpc/include/asm/nohash/pte-e500.h (renamed from arch/powerpc/include/asm/nohash/pte-book3e.h) | 6
-rw-r--r--  arch/powerpc/include/asm/nohash/tlbflush.h | 2
-rw-r--r--  arch/powerpc/include/asm/opal.h | 6
-rw-r--r--  arch/powerpc/include/asm/paca.h | 9
-rw-r--r--  arch/powerpc/include/asm/page.h | 8
-rw-r--r--  arch/powerpc/include/asm/paravirt.h | 12
-rw-r--r--  arch/powerpc/include/asm/paravirt_api_clock.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable-be-types.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable-types.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 19
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 7
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 52
-rw-r--r--  arch/powerpc/include/asm/processor.h | 21
-rw-r--r--  arch/powerpc/include/asm/ps3av.h | 2
-rw-r--r--  arch/powerpc/include/asm/ptrace.h | 9
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 6
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 1
-rw-r--r--  arch/powerpc/include/asm/runlatch.h | 6
-rw-r--r--  arch/powerpc/include/asm/sections.h | 17
-rw-r--r--  arch/powerpc/include/asm/setup.h | 5
-rw-r--r--  arch/powerpc/include/asm/synch.h | 2
-rw-r--r--  arch/powerpc/include/asm/syscall.h | 11
-rw-r--r--  arch/powerpc/include/asm/syscall_wrapper.h | 49
-rw-r--r--  arch/powerpc/include/asm/syscalls.h | 158
-rw-r--r--  arch/powerpc/include/asm/syscalls_32.h | 60
-rw-r--r--  arch/powerpc/include/asm/termios.h | 18
-rw-r--r--  arch/powerpc/include/asm/time.h | 5
-rw-r--r--  arch/powerpc/include/asm/udbg.h | 54
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/include/asm/vdso.h | 3
-rw-r--r--  arch/powerpc/include/asm/vdso/processor.h | 8
-rw-r--r--  arch/powerpc/include/asm/vdso/timebase.h | 2
-rw-r--r--  arch/powerpc/include/asm/xics.h | 1
68 files changed, 669 insertions, 360 deletions
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 81631e64dbeb..274bce76f5da 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -36,6 +36,7 @@ int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
int64_t opcode, uint64_t msr);
/* misc runtime */
+void enable_machine_check(void);
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);
extern s64 __ashldi3(s64, int);
@@ -55,19 +56,6 @@ struct kvm_vcpu;
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
-/* Patch sites */
-extern s32 patch__call_flush_branch_caches1;
-extern s32 patch__call_flush_branch_caches2;
-extern s32 patch__call_flush_branch_caches3;
-extern s32 patch__flush_count_cache_return;
-extern s32 patch__flush_link_stack_return;
-extern s32 patch__call_kvm_flush_link_stack;
-extern s32 patch__call_kvm_flush_link_stack_p9;
-extern s32 patch__memset_nocache, patch__memcpy_nocache;
-
-extern long flush_branch_caches;
-extern long kvm_flush_link_stack;
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index ef2d8b15eaab..e80b2c0e9315 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -86,7 +86,7 @@ do { \
#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT nop
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#elif defined(CONFIG_PPC_E500)
#define NOSPEC_BARRIER_SLOT nop; nop
#endif
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 40041ac713d9..75823f39e042 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -112,31 +112,11 @@ static inline bool pte_user(pte_t pte)
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
-/*
- * Protection used for kernel text. We want the debuggers to be able to
- * set breakpoints anywhere, so don't write protect the kernel text
- * on platforms where such control is possible.
- */
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
-#endif
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-
-/* Advertise special mapping type for AGP */
-#define PAGE_AGP (PAGE_KERNEL_NC)
-#define HAVE_PAGE_AGP
-
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index e1af0b394ceb..dd2cff53a111 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
/*
* Early pud pages allocated via memblock allocator
- * can't be directly freed to slab
+ * can't be directly freed to slab. KFENCE pages have
+ * both reserved and slab flags set so need to be freed
+ * kmem_cache_free.
*/
- if (PageReserved(page))
+ if (PageReserved(page) && !PageSlab(page))
free_reserved_page(page);
else
kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index 4e697bc2f4cd..48f21820afe2 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -26,16 +26,6 @@ static inline int pud_huge(pud_t pud)
return 0;
}
-static inline int pgd_huge(pgd_t pgd)
-{
- /*
- * leaf pte for huge page
- */
- if (radix_enabled())
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
- return 0;
-}
-#define pgd_huge pgd_huge
/*
* With radix , we have hugepage ptes in the pud and pmd entries. We don't
* need to setup hugepage directory for them. Our pte and page directory format
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index 34d1018896b3..2fce3498b000 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -30,15 +30,6 @@ static inline int pud_huge(pud_t pud)
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
-static inline int pgd_huge(pgd_t pgd)
-{
- /*
- * leaf pte for huge page
- */
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
-}
-#define pgd_huge pgd_huge
-
/*
* With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
* need to setup hugepage directory for them. Our pte and page directory format
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 392ff48f77df..c436d8422654 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -117,8 +117,7 @@
#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
-#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
- _PAGE_RW | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
/*
* _PAGE_CHG_MASK masks of bits that are to be preserved across
* pgprot changes
@@ -151,33 +150,17 @@
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
+#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC)
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_TOLERANT)
-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NON_IDEMPOTENT)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_TOLERANT)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
-/*
- * Protection used for kernel text. We want the debuggers to be able to
- * set breakpoints anywhere, so don't write protect the kernel text
- * on platforms where such control is possible.
- */
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
-#endif
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-#define PAGE_AGP (PAGE_KERNEL_NC)
-
#ifndef __ASSEMBLY__
/*
* page table defines
@@ -333,9 +316,6 @@ extern unsigned long pci_io_base;
#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
-/* Advertise special mapping type for AGP */
-#define HAVE_PAGE_AGP
-
#ifndef __ASSEMBLY__
/*
@@ -411,6 +391,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
* event of it not getting flushed for a long time the delay
* shouldn't really matter because there's no real memory
* pressure for swapout to react to. ]
+ *
+ * Note: this optimisation also exists in pte_needs_flush() and
+ * huge_pmd_needs_flush().
*/
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young ptep_test_and_clear_young
@@ -1123,7 +1106,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
}
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (radix_enabled())
@@ -1457,12 +1440,5 @@ static inline bool pud_is_leaf(pud_t pud)
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
-#define p4d_is_leaf p4d_is_leaf
-#define p4d_leaf p4d_is_leaf
-static inline bool p4d_is_leaf(p4d_t p4d)
-{
- return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE));
-}
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 8b762f282190..fab8332fe1ad 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -112,13 +112,11 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
-void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
-extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr);
+void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
#else
static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 206f920fe5b9..67655cd60545 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -163,6 +163,62 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
*/
}
+static inline bool __pte_flags_need_flush(unsigned long oldval,
+ unsigned long newval)
+{
+ unsigned long delta = oldval ^ newval;
+
+ /*
+ * The return value of this function doesn't matter for hash,
+ * ptep_modify_prot_start() does a pte_update() which does or schedules
+ * any necessary hash table update and flush.
+ */
+ if (!radix_enabled())
+ return true;
+
+ /*
+ * We do not expect kernel mappings or non-PTEs or not-present PTEs.
+ */
+ VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
+ VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
+ VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
+ VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));
+
+ /*
+ * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
+ *
+ * In theory, some changed software bits could be tolerated, in
+ * practice those should rarely if ever matter.
+ */
+
+ if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
+ return true;
+
+ /*
+ * If any of the above was present in old but cleared in new, flush.
+ * With the exception of _PAGE_ACCESSED, don't worry about flushing
+ * if that was cleared (see the comment in ptep_clear_flush_young()).
+ */
+ if ((delta & ~_PAGE_ACCESSED) & oldval)
+ return true;
+
+ return false;
+}
+
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
+}
+#define pte_needs_flush pte_needs_flush
+
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
+}
+#define huge_pmd_needs_flush huge_pmd_needs_flush
+
extern bool tlbie_capable;
extern bool tlbie_enabled;
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index e8269434ecbe..d18b748ea3ae 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -25,7 +25,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
-#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
/*
* This gets called at the end of handling a page fault, when
* the kernel has put a new PTE into the page table for the process.
@@ -35,10 +36,14 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* corresponding HPTE into the hash table ahead of time, instead of
* waiting for the inevitable extra hash-table miss exception.
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
-#else
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
-#endif
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return;
+ if (radix_enabled())
+ return;
+ __update_mmu_cache(vma, address, ptep);
+}
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/cpu_setup.h b/arch/powerpc/include/asm/cpu_setup.h
new file mode 100644
index 000000000000..30e2fe389502
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_setup.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_CPU_SETUP_H
+#define _ASM_POWERPC_CPU_SETUP_H
+void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_power7(void);
+void __restore_cpu_power8(void);
+void __restore_cpu_power9(void);
+void __restore_cpu_power10(void);
+
+void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440ep(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440epx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440gx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440grx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440spe(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440x5(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460ex(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460gt(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_603(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_604(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750cx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750fx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_7400(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_7410(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_745x(unsigned long offset, struct cpu_spec *spec);
+
+void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_pa6t(void);
+void __restore_cpu_ppc970(void);
+
+void __setup_cpu_e5500(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e6500(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_e5500(void);
+void __restore_cpu_e6500(void);
+#endif /* _ASM_POWERPC_CPU_SETUP_H */
diff --git a/arch/powerpc/include/asm/cpu_setup_power.h b/arch/powerpc/include/asm/cpu_setup_power.h
deleted file mode 100644
index 24be9131f803..000000000000
--- a/arch/powerpc/include/asm/cpu_setup_power.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2020 IBM Corporation
- */
-void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec);
-void __restore_cpu_power7(void);
-void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec);
-void __restore_cpu_power8(void);
-void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec);
-void __restore_cpu_power9(void);
-void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec);
-void __restore_cpu_power10(void);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index ae8c3e13cfce..757dbded11dc 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -463,7 +463,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2)
#ifdef CONFIG_PPC64
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500)
#else
#ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -510,7 +510,7 @@ enum {
#elif defined(CONFIG_44x)
CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
CPU_FTRS_E500 | CPU_FTRS_E500_2 |
#endif
#ifdef CONFIG_PPC_E500MC
@@ -521,7 +521,7 @@ enum {
#endif /* __powerpc64__ */
#ifdef CONFIG_PPC64
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500)
#else
@@ -584,7 +584,7 @@ enum {
#elif defined(CONFIG_44x)
CPU_FTRS_44X & CPU_FTRS_440x6 &
#endif
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
CPU_FTRS_E500 & CPU_FTRS_E500_2 &
#endif
#ifdef CONFIG_PPC_E500MC
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 6d2b27997492..431ae2343022 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -95,7 +95,7 @@ static notrace inline void account_stolen_time(void)
struct lppaca *lp = local_paca->lppaca_ptr;
if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
- accumulate_stolen_time();
+ pseries_accumulate_stolen_time();
}
#endif
}
diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h
index 1625888f27ef..4bcb9f9ac764 100644
--- a/arch/powerpc/include/asm/dtl.h
+++ b/arch/powerpc/include/asm/dtl.h
@@ -37,14 +37,6 @@ struct dtl_entry {
extern struct kmem_cache *dtl_cache;
extern rwlock_t dtl_access_lock;
-/*
- * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
- * reading from the dispatch trace log. If other code wants to consume
- * DTL entries, it can set this pointer to a function that will get
- * called once for each DTL entry that gets processed.
- */
-extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
-
extern void register_dtl_buffer(int cpu);
extern void alloc_dtl_buffers(unsigned long *time_limit);
extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 32ce0fb7548f..ea71f7245a63 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -7,8 +7,8 @@
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
-#include <asm/nohash/hugetlb-book3e.h>
+#elif defined(CONFIG_PPC_E500)
+#include <asm/nohash/hugetlb-e500.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 983551859891..77fa88c2aed0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -157,36 +157,18 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
- unsigned long flags;
-
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+ unsigned long flags = irq_soft_mask_return();
- asm volatile(
- "lbz %0,%1(13); stb %2,%1(13)"
- : "=&r" (flags)
- : "i" (offsetof(struct paca_struct, irq_soft_mask)),
- "r" (mask)
- : "memory");
+ irq_soft_mask_set(mask);
return flags;
}
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
- unsigned long flags, tmp;
-
- asm volatile(
- "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
- : "=&r" (flags), "=r" (tmp)
- : "i" (offsetof(struct paca_struct, irq_soft_mask)),
- "r" (mask)
- : "memory");
+ unsigned long flags = irq_soft_mask_return();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
-#endif
+ irq_soft_mask_set(flags | mask);
return flags;
}
@@ -489,6 +471,30 @@ static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned l
}
#endif /* CONFIG_PPC64 */
+static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
+{
+#ifdef CONFIG_PPC64
+ if (arch_irqs_disabled()) {
+ /*
+ * With soft-masking, MSR[EE] can change from 1 to 0
+ * asynchronously when irqs are disabled, and we don't want to
+ * set MSR[EE] back to 1 here if that has happened. A race-free
+ * way to do this is ensure EE is already 0. Another way it
+ * could be done is with a RESTART_TABLE handler, but that's
+ * probably overkill here.
+ */
+ msr &= ~MSR_EE;
+ mtmsr_isync(msr);
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else
+#endif
+ mtmsr_isync(msr);
+
+ return msr;
+}
+
+
#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 8069dbc4b8d1..4745bb9998bd 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -74,6 +74,19 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
+#ifdef CONFIG_PPC64
+/*
+ * WARN/BUG is handled with a program interrupt so minimise checks here to
+ * avoid recursion and maximise the chance of getting the first oops handled.
+ */
+#define INT_SOFT_MASK_BUG_ON(regs, cond) \
+do { \
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && \
+ (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
+ BUG_ON(cond); \
+} while (0)
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
@@ -170,8 +183,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
* context.
*/
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(!(regs->msr & MSR_EE));
+ INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
__hard_irq_enable();
} else {
__hard_RI_enable();
@@ -194,19 +206,15 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != INTERRUPT_PROGRAM) {
- CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(is_implicit_soft_masked(regs));
- }
-
- /* Move this under a debugging check */
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
- arch_irq_disabled_regs(regs))
- BUG_ON(search_kernel_restart_table(regs->nip));
+ if (TRAP(regs) != INTERRUPT_PROGRAM)
+ CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
+ ct_state() != CONTEXT_IDLE);
+ INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
+ INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
+ search_kernel_restart_table(regs->nip));
}
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
+ INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
+ !(regs->msr & MSR_EE));
#endif
booke_restore_dbcr0();
@@ -281,7 +289,7 @@ static inline bool nmi_disables_ftrace(struct pt_regs *regs)
if (TRAP(regs) == INTERRUPT_PERFMON)
return false;
}
- if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
if (TRAP(regs) == INTERRUPT_PERFMON)
return false;
}
@@ -665,8 +673,7 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
local_irq_enable();
}
-long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
- unsigned long r0, struct pt_regs *regs);
+long system_call_exception(struct pt_regs *regs, unsigned long r0);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index f8d122d16af4..a1ddba01e7d1 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -3,7 +3,7 @@
#define _ASM_POWERPC_KEXEC_H
#ifdef __KERNEL__
-#if defined(CONFIG_FSL_BOOKE) || defined(CONFIG_44x)
+#if defined(CONFIG_PPC_85xx) || defined(CONFIG_44x)
/*
* On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index a9846b68c6b9..6fd2b4d486c5 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -11,11 +11,25 @@
#include <linux/mm.h>
#include <asm/pgtable.h>
+#ifdef CONFIG_PPC64_ELF_ABI_V1
+#define ARCH_FUNC_PREFIX "."
+#endif
+
static inline bool arch_kfence_init_pool(void)
{
return true;
}
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ struct page *page = virt_to_page(addr);
+
+ __kernel_map_pages(page, 1, !protect);
+
+ return true;
+}
+#else
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +43,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
}
+#endif
#endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
index a9e098a3b881..715c18b75334 100644
--- a/arch/powerpc/include/asm/kgdb.h
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -52,7 +52,7 @@ static inline void arch_kgdb_breakpoint(void)
/* On non-E500 family PPC32 we determine the size by picking the last
* register we need, but on E500 we skip sections so we list what we
* need to store, and add it up. */
-#ifndef CONFIG_E500
+#ifndef CONFIG_PPC_E500
#define MAXREG (PT_FPSCR+1)
#else
/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c2b003550dc9..caea15dcb91d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -443,7 +443,7 @@ struct kvmppc_passthru_irqmap {
};
#endif
-# ifdef CONFIG_PPC_FSL_BOOK3E
+# ifdef CONFIG_PPC_E500
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
# else
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9f625af3b65b..bfacf12784dd 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -104,7 +104,6 @@ extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
unsigned int gtlb_idx);
-extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
@@ -153,7 +152,6 @@ extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
-extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
@@ -162,8 +160,6 @@ extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
-extern long kvmppc_prepare_vrma(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index c390ec377bae..34d44cb17c87 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -104,14 +104,18 @@ struct lppaca {
volatile __be32 dispersion_count; /* dispatch changed physical cpu */
volatile __be64 cmo_faults; /* CMO page fault count */
volatile __be64 cmo_fault_time; /* CMO page fault time */
- u8 reserved10[104];
+ u8 reserved10[64]; /* [S]PURR expropriated/donated */
+ volatile __be64 enqueue_dispatch_tb; /* Total TB enqueue->dispatch */
+ volatile __be64 ready_enqueue_tb; /* Total TB ready->enqueue */
+ volatile __be64 wait_ready_tb; /* Total TB wait->ready */
+ u8 reserved11[16];
/* cacheline 4-5 */
__be32 page_ins; /* CMO Hint - # page ins by OS */
- u8 reserved11[148];
+ u8 reserved12[148];
volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
- u8 reserved12[96];
+ u8 reserved13[96];
} ____cacheline_aligned;
#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8cb83600c434..378b8d5836a7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -204,7 +204,6 @@ struct machdep_calls {
extern void e500_idle(void);
extern void power4_idle(void);
extern void ppc6xx_idle(void);
-extern void book3e_idle(void);
/*
* ppc_md contains a copy of the machine description structure for the
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 860d0290ca4d..94b981152667 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -120,6 +120,9 @@
*/
#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
+// NX paste RMA reject in DSI
+#define MMU_FTR_NX_DSI ASM_CONST(0x80000000)
+
/* MMU feature bit sets for various CPUs */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 (MMU_FTR_HPTE_TABLE | MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE)
#define MMU_FTRS_POWER MMU_FTRS_DEFAULT_HPTE_ARCH_V2
@@ -141,7 +144,7 @@
typedef pte_t *pgtable_t;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#include <asm/percpu.h>
DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif
@@ -162,7 +165,7 @@ enum {
#elif defined(CONFIG_44x)
MMU_FTR_TYPE_44x |
#endif
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX |
#endif
#ifdef CONFIG_PPC_BOOK3S_32
@@ -181,7 +184,7 @@ enum {
#endif
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_TYPE_RADIX |
- MMU_FTR_GTSE |
+ MMU_FTR_GTSE | MMU_FTR_NX_DSI |
#endif /* CONFIG_PPC_RADIX_MMU */
#endif
#ifdef CONFIG_PPC_KUAP
@@ -211,7 +214,7 @@ enum {
#elif defined(CONFIG_44x)
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
#endif
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 3f25bd3e14eb..c1ea270bb848 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -31,7 +31,6 @@ extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
extern long mm_iommu_put(struct mm_struct *mm,
struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
-extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
@@ -117,7 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
}
#endif
-extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 9091e4904a6b..0d40b33184eb 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -130,10 +130,10 @@ void unmap_kernel_page(unsigned long va);
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
-#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
-#include <asm/nohash/pte-book3e.h>
-#elif defined(CONFIG_FSL_BOOKE)
-#include <asm/nohash/32/pte-fsl-booke.h>
+#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
+#include <asm/nohash/pte-e500.h>
+#elif defined(CONFIG_PPC_85xx)
+#include <asm/nohash/32/pte-85xx.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif
diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
index 0fc1bd42bb3e..93fb8e11a3f1 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
-#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H
#ifdef __KERNEL__
/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
@@ -71,4 +71,4 @@
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */
+#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 599921cc257e..879e9a6e5a87 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -70,7 +70,7 @@
/*
* Include the PTE bits definitions
*/
-#include <asm/nohash/pte-book3e.h>
+#include <asm/nohash/pte-e500.h>
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
diff --git a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
index ecd8694cb229..8f04ad20e040 100644
--- a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h
+++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
-#define _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
+#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H
+#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H
static inline pte_t *hugepd_page(hugepd_t hpd)
{
@@ -30,7 +30,7 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
- /* We use the old format for PPC_FSL_BOOK3E */
+ /* We use the old format for PPC_E500 */
*hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
}
@@ -42,4 +42,4 @@ static inline int check_and_get_huge_psize(int shift)
return shift_to_mmu_psize(shift);
}
-#endif /* _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H */
+#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-e500.h
index e43a418d3ccd..e43a418d3ccd 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-e500.h
diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h
index edc793e5f08f..e264be219fdb 100644
--- a/arch/powerpc/include/asm/nohash/mmu.h
+++ b/arch/powerpc/include/asm/nohash/mmu.h
@@ -8,9 +8,9 @@
#elif defined(CONFIG_44x)
/* 44x-style software loaded TLB */
#include <asm/nohash/32/mmu-44x.h>
-#elif defined(CONFIG_PPC_BOOK3E_MMU)
+#elif defined(CONFIG_PPC_E500)
/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
-#include <asm/nohash/mmu-book3e.h>
+#include <asm/nohash/mmu-e500.h>
#elif defined (CONFIG_PPC_8xx)
/* Motorola/Freescale 8xx software loaded TLB */
#include <asm/nohash/32/mmu-8xx.h>
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index 29c43665a753..4b62376318e1 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -15,7 +15,7 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
{
}
-#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* !CONFIG_PPC_BOOK3E_64 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index b499da6c1a99..d9067dfc531c 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -11,31 +11,11 @@
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
-/*
- * Protection used for kernel text. We want the debuggers to be able to
- * set breakpoints anywhere, so don't write protect the kernel text
- * on platforms where such control is possible.
- */
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
-#endif
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-
-/* Advertise special mapping type for AGP */
-#define PAGE_AGP (PAGE_KERNEL_NC)
-#define HAVE_PAGE_AGP
-
#ifndef __ASSEMBLY__
/* Generic accessors to PTE bits */
@@ -277,12 +257,6 @@ static inline int pud_huge(pud_t pud)
return 0;
}
-static inline int pgd_huge(pgd_t pgd)
-{
- return 0;
-}
-#define pgd_huge pgd_huge
-
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
@@ -292,7 +266,7 @@ static inline int pgd_huge(pgd_t pgd)
* We use it to ensure coherency between the i-cache and d-cache
* for the page which has just been mapped in.
*/
-#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-e500.h
index f798640422c2..0934e8965e4e 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
-#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
+#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H
+#define _ASM_POWERPC_NOHASH_PTE_E500_H
#ifdef __KERNEL__
/* PTE bit definitions for processors compliant to the Book3E
@@ -126,4 +126,4 @@ static inline pte_t pte_mkexec(pte_t pte)
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */
+#endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
index 698935d4f72d..bdaf34ad41ea 100644
--- a/arch/powerpc/include/asm/nohash/tlbflush.h
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -18,7 +18,7 @@
/*
* TLB flushing for software loaded TLB chips
*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * TODO: (CONFIG_PPC_85xx) determine if flush_tlb_range &
* flush_tlb_kernel_range are best implemented as tlbia vs
* specific tlbie's
*/
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index bfd3142cd0ba..726125a534de 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -324,16 +324,10 @@ extern int opal_flush_console(uint32_t vtermno);
extern void hvc_opal_init_early(void);
-extern int opal_notifier_register(struct notifier_block *nb);
-extern int opal_notifier_unregister(struct notifier_block *nb);
-
extern int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb);
extern int opal_message_notifier_unregister(enum opal_msg_type msg_type,
struct notifier_block *nb);
-extern void opal_notifier_enable(void);
-extern void opal_notifier_disable(void);
-extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
extern int opal_async_get_token_interruptible(void);
extern int opal_async_release_token(int token);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 4d7aaab82702..09f1790d0ae1 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -18,7 +18,7 @@
#include <asm/lppaca.h>
#include <asm/mmu.h>
#include <asm/page.h>
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#include <asm/exception-64e.h>
#else
#include <asm/exception-64s.h>
@@ -127,7 +127,7 @@ struct paca_struct {
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
u64 exgen[8] __aligned(0x40);
/* Keep pgd in the same cacheline as the start of extlb */
pgd_t *pgd __aligned(0x40); /* Current PGD */
@@ -151,7 +151,7 @@ struct paca_struct {
void *dbg_kstack;
struct tlb_core_data tcd;
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
#ifdef CONFIG_PPC_64S_HASH_MMU
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
@@ -168,7 +168,7 @@ struct paca_struct {
#ifdef CONFIG_PPC64
u64 exit_save_r1; /* Syscall/interrupt R1 save */
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
u16 trap_save; /* Used when bad stack is encountered */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
@@ -263,7 +263,6 @@ struct paca_struct {
u64 l1d_flush_size;
#endif
#ifdef CONFIG_PPC_PSERIES
- struct rtas_args *rtas_args_reentrant;
u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */
#endif /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e5f75c70eda8..edf1dd1b0ca9 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -31,7 +31,7 @@ extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT 19 /* 512k pages */
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#elif defined(CONFIG_PPC_E500)
#define HPAGE_SHIFT 22 /* 4M pages */
#endif
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
@@ -308,12 +308,6 @@ static inline bool pfn_valid(unsigned long pfn)
#include <asm/pgtable-types.h>
#endif
-
-#ifndef CONFIG_HUGETLB_PAGE
-#define is_hugepd(pdep) (0)
-#define pgd_huge(pgd) (0)
-#endif /* CONFIG_HUGETLB_PAGE */
-
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index eb7df559ae74..f5ba1a3c41f8 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -21,6 +21,18 @@ static inline bool is_shared_processor(void)
return static_branch_unlikely(&shared_processor);
}
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 pseries_paravirt_steal_clock(int cpu);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return pseries_paravirt_steal_clock(cpu);
+}
+#endif
+
/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
diff --git a/arch/powerpc/include/asm/paravirt_api_clock.h b/arch/powerpc/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000000..d25ca7ac57c7
--- /dev/null
+++ b/arch/powerpc/include/asm/paravirt_api_clock.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/paravirt.h>
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index b169bbf95fcb..82633200b500 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -101,6 +101,7 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
return pmd_raw(old) == prev;
}
+#ifdef CONFIG_ARCH_HAS_HUGEPD
typedef struct { __be64 pdbe; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
@@ -108,5 +109,6 @@ static inline unsigned long hpd_val(hugepd_t x)
{
return be64_to_cpu(x.pdbe);
}
+#endif
#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index efed0db7b1db..082c85cc09b1 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -83,11 +83,13 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
}
#endif
+#ifdef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { (x) })
static inline unsigned long hpd_val(hugepd_t x)
{
return x.pd;
}
+#endif
#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 33f4bf8d22b0..283f40d05a4d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -20,6 +20,25 @@ struct mm_struct;
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
#ifndef __ASSEMBLY__
#ifndef MAX_PTRS_PER_PGD
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c6d724104ed1..21e33e46f4b8 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -330,6 +330,7 @@
#define __PPC_XSP(s) ((((s) & 0x1e) | (((s) >> 5) & 0x1)) << 21)
#define __PPC_XTP(s) __PPC_XSP(s)
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
+#define __PPC_PL(p) (((p) & 0x3) << 16)
#define __PPC_WC(w) (((w) & 0x3) << 21)
#define __PPC_WS(w) (((w) & 0x1f) << 11)
#define __PPC_SH(s) __PPC_WS(s)
@@ -388,7 +389,8 @@
#define PPC_RAW_RFDI (0x4c00004e)
#define PPC_RAW_RFMCI (0x4c00004c)
#define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
-#define PPC_RAW_WAIT(w) (0x7c00007c | __PPC_WC(w))
+#define PPC_RAW_WAIT_v203 (0x7c00007c)
+#define PPC_RAW_WAIT(w, p) (0x7c00003c | __PPC_WC(w) | __PPC_PL(p))
#define PPC_RAW_TLBIE(lp, a) (0x7c000264 | ___PPC_RB(a) | ___PPC_RS(lp))
#define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \
(0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
@@ -606,7 +608,8 @@
#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
-#define PPC_WAIT(w) stringify_in_c(.long PPC_RAW_WAIT(w))
+#define PPC_WAIT_v203 stringify_in_c(.long PPC_RAW_WAIT_v203)
+#define PPC_WAIT(w, p) stringify_in_c(.long PPC_RAW_WAIT(w, p))
#define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_RAW_TLBIE(lp, a))
#define PPC_TLBIE_5(rb, rs, ric, prs, r) \
stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r))
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 83c02f5a7f2a..753a2757bcd4 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -34,6 +34,20 @@
.endm
/*
+ * This expands to a sequence of register clears for regs start to end
+ * inclusive, of the form:
+ *
+ * li rN, 0
+ */
+.macro ZEROIZE_REGS start, end
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ li .Lreg, 0
+ .Lreg=.Lreg+1
+ .endr
+.endm
+
+/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
@@ -49,6 +63,14 @@
#define REST_NVGPRS(base) REST_GPRS(13, 31, base)
#endif
+#define ZEROIZE_GPRS(start, end) ZEROIZE_REGS start, end
+#ifdef __powerpc64__
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(14, 31)
+#else
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(13, 31)
+#endif
+#define ZEROIZE_GPR(n) ZEROIZE_GPRS(n, n)
+
#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
#define REST_GPR(n, base) REST_GPRS(n, n, base)
@@ -305,6 +327,12 @@ n:
#ifdef __powerpc64__
+#define __LOAD_PACA_TOC(reg) \
+ ld reg,PACATOC(r13)
+
+#define LOAD_PACA_TOC() \
+ __LOAD_PACA_TOC(r2)
+
#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr
#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \
@@ -315,7 +343,19 @@ n:
rldimi reg, tmp, 32, 0
#define LOAD_REG_ADDR(reg,name) \
- ld reg,name@got(r2)
+ addis reg,r2,name@toc@ha; \
+ addi reg,reg,name@toc@l
+
+#ifdef CONFIG_PPC_BOOK3E_64
+/*
+ * This is used in register-constrained interrupt handlers. Not to be used
+ * by BOOK3S. ld complains with "got/toc optimization is not supported" if r2
+ * is not used for the TOC offset, so use @got(tocreg). If the interrupt
+ * handlers saved r2 instead, LOAD_REG_ADDR could be used.
+ */
+#define LOAD_REG_ADDR_ALTTOC(reg,tocreg,name) \
+ ld reg,name@got(tocreg)
+#endif
#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
#define ADDROFF(name) 0
@@ -342,7 +382,7 @@ n:
#endif
/* various errata or part fixups */
-#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)
#define MFTB(dest) \
90: mfspr dest, SPRN_TBRL; \
BEGIN_FTR_SECTION_NESTED(96); \
@@ -709,7 +749,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
* kernel is built for.
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#define FIXUP_ENDIAN
#else
/*
@@ -749,7 +789,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
.long 0x2402004c; /* hrfid */ \
191:
-#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* !CONFIG_PPC_BOOK3E_64 */
#endif /* __ASSEMBLY__ */
@@ -768,7 +808,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
stringify_in_c(.llong (_target);) \
stringify_in_c(.previous)
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
ori reg,reg,BUCSR_INIT@l; \
@@ -776,6 +816,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
isync;
#else
#define BTB_FLUSH(reg)
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index fdfaae194ddd..631802999d59 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -75,7 +75,6 @@ extern int _chrp_type;
struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
-void release_thread(struct task_struct *);
#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
@@ -355,11 +354,23 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#ifdef CONFIG_PPC64
-#define spin_begin() HMT_low()
+#define spin_begin() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "or 1,1,1", /* HMT_LOW */ \
+ "nop", /* v3.1 uses pause_short in cpu_relax instead */ \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
-#define spin_cpu_relax() barrier()
+#define spin_cpu_relax() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "nop", /* Before v3.1 use priority nops in spin_begin/end */ \
+ PPC_WAIT(2, 0), /* aka pause_short */ \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
-#define spin_end() HMT_medium()
+#define spin_end() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "or 2,2,2", /* HMT_MEDIUM */ \
+ "nop", \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
#endif
@@ -426,6 +437,8 @@ extern int fix_alignment(struct pt_regs *);
#endif
int do_mathemu(struct pt_regs *regs);
+int do_spe_mathemu(struct pt_regs *regs);
+int speround_handler(struct pt_regs *regs);
/* VMX copying */
int enter_vmx_usercopy(void);
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 82db78fc169d..c8b0f2ffcd35 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -726,6 +726,4 @@ extern int ps3av_video_mode2res(u32, u32 *, u32 *);
extern int ps3av_video_mute(int);
extern int ps3av_audio_mute(int);
extern int ps3av_audio_mute_analog(int);
-extern int ps3av_dev_open(void);
-extern int ps3av_dev_close(void);
#endif /* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index a03403695cd4..2efec6d87049 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -99,6 +99,13 @@ struct pt_regs
#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
+// Always displays as "REGS" in memory dumps
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753)
+#else
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x53474552)
+#endif
+
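[Editor's note] Two byte-swapped constants are needed because the marker is stored as a native-endian 32-bit word; on both byte orders the bytes land in memory as 'R' 'E' 'G' 'S'. A quick illustrative check (not part of the patch):

	u32 marker = STACK_FRAME_REGS_MARKER;
	char buf[5];

	memcpy(buf, &marker, sizeof(marker));
	buf[4] = '\0';
	pr_debug("frame marker: %s\n", buf);	/* "REGS" on both BE and LE */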
#ifdef __powerpc64__
/*
@@ -115,7 +122,6 @@ struct pt_regs
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
-#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
@@ -136,7 +142,6 @@ struct pt_regs
#define KERNEL_REDZONE_SIZE 0
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
-#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER 2
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 17b8dcd9a40d..af56980b6cdb 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -246,7 +246,7 @@
#define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */
#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
/* All e500 */
#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
@@ -282,7 +282,7 @@
#endif
/* Bit definitions for the HID1 */
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
/* e500v1/v2 */
#define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */
#define HID1_RFXE 0x00020000 /* Read fault exception enable */
@@ -545,7 +545,7 @@
#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
#define TCR_ARE 0x00400000 /* Auto Reload Enable */
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
#define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \
(((tcr) & 0x1E0000) >> 15))
#else
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 00531af17ce0..56319aea646e 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -240,7 +240,6 @@ extern struct rtas_t rtas;
extern int rtas_token(const char *service);
extern int rtas_service_present(const char *service);
extern int rtas_call(int token, int, int, int *, ...);
-int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...);
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
int nret, ...);
extern void __noreturn rtas_restart(char *cmd);
diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h
index cfb390edf7d0..ceb66d761fe1 100644
--- a/arch/powerpc/include/asm/runlatch.h
+++ b/arch/powerpc/include/asm/runlatch.h
@@ -19,10 +19,9 @@ extern void __ppc64_runlatch_off(void);
do { \
if (cpu_has_feature(CPU_FTR_CTRL) && \
test_thread_local_flags(_TLF_RUNLATCH)) { \
- unsigned long msr = mfmsr(); \
__hard_irq_disable(); \
__ppc64_runlatch_off(); \
- if (msr & MSR_EE) \
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
__hard_irq_enable(); \
} \
} while (0)
@@ -31,10 +30,9 @@ extern void __ppc64_runlatch_off(void);
do { \
if (cpu_has_feature(CPU_FTR_CTRL) && \
!test_thread_local_flags(_TLF_RUNLATCH)) { \
- unsigned long msr = mfmsr(); \
__hard_irq_disable(); \
__ppc64_runlatch_on(); \
- if (msr & MSR_EE) \
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
__hard_irq_enable(); \
} \
} while (0)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 8be2c491c733..9c00c9c0ca8f 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -13,15 +13,26 @@ typedef struct func_desc func_desc_t;
#include <asm-generic/sections.h>
extern char __head_end[];
+extern char __srwx_boundary[];
+
+/* Patch sites */
+extern s32 patch__call_flush_branch_caches1;
+extern s32 patch__call_flush_branch_caches2;
+extern s32 patch__call_flush_branch_caches3;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
+extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+extern long flush_branch_caches;
+extern long kvm_flush_link_stack;
#ifdef __powerpc64__
extern char __start_interrupts[];
extern char __end_interrupts[];
-extern char __prom_init_toc_start[];
-extern char __prom_init_toc_end[];
-
#ifdef CONFIG_PPC_POWERNV
extern char start_real_trampolines[];
extern char end_real_trampolines[];
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index d8c28902cf59..e29e83f8a89c 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -7,7 +7,6 @@
#ifndef __ASSEMBLY__
extern void ppc_printk_progress(char *s, unsigned short hex);
-extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
@@ -70,7 +69,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }
#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
void __init setup_spectre_v2(void);
#else
static inline void setup_spectre_v2(void) {}
@@ -89,6 +88,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
unsigned long pp, unsigned long r6,
unsigned long r7, unsigned long kbase);
+extern struct seq_buf ppc_hw_desc;
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 7130176d8cb8..b0b4c64870d7 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -44,7 +44,7 @@ static inline void ppc_after_tlbiel_barrier(void)
#if defined(__powerpc64__)
# define LWSYNC lwsync
-#elif defined(CONFIG_E500)
+#elif defined(CONFIG_PPC_E500)
# define LWSYNC \
START_LWSYNC_SECTION(96); \
sync; \
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 25fc8ad9a27a..3dd36c5e334a 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -14,9 +14,16 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+typedef long (*syscall_fn)(const struct pt_regs *);
+#else
+typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+#endif
+
/* ftrace syscalls requires exporting the sys_call_table */
-extern const unsigned long sys_call_table[];
-extern const unsigned long compat_sys_call_table[];
+extern const syscall_fn sys_call_table[];
+extern const syscall_fn compat_sys_call_table[];
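[Editor's note] With the tables now holding typed syscall_fn pointers, a dispatcher can index them and hand the whole register frame to the handler when the wrapper is in use. A simplified sketch, not the exact dispatcher code:

	long ret = -ENOSYS;
	unsigned long nr = regs->gpr[0];	/* syscall number is passed in r0 */

	if (nr < NR_syscalls)
		ret = sys_call_table[nr](regs);	/* handler unpacks its own arguments */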
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
diff --git a/arch/powerpc/include/asm/syscall_wrapper.h b/arch/powerpc/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..67486c67e8a2
--- /dev/null
+++ b/arch/powerpc/include/asm/syscall_wrapper.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - powerpc specific wrappers to syscall definitions
+ *
+ * Based on arch/{x86,arm64}/include/asm/syscall_wrapper.h
+ */
+
+#ifndef __ASM_POWERPC_SYSCALL_WRAPPER_H
+#define __ASM_POWERPC_SYSCALL_WRAPPER_H
+
+struct pt_regs;
+
+#define SC_POWERPC_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->gpr[3],,regs->gpr[4],,regs->gpr[5] \
+ ,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8])
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ long sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ long sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ long sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \
+ long sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ long sys_##name(const struct pt_regs *regs); \
+ long __weak sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#endif // __ASM_POWERPC_SYSCALL_WRAPPER_H
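[Editor's note] To see what the wrapper produces, trace a hypothetical two-argument definition through __SYSCALL_DEFINEx above: SYSCALL_DEFINE2(example, unsigned int, fd, unsigned long, arg) (the name is invented) expands to roughly the following, omitting the error-injection annotation and the __SC_TEST/__PROTECT bookkeeping:

	long sys_example(const struct pt_regs *regs);	/* what sys_call_table points at */
	static long __se_sys_example(long fd, long arg);
	static inline long __do_sys_example(unsigned int fd, unsigned long arg);

	long sys_example(const struct pt_regs *regs)
	{
		/* arguments are lifted straight out of the saved user registers */
		return __se_sys_example(regs->gpr[3], regs->gpr[4]);
	}

	static long __se_sys_example(long fd, long arg)
	{
		return __do_sys_example((unsigned int)fd, (unsigned long)arg);
	}

	static inline long __do_sys_example(unsigned int fd, unsigned long arg)
	{
		/* body written by the syscall author */
	}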
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index a2b13e55254f..a1142496cd58 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -8,49 +8,147 @@
#include <linux/types.h>
#include <linux/compat.h>
+#include <asm/syscall.h>
+#ifdef CONFIG_PPC64
+#include <asm/syscalls_32.h>
+#endif
+#include <asm/unistd.h>
+#include <asm/ucontext.h>
+
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+long sys_ni_syscall(void);
+#else
+long sys_ni_syscall(const struct pt_regs *regs);
+#endif
+
struct rtas_args;
-asmlinkage long sys_mmap(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t offset);
-asmlinkage long sys_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-asmlinkage long ppc64_personality(unsigned long personality);
-asmlinkage long sys_rtas(struct rtas_args __user *uargs);
-int ppc_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct __kernel_old_timeval __user *tvp);
-long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
- u32 len_high, u32 len_low);
+/*
+ * long long munging:
+ * The 32 bit ABI passes long longs in an odd even register pair.
+ * High and low parts are swapped depending on endian mode,
+ * so define a macro (similar to mips linux32) to handle that.
+ */
+#ifdef __LITTLE_ENDIAN__
+#define merge_64(low, high) (((u64)high << 32) | low)
+#else
+#define merge_64(high, low) (((u64)high << 32) | low)
+#endif
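[Editor's note] merge_64() is what the ppc_/compat_ppc_ wrappers declared further down use to stitch such a register pair back into one 64-bit value before calling the generic implementation. An illustrative body (the prototype is in this header; the body shown is a sketch, not part of the patch):

	/* 32-bit pread64: the 64-bit file position arrives as two 32-bit registers. */
	long sys_ppc_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
			     u32 reg6, u32 pos1, u32 pos2)
	{
		return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2));
	}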
+
+/*
+ * PowerPC architecture-specific syscalls
+ */
+
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+
+long sys_rtas(struct rtas_args __user *uargs);
+#ifdef CONFIG_PPC64
+long sys_ppc64_personality(unsigned long personality);
#ifdef CONFIG_COMPAT
-unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
+long compat_sys_ppc64_personality(unsigned long personality);
+#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_PPC64 */
-compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
- u32 reg6, u32 pos1, u32 pos2);
+long sys_swapcontext(struct ucontext __user *old_ctx,
+ struct ucontext __user *new_ctx, long ctx_size);
+long sys_mmap(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t offset);
+long sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+long sys_switch_endian(void);
-compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count,
- u32 reg6, u32 pos1, u32 pos2);
+#ifdef CONFIG_PPC32
+long sys_sigreturn(void);
+long sys_debug_setcontext(struct ucontext __user *ctx, int ndbg,
+ struct sig_dbg_op __user *dbg);
+#endif
-compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count);
+long sys_rt_sigreturn(void);
-int compat_sys_truncate64(const char __user *path, u32 reg4,
- unsigned long len1, unsigned long len2);
+long sys_subpage_prot(unsigned long addr,
+ unsigned long len, u32 __user *map);
-long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, u32 len1, u32 len2);
+#ifdef CONFIG_COMPAT
+long compat_sys_swapcontext(struct ucontext32 __user *old_ctx,
+ struct ucontext32 __user *new_ctx,
+ int ctx_size);
+long compat_sys_old_getrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+long compat_sys_sigreturn(void);
+long compat_sys_rt_sigreturn(void);
+#endif /* CONFIG_COMPAT */
-int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1,
- unsigned long len2);
+/*
+ * Architecture specific signatures required by long long munging:
+ * The 32 bit ABI passes long longs in an odd even register pair.
+ * The following signatures provide a machine long parameter for
+ * each register that will be supplied. The implementation is
+ * responsible for combining parameter pairs.
+ */
-long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
- size_t len, int advice);
+#ifdef CONFIG_PPC32
+long sys_ppc_pread64(unsigned int fd,
+ char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_pwrite64(unsigned int fd,
+ const char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_readahead(int fd, u32 r4,
+ u32 offset1, u32 offset2, u32 count);
+long sys_ppc_truncate64(const char __user *path, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
+ size_t len, int advice);
+#endif
+#ifdef CONFIG_COMPAT
+long compat_sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+long compat_sys_ppc_pread64(unsigned int fd,
+ char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long compat_sys_ppc_pwrite64(unsigned int fd,
+ const char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long compat_sys_ppc_readahead(int fd, u32 r4,
+ u32 offset1, u32 offset2, u32 count);
+long compat_sys_ppc_truncate64(const char __user *path, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long compat_sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long compat_sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
+ size_t len, int advice);
+long compat_sys_ppc_sync_file_range2(int fd, unsigned int flags,
+ unsigned int offset1,
+ unsigned int offset2,
+ unsigned int nbytes1,
+ unsigned int nbytes2);
+#endif /* CONFIG_COMPAT */
-long compat_sys_sync_file_range2(int fd, unsigned int flags,
- unsigned int offset1, unsigned int offset2,
- unsigned int nbytes1, unsigned int nbytes2);
+#if defined(CONFIG_PPC32) || defined(CONFIG_COMPAT)
+long sys_ppc_fadvise64_64(int fd, int advice,
+ u32 offset_high, u32 offset_low,
+ u32 len_high, u32 len_low);
#endif
+#else
+
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, entry) \
+ long entry(const struct pt_regs *regs);
+
+#ifdef CONFIG_PPC64
+#include <asm/syscall_table_64.h>
+#else
+#include <asm/syscall_table_32.h>
+#endif /* CONFIG_PPC64 */
+
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/syscalls_32.h b/arch/powerpc/include/asm/syscalls_32.h
new file mode 100644
index 000000000000..749255568be9
--- /dev/null
+++ b/arch/powerpc/include/asm/syscalls_32.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_SYSCALLS_32_H
+#define _ASM_POWERPC_SYSCALLS_32_H
+
+#include <linux/compat.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+/*
+ * Data types and macros for providing 32b PowerPC support.
+ */
+
+/* These are here to support 32-bit syscalls on a 64-bit kernel. */
+
+struct pt_regs32 {
+ unsigned int gpr[32];
+ unsigned int nip;
+ unsigned int msr;
+ unsigned int orig_gpr3; /* Used for restarting system calls */
+ unsigned int ctr;
+ unsigned int link;
+ unsigned int xer;
+ unsigned int ccr;
+ unsigned int mq; /* 601 only (not used at present) */
+ unsigned int trap; /* Reason for being here */
+ unsigned int dar; /* Fault registers */
+ unsigned int dsisr;
+ unsigned int result; /* Result of a system call */
+};
+
+struct sigcontext32 {
+ unsigned int _unused[4];
+ int signal;
+ compat_uptr_t handler;
+ unsigned int oldmask;
+ compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */
+};
+
+struct mcontext32 {
+ elf_gregset_t32 mc_gregs;
+ elf_fpregset_t mc_fregs;
+ unsigned int mc_pad[2];
+ elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
+ elf_vsrreghalf_t32 mc_vsregs __attribute__((__aligned__(16)));
+};
+
+struct ucontext32 {
+ unsigned int uc_flags;
+ unsigned int uc_link;
+ compat_stack_t uc_stack;
+ int uc_pad[7];
+ compat_uptr_t uc_regs; /* points to uc_mcontext field */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ /* glibc has 1024-bit signal masks, ours are 64-bit */
+ int uc_maskext[30];
+ int uc_pad2[3];
+ struct mcontext32 uc_mcontext;
+};
+
+#endif // _ASM_POWERPC_SYSCALLS_32_H
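[Editor's note] These layouts describe what 32-bit userspace hands a 64-bit kernel during compat signal handling; the register area is reached through the 32-bit uc_regs pointer. A hypothetical helper (not part of this file) showing how that pointer would be followed:

	static int copy_compat_mcontext(struct ucontext32 __user *uctx,
					struct mcontext32 *mc)
	{
		compat_uptr_t p;

		if (get_user(p, &uctx->uc_regs))	/* 32-bit user pointer */
			return -EFAULT;
		if (copy_from_user(mc, compat_ptr(p), sizeof(*mc)))
			return -EFAULT;
		return 0;
	}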
diff --git a/arch/powerpc/include/asm/termios.h b/arch/powerpc/include/asm/termios.h
deleted file mode 100644
index 205de8f8a9d3..000000000000
--- a/arch/powerpc/include/asm/termios.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Liberally adapted from alpha/termios.h. In particular, the c_cc[]
- * fields have been reordered so that termio & termios share the
- * common subset in the same order (for brain dead programs that don't
- * know or care about the differences).
- */
-#ifndef _ASM_POWERPC_TERMIOS_H
-#define _ASM_POWERPC_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-/* ^C ^\ del ^U ^D 1 0 0 0 0 ^W ^R ^Z ^Q ^S ^V ^U */
-#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
-
-#include <asm-generic/termios-base.h>
-
-#endif /* _ASM_POWERPC_TERMIOS_H */
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1e5643a9b1f2..9f50766c4623 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -116,8 +116,9 @@ unsigned long long tb_to_ns(unsigned long long tb_ticks);
void timer_broadcast_interrupt(void);
-/* SPLPAR */
-void accumulate_stolen_time(void);
+/* SPLPAR and VIRT_CPU_ACCOUNTING_NATIVE */
+void pseries_accumulate_stolen_time(void);
+u64 pseries_calculate_stolen_time(u64 stop_tb);
#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index b4aa0d88ce2c..b1f094728b35 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -15,13 +15,13 @@ extern void (*udbg_flush)(void);
extern int (*udbg_getc)(void);
extern int (*udbg_getc_poll)(void);
-extern void udbg_puts(const char *s);
-extern int udbg_write(const char *s, int n);
+void udbg_puts(const char *s);
+int udbg_write(const char *s, int n);
-extern void register_early_udbg_console(void);
-extern void udbg_printf(const char *fmt, ...)
+void register_early_udbg_console(void);
+void udbg_printf(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
-extern void udbg_progress(char *s, unsigned short hex);
+void udbg_progress(char *s, unsigned short hex);
void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
void __init udbg_uart_init_pio(unsigned long port, unsigned int stride);
@@ -31,28 +31,28 @@ unsigned int __init udbg_probe_uart_speed(unsigned int clock);
struct device_node;
void __init udbg_scc_init(int force_scc);
-extern int udbg_adb_init(int force_btext);
-extern void udbg_adb_init_early(void);
-
-extern void __init udbg_early_init(void);
-extern void __init udbg_init_debug_lpar(void);
-extern void __init udbg_init_debug_lpar_hvsi(void);
-extern void __init udbg_init_pmac_realmode(void);
-extern void __init udbg_init_maple_realmode(void);
-extern void __init udbg_init_pas_realmode(void);
-extern void __init udbg_init_rtas_panel(void);
-extern void __init udbg_init_rtas_console(void);
-extern void __init udbg_init_debug_beat(void);
-extern void __init udbg_init_btext(void);
-extern void __init udbg_init_44x_as1(void);
-extern void __init udbg_init_40x_realmode(void);
-extern void __init udbg_init_cpm(void);
-extern void __init udbg_init_usbgecko(void);
-extern void __init udbg_init_memcons(void);
-extern void __init udbg_init_ehv_bc(void);
-extern void __init udbg_init_ps3gelic(void);
-extern void __init udbg_init_debug_opal_raw(void);
-extern void __init udbg_init_debug_opal_hvsi(void);
+int udbg_adb_init(int force_btext);
+void udbg_adb_init_early(void);
+
+void __init udbg_early_init(void);
+void __init udbg_init_debug_lpar(void);
+void __init udbg_init_debug_lpar_hvsi(void);
+void __init udbg_init_pmac_realmode(void);
+void __init udbg_init_maple_realmode(void);
+void __init udbg_init_pas_realmode(void);
+void __init udbg_init_rtas_panel(void);
+void __init udbg_init_rtas_console(void);
+void __init udbg_init_btext(void);
+void __init udbg_init_44x_as1(void);
+void __init udbg_init_40x_realmode(void);
+void __init udbg_init_cpm(void);
+void __init udbg_init_usbgecko(void);
+void __init udbg_init_memcons(void);
+void __init udbg_init_ehv_bc(void);
+void __init udbg_init_ps3gelic(void);
+void __init udbg_init_debug_opal_raw(void);
+void __init udbg_init_debug_opal_hvsi(void);
+void __init udbg_init_debug_16550(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index b1129b4ef57d..659a996c75aa 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -45,6 +45,7 @@
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_COMPAT_STAT
+#define __ARCH_WANT_COMPAT_FALLOCATE
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
#define __ARCH_WANT_SYS_FORK
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 8542e9bbeead..7650b6ce14c8 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -2,9 +2,6 @@
#ifndef _ASM_POWERPC_VDSO_H
#define _ASM_POWERPC_VDSO_H
-/* Default map addresses for 32bit vDSO */
-#define VDSO32_MBASE 0x100000
-
#define VDSO_VERSION_STRING LINUX_2.6.15
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/vdso/processor.h b/arch/powerpc/include/asm/vdso/processor.h
index 8d79f994b4aa..80d13207c568 100644
--- a/arch/powerpc/include/asm/vdso/processor.h
+++ b/arch/powerpc/include/asm/vdso/processor.h
@@ -22,7 +22,13 @@
#endif
#ifdef CONFIG_PPC64
-#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
+#define cpu_relax() \
+ asm volatile(ASM_FTR_IFCLR( \
+ /* Pre-POWER10 uses low ; medium priority nops */ \
+ "or 1,1,1 ; or 2,2,2", \
+ /* POWER10 onward uses pause_short (wait 2,0) */ \
+ PPC_WAIT(2, 0), \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
#else
#define cpu_relax() barrier()
#endif
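[Editor's note] The net effect: before ISA v3.1, cpu_relax() still drops to low SMT priority and back ("or 1,1,1 ; or 2,2,2"); from v3.1 onward it issues the short pause wait instead. Callers are unchanged, e.g. a polling loop such as (illustrative, hypothetical flag):

	while (!READ_ONCE(data_ready))
		cpu_relax();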
diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h
index 891c9d5eaabe..e9245f86a46c 100644
--- a/arch/powerpc/include/asm/vdso/timebase.h
+++ b/arch/powerpc/include/asm/vdso/timebase.h
@@ -12,7 +12,7 @@
* We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
* version below in the else case of the ifdef.
*/
-#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_E500))
+#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500))
#define mftb() ({unsigned long rval; \
asm volatile( \
"90: mfspr %0, %2;\n" \
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index e2e704eca5f6..89090485bec1 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -159,7 +159,6 @@ extern void xics_setup_cpu(void);
extern void xics_update_irq_servers(void);
extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
extern void xics_mask_unknown_vec(unsigned int vec);
-extern irqreturn_t xics_ipi_dispatch(int cpu);
extern void xics_smp_probe(void);
extern void xics_register_ics(struct ics *ics);
extern void xics_teardown_cpu(void);