97 files changed, 3315 insertions(+), 2745 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 742f69d18fc8..8978c26cacdd 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2978,6 +2978,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. may be specified. Format: <port>,<port>.... + ppc_strict_facility_enable + [PPC] This option catches any kernel floating point, + Altivec, VSX and SPE outside of regions specifically + allowed (eg kernel_enable_fpu()/kernel_disable_fpu()). + There is some performance impact when enabling this. + print-fatal-signals= [KNL] debug: print fatal signals diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index ceaa75d5a684..6a19fcef5596 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -154,7 +154,7 @@ if [ -z "$kernel" ]; then kernel=vmlinux fi -elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`" +LANG=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`" case "$elfformat" in elf64-powerpcle) format=elf64lppc ;; elf64-powerpc) format=elf32ppc ;; diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c index bd5e63f72ad4..93ee046d12cd 100644 --- a/arch/powerpc/crypto/aes-spe-glue.c +++ b/arch/powerpc/crypto/aes-spe-glue.c @@ -85,6 +85,7 @@ static void spe_begin(void) static void spe_end(void) { + disable_kernel_spe(); /* reenable preemption */ preempt_enable(); } diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c index 3e1d22212521..f9ebc38d3fe7 100644 --- a/arch/powerpc/crypto/sha1-spe-glue.c +++ b/arch/powerpc/crypto/sha1-spe-glue.c @@ -46,6 +46,7 @@ static void spe_begin(void) static void spe_end(void) { + disable_kernel_spe(); /* reenable preemption */ preempt_enable(); } diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c index f4a616fe1a82..718a079dcdbf 100644 --- a/arch/powerpc/crypto/sha256-spe-glue.c +++ b/arch/powerpc/crypto/sha256-spe-glue.c @@ -47,6 +47,7 @@ static void spe_begin(void) static void spe_end(void) { + disable_kernel_spe(); /* reenable preemption */ preempt_enable(); } diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/book3s/32/hash.h index 62cfb0c663bb..264b754d65b0 100644 --- a/arch/powerpc/include/asm/pte-hash32.h +++ b/arch/powerpc/include/asm/book3s/32/hash.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_HASH32_H -#define _ASM_POWERPC_PTE_HASH32_H +#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H +#define _ASM_POWERPC_BOOK3S_32_HASH_H #ifdef __KERNEL__ /* @@ -43,4 +43,4 @@ #define PTE_ATOMIC_UPDATES 1 #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_HASH32_H */ +#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h new file mode 100644 index 000000000000..38b33dcfcc9d --- /dev/null +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -0,0 +1,482 @@ +#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H +#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H + +#include <asm-generic/pgtable-nopmd.h> + +#include <asm/book3s/32/hash.h> + +/* And here we include common definitions */ +#include <asm/pte-common.h> + +/* + * The normal case is that PTEs are 32-bits and we have a 1-page + * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. 
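For context on the three crypto hunks just above: kernel SPE use has to be bracketed in an enable/disable region, which is what the new ppc_strict_facility_enable option polices. A minimal sketch of the pattern (spe_begin() is not part of this diff; its body below is an assumption based on the surrounding glue code):

static void spe_begin(void)
{
        preempt_disable();      /* no preemption while SPE state is live */
        enable_kernel_spe();    /* open the region the strict check allows */
}

static void spe_end(void)
{
        disable_kernel_spe();   /* close the region (the call added above) */
        preempt_enable();       /* reenable preemption */
}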
-- paulus + * + * For any >32-bit physical address platform, we can use the following + * two level page table layout where the pgdir is 8KB and the MS 13 bits + * are an index to the second level table. The combined pgdir/pmd first + * level has 2048 entries and the second level has 512 64-bit PTE entries. + * -Matt + */ +/* PGDIR_SHIFT determines what a top-level page table entry can map */ +#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define PTRS_PER_PTE (1 << PTE_SHIFT) +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +/* + * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary + * value (for now) on others, from where we can start layout kernel + * virtual space that goes below PKMAP and FIXMAP + */ +#ifdef CONFIG_HIGHMEM +#define KVIRT_TOP PKMAP_BASE +#else +#define KVIRT_TOP (0xfe000000UL) /* for now, could be FIXMAP_BASE ? */ +#endif + +/* + * ioremap_bot starts at that address. Early ioremaps move down from there, + * until mem_init() at which point this becomes the top of the vmalloc + * and ioremap space + */ +#ifdef CONFIG_NOT_COHERENT_CACHE +#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK) +#else +#define IOREMAP_TOP KVIRT_TOP +#endif + +/* + * Just any arbitrary offset to the start of the vmalloc VM area: the + * current 16MB value just means that there will be a 64MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced + * area for the same reason. ;) + * + * We no longer map larger than phys RAM with the BATs so we don't have + * to worry about the VMALLOC_OFFSET causing problems. We do have to worry + * about clashes between our early calls to ioremap() that start growing down + * from ioremap_base being run into the VM area allocations (growing upwards + * from VMALLOC_START). For this reason we have ioremap_bot to check when + * we actually run into our mappings setup in the early boot with the VM + * system. This really does become a problem for machines with good amounts + * of RAM. -- Cort + */ +#define VMALLOC_OFFSET (0x1000000) /* 16M */ +#ifdef PPC_PIN_SIZE +#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#else +#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#endif +#define VMALLOC_END ioremap_bot + +#ifndef __ASSEMBLY__ +#include <linux/sched.h> +#include <linux/threads.h> +#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */ + +extern unsigned long ioremap_bot; + +/* + * entries per page directory level: our page-table tree is two-level, so + * we don't really have any PMD directory. + */ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) +#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ + (unsigned long long)pte_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) +/* + * Bits in a linux-style PTE. These match the bits in the + * (hardware-defined) PowerPC PTE as closely as possible. 
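A worked example of the VMALLOC_START rounding defined above (values illustrative, not from the patch):

/*
 * With high_memory = 0xc0000000 and VMALLOC_OFFSET = 0x1000000 (16 MB):
 *
 *   (0xc0000000 + 0x1000000) & ~(0x1000000 - 1) = 0xc1000000
 *
 * i.e. the vmalloc area starts at the next 16 MB boundary strictly
 * above the top of lowmem, leaving the guard hole the comment describes.
 */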
+ */ + +#define pte_clear(mm, addr, ptep) \ + do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) +#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + + +/* + * When flushing the tlb entry for a page, we also need to flush the hash + * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. + */ +extern int flush_hash_pages(unsigned context, unsigned long va, + unsigned long pmdval, int count); + +/* Add an HPTE to the hash table */ +extern void add_hash_page(unsigned context, unsigned long va, + unsigned long pmdval); + +/* Flush an entry from the TLB/hash table */ +extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, + unsigned long address); + +/* + * PTE updates. This function is called whenever an existing + * valid PTE is updated. This does -not- include set_pte_at() + * which nowadays only sets a new PTE. + * + * Depending on the type of MMU, we may need to use atomic updates + * and the PTE may be either 32 or 64 bit wide. In the later case, + * when using atomic updates, only the low part of the PTE is + * accessed atomically. + * + * In addition, on 44x, we also maintain a global flag indicating + * that an executable user mapping was modified, which is needed + * to properly flush the virtually tagged instruction cache of + * those implementations. + */ +#ifndef CONFIG_PTE_64BIT +static inline unsigned long pte_update(pte_t *p, + unsigned long clr, + unsigned long set) +{ + unsigned long old, tmp; + + __asm__ __volatile__("\ +1: lwarx %0,0,%3\n\ + andc %1,%0,%4\n\ + or %1,%1,%5\n" + PPC405_ERR77(0,%3) +" stwcx. %1,0,%3\n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*p) + : "r" (p), "r" (clr), "r" (set), "m" (*p) + : "cc" ); + + return old; +} +#else /* CONFIG_PTE_64BIT */ +static inline unsigned long long pte_update(pte_t *p, + unsigned long clr, + unsigned long set) +{ + unsigned long long old; + unsigned long tmp; + + __asm__ __volatile__("\ +1: lwarx %L0,0,%4\n\ + lwzx %0,0,%3\n\ + andc %1,%L0,%5\n\ + or %1,%1,%6\n" + PPC405_ERR77(0,%3) +" stwcx. %1,0,%4\n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*p) + : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) + : "cc" ); + + return old; +} +#endif /* CONFIG_PTE_64BIT */ + +/* + * 2.6 calls this without flushing the TLB entry; this is wrong + * for our hash-based implementation, we fix that up here. 
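The lwarx/stwcx. sequences above are a load-linked/store-conditional read-modify-write; a rough user-space C11 analogue (our illustration only — the kernel relies on the hardware reservation, not compare-and-swap):

#include <stdatomic.h>

/* Atomically clear 'clr' and set 'set' in *p, returning the old value,
 * mirroring what the 32-bit pte_update() loop above does. */
static unsigned long pte_update_demo(_Atomic unsigned long *p,
                                     unsigned long clr, unsigned long set)
{
        unsigned long old = atomic_load(p);

        /* retry on a lost race, like the bne- 1b after stwcx. */
        while (!atomic_compare_exchange_weak(p, &old, (old & ~clr) | set))
                ;
        return old;
}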
+ */ +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) +{ + unsigned long old; + old = pte_update(ptep, _PAGE_ACCESSED, 0); + if (old & _PAGE_HASHPTE) { + unsigned long ptephys = __pa(ptep) & PAGE_MASK; + flush_hash_pages(context, addr, ptephys, 1); + } + return (old & _PAGE_ACCESSED) != 0; +} +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ + __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO); +} +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + + +static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) +{ + unsigned long set = pte_val(entry) & + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); + unsigned long clr = ~pte_val(entry) & _PAGE_RO; + + pte_update(ptep, clr, set); +} + +#define __HAVE_ARCH_PTE_SAME +#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) + +/* + * Note that on Book E processors, the pmd contains the kernel virtual + * (lowmem) address of the pte page. The physical address is less useful + * because everything runs with translation enabled (even the TLB miss + * handler). On everything else the pmd contains the physical address + * of the pte page. -- paulus + */ +#ifndef CONFIG_BOOKE +#define pmd_page_vaddr(pmd) \ + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) +#define pmd_page(pmd) \ + pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) +#else +#define pmd_page_vaddr(pmd) \ + ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) +#define pmd_page(pmd) \ + pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT)) +#endif + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* to find an entry in a page-table-directory */ +#define pgd_index(address) ((address) >> PGDIR_SHIFT) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +/* Find an entry in the third-level page table.. */ +#define pte_index(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, addr) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr)) +#define pte_offset_map(dir, addr) \ + ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) +#define pte_unmap(pte) kunmap_atomic(pte) + +/* + * Encode and decode a swap entry. + * Note that the bits we use in a PTE for representing a swap entry + * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used). 
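A worked example for the swap-entry macros defined just below (numbers ours):

/*
 * Storing type 3 at offset 0x1234:
 *
 *   entry.val = 3 | (0x1234 << 5)   __swp_entry()
 *   pte value = entry.val << 3      __swp_entry_to_pte()
 *
 * and decoding reverses it: type = (pte >> 3) & 0x1f, offset = pte >> 8.
 * The final << 3 keeps the low PTE bits clear, so a swap PTE can never
 * appear _PAGE_PRESENT or _PAGE_HASHPTE — exactly the constraint the
 * comment above states.
 */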
+ * -- paulus + */ +#define __swp_type(entry) ((entry).val & 0x1f) +#define __swp_offset(entry) ((entry).val >> 5) +#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) + +#ifndef CONFIG_PPC_4K_PAGES +void pgtable_cache_init(void); +#else +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) +#endif + +extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, + pmd_t **pmdp); + +/* Generic accessors to PTE bits */ +static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} +static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); } +static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } +static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } +static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * Even if PTEs can be unsigned long long, a PFN is always an unsigned + * long for now. + */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | + pgprot_val(pgprot)); +} + +static inline unsigned long pte_pfn(pte_t pte) +{ + return pte_val(pte) >> PTE_RPN_SHIFT; +} + +/* Generic modifiers for PTE bits */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_DIRTY); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_ACCESSED); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + + + +/* This low level function performs the actual PTE insertion + * Setting the PTE depends on the MMU type and other factors. It's + * an horrible mess that I'm not going to try to clean up now but + * I'm keeping it in one place rather than spread around + */ +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, int percpu) +{ +#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) + /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the + * helper pte_update() which does an atomic update. We need to do that + * because a concurrent invalidation can clear _PAGE_HASHPTE. 
If it's a + * per-CPU PTE such as a kmap_atomic, we do a simple update preserving + * the hash bits instead (ie, same as the non-SMP case) + */ + if (percpu) + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + else + pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); + +#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) + /* Second case is 32-bit with 64-bit PTE. In this case, we + * can just store as long as we do the two halves in the right order + * with a barrier in between. This is possible because we take care, + * in the hash code, to pre-invalidate if the PTE was already hashed, + * which synchronizes us with any concurrent invalidation. + * In the percpu case, we also fallback to the simple update preserving + * the hash bits + */ + if (percpu) { + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + return; + } + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); + __asm__ __volatile__("\ + stw%U0%X0 %2,%0\n\ + eieio\n\ + stw%U0%X0 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +#elif defined(CONFIG_PPC_STD_MMU_32) + /* Third case is 32-bit hash table in UP mode, we need to preserve + * the _PAGE_HASHPTE bit since we may not have invalidated the previous + * translation in the hash yet (done in a subsequent flush_tlb_xxx()) + * and see we need to keep track that this PTE needs invalidating + */ + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + +#else +#error "Not supported " +#endif +} + +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached pgprot_noncached +static inline pgprot_t pgprot_noncached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE | _PAGE_GUARDED); +} + +#define pgprot_noncached_wc pgprot_noncached_wc +static inline pgprot_t pgprot_noncached_wc(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE); +} + +#define pgprot_cached pgprot_cached +static inline pgprot_t pgprot_cached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT); +} + +#define pgprot_cached_wthru pgprot_cached_wthru +static inline pgprot_t pgprot_cached_wthru(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT | _PAGE_WRITETHRU); +} + +#define pgprot_cached_noncoherent pgprot_cached_noncoherent +static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot) +{ + return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL); +} + +#define pgprot_writecombine pgprot_writecombine +static inline pgprot_t pgprot_writecombine(pgprot_t prot) +{ + return pgprot_noncached_wc(prot); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h new file mode 100644 index 000000000000..e59832c94609 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -0,0 +1,132 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H +#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H +/* + * Entries per page directory level. The PTE level must use a 64b record + * for each page table entry. The PMD and PGD level use a 32b record for + * each entry by assuming that each entry is page aligned. 
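For reference, the geometry implied by the INDEX_SIZE constants that follow (our arithmetic):

/*
 * With PAGE_SHIFT = 12:
 *
 *   PMD_SHIFT   = 12 + 9 = 21   one PMD entry maps 2 MB
 *   PUD_SHIFT   = 21 + 7 = 28   one PUD entry maps 256 MB
 *   PGDIR_SHIFT = 28 + 9 = 37   one PGD entry maps 128 GB
 *
 * so the full tree spans 37 + 9 = 46 bits of effective address,
 * matching PGTABLE_EADDR_SIZE (9 + 7 + 9 + 9 + 12) in hash.h below.
 */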
+ */ +#define PTE_INDEX_SIZE 9 +#define PMD_INDEX_SIZE 7 +#define PUD_INDEX_SIZE 9 +#define PGD_INDEX_SIZE 9 + +#ifndef __ASSEMBLY__ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) +#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) +#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) +#endif /* __ASSEMBLY__ */ + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +/* PMD_SHIFT determines what a second-level page table entry can map */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* With 4k base page size, hugepage PTEs go at the PMD level */ +#define MIN_HUGEPTE_SHIFT PMD_SHIFT + +/* PUD_SHIFT determines what a third-level page table entry can map */ +#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* PGDIR_SHIFT determines what a fourth-level page table entry can map */ +#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* Bits to mask out from a PMD to get to the PTE page */ +#define PMD_MASKED_BITS 0 +/* Bits to mask out from a PUD to get to the PMD page */ +#define PUD_MASKED_BITS 0 +/* Bits to mask out from a PGD to get to the PUD page */ +#define PGD_MASKED_BITS 0 + +/* PTE flags to conserve for HPTE identification */ +#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ + _PAGE_F_SECOND | _PAGE_F_GIX) + +/* shift to put page number into pte */ +#define PTE_RPN_SHIFT (17) + +#define _PAGE_4K_PFN 0 +#ifndef __ASSEMBLY__ +/* + * 4-level page tables related bits + */ + +#define pgd_none(pgd) (!pgd_val(pgd)) +#define pgd_bad(pgd) (pgd_val(pgd) == 0) +#define pgd_present(pgd) (pgd_val(pgd) != 0) +#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) + +static inline void pgd_clear(pgd_t *pgdp) +{ + *pgdp = __pgd(0); +} + +static inline pte_t pgd_pte(pgd_t pgd) +{ + return __pte(pgd_val(pgd)); +} + +static inline pgd_t pte_pgd(pte_t pte) +{ + return __pgd(pte_val(pte)); +} +extern struct page *pgd_page(pgd_t pgd); + +#define pud_offset(pgdp, addr) \ + (((pud_t *) pgd_page_vaddr(*(pgdp))) + \ + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) + +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) + +/* + * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ +#define remap_4k_pfn(vma, addr, pfn, prot) \ + remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) + +#ifdef CONFIG_HUGETLB_PAGE +/* + * For 4k page size, we support explicit hugepage via hugepd + */ +static inline int pmd_huge(pmd_t pmd) +{ + return 0; +} + +static inline int pud_huge(pud_t pud) +{ + return 0; +} + +static inline int pgd_huge(pgd_t pgd) +{ + return 0; +} +#define pgd_huge pgd_huge + +static inline int hugepd_ok(hugepd_t hpd) +{ + /* + * if it is not a pte and have hugepd shift mask + * set, then it is a hugepd directory pointer + */ + if (!(hpd.pd & _PAGE_PTE) && + ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)) + return true; + return false; +} +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#endif + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h new file mode 100644 index 000000000000..9f9942998587 --- 
/dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -0,0 +1,312 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H +#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H + +#include <asm-generic/pgtable-nopud.h> + +#define PTE_INDEX_SIZE 8 +#define PMD_INDEX_SIZE 10 +#define PUD_INDEX_SIZE 0 +#define PGD_INDEX_SIZE 12 + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +/* With 4k base page size, hugepage PTEs go at the PMD level */ +#define MIN_HUGEPTE_SHIFT PAGE_SHIFT + +/* PMD_SHIFT determines what a second-level page table entry can map */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define _PAGE_COMBO 0x00020000 /* this is a combo 4k page */ +#define _PAGE_4K_PFN 0x00040000 /* PFN is for a single 4k page */ +/* + * Used to track subpage group valid if _PAGE_COMBO is set + * This overloads _PAGE_F_GIX and _PAGE_F_SECOND + */ +#define _PAGE_COMBO_VALID (_PAGE_F_GIX | _PAGE_F_SECOND) + +/* PTE flags to conserve for HPTE identification */ +#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_F_SECOND | \ + _PAGE_F_GIX | _PAGE_HASHPTE | _PAGE_COMBO) + +/* Shift to put page number into pte. + * + * That gives us a max RPN of 34 bits, which means a max of 50 bits + * of addressable physical space, or 46 bits for the special 4k PFNs. + */ +#define PTE_RPN_SHIFT (30) +/* + * we support 16 fragments per PTE page of 64K size. + */ +#define PTE_FRAG_NR 16 +/* + * We use a 2K PTE page fragment and another 2K for storing + * real_pte_t hash index + */ +#define PTE_FRAG_SIZE_SHIFT 12 +#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) + +/* + * Bits to mask out from a PMD to get to the PTE page + * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned. + */ +#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) +/* Bits to mask out from a PGD/PUD to get to the PMD page */ +#define PUD_MASKED_BITS 0x1ff + +#ifndef __ASSEMBLY__ + +/* + * With 64K pages on hash table, we have a special PTE format that + * uses a second "half" of the page table to encode sub-page information + * in order to deal with 64K made of 4K HW pages. Thus we override the + * generic accessors and iterators here + */ +#define __real_pte __real_pte +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) +{ + real_pte_t rpte; + unsigned long *hidxp; + + rpte.pte = pte; + rpte.hidx = 0; + if (pte_val(pte) & _PAGE_COMBO) { + /* + * Make sure we order the hidx load against the _PAGE_COMBO + * check. 
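The ordering described in this comment is the usual publish/consume pairing; a generic C11 sketch of the idea (names and simplifications ours — the kernel uses smp_wmb()/smp_rmb()):

#include <stdatomic.h>

static unsigned long hidx_slot;            /* stands in for the hidx half */
static _Atomic unsigned long combo_flag;   /* stands in for _PAGE_COMBO */

static void store_side(unsigned long hidx) /* per the comment: __hash_page_4K */
{
        hidx_slot = hidx;
        atomic_store_explicit(&combo_flag, 1, memory_order_release);
}

static unsigned long load_side(void)       /* __real_pte() above */
{
        if (atomic_load_explicit(&combo_flag, memory_order_acquire))
                return hidx_slot;          /* hidx is guaranteed visible */
        return 0;
}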
The store side ordering is done in __hash_page_4K + */ + smp_rmb(); + hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); + rpte.hidx = *hidxp; + } + return rpte; +} + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + if ((pte_val(rpte.pte) & _PAGE_COMBO)) + return (rpte.hidx >> (index<<2)) & 0xf; + return (pte_val(rpte.pte) >> _PAGE_F_GIX_SHIFT) & 0xf; +} + +#define __rpte_to_pte(r) ((r).pte) +extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index); +/* + * Trick: we set __end to va + 64k, which happens works for + * a 16M page as well as we want only one iteration + */ +#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ + do { \ + unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ + unsigned __split = (psize == MMU_PAGE_4K || \ + psize == MMU_PAGE_64K_AP); \ + shift = mmu_psize_defs[psize].shift; \ + for (index = 0; vpn < __end; index++, \ + vpn += (1L << (shift - VPN_SHIFT))) { \ + if (!__split || __rpte_sub_valid(rpte, index)) \ + do { + +#define pte_iterate_hashed_end() } while(0); } } while(0) + +#define pte_pagesize_index(mm, addr, pte) \ + (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) + +#define remap_4k_pfn(vma, addr, pfn, prot) \ + (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL : \ + remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ + __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))) + +#define PTE_TABLE_SIZE PTE_FRAG_SIZE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + (sizeof(unsigned long) << PMD_INDEX_SIZE)) +#else +#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) +#endif +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) + +#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) +#define pte_pgd(pte) ((pgd_t)pte_pud(pte)) + +#ifdef CONFIG_HUGETLB_PAGE +/* + * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have + * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; + * + * Defined in such a way that we can optimize away code block at build time + * if CONFIG_HUGETLB_PAGE=n. + */ +static inline int pmd_huge(pmd_t pmd) +{ + /* + * leaf pte for huge page + */ + return !!(pmd_val(pmd) & _PAGE_PTE); +} + +static inline int pud_huge(pud_t pud) +{ + /* + * leaf pte for huge page + */ + return !!(pud_val(pud) & _PAGE_PTE); +} + +static inline int pgd_huge(pgd_t pgd) +{ + /* + * leaf pte for huge page + */ + return !!(pgd_val(pgd) & _PAGE_PTE); +} +#define pgd_huge pgd_huge + +#ifdef CONFIG_DEBUG_VM +extern int hugepd_ok(hugepd_t hpd); +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#else +/* + * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't + * need to setup hugepage directory for them. Our pte and page directory format + * enable us to have this enabled. 
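A worked example for __rpte_to_hidx() above (numbers ours):

/*
 * Each of the 16 4K subpages gets a 4-bit hash-slot nibble in the
 * 64-bit hidx word, so subpage index 3 lives at bits 12-15:
 *
 *   (rpte.hidx >> (3 << 2)) & 0xf
 *
 * 16 subpages x 4 bits fills the word exactly; when _PAGE_COMBO is
 * clear, the single slot comes from the PTE's _PAGE_F_GIX field instead.
 */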
+ */ +static inline int hugepd_ok(hugepd_t hpd) +{ + return 0; +} +#define is_hugepd(pdep) 0 +#endif /* CONFIG_DEBUG_VM */ + +#endif /* CONFIG_HUGETLB_PAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern unsigned long pmd_hugepage_update(struct mm_struct *mm, + unsigned long addr, + pmd_t *pmdp, + unsigned long clr, + unsigned long set); +static inline char *get_hpte_slot_array(pmd_t *pmdp) +{ + /* + * The hpte hindex is stored in the pgtable whose address is in the + * second half of the PMD + * + * Order this load with the test for pmd_trans_huge in the caller + */ + smp_rmb(); + return *(char **)(pmdp + PTRS_PER_PMD); + + +} +/* + * The linux hugepage PMD now include the pmd entries followed by the address + * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits. + * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per + * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and + * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t. + * + * The last three bits are intentionally left to zero. This memory location + * are also used as normal page PTE pointers. So if we have any pointers + * left around while we collapse a hugepage, we need to make sure + * _PAGE_PRESENT bit of that is zero when we look at them + */ +static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) +{ + return (hpte_slot_array[index] >> 3) & 0x1; +} + +static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array, + int index) +{ + return hpte_slot_array[index] >> 4; +} + +static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, + unsigned int index, unsigned int hidx) +{ + hpte_slot_array[index] = hidx << 4 | 0x1 << 3; +} + +/* + * + * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs + * page. The hugetlbfs page table walking and mangling paths are totally + * separated form the core VM paths and they're differentiated by + * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run. + * + * pmd_trans_huge() is defined as false at build time if + * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build + * time in such case. + * + * For ppc64 we need to differntiate from explicit hugepages from THP, because + * for THP we also track the subpage details at the pmd level. We don't do + * that for explicit huge pages. 
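A worked example of the slot-array byte layout described above (numbers ours):

/*
 * mark_hpte_slot_valid(arr, i, 0xb) stores
 *
 *   0xb << 4 | 1 << 3  =  0xb8    [ hidx+secondary | valid | 000 ]
 *
 * after which hpte_valid() reads (0xb8 >> 3) & 1 == 1 and
 * hpte_hash_index() recovers 0xb8 >> 4 == 0xb. The three low bits stay
 * zero so a stale entry can never look _PAGE_PRESENT, as the comment
 * requires.
 */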
+ * + */ +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!((pmd_val(pmd) & (_PAGE_PTE | _PAGE_THP_HUGE)) == + (_PAGE_PTE | _PAGE_THP_HUGE)); +} + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + if (pmd_trans_huge(pmd)) + return pmd_val(pmd) & _PAGE_SPLITTING; + return 0; +} + +static inline int pmd_large(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_PTE); +} + +static inline pmd_t pmd_mknotpresent(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT); +} + +static inline pmd_t pmd_mksplitting(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | _PAGE_SPLITTING); +} + +#define __HAVE_ARCH_PMD_SAME +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0); +} + +static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + unsigned long old; + + if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + return 0; + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); + return ((old & _PAGE_ACCESSED) != 0); +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + + if ((pmd_val(*pmdp) & _PAGE_RW) == 0) + return; + + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); +} + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h new file mode 100644 index 000000000000..8b929e531758 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -0,0 +1,526 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H +#define _ASM_POWERPC_BOOK3S_64_HASH_H +#ifdef __KERNEL__ + +/* + * Common bits between 4K and 64K pages in a linux-style PTE. + * These match the bits in the (hardware-defined) PowerPC PTE as closely + * as possible. Additional bits may be defined in pgtable-hash64-*.h + * + * Note: We only support user read/write permissions. Supervisor always + * have full read/write to pages above PAGE_OFFSET (pages below that + * always use the user access permissions). + * + * We could create separate kernel read-only if we used the 3 PP bits + * combinations that newer processors provide but we currently don't. + */ +#define _PAGE_PTE 0x00001 +#define _PAGE_PRESENT 0x00002 /* software: pte contains a translation */ +#define _PAGE_BIT_SWAP_TYPE 2 +#define _PAGE_USER 0x00004 /* matches one of the PP bits */ +#define _PAGE_EXEC 0x00008 /* No execute on POWER4 and newer (we invert) */ +#define _PAGE_GUARDED 0x00010 +/* We can derive Memory coherence from _PAGE_NO_CACHE */ +#define _PAGE_COHERENT 0x0 +#define _PAGE_NO_CACHE 0x00020 /* I: cache inhibit */ +#define _PAGE_WRITETHRU 0x00040 /* W: cache write-through */ +#define _PAGE_DIRTY 0x00080 /* C: page changed */ +#define _PAGE_ACCESSED 0x00100 /* R: page referenced */ +#define _PAGE_RW 0x00200 /* software: user write access allowed */ +#define _PAGE_HASHPTE 0x00400 /* software: pte has an associated HPTE */ +#define _PAGE_BUSY 0x00800 /* software: PTE & hash are busy */ +#define _PAGE_F_GIX 0x07000 /* full page: hidx bits */ +#define _PAGE_F_GIX_SHIFT 12 +#define _PAGE_F_SECOND 0x08000 /* Whether to use secondary hash or not */ +#define _PAGE_SPECIAL 0x10000 /* software: special page */ + +/* + * THP pages can't be special. 
So use the _PAGE_SPECIAL + */ +#define _PAGE_SPLITTING _PAGE_SPECIAL + +/* + * We need to differentiate between explicit huge page and THP huge + * page, since THP huge page also need to track real subpage details + */ +#define _PAGE_THP_HUGE _PAGE_4K_PFN + +/* + * set of bits not changed in pmd_modify. + */ +#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \ + _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \ + _PAGE_THP_HUGE | _PAGE_PTE) + +#ifdef CONFIG_PPC_64K_PAGES +#include <asm/book3s/64/hash-64k.h> +#else +#include <asm/book3s/64/hash-4k.h> +#endif + +/* + * Size of EA range mapped by our pagetables. + */ +#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) +#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define PMD_CACHE_INDEX (PMD_INDEX_SIZE + 1) +#else +#define PMD_CACHE_INDEX PMD_INDEX_SIZE +#endif +/* + * Define the address range of the kernel non-linear virtual area + */ +#define KERN_VIRT_START ASM_CONST(0xD000000000000000) +#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000) + +/* + * The vmalloc space starts at the beginning of that region, and + * occupies half of it on hash CPUs and a quarter of it on Book3E + * (we keep a quarter for the virtual memmap) + */ +#define VMALLOC_START KERN_VIRT_START +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) +#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) + +/* + * Region IDs + */ +#define REGION_SHIFT 60UL +#define REGION_MASK (0xfUL << REGION_SHIFT) +#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) + +#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START)) +#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET)) +#define VMEMMAP_REGION_ID (0xfUL) /* Server only */ +#define USER_REGION_ID (0UL) + +/* + * Defines the address of the vmemap area, in its own region on + * hash table CPUs. + */ +#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT) + +#ifdef CONFIG_PPC_MM_SLICES +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN +#endif /* CONFIG_PPC_MM_SLICES */ + +/* No separate kernel read-only */ +#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ +#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + +/* Strong Access Ordering */ +#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) + +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +/* PTEIDX nibble */ +#define _PTEIDX_SECONDARY 0x8 +#define _PTEIDX_GROUP_IX 0x7 + +/* Hash table based platforms need atomic updates of the linux PTE */ +#define PTE_ATOMIC_UPDATES 1 +#define _PTE_NONE_MASK _PAGE_HPTEFLAGS +/* + * The mask convered by the RPN must be a ULL on 32-bit platforms with + * 64-bit PTEs + */ +#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) +/* + * _PAGE_CHG_MASK masks of bits that are to be preserved across + * pgprot changes + */ +#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE) +/* + * Mask of bits returned by pte_pgprot() + */ +#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU | _PAGE_4K_PFN | \ + _PAGE_USER | _PAGE_ACCESSED | \ + _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC) +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. 
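An aside on the region constants defined above (our arithmetic):

/*
 * REGION_ID() is simply the top nibble of the effective address, so
 * with KERN_VIRT_START = 0xD000000000000000 the vmalloc space sits in
 * region 0xd, while VMEMMAP_BASE = 0xf << 60 = 0xf000000000000000 puts
 * the virtual memmap in its own region 0xf and user space is region 0.
 */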
We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) + +/* Permission masks used to generate the __P and __S table, + * + * Note:__pgprot is defined in arch/powerpc/include/asm/page.h + * + * Write permissions imply read permissions for now (we could make write-only + * pages on BookE but we don't bother for now). Execute permission control is + * possible on platforms that define _PAGE_EXEC + * + * Note due to the way vm flags are laid out, the bits are XWR + */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \ + _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER ) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER ) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY_X +#define __P101 PAGE_READONLY_X +#define __P110 PAGE_COPY_X +#define __P111 PAGE_COPY_X + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY_X +#define __S101 PAGE_READONLY_X +#define __S110 PAGE_SHARED_X +#define __S111 PAGE_SHARED_X + +/* Permission masks used for kernel mappings */ +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ + _PAGE_NO_CACHE) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ + _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) +#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) + +/* Protection used for kernel text. We want the debuggers to be able to + * set breakpoints anywhere, so don't write protect the kernel text + * on platforms where such control is possible. + */ +#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) +#define PAGE_KERNEL_TEXT PAGE_KERNEL_X +#else +#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX +#endif + +/* Make modules code happy. 
We don't set RO yet */ +#define PAGE_KERNEL_EXEC PAGE_KERNEL_X +#define PAGE_AGP (PAGE_KERNEL_NC) + +#define PMD_BAD_BITS (PTE_TABLE_SIZE-1) +#define PUD_BAD_BITS (PMD_TABLE_SIZE-1) + +#ifndef __ASSEMBLY__ +#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ + || (pmd_val(pmd) & PMD_BAD_BITS)) +#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) + +#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ + || (pud_val(pud) & PUD_BAD_BITS)) +#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) + +#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) +#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) +#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) + +extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pte, int huge); +extern unsigned long htab_convert_pte_flags(unsigned long pteflags); +/* Atomic PTE updates */ +static inline unsigned long pte_update(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, unsigned long clr, + unsigned long set, + int huge) +{ + unsigned long old, tmp; + + __asm__ __volatile__( + "1: ldarx %0,0,%3 # pte_update\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + andc %1,%0,%4 \n\ + or %1,%1,%7\n\ + stdcx. %1,0,%3 \n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*ptep) + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) + : "cc" ); + /* huge pages use the old page table lock */ + if (!huge) + assert_pte_locked(mm, addr); + + if (old & _PAGE_HASHPTE) + hpte_need_flush(mm, addr, ptep, old, huge); + + return old; +} + +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old; + + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + return 0; + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); + return (old & _PAGE_ACCESSED) != 0; +} +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ +({ \ + int __r; \ + __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ + __r; \ +}) + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); +} + +/* + * We currently remove entries from the hashtable regardless of whether + * the entry was young or dirty. The generic routines only flush if the + * entry was young or dirty which is not good enough. 
+ * + * We should be more intelligent about this but for the moment we override + * these functions and force a tlb flush unconditionally + */ +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define ptep_clear_flush_young(__vma, __address, __ptep) \ +({ \ + int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ + __ptep); \ + __young; \ +}) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); + return __pte(old); +} + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t * ptep) +{ + pte_update(mm, addr, ptep, ~0UL, 0, 0); +} + + +/* Set the dirty and/or accessed bits atomically in a linux PTE, this + * function doesn't need to flush the hash entry + */ +static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) +{ + unsigned long bits = pte_val(entry) & + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); + + unsigned long old, tmp; + + __asm__ __volatile__( + "1: ldarx %0,0,%4\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + or %0,%3,%0\n\ + stdcx. %0,0,%4\n\ + bne- 1b" + :"=&r" (old), "=&r" (tmp), "=m" (*ptep) + :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) + :"cc"); +} + +#define __HAVE_ARCH_PTE_SAME +#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) + +/* Generic accessors to PTE bits */ +static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} +static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); } +static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } +static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } +static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } + +#ifdef CONFIG_NUMA_BALANCING +/* + * These work without NUMA balancing but the kernel does not care. See the + * comment in include/asm-generic/pgtable.h . On powerpc, this will only + * work for user pages and always return true for kernel pages. + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & + (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; +} +#endif /* CONFIG_NUMA_BALANCING */ + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * Even if PTEs can be unsigned long long, a PFN is always an unsigned + * long for now. 
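A round-trip example for the pfn_pte()/pte_pfn() pair defined just below (numbers ours, using the 64K-page PTE_RPN_SHIFT of 30 from hash-64k.h):

/*
 * pfn 0x1234 with some pgprot:
 *
 *   pte = (0x1234UL << 30) | pgprot_val(prot)    pfn_pte()
 *   pfn =  pte >> 30       == 0x1234             pte_pfn()
 *
 * which works because every _PAGE_* flag above sits well below bit 30.
 */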
+ */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | + pgprot_val(pgprot)); +} + +static inline unsigned long pte_pfn(pte_t pte) +{ + return pte_val(pte) >> PTE_RPN_SHIFT; +} + +/* Generic modifiers for PTE bits */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_DIRTY); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_ACCESSED); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +/* This low level function performs the actual PTE insertion + * Setting the PTE depends on the MMU type and other factors. It's + * an horrible mess that I'm not going to try to clean up now but + * I'm keeping it in one place rather than spread around + */ +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, int percpu) +{ + /* + * Anything else just stores the PTE normally. That covers all 64-bit + * cases, and 32-bit non-hash with 32-bit PTEs. + */ + *ptep = pte; +} + +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached pgprot_noncached +static inline pgprot_t pgprot_noncached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE | _PAGE_GUARDED); +} + +#define pgprot_noncached_wc pgprot_noncached_wc +static inline pgprot_t pgprot_noncached_wc(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE); +} + +#define pgprot_cached pgprot_cached +static inline pgprot_t pgprot_cached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT); +} + +#define pgprot_cached_wthru pgprot_cached_wthru +static inline pgprot_t pgprot_cached_wthru(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT | _PAGE_WRITETHRU); +} + +#define pgprot_cached_noncoherent pgprot_cached_noncoherent +static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot) +{ + return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL); +} + +#define pgprot_writecombine pgprot_writecombine +static inline pgprot_t pgprot_writecombine(pgprot_t prot) +{ + return pgprot_noncached_wc(prot); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, unsigned long old_pmd); +#else +static inline void hpte_do_hugepage_flush(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp, + unsigned long old_pmd) +{ + WARN(1, "%s called with THP disabled\n", __func__); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h 
b/arch/powerpc/include/asm/book3s/64/pgtable.h new file mode 100644 index 000000000000..a2d4e0e37067 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -0,0 +1,266 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ +#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ +/* + * This file contains the functions and defines necessary to modify and use + * the ppc64 hashed page table. + */ + +#include <asm/book3s/64/hash.h> +#include <asm/barrier.h> + +/* + * The second half of the kernel virtual space is used for IO mappings, + * it's itself carved into the PIO region (ISA and PHB IO space) and + * the ioremap space + * + * ISA_IO_BASE = KERN_IO_START, 64K reserved area + * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces + * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE + */ +#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1)) +#define FULL_IO_SIZE 0x80000000ul +#define ISA_IO_BASE (KERN_IO_START) +#define ISA_IO_END (KERN_IO_START + 0x10000ul) +#define PHB_IO_BASE (ISA_IO_END) +#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) +#define IOREMAP_BASE (PHB_IO_END) +#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE) + +#define vmemmap ((struct page *)VMEMMAP_BASE) + +/* Advertise special mapping type for AGP */ +#define HAVE_PAGE_AGP + +/* Advertise support for _PAGE_SPECIAL */ +#define __HAVE_ARCH_PTE_SPECIAL + +#ifndef __ASSEMBLY__ + +/* + * This is the default implementation of various PTE accessors, it's + * used in all cases except Book3S with 64K pages where we have a + * concept of sub-pages + */ +#ifndef __real_pte + +#ifdef CONFIG_STRICT_MM_TYPECHECKS +#define __real_pte(e,p) ((real_pte_t){(e)}) +#define __rpte_to_pte(r) ((r).pte) +#else +#define __real_pte(e,p) (e) +#define __rpte_to_pte(r) (__pte(r)) +#endif +#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >>_PAGE_F_GIX_SHIFT) + +#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ + do { \ + index = 0; \ + shift = mmu_psize_defs[psize].shift; \ + +#define pte_iterate_hashed_end() } while(0) + +/* + * We expect this to be called only for user addresses or kernel virtual + * addresses other than the linear mapping. + */ +#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K + +#endif /* __real_pte */ + +static inline void pmd_set(pmd_t *pmdp, unsigned long val) +{ + *pmdp = __pmd(val); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_present(pmd) (!pmd_none(pmd)) + +static inline void pud_set(pud_t *pudp, unsigned long val) +{ + *pudp = __pud(val); +} + +static inline void pud_clear(pud_t *pudp) +{ + *pudp = __pud(0); +} + +#define pud_none(pud) (!pud_val(pud)) +#define pud_present(pud) (pud_val(pud) != 0) + +extern struct page *pud_page(pud_t pud); +extern struct page *pmd_page(pmd_t pmd); +static inline pte_t pud_pte(pud_t pud) +{ + return __pte(pud_val(pud)); +} + +static inline pud_t pte_pud(pte_t pte) +{ + return __pud(pte_val(pte)); +} +#define pud_write(pud) pte_write(pud_pte(pud)) +#define pgd_write(pgd) pte_write(pgd_pte(pgd)) +static inline void pgd_set(pgd_t *pgdp, unsigned long val) +{ + *pgdp = __pgd(val); +} + +/* + * Find an entry in a page-table-directory. We combine the address region + * (the high order N bits) and the pgd portion of the address. 
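For reference, the IO layout those constants produce (our arithmetic):

/*
 * KERN_VIRT_SIZE = 2^44 (16 TB), so:
 *
 *   KERN_IO_START = 0xd000000000000000 + 2^43 = 0xd000080000000000
 *   ISA IO space:  64 KB starting at KERN_IO_START
 *   PHB IO space:  from there up to KERN_IO_START + 2 GB (FULL_IO_SIZE)
 *   ioremap:       from PHB_IO_END to the end of the 0xd region
 */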
+ */ + +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +#define pmd_offset(pudp,addr) \ + (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) + +#define pte_offset_kernel(dir,addr) \ + (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr)) + +#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_unmap(pte) do { } while(0) + +/* to find an entry in a kernel page-table-directory */ +/* This now only contains the vmalloc pages */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +/* Encode and de-code a swap entry */ +#define MAX_SWAPFILES_CHECK() do { \ + BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \ + /* \ + * Don't have overlapping bits with _PAGE_HPTEFLAGS \ + * We filter HPTEFLAGS on set_pte. \ + */ \ + BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \ + } while (0) +/* + * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT; + */ +#define SWP_TYPE_BITS 5 +#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \ + & ((1UL << SWP_TYPE_BITS) - 1)) +#define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type) << _PAGE_BIT_SWAP_TYPE) \ + | ((offset) << PTE_RPN_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) +#define __swp_entry_to_pte(x) __pte((x).val) + +void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); +void pgtable_cache_init(void); + +struct page *realmode_pfn_to_page(unsigned long pfn); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); +extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); +extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); +extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd); +extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd); +extern int has_transparent_hugepage(void); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +static inline pte_t *pmdp_ptep(pmd_t *pmd) +{ + return (pte_t *)pmd; +} + +#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) +#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) +#define pmd_young(pmd) pte_young(pmd_pte(pmd)) +#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) +#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) +#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) +#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) +#ifdef CONFIG_NUMA_BALANCING +static inline int pmd_protnone(pmd_t pmd) +{ + return pte_protnone(pmd_pte(pmd)); +} +#endif /* CONFIG_NUMA_BALANCING */ + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write(pmd) pte_write(pmd_pte(pmd)) + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE)); +} + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct 
vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#define pmdp_collapse_flush pmdp_collapse_flush + +#define __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +#define __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_INVALIDATE +extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); + +#define pmd_move_must_withdraw pmd_move_must_withdraw +struct spinlock; +static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl) +{ + /* + * Archs like ppc64 use pgtable to store per pmd + * specific information. So when we switch the pmd, + * we should also withdraw and deposit the pgtable + */ + return true; +} +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h new file mode 100644 index 000000000000..8b0f4a29259a --- /dev/null +++ b/arch/powerpc/include/asm/book3s/pgtable.h @@ -0,0 +1,29 @@ +#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H +#define _ASM_POWERPC_BOOK3S_PGTABLE_H + +#ifdef CONFIG_PPC64 +#include <asm/book3s/64/pgtable.h> +#else +#include <asm/book3s/32/pgtable.h> +#endif + +#define FIRST_USER_ADDRESS 0UL +#ifndef __ASSEMBLY__ +/* Insert a PTE, top-level function is out of line. It uses an inline + * low level function in the respective pgtable-* files + */ +extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte); + + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, pte_t entry, int dirty); + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index ad6263cffb0f..d1a8d93cccfd 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val) unsigned long prev; __asm__ __volatile__( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %0,0,%2 \n" PPC405_ERR77(0,%2) " stwcx. %3,0,%2 \n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER : "=&r" (prev), "+m" (*(volatile unsigned int *)p) : "r" (p), "r" (val) : "cc", "memory"); @@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val) unsigned long prev; __asm__ __volatile__( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: ldarx %0,0,%2 \n" PPC405_ERR77(0,%2) " stdcx. 
%3,0,%2 \n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER : "=&r" (prev), "+m" (*(volatile unsigned long *)p) : "r" (p), "r" (val) : "cc", "memory"); @@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) unsigned int prev; __asm__ __volatile__ ( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ cmpw 0,%0,%3\n\ bne- 2f\n" PPC405_ERR77(0,%2) " stwcx. %4,0,%2\n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER "\n\ 2:" : "=&r" (prev), "+m" (*p) @@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) unsigned long prev; __asm__ __volatile__ ( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ cmpd 0,%0,%3\n\ bne- 2f\n\ stdcx. %4,0,%2\n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER "\n\ 2:" : "=&r" (prev), "+m" (*p) diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 77f52b26dad6..9ee10781121f 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -263,17 +263,6 @@ do_kvm_##n: \ #define KVM_HANDLER_SKIP(area, h, n) #endif -#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE -#define KVMTEST_PR(n) __KVMTEST(n) -#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n) -#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) - -#else -#define KVMTEST_PR(n) -#define KVM_HANDLER_PR(area, h, n) -#define KVM_HANDLER_PR_SKIP(area, h, n) -#endif - #define NOTEST(n) /* @@ -360,13 +349,13 @@ label##_pSeries: \ HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ - EXC_STD, KVMTEST_PR, vec) + EXC_STD, KVMTEST, vec) /* Version of above for when we have to branch out-of-line */ #define STD_EXCEPTION_PSERIES_OOL(vec, label) \ .globl label##_pSeries; \ label##_pSeries: \ - EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \ + EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD) #define STD_EXCEPTION_HV(loc, vec, label) \ @@ -436,17 +425,13 @@ label##_relon_hv: \ #define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec) #define SOFTEN_TEST_PR(vec) \ - KVMTEST_PR(vec); \ + KVMTEST(vec); \ _SOFTEN_TEST(EXC_STD, vec) #define SOFTEN_TEST_HV(vec) \ KVMTEST(vec); \ _SOFTEN_TEST(EXC_HV, vec) -#define SOFTEN_TEST_HV_201(vec) \ - KVMTEST(vec); \ - _SOFTEN_TEST(EXC_STD, vec) - #define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec) #define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec) diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index ba3342bbdbda..7352d3f212df 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -21,7 +21,7 @@ * need for various slices related matters. Note that this isn't the * complete pgtable.h but only a portion of it. 
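The cmpxchg.h hunks above replace the acquire/release pair around each larx/stcx loop with PPC_ATOMIC_ENTRY_BARRIER/PPC_ATOMIC_EXIT_BARRIER, and the synch.h hunk later in this diff turns the entry barrier into a full sync, so value-returning atomics end up fully fenced on both sides. A minimal userspace illustration of what that ordering buys, using C11 seq_cst exchanges rather than the kernel primitives (an analogy, not kernel code): with a full barrier on both sides of the RMW, the classic store-buffering outcome r0 == r1 == 0 is impossible.

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int x, y;
    static int r0, r1;

    static void *t0(void *arg)
    {
            (void)arg;
            atomic_exchange(&x, 1);   /* seq_cst RMW: fenced before and after */
            r0 = atomic_load(&y);     /* seq_cst load */
            return NULL;
    }

    static void *t1(void *arg)
    {
            (void)arg;
            atomic_exchange(&y, 1);
            r1 = atomic_load(&x);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, t0, NULL);
            pthread_create(&b, NULL, t1, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);

            /* With fully fenced exchanges, at least one thread must see
             * the other's store; acquire/release alone does not forbid
             * the 0/0 outcome. */
            assert(r0 == 1 || r1 == 1);
            return 0;
    }

On power, a seq_cst RMW compiles to roughly sync; larx/stcx loop; sync, which is the shape the hunks above now produce.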
*/ -#include <asm/pgtable-ppc64.h> +#include <asm/book3s/64/pgtable.h> #include <asm/bug.h> #include <asm/processor.h> diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9c326565d498..c82cbf52d19e 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC32_H -#define _ASM_POWERPC_PGTABLE_PPC32_H +#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H +#define _ASM_POWERPC_NOHASH_32_PGTABLE_H #include <asm-generic/pgtable-nopmd.h> @@ -106,17 +106,15 @@ extern int icache_44x_need_flush; */ #if defined(CONFIG_40x) -#include <asm/pte-40x.h> +#include <asm/nohash/32/pte-40x.h> #elif defined(CONFIG_44x) -#include <asm/pte-44x.h> +#include <asm/nohash/32/pte-44x.h> #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) -#include <asm/pte-book3e.h> +#include <asm/nohash/pte-book3e.h> #elif defined(CONFIG_FSL_BOOKE) -#include <asm/pte-fsl-booke.h> +#include <asm/nohash/32/pte-fsl-booke.h> #elif defined(CONFIG_8xx) -#include <asm/pte-8xx.h> -#else /* CONFIG_6xx */ -#include <asm/pte-hash32.h> +#include <asm/nohash/32/pte-8xx.h> #endif /* And here we include common definitions */ @@ -130,7 +128,12 @@ extern int icache_44x_need_flush; #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) -#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + + /* * When flushing the tlb entry for a page, we also need to flush the hash @@ -337,4 +340,4 @@ extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, #endif /* !__ASSEMBLY__ */ -#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */ +#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index 486b1ef81338..9624ebdacc47 100644 --- a/arch/powerpc/include/asm/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_40x_H -#define _ASM_POWERPC_PTE_40x_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H +#define _ASM_POWERPC_NOHASH_32_PTE_40x_H #ifdef __KERNEL__ /* @@ -61,4 +61,4 @@ #define PTE_ATOMIC_UPDATES 1 #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_40x_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h index 36f75fab23f5..fdab41c654ef 100644 --- a/arch/powerpc/include/asm/pte-44x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_44x_H -#define _ASM_POWERPC_PTE_44x_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H +#define _ASM_POWERPC_NOHASH_32_PTE_44x_H #ifdef __KERNEL__ /* @@ -94,4 +94,4 @@ #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_44x_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */ diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index a0e2ba960976..3742b1919661 100644 --- a/arch/powerpc/include/asm/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_8xx_H -#define _ASM_POWERPC_PTE_8xx_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H +#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H #ifdef __KERNEL__ /* @@ -62,4 +62,4 @@ _PAGE_HWWRITE | _PAGE_EXEC) #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_8xx_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */ diff --git 
a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h index 9f5c3d04a1a3..5422d00c6145 100644 --- a/arch/powerpc/include/asm/pte-fsl-booke.h +++ b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_FSL_BOOKE_H -#define _ASM_POWERPC_PTE_FSL_BOOKE_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H +#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H #ifdef __KERNEL__ /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based @@ -37,4 +37,4 @@ #define PTE_WIMGE_SHIFT (6) #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index 132ee1d482c2..fc7d51753f81 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H -#define _ASM_POWERPC_PGTABLE_PPC64_4K_H +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H /* * Entries per page directory level. The PTE level must use a 64b record * for each page table entry. The PMD and PGD level use a 32b record for @@ -55,11 +55,15 @@ #define pgd_none(pgd) (!pgd_val(pgd)) #define pgd_bad(pgd) (pgd_val(pgd) == 0) #define pgd_present(pgd) (pgd_val(pgd) != 0) -#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) #ifndef __ASSEMBLY__ +static inline void pgd_clear(pgd_t *pgdp) +{ + *pgdp = __pgd(0); +} + static inline pte_t pgd_pte(pgd_t pgd) { return __pte(pgd_val(pgd)); @@ -85,4 +89,4 @@ extern struct page *pgd_page(pgd_t pgd); #define remap_4k_pfn(vma, addr, pfn, prot) \ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) -#endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */ +#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 1de35bbd02a6..570fb30be21c 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC64_64K_H -#define _ASM_POWERPC_PGTABLE_PPC64_64K_H +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H #include <asm-generic/pgtable-nopud.h> @@ -9,8 +9,19 @@ #define PUD_INDEX_SIZE 0 #define PGD_INDEX_SIZE 12 +/* + * we support 32 fragments per PTE page of 64K size + */ +#define PTE_FRAG_NR 32 +/* + * We use a 2K PTE page fragment and another 2K for storing + * real_pte_t hash index + */ +#define PTE_FRAG_SIZE_SHIFT 11 +#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) + #ifndef __ASSEMBLY__ -#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) +#define PTE_TABLE_SIZE PTE_FRAG_SIZE #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) #endif /* __ASSEMBLY__ */ @@ -32,13 +43,15 @@ #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -/* Bits to mask out from a PMD to get to the PTE page */ -/* PMDs point to PTE table fragments which are 4K aligned. */ -#define PMD_MASKED_BITS 0xfff +/* + * Bits to mask out from a PMD to get to the PTE page + * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
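A quick standalone sanity check of the fragment geometry introduced above (the constants are copied from the hunk; the pmd value below is made up for illustration): a 64K page now carries 32 fragments of 2K each, and PMD_MASKED_BITS strips exactly the offset within a fragment.

    #include <stdio.h>

    #define PAGE_SHIFT            16   /* 64K pages */
    #define PTE_FRAG_SIZE_SHIFT   11   /* 2K fragments */
    #define PTE_FRAG_SIZE         (1UL << PTE_FRAG_SIZE_SHIFT)
    #define PTE_FRAG_NR           32
    #define PMD_MASKED_BITS       (PTE_FRAG_SIZE - 1)

    _Static_assert((1UL << PAGE_SHIFT) / PTE_FRAG_SIZE == PTE_FRAG_NR,
                   "32 fragments of 2K fill one 64K page exactly");

    int main(void)
    {
            /* made-up pmd value: fragment base ...40800, offset 0x100 */
            unsigned long pmd = 0xc000000012340900UL;

            printf("PMD_MASKED_BITS = 0x%lx\n", (unsigned long)PMD_MASKED_BITS);
            printf("pte page vaddr  = 0x%lx\n", pmd & ~PMD_MASKED_BITS);
            return 0;
    }

This prints 0x7ff and 0xc000000012340800: the low 11 bits are the in-fragment offset, everything above is the PTE page address.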
+ */ +#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) /* Bits to mask out from a PGD/PUD to get to the PMD page */ #define PUD_MASKED_BITS 0x1ff #define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) #define pte_pgd(pte) ((pgd_t)pte_pud(pte)) -#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */ +#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 3245f2d96d4f..b9f734dd5b81 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -1,14 +1,14 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_ -#define _ASM_POWERPC_PGTABLE_PPC64_H_ +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_H /* * This file contains the functions and defines necessary to modify and use * the ppc64 hashed page table. */ #ifdef CONFIG_PPC_64K_PAGES -#include <asm/pgtable-ppc64-64k.h> +#include <asm/nohash/64/pgtable-64k.h> #else -#include <asm/pgtable-ppc64-4k.h> +#include <asm/nohash/64/pgtable-4k.h> #endif #include <asm/barrier.h> @@ -18,7 +18,7 @@ * Size of EA range mapped by our pagetables. */ #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ - PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -97,11 +97,7 @@ /* * Include the PTE bits definitions */ -#ifdef CONFIG_PPC_BOOK3S -#include <asm/pte-hash64.h> -#else -#include <asm/pte-book3e.h> -#endif +#include <asm/nohash/pte-book3e.h> #include <asm/pte-common.h> #ifdef CONFIG_PPC_MM_SLICES @@ -110,59 +106,47 @@ #endif /* CONFIG_PPC_MM_SLICES */ #ifndef __ASSEMBLY__ - -/* - * This is the default implementation of various PTE accessors, it's - * used in all cases except Book3S with 64K pages where we have a - * concept of sub-pages - */ -#ifndef __real_pte - -#ifdef CONFIG_STRICT_MM_TYPECHECKS -#define __real_pte(e,p) ((real_pte_t){(e)}) -#define __rpte_to_pte(r) ((r).pte) -#else -#define __real_pte(e,p) (e) -#define __rpte_to_pte(r) (__pte(r)) -#endif -#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12) - -#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ - do { \ - index = 0; \ - shift = mmu_psize_defs[psize].shift; \ - -#define pte_iterate_hashed_end() } while(0) - -/* - * We expect this to be called only for user addresses or kernel virtual - * addresses other than the linear mapping. 
- */ -#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K - -#endif /* __real_pte */ - - /* pte_clear moved to later in this file */ #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) -#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) +static inline void pmd_set(pmd_t *pmdp, unsigned long val) +{ + *pmdp = __pmd(val); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ || (pmd_val(pmd) & PMD_BAD_BITS)) #define pmd_present(pmd) (!pmd_none(pmd)) -#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) extern struct page *pmd_page(pmd_t pmd); -#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) +static inline void pud_set(pud_t *pudp, unsigned long val) +{ + *pudp = __pud(val); +} + +static inline void pud_clear(pud_t *pudp) +{ + *pudp = __pud(0); +} + #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ || (pud_val(pud) & PUD_BAD_BITS)) #define pud_present(pud) (pud_val(pud) != 0) -#define pud_clear(pudp) (pud_val(*(pudp)) = 0) #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) extern struct page *pud_page(pud_t pud); @@ -177,9 +161,13 @@ static inline pud_t pte_pud(pte_t pte) return __pud(pte_val(pte)); } #define pud_write(pud) pte_write(pud_pte(pud)) -#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) #define pgd_write(pgd) pte_write(pgd_pte(pgd)) +static inline void pgd_set(pgd_t *pgdp, unsigned long val) +{ + *pgdp = __pgd(val); +} + /* * Find an entry in a page-table-directory. We combine the address region * (the high order N bits) and the pgd portion of the address. @@ -373,254 +361,4 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); void pgtable_cache_init(void); #endif /* __ASSEMBLY__ */ -/* - * THP pages can't be special. So use the _PAGE_SPECIAL - */ -#define _PAGE_SPLITTING _PAGE_SPECIAL - -/* - * We need to differentiate between explicit huge page and THP huge - * page, since THP huge page also need to track real subpage details - */ -#define _PAGE_THP_HUGE _PAGE_4K_PFN - -/* - * set of bits not changed in pmd_modify. - */ -#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \ - _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \ - _PAGE_THP_HUGE) - -#ifndef __ASSEMBLY__ -/* - * The linux hugepage PMD now include the pmd entries followed by the address - * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits. - * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per - * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and - * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t. - * - * The last three bits are intentionally left to zero. This memory location - * are also used as normal page PTE pointers. 
So if we have any pointers - * left around while we collapse a hugepage, we need to make sure - * _PAGE_PRESENT bit of that is zero when we look at them - */ -static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) -{ - return (hpte_slot_array[index] >> 3) & 0x1; -} - -static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array, - int index) -{ - return hpte_slot_array[index] >> 4; -} - -static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, - unsigned int index, unsigned int hidx) -{ - hpte_slot_array[index] = hidx << 4 | 0x1 << 3; -} - -struct page *realmode_pfn_to_page(unsigned long pfn); - -static inline char *get_hpte_slot_array(pmd_t *pmdp) -{ - /* - * The hpte hindex is stored in the pgtable whose address is in the - * second half of the PMD - * - * Order this load with the test for pmd_trans_huge in the caller - */ - smp_rmb(); - return *(char **)(pmdp + PTRS_PER_PMD); - - -} - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long old_pmd); -extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); -extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); -extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); -extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t pmd); -extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd); -/* - * - * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs - * page. The hugetlbfs page table walking and mangling paths are totally - * separated form the core VM paths and they're differentiated by - * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run. - * - * pmd_trans_huge() is defined as false at build time if - * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build - * time in such case. - * - * For ppc64 we need to differntiate from explicit hugepages from THP, because - * for THP we also track the subpage details at the pmd level. We don't do - * that for explicit huge pages. 
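The deleted helpers above (moving with the rest of this block into the book3s headers) pack one byte per HPTE slot: [1 bit secondary | 3 bit hidx | 1 bit valid | 000]. A standalone round-trip of that encoding, lifted directly from the helpers themselves:

    #include <assert.h>

    static unsigned int hpte_valid(unsigned char *a, int i)
    {
            return (a[i] >> 3) & 0x1;               /* valid bit */
    }

    static unsigned int hpte_hash_index(unsigned char *a, int i)
    {
            return a[i] >> 4;                       /* secondary + 3-bit hidx */
    }

    static void mark_hpte_slot_valid(unsigned char *a, unsigned int i,
                                     unsigned int hidx)
    {
            a[i] = hidx << 4 | 0x1 << 3;
    }

    int main(void)
    {
            unsigned char slots[256] = { 0 };       /* 16MB page / 64K HPTEs */

            assert(!hpte_valid(slots, 7));
            mark_hpte_slot_valid(slots, 7, 0xb);    /* secondary group, gix 3 */
            assert(hpte_valid(slots, 7));
            assert(hpte_hash_index(slots, 7) == 0xb);
            return 0;
    }

The low three bits stay zero so a stale pointer interpreted as a PTE never appears present, which is the invariant the deleted comment describes.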
- * - */ -static inline int pmd_trans_huge(pmd_t pmd) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE); -} - -static inline int pmd_trans_splitting(pmd_t pmd) -{ - if (pmd_trans_huge(pmd)) - return pmd_val(pmd) & _PAGE_SPLITTING; - return 0; -} - -extern int has_transparent_hugepage(void); -#else -static inline void hpte_do_hugepage_flush(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp, - unsigned long old_pmd) -{ - - WARN(1, "%s called with THP disabled\n", __func__); -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - -static inline int pmd_large(pmd_t pmd) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - return ((pmd_val(pmd) & 0x3) != 0x0); -} - -static inline pte_t pmd_pte(pmd_t pmd) -{ - return __pte(pmd_val(pmd)); -} - -static inline pmd_t pte_pmd(pte_t pte) -{ - return __pmd(pte_val(pte)); -} - -static inline pte_t *pmdp_ptep(pmd_t *pmd) -{ - return (pte_t *)pmd; -} - -#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) -#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) -#define pmd_young(pmd) pte_young(pmd_pte(pmd)) -#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) -#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) -#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) -#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) -#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) - -#define __HAVE_ARCH_PMD_WRITE -#define pmd_write(pmd) pte_write(pmd_pte(pmd)) - -static inline pmd_t pmd_mkhuge(pmd_t pmd) -{ - /* Do nothing, mk_pmd() does this part. */ - return pmd; -} - -static inline pmd_t pmd_mknotpresent(pmd_t pmd) -{ - pmd_val(pmd) &= ~_PAGE_PRESENT; - return pmd; -} - -static inline pmd_t pmd_mksplitting(pmd_t pmd) -{ - pmd_val(pmd) |= _PAGE_SPLITTING; - return pmd; -} - -#define __HAVE_ARCH_PMD_SAME -static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) -{ - return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0); -} - -#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS -extern int pmdp_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp, - pmd_t entry, int dirty); - -extern unsigned long pmd_hugepage_update(struct mm_struct *mm, - unsigned long addr, - pmd_t *pmdp, - unsigned long clr, - unsigned long set); - -static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp) -{ - unsigned long old; - - if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) - return 0; - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); - return ((old & _PAGE_ACCESSED) != 0); -} - -#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG -extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); -#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH -extern int pmdp_clear_flush_young(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); - -#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR -extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp); - -#define __HAVE_ARCH_PMDP_SET_WRPROTECT -static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp) -{ - - if ((pmd_val(*pmdp) & _PAGE_RW) == 0) - return; - - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); -} - -#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH -extern void pmdp_splitting_flush(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); - -extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, - unsigned long 
address, pmd_t *pmdp); -#define pmdp_collapse_flush pmdp_collapse_flush - -#define __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, - pgtable_t pgtable); -#define __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); - -#define __HAVE_ARCH_PMDP_INVALIDATE -extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmdp); - -#define pmd_move_must_withdraw pmd_move_must_withdraw -struct spinlock; -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, - struct spinlock *old_pmd_ptl) -{ - /* - * Archs like ppc64 use pgtable to store per pmd - * specific information. So when we switch the pmd, - * we should also withdraw and deposit the pgtable - */ - return true; -} -#endif /* __ASSEMBLY__ */ -#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ +#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h new file mode 100644 index 000000000000..1263c22d60d8 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -0,0 +1,252 @@ +#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H +#define _ASM_POWERPC_NOHASH_PGTABLE_H + +#if defined(CONFIG_PPC64) +#include <asm/nohash/64/pgtable.h> +#else +#include <asm/nohash/32/pgtable.h> +#endif + +#ifndef __ASSEMBLY__ + +/* Generic accessors to PTE bits */ +static inline int pte_write(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; +} +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } +static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } + +#ifdef CONFIG_NUMA_BALANCING +/* + * These work without NUMA balancing but the kernel does not care. See the + * comment in include/asm-generic/pgtable.h . On powerpc, this will only + * work for user pages and always return true for kernel pages. + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & + (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return pte_protnone(pmd_pte(pmd)); +} +#endif /* CONFIG_NUMA_BALANCING */ + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * Even if PTEs can be unsigned long long, a PFN is always an unsigned + * long for now. 
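Immediately after this comment the new nohash header defines the generic pfn_pte()/pte_pfn() pair. A userspace round-trip of the encoding; PTE_RPN_SHIFT is platform-specific, so the old hash64/4K value of 17 (visible in the pte-hash64-4k.h deletion later in this diff) is assumed purely for illustration.

    #include <assert.h>

    typedef unsigned long long pte_basic_t;
    typedef struct { pte_basic_t pte; } pte_t;

    #define PTE_RPN_SHIFT   17              /* assumed, platform-specific */
    #define _PAGE_PRESENT   0x0001UL

    static pte_t __pte(pte_basic_t v)       { return (pte_t){ v }; }
    static pte_basic_t pte_val(pte_t p)     { return p.pte; }

    static pte_t pfn_pte(unsigned long pfn, unsigned long pgprot)
    {
            /* PFN in the high bits, protection bits below the shift */
            return __pte(((pte_basic_t)pfn << PTE_RPN_SHIFT) | pgprot);
    }

    static unsigned long pte_pfn(pte_t pte)
    {
            return pte_val(pte) >> PTE_RPN_SHIFT;
    }

    int main(void)
    {
            pte_t pte = pfn_pte(0x12345, _PAGE_PRESENT);

            assert(pte_pfn(pte) == 0x12345);        /* round-trips cleanly */
            assert(pte_val(pte) & _PAGE_PRESENT);
            return 0;
    }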
+ */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { + return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | + pgprot_val(pgprot)); } +static inline unsigned long pte_pfn(pte_t pte) { + return pte_val(pte) >> PTE_RPN_SHIFT; } + +/* Generic modifiers for PTE bits */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_basic_t ptev; + + ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE); + ptev |= _PAGE_RO; + return __pte(ptev); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE)); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_ACCESSED); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_basic_t ptev; + + ptev = pte_val(pte) & ~_PAGE_RO; + ptev |= _PAGE_RW; + return __pte(ptev); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +/* Insert a PTE, top-level function is out of line. It uses an inline + * low level function in the respective pgtable-* files + */ +extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte); + +/* This low level function performs the actual PTE insertion + * Setting the PTE depends on the MMU type and other factors. It's + * an horrible mess that I'm not going to try to clean up now but + * I'm keeping it in one place rather than spread around + */ +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, int percpu) +{ +#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) + /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the + * helper pte_update() which does an atomic update. We need to do that + * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a + * per-CPU PTE such as a kmap_atomic, we do a simple update preserving + * the hash bits instead (ie, same as the non-SMP case) + */ + if (percpu) + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + else + pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); + +#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) + /* Second case is 32-bit with 64-bit PTE. In this case, we + * can just store as long as we do the two halves in the right order + * with a barrier in between. This is possible because we take care, + * in the hash code, to pre-invalidate if the PTE was already hashed, + * which synchronizes us with any concurrent invalidation. 
+ * In the percpu case, we also fallback to the simple update preserving + * the hash bits + */ + if (percpu) { + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + return; + } +#if _PAGE_HASHPTE != 0 + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); +#endif + __asm__ __volatile__("\ + stw%U0%X0 %2,%0\n\ + eieio\n\ + stw%U0%X0 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +#elif defined(CONFIG_PPC_STD_MMU_32) + /* Third case is 32-bit hash table in UP mode, we need to preserve + * the _PAGE_HASHPTE bit since we may not have invalidated the previous + * translation in the hash yet (done in a subsequent flush_tlb_xxx()) + * and see we need to keep track that this PTE needs invalidating + */ + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + +#else + /* Anything else just stores the PTE normally. That covers all 64-bit + * cases, and 32-bit non-hash with 32-bit PTEs. + */ + *ptep = pte; + +#ifdef CONFIG_PPC_BOOK3E_64 + /* + * With hardware tablewalk, a sync is needed to ensure that + * subsequent accesses see the PTE we just wrote. Unlike userspace + * mappings, we can't tolerate spurious faults, so make sure + * the new PTE will be seen the first time. + */ + if (is_kernel_addr(addr)) + mb(); +#endif +#endif +} + + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, pte_t entry, int dirty); + +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE | _PAGE_GUARDED)) + +#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE)) + +#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT)) + +#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT | _PAGE_WRITETHRU)) + +#define pgprot_cached_noncoherent(prot) \ + (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) + +#define pgprot_writecombine pgprot_noncached_wc + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + +#ifdef CONFIG_HUGETLB_PAGE +static inline int hugepd_ok(hugepd_t hpd) +{ + return (hpd.pd > 0); +} + +static inline int pmd_huge(pmd_t pmd) +{ + return 0; +} + +static inline int pud_huge(pud_t pud) +{ + return 0; +} + +static inline int pgd_huge(pgd_t pgd) +{ + return 0; +} +#define pgd_huge pgd_huge + +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#endif + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h index 8d8473278d91..e16807b78edf 100644 --- a/arch/powerpc/include/asm/pte-book3e.h +++ b/arch/powerpc/include/asm/nohash/pte-book3e.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_BOOK3E_H -#define _ASM_POWERPC_PTE_BOOK3E_H +#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H +#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H #ifdef __KERNEL__ /* PTE bit definitions for processors compliant to the Book3E @@ -84,4 +84,4 @@ #endif #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */ +#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */ diff --git a/arch/powerpc/include/asm/page.h 
b/arch/powerpc/include/asm/page.h index 3140c19c448c..e34124f6fbf2 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -286,8 +286,11 @@ extern long long virt_phys_offset; /* PTE level */ typedef struct { pte_basic_t pte; } pte_t; -#define pte_val(x) ((x).pte) #define __pte(x) ((pte_t) { (x) }) +static inline pte_basic_t pte_val(pte_t x) +{ + return x.pte; +} /* 64k pages additionally define a bigger "real PTE" type that gathers * the "second half" part of the PTE for pseudo 64k pages @@ -301,21 +304,30 @@ typedef struct { pte_t pte; } real_pte_t; /* PMD level */ #ifdef CONFIG_PPC64 typedef struct { unsigned long pmd; } pmd_t; -#define pmd_val(x) ((x).pmd) #define __pmd(x) ((pmd_t) { (x) }) +static inline unsigned long pmd_val(pmd_t x) +{ + return x.pmd; +} /* PUD level exusts only on 4k pages */ #ifndef CONFIG_PPC_64K_PAGES typedef struct { unsigned long pud; } pud_t; -#define pud_val(x) ((x).pud) #define __pud(x) ((pud_t) { (x) }) +static inline unsigned long pud_val(pud_t x) +{ + return x.pud; +} #endif /* !CONFIG_PPC_64K_PAGES */ #endif /* CONFIG_PPC64 */ /* PGD level */ typedef struct { unsigned long pgd; } pgd_t; -#define pgd_val(x) ((x).pgd) #define __pgd(x) ((pgd_t) { (x) }) +static inline unsigned long pgd_val(pgd_t x) +{ + return x.pgd; +} /* Page protection bits */ typedef struct { unsigned long pgprot; } pgprot_t; @@ -329,8 +341,11 @@ typedef struct { unsigned long pgprot; } pgprot_t; */ typedef pte_basic_t pte_t; -#define pte_val(x) (x) #define __pte(x) (x) +static inline pte_basic_t pte_val(pte_t pte) +{ + return pte; +} #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; @@ -341,67 +356,42 @@ typedef pte_t real_pte_t; #ifdef CONFIG_PPC64 typedef unsigned long pmd_t; -#define pmd_val(x) (x) #define __pmd(x) (x) +static inline unsigned long pmd_val(pmd_t pmd) +{ + return pmd; +} #ifndef CONFIG_PPC_64K_PAGES typedef unsigned long pud_t; -#define pud_val(x) (x) #define __pud(x) (x) +static inline unsigned long pud_val(pud_t pud) +{ + return pud; +} #endif /* !CONFIG_PPC_64K_PAGES */ #endif /* CONFIG_PPC64 */ typedef unsigned long pgd_t; -#define pgd_val(x) (x) -#define pgprot_val(x) (x) +#define __pgd(x) (x) +static inline unsigned long pgd_val(pgd_t pgd) +{ + return pgd; +} typedef unsigned long pgprot_t; -#define __pgd(x) (x) +#define pgprot_val(x) (x) #define __pgprot(x) (x) #endif typedef struct { signed long pd; } hugepd_t; -#ifdef CONFIG_HUGETLB_PAGE -#ifdef CONFIG_PPC_BOOK3S_64 -#ifdef CONFIG_PPC_64K_PAGES -/* - * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't - * need to setup hugepage directory for them. Our pte and page directory format - * enable us to have this enabled. But to avoid errors when implementing new - * features disable hugepd for 64K. We enable a debug version here, So we catch - * wrong usage. 
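The page.h hunk above turns pte_val() and friends from macros into static inlines. The practical effect is that the accessor is no longer an lvalue: the old in-place style pte_val(pte) |= bit (still visible in the pgtable.h deletions later in this diff) can no longer compile, and callers are forced to construct a fresh value through __pte(), as the reworked nohash modifiers do. A minimal sketch:

    typedef struct { unsigned long pte; } pte_t;

    #define _PAGE_RO  0x0010UL

    static inline unsigned long pte_val(pte_t x)
    {
            return x.pte;           /* rvalue: cannot be assigned through */
    }

    static inline pte_t __pte(unsigned long v)
    {
            return (pte_t){ v };
    }

    static pte_t pte_wrprotect(pte_t pte)
    {
            /* pte_val(pte) |= _PAGE_RO;   <- no longer compiles */
            return __pte(pte_val(pte) | _PAGE_RO);
    }

    int main(void)
    {
            pte_t p = pte_wrprotect(__pte(0));

            return (pte_val(p) & _PAGE_RO) ? 0 : 1;
    }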
- */ -#ifdef CONFIG_DEBUG_VM -extern int hugepd_ok(hugepd_t hpd); -#else -#define hugepd_ok(x) (0) -#endif -#else -static inline int hugepd_ok(hugepd_t hpd) -{ - /* - * hugepd pointer, bottom two bits == 00 and next 4 bits - * indicate size of table - */ - return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); -} -#endif -#else -static inline int hugepd_ok(hugepd_t hpd) -{ - return (hpd.pd > 0); -} -#endif - -#define is_hugepd(hpd) (hugepd_ok(hpd)) -#define pgd_huge pgd_huge -int pgd_huge(pgd_t pgd); -#else /* CONFIG_HUGETLB_PAGE */ -#define is_hugepd(pdep) 0 -#define pgd_huge(pgd) 0 +#ifndef CONFIG_HUGETLB_PAGE +#define is_hugepd(pdep) (0) +#define pgd_huge(pgd) (0) #endif /* CONFIG_HUGETLB_PAGE */ + #define __hugepd(x) ((hugepd_t) { (x) }) struct page; diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 842846c1b711..76d6b9e0c8a9 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -21,16 +21,34 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); /* #define pgd_populate(mm, pmd, pte) BUG() */ #ifndef CONFIG_BOOKE -#define pmd_populate_kernel(mm, pmd, pte) \ - (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT) -#define pmd_populate(mm, pmd, pte) \ - (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT) + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, + pte_t *pte) +{ + *pmdp = __pmd(__pa(pte) | _PMD_PRESENT); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pte_page) +{ + *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT); +} + #define pmd_pgtable(pmd) pmd_page(pmd) #else -#define pmd_populate_kernel(mm, pmd, pte) \ - (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT) -#define pmd_populate(mm, pmd, pte) \ - (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT) + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, + pte_t *pte) +{ + *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pte_page) +{ + *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT); +} + #define pmd_pgtable(pmd) pmd_page(pmd) #endif diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 4b0be20fcbfd..69ef28a81733 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -53,7 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) #ifndef CONFIG_PPC_64K_PAGES -#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) +#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { @@ -71,9 +71,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_set(pud, (unsigned long)pmd); } -#define pmd_populate(mm, pmd, pte_page) \ - pmd_populate_kernel(mm, pmd, page_address(pte_page)) -#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + pmd_set(pmd, (unsigned long)pte); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte_page) +{ + pmd_set(pmd, (unsigned long)page_address(pte_page)); +} + #define pmd_pgtable(pmd) pmd_page(pmd) static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, @@ -154,16 +163,6 @@ static inline 
void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, } #else /* if CONFIG_PPC_64K_PAGES */ -/* - * we support 16 fragments per PTE page. - */ -#define PTE_FRAG_NR 16 -/* - * We use a 2K PTE page fragment and another 2K for storing - * real_pte_t hash index - */ -#define PTE_FRAG_SIZE_SHIFT 12 -#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t)) extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int); extern void page_table_free(struct mm_struct *, unsigned long *, int); diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index b64b4212b71f..ac9fb114e25d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -1,6 +1,5 @@ #ifndef _ASM_POWERPC_PGTABLE_H #define _ASM_POWERPC_PGTABLE_H -#ifdef __KERNEL__ #ifndef __ASSEMBLY__ #include <linux/mmdebug.h> @@ -13,210 +12,20 @@ struct mm_struct; #endif /* !__ASSEMBLY__ */ -#if defined(CONFIG_PPC64) -# include <asm/pgtable-ppc64.h> +#ifdef CONFIG_PPC_BOOK3S +#include <asm/book3s/pgtable.h> #else -# include <asm/pgtable-ppc32.h> -#endif - -/* - * We save the slot number & secondary bit in the second half of the - * PTE page. We use the 8 bytes per each pte entry. - */ -#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8) +#include <asm/nohash/pgtable.h> +#endif /* !CONFIG_PPC_BOOK3S */ #ifndef __ASSEMBLY__ #include <asm/tlbflush.h> -/* Generic accessors to PTE bits */ -static inline int pte_write(pte_t pte) -{ return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; } -static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } -static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } -static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } - -#ifdef CONFIG_NUMA_BALANCING -/* - * These work without NUMA balancing but the kernel does not care. See the - * comment in include/asm-generic/pgtable.h . On powerpc, this will only - * work for user pages and always return true for kernel pages. - */ -static inline int pte_protnone(pte_t pte) -{ - return (pte_val(pte) & - (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; -} - -static inline int pmd_protnone(pmd_t pmd) -{ - return pte_protnone(pmd_pte(pmd)); -} -#endif /* CONFIG_NUMA_BALANCING */ - -static inline int pte_present(pte_t pte) -{ - return pte_val(pte) & _PAGE_PRESENT; -} - -/* Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - * - * Even if PTEs can be unsigned long long, a PFN is always an unsigned - * long for now. 
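For context on the pgalloc-32/pgalloc-64 hunks above, which convert pmd_populate_kernel()/pmd_populate() from macros into static inlines: a kernel-context sketch of how a freshly allocated PTE page gets wired into a pmd through the new inline. This is not standalone code; it mirrors the generic __pte_alloc_kernel() flow with error handling trimmed, and example_pte_alloc_kernel is a made-up name.

    static int example_pte_alloc_kernel(pmd_t *pmd, unsigned long addr)
    {
            pte_t *new = pte_alloc_one_kernel(&init_mm, addr);

            if (!new)
                    return -ENOMEM;

            smp_wmb();      /* publish the zeroed page before the pmd store */

            spin_lock(&init_mm.page_table_lock);
            if (likely(pmd_none(*pmd)))             /* nobody raced us */
                    pmd_populate_kernel(&init_mm, pmd, new);
            else                                    /* lost the race */
                    pte_free_kernel(&init_mm, new);
            spin_unlock(&init_mm.page_table_lock);
            return 0;
    }

With the inline version, pmd_populate_kernel() gets real type checking on its pmd and pte arguments instead of expanding to a raw pmd_val() assignment.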
- */ -static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { - return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | - pgprot_val(pgprot)); } -static inline unsigned long pte_pfn(pte_t pte) { - return pte_val(pte) >> PTE_RPN_SHIFT; } - /* Keep these as a macros to avoid include dependency mess */ #define pte_page(x) pfn_to_page(pte_pfn(x)) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -/* Generic modifiers for PTE bits */ -static inline pte_t pte_wrprotect(pte_t pte) { - pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); - pte_val(pte) |= _PAGE_RO; return pte; } -static inline pte_t pte_mkclean(pte_t pte) { - pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } -static inline pte_t pte_mkold(pte_t pte) { - pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkwrite(pte_t pte) { - pte_val(pte) &= ~_PAGE_RO; - pte_val(pte) |= _PAGE_RW; return pte; } -static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= _PAGE_DIRTY; return pte; } -static inline pte_t pte_mkyoung(pte_t pte) { - pte_val(pte) |= _PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkspecial(pte_t pte) { - pte_val(pte) |= _PAGE_SPECIAL; return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { - return pte; } -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); - return pte; -} - - -/* Insert a PTE, top-level function is out of line. It uses an inline - * low level function in the respective pgtable-* files - */ -extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - pte_t pte); - -/* This low level function performs the actual PTE insertion - * Setting the PTE depends on the MMU type and other factors. It's - * an horrible mess that I'm not going to try to clean up now but - * I'm keeping it in one place rather than spread around - */ -static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, int percpu) -{ -#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) - /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the - * helper pte_update() which does an atomic update. We need to do that - * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a - * per-CPU PTE such as a kmap_atomic, we do a simple update preserving - * the hash bits instead (ie, same as the non-SMP case) - */ - if (percpu) - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - else - pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); - -#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) - /* Second case is 32-bit with 64-bit PTE. In this case, we - * can just store as long as we do the two halves in the right order - * with a barrier in between. This is possible because we take care, - * in the hash code, to pre-invalidate if the PTE was already hashed, - * which synchronizes us with any concurrent invalidation. 
- * In the percpu case, we also fallback to the simple update preserving - * the hash bits - */ - if (percpu) { - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - return; - } -#if _PAGE_HASHPTE != 0 - if (pte_val(*ptep) & _PAGE_HASHPTE) - flush_hash_entry(mm, ptep, addr); -#endif - __asm__ __volatile__("\ - stw%U0%X0 %2,%0\n\ - eieio\n\ - stw%U0%X0 %L2,%1" - : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) - : "r" (pte) : "memory"); - -#elif defined(CONFIG_PPC_STD_MMU_32) - /* Third case is 32-bit hash table in UP mode, we need to preserve - * the _PAGE_HASHPTE bit since we may not have invalidated the previous - * translation in the hash yet (done in a subsequent flush_tlb_xxx()) - * and see we need to keep track that this PTE needs invalidating - */ - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - -#else - /* Anything else just stores the PTE normally. That covers all 64-bit - * cases, and 32-bit non-hash with 32-bit PTEs. - */ - *ptep = pte; - -#ifdef CONFIG_PPC_BOOK3E_64 - /* - * With hardware tablewalk, a sync is needed to ensure that - * subsequent accesses see the PTE we just wrote. Unlike userspace - * mappings, we can't tolerate spurious faults, so make sure - * the new PTE will be seen the first time. - */ - if (is_kernel_addr(addr)) - mb(); -#endif -#endif -} - - -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, - pte_t *ptep, pte_t entry, int dirty); - -/* - * Macro to mark a page protection value as "uncacheable". - */ - -#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ - _PAGE_WRITETHRU) - -#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_NO_CACHE | _PAGE_GUARDED)) - -#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_NO_CACHE)) - -#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_COHERENT)) - -#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_COHERENT | _PAGE_WRITETHRU)) - -#define pgprot_cached_noncoherent(prot) \ - (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) - -#define pgprot_writecombine pgprot_noncached_wc - -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#define __HAVE_PHYS_MEM_ACCESS_PROT - /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. @@ -271,5 +80,4 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, } #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 67859edbf8fd..1b394247afc2 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -202,6 +202,23 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex, } /* + * ptes must be 8*sizeof(unsigned long) + */ +static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex, + unsigned long *ptes) + +{ + long rc; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + + rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex); + + memcpy(ptes, retbuf, 8*sizeof(unsigned long)); + + return rc; +} + +/* * plpar_pte_read_4_raw can be called in real mode. 
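The plpar_wrappers.h hunk above adds plpar_pte_read_4(), the virtual-mode sibling of the plpar_pte_read_4_raw() whose comment continues below. A kernel-context sketch of a caller (dump_hpte_group is a made-up name, and flags of 0 are assumed for illustration): H_READ with the H_READ_4 flag returns four HPTEs, i.e. eight unsigned longs, which is why the buffer contract is 8*sizeof(unsigned long).

    static void dump_hpte_group(unsigned long ptex)
    {
            unsigned long ptes[8];  /* 4 HPTEs x (valid word + real word) */
            long rc;
            int i;

            rc = plpar_pte_read_4(0, ptex, ptes);
            if (rc != H_SUCCESS)
                    return;

            for (i = 0; i < 8; i += 2)
                    pr_info("hpte %lu: v=%016lx r=%016lx\n",
                            ptex + i / 2, ptes[i], ptes[i + 1]);
    }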
* ptes must be 8*sizeof(unsigned long) */ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 5afea361beaa..ac2330820b9a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -88,12 +88,6 @@ struct task_struct; void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); void release_thread(struct task_struct *); -/* Lazy FPU handling on uni-processor */ -extern struct task_struct *last_task_used_math; -extern struct task_struct *last_task_used_altivec; -extern struct task_struct *last_task_used_vsx; -extern struct task_struct *last_task_used_spe; - #ifdef CONFIG_PPC32 #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START @@ -294,6 +288,7 @@ struct thread_struct { #endif #ifdef CONFIG_PPC64 unsigned long dscr; + unsigned long fscr; /* * This member element dscr_inherit indicates that the process * has explicitly attempted and changed the DSCR register value @@ -385,8 +380,6 @@ extern int set_endian(struct task_struct *tsk, unsigned int val); extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); -extern void fp_enable(void); -extern void vec_enable(void); extern void load_fp_state(struct thread_fp_state *fp); extern void store_fp_state(struct thread_fp_state *fp); extern void load_vr_state(struct thread_vr_state *vr); diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 71537a319fc8..1ec67b043065 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -40,6 +40,11 @@ #else #define _PAGE_RW 0 #endif + +#ifndef _PAGE_PTE +#define _PAGE_PTE 0 +#endif + #ifndef _PMD_PRESENT_MASK #define _PMD_PRESENT_MASK _PMD_PRESENT #endif diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h deleted file mode 100644 index c134e809aac3..000000000000 --- a/arch/powerpc/include/asm/pte-hash64-4k.h +++ /dev/null @@ -1,17 +0,0 @@ -/* To be include by pgtable-hash64.h only */ - -/* PTE bits */ -#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ -#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ -#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ -#define _PAGE_F_SECOND _PAGE_SECONDARY -#define _PAGE_F_GIX _PAGE_GROUP_IX -#define _PAGE_SPECIAL 0x10000 /* software: special page */ - -/* PTE flags to conserve for HPTE identification */ -#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ - _PAGE_SECONDARY | _PAGE_GROUP_IX) - -/* shift to put page number into pte */ -#define PTE_RPN_SHIFT (17) - diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h deleted file mode 100644 index 4f4ec2ab45c9..000000000000 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ /dev/null @@ -1,102 +0,0 @@ -/* To be include by pgtable-hash64.h only */ - -/* Additional PTE bits (don't change without checking asm in hash_low.S) */ -#define _PAGE_SPECIAL 0x00000400 /* software: special page */ -#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ -#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ -#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ -#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ - -/* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead, - * we set that to be the whole sub-bits mask. 
The C code will only - * test this, so a multi-bit mask will work. For combo pages, this - * is equivalent as effectively, the old _PAGE_HASHPTE was an OR of - * all the sub bits. For real 64k pages, we now have the assembly set - * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap - * that mask. This is fine as long as the HIDX bits are never set on - * a PTE that isn't hashed, which is the case today. - * - * A little nit is for the huge page C code, which does the hashing - * in C, we need to provide which bit to use. - */ -#define _PAGE_HASHPTE _PAGE_HPTE_SUB - -/* Note the full page bits must be in the same location as for normal - * 4k pages as the same assembly will be used to insert 64K pages - * whether the kernel has CONFIG_PPC_64K_PAGES or not - */ -#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ -#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ - -/* PTE flags to conserve for HPTE identification */ -#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO) - -/* Shift to put page number into pte. - * - * That gives us a max RPN of 34 bits, which means a max of 50 bits - * of addressable physical space, or 46 bits for the special 4k PFNs. - */ -#define PTE_RPN_SHIFT (30) - -#ifndef __ASSEMBLY__ - -/* - * With 64K pages on hash table, we have a special PTE format that - * uses a second "half" of the page table to encode sub-page information - * in order to deal with 64K made of 4K HW pages. Thus we override the - * generic accessors and iterators here - */ -#define __real_pte __real_pte -static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) -{ - real_pte_t rpte; - - rpte.pte = pte; - rpte.hidx = 0; - if (pte_val(pte) & _PAGE_COMBO) { - /* - * Make sure we order the hidx load against the _PAGE_COMBO - * check. The store side ordering is done in __hash_page_4K - */ - smp_rmb(); - rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE)); - } - return rpte; -} - -static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) -{ - if ((pte_val(rpte.pte) & _PAGE_COMBO)) - return (rpte.hidx >> (index<<2)) & 0xf; - return (pte_val(rpte.pte) >> 12) & 0xf; -} - -#define __rpte_to_pte(r) ((r).pte) -#define __rpte_sub_valid(rpte, index) \ - (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) - -/* Trick: we set __end to va + 64k, which happens works for - * a 16M page as well as we want only one iteration - */ -#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ - do { \ - unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ - unsigned __split = (psize == MMU_PAGE_4K || \ - psize == MMU_PAGE_64K_AP); \ - shift = mmu_psize_defs[psize].shift; \ - for (index = 0; vpn < __end; index++, \ - vpn += (1L << (shift - VPN_SHIFT))) { \ - if (!__split || __rpte_sub_valid(rpte, index)) \ - do { - -#define pte_iterate_hashed_end() } while(0); } } while(0) - -#define pte_pagesize_index(mm, addr, pte) \ - (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) - -#define remap_4k_pfn(vma, addr, pfn, prot) \ - (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? 
-EINVAL : \ - remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ - __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))) - -#endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h deleted file mode 100644 index ef612c160da7..000000000000 --- a/arch/powerpc/include/asm/pte-hash64.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef _ASM_POWERPC_PTE_HASH64_H -#define _ASM_POWERPC_PTE_HASH64_H -#ifdef __KERNEL__ - -/* - * Common bits between 4K and 64K pages in a linux-style PTE. - * These match the bits in the (hardware-defined) PowerPC PTE as closely - * as possible. Additional bits may be defined in pgtable-hash64-*.h - * - * Note: We only support user read/write permissions. Supervisor always - * have full read/write to pages above PAGE_OFFSET (pages below that - * always use the user access permissions). - * - * We could create separate kernel read-only if we used the 3 PP bits - * combinations that newer processors provide but we currently don't. - */ -#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ -#define _PAGE_USER 0x0002 /* matches one of the PP bits */ -#define _PAGE_BIT_SWAP_TYPE 2 -#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ -#define _PAGE_GUARDED 0x0008 -/* We can derive Memory coherence from _PAGE_NO_CACHE */ -#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ -#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ -#define _PAGE_DIRTY 0x0080 /* C: page changed */ -#define _PAGE_ACCESSED 0x0100 /* R: page referenced */ -#define _PAGE_RW 0x0200 /* software: user write access allowed */ -#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ - -/* No separate kernel read-only */ -#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ -#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW - -/* Strong Access Ordering */ -#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) - -/* No page size encoding in the linux PTE */ -#define _PAGE_PSIZE 0 - -/* PTEIDX nibble */ -#define _PTEIDX_SECONDARY 0x8 -#define _PTEIDX_GROUP_IX 0x7 - -/* Hash table based platforms need atomic updates of the linux PTE */ -#define PTE_ATOMIC_UPDATES 1 - -#ifdef CONFIG_PPC_64K_PAGES -#include <asm/pte-hash64-64k.h> -#else -#include <asm/pte-hash64-4k.h> -#endif - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_HASH64_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 2220f7a60def..c4cb2ffc624e 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1194,12 +1194,20 @@ #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ : : "r" (v) : "memory") #define mtmsr(v) __mtmsrd((v), 0) +#define __MTMSR "mtmsrd" #else #define mtmsr(v) asm volatile("mtmsr %0" : \ : "r" ((unsigned long)(v)) \ : "memory") +#define __MTMSR "mtmsr" #endif +static inline void mtmsr_isync(unsigned long val) +{ + asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : : + "r" (val), "i" (CPU_FTR_ARCH_206) : "memory"); +} + #define mfspr(rn) ({unsigned long rval; \ asm volatile("mfspr %0," __stringify(rn) \ : "=r" (rval)); rval;}) @@ -1207,6 +1215,15 @@ : "r" ((unsigned long)(v)) \ : "memory") +extern void msr_check_and_set(unsigned long bits); +extern bool strict_msr_control; +extern void __msr_check_and_clear(unsigned long bits); +static inline void msr_check_and_clear(unsigned long bits) +{ + if (strict_msr_control) + __msr_check_and_clear(bits); +} + static inline unsigned long mfvtb (void) { #ifdef 
CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 15cca17cba4b..5b268b6be74c 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -4,6 +4,8 @@ #ifndef _ASM_POWERPC_SWITCH_TO_H #define _ASM_POWERPC_SWITCH_TO_H +#include <asm/reg.h> + struct thread_struct; struct task_struct; struct pt_regs; @@ -12,74 +14,59 @@ extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) -struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); -#ifdef CONFIG_PPC_BOOK3S_64 -static inline void save_early_sprs(struct thread_struct *prev) -{ - if (cpu_has_feature(CPU_FTR_ARCH_207S)) - prev->tar = mfspr(SPRN_TAR); - if (cpu_has_feature(CPU_FTR_DSCR)) - prev->dscr = mfspr(SPRN_DSCR); -} -#else -static inline void save_early_sprs(struct thread_struct *prev) {} -#endif -extern void enable_kernel_fp(void); -extern void enable_kernel_altivec(void); -extern void enable_kernel_vsx(void); -extern int emulate_altivec(struct pt_regs *); -extern void __giveup_vsx(struct task_struct *); -extern void giveup_vsx(struct task_struct *); -extern void enable_kernel_spe(void); -extern void giveup_spe(struct task_struct *); -extern void load_up_spe(struct task_struct *); extern void switch_booke_debug_regs(struct debug_reg *new_debug); -#ifndef CONFIG_SMP -extern void discard_lazy_cpu_state(void); -#else -static inline void discard_lazy_cpu_state(void) -{ -} -#endif +extern int emulate_altivec(struct pt_regs *); + +extern void flush_all_to_thread(struct task_struct *); +extern void giveup_all(struct task_struct *); #ifdef CONFIG_PPC_FPU +extern void enable_kernel_fp(void); extern void flush_fp_to_thread(struct task_struct *); extern void giveup_fpu(struct task_struct *); +extern void __giveup_fpu(struct task_struct *); +static inline void disable_kernel_fp(void) +{ + msr_check_and_clear(MSR_FP); +} #else static inline void flush_fp_to_thread(struct task_struct *t) { } -static inline void giveup_fpu(struct task_struct *t) { } #endif #ifdef CONFIG_ALTIVEC +extern void enable_kernel_altivec(void); extern void flush_altivec_to_thread(struct task_struct *); extern void giveup_altivec(struct task_struct *); -extern void giveup_altivec_notask(void); -#else -static inline void flush_altivec_to_thread(struct task_struct *t) -{ -} -static inline void giveup_altivec(struct task_struct *t) +extern void __giveup_altivec(struct task_struct *); +static inline void disable_kernel_altivec(void) { + msr_check_and_clear(MSR_VEC); } #endif #ifdef CONFIG_VSX +extern void enable_kernel_vsx(void); extern void flush_vsx_to_thread(struct task_struct *); -#else -static inline void flush_vsx_to_thread(struct task_struct *t) +extern void giveup_vsx(struct task_struct *); +extern void __giveup_vsx(struct task_struct *); +static inline void disable_kernel_vsx(void) { + msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); } #endif #ifdef CONFIG_SPE +extern void enable_kernel_spe(void); extern void flush_spe_to_thread(struct task_struct *); -#else -static inline void flush_spe_to_thread(struct task_struct *t) +extern void giveup_spe(struct task_struct *); +extern void __giveup_spe(struct task_struct *); +static inline void disable_kernel_spe(void) { + msr_check_and_clear(MSR_SPE); } #endif diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index e682a7143edb..c50868681f9e 100644 --- 
a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -44,7 +44,7 @@ static inline void isync(void) MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" -#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n" +#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n" #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n" #else #define PPC_ACQUIRE_BARRIER diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 4b6b8ace18e0..6a5ace5fa0c8 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,10 +12,9 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 379 +#define NR_syscalls 379 #define __NR__exit __NR_exit -#define NR_syscalls __NR_syscalls #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index b73a8199f161..1afe90ade595 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -41,7 +41,7 @@ #include <linux/unistd.h> #include <linux/time.h> -#define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32) +#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32) /* * So here is the ppc64 backward compatible version diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 86150fbb42c3..8e7cb8e2b21a 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs) preempt_disable(); enable_kernel_fp(); cvt_df(&data.dd, (float *)&data.x32.low32); + disable_kernel_fp(); preempt_enable(); #else return 0; @@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs) preempt_disable(); enable_kernel_fp(); cvt_fd((float *)&data.x32.low32, &data.dd); + disable_kernel_fp(); preempt_enable(); #else return 0; diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index a94f155db78e..c8b4225a0095 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -452,43 +452,11 @@ _GLOBAL(_switch) /* r3-r13 are caller saved -- Cort */ SAVE_8GPRS(14, r1) SAVE_10GPRS(22, r1) - mflr r20 /* Return to switch caller */ - mfmsr r22 - li r0, MSR_FP -#ifdef CONFIG_VSX -BEGIN_FTR_SECTION - oris r0,r0,MSR_VSX@h /* Disable VSX */ -END_FTR_SECTION_IFSET(CPU_FTR_VSX) -#endif /* CONFIG_VSX */ -#ifdef CONFIG_ALTIVEC -BEGIN_FTR_SECTION - oris r0,r0,MSR_VEC@h /* Disable altivec */ - mfspr r24,SPRN_VRSAVE /* save vrsave register value */ - std r24,THREAD_VRSAVE(r3) -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) -#endif /* CONFIG_ALTIVEC */ - and. 
r0,r0,r22 - beq+ 1f - andc r22,r22,r0 - MTMSRD(r22) - isync -1: std r20,_NIP(r1) + std r0,_NIP(r1) /* Return to switch caller */ mfcr r23 std r23,_CCR(r1) std r1,KSP(r3) /* Set old stack pointer */ -#ifdef CONFIG_PPC_BOOK3S_64 -BEGIN_FTR_SECTION - /* Event based branch registers */ - mfspr r0, SPRN_BESCR - std r0, THREAD_BESCR(r3) - mfspr r0, SPRN_EBBHR - std r0, THREAD_EBBHR(r3) - mfspr r0, SPRN_EBBRR - std r0, THREAD_EBBRR(r3) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) -#endif - #ifdef CONFIG_SMP /* We need a sync somewhere here to make sure that if the * previous task gets rescheduled on another CPU, it sees all @@ -576,47 +544,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) -#ifdef CONFIG_PPC_BOOK3S_64 -BEGIN_FTR_SECTION - /* Event based branch registers */ - ld r0, THREAD_BESCR(r4) - mtspr SPRN_BESCR, r0 - ld r0, THREAD_EBBHR(r4) - mtspr SPRN_EBBHR, r0 - ld r0, THREAD_EBBRR(r4) - mtspr SPRN_EBBRR, r0 - - ld r0,THREAD_TAR(r4) - mtspr SPRN_TAR,r0 -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) -#endif - -#ifdef CONFIG_ALTIVEC -BEGIN_FTR_SECTION - ld r0,THREAD_VRSAVE(r4) - mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_PPC64 -BEGIN_FTR_SECTION - lwz r6,THREAD_DSCR_INHERIT(r4) - ld r0,THREAD_DSCR(r4) - cmpwi r6,0 - bne 1f - ld r0,PACA_DSCR_DEFAULT(r13) -1: -BEGIN_FTR_SECTION_NESTED(70) - mfspr r8, SPRN_FSCR - rldimi r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG) - mtspr SPRN_FSCR, r8 -END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70) - cmpd r0,r25 - beq 2f - mtspr SPRN_DSCR,r0 -2: -END_FTR_SECTION_IFSET(CPU_FTR_DSCR) -#endif - ld r6,_CCR(r1) mtcrf 0xFF,r6 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 0a0399c2af11..3419cbf2ad59 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -242,7 +242,7 @@ instruction_access_slb_pSeries: HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXSLB) - EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ #ifdef __DISABLED__ @@ -276,18 +276,18 @@ hardware_interrupt_hv: KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502) FTR_SECTION_ELSE _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, - EXC_STD, SOFTEN_TEST_HV_201) + EXC_STD, SOFTEN_TEST_PR) KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500) ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) STD_EXCEPTION_PSERIES(0x600, 0x600, alignment) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600) STD_EXCEPTION_PSERIES(0x700, 0x700, program_check) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700) STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800) . = 0x900 .globl decrementer_pSeries @@ -297,10 +297,10 @@ decrementer_pSeries: STD_EXCEPTION_HV(0x980, 0x982, hdecrementer) MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00) STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00) . 
= 0xc00 .globl system_call_pSeries @@ -332,7 +332,7 @@ system_call_pSeries: KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00) STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00) /* At 0xe??? we have a bunch of hypervisor exceptions, we branch * out of line to handle them @@ -408,7 +408,7 @@ hv_facility_unavailable_trampoline: #endif /* CONFIG_CBE_RAS */ STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) - KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300) . = 0x1500 .global denorm_exception_hv @@ -436,7 +436,7 @@ denorm_exception_hv: #endif /* CONFIG_CBE_RAS */ STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700) #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) @@ -536,9 +536,9 @@ machine_check_pSeries_0: KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400) - KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400) + KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900) KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982) #ifdef CONFIG_PPC_DENORMALISATION @@ -621,13 +621,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) /* moved from 0xf00 */ STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00) STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20) STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40) STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) - KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60) STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable) KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82) @@ -1556,29 +1556,19 @@ do_hash_page: lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ bne 77f /* then don't call hash_page now */ - /* - * We need to set the _PAGE_USER bit if MSR_PR is set or if we are - * accessing a userspace segment (even from the kernel). We assume - * kernel addresses always have the high bit set. 
- */ - rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ - rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ - orc r0,r12,r0 /* MSR_PR | ~high_bit */ - rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ - ori r4,r4,1 /* add _PAGE_PRESENT */ - rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ /* * r3 contains the faulting address - * r4 contains the required access permissions + * r4 msr * r5 contains the trap number * r6 contains dsisr * * at return r3 = 0 for success, 1 for page fault, negative for error */ + mr r4,r12 ld r6,_DSISR(r1) - bl hash_page /* build HPTE if possible */ - cmpdi r3,0 /* see if hash_page succeeded */ + bl __hash_page /* build HPTE if possible */ + cmpdi r3,0 /* see if __hash_page succeeded */ /* Success */ beq fast_exc_return_irq /* Return from exception on success */ diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 9ad236e5d2c9..2117eaca3d28 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -73,30 +73,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) MTFSF_L(fr0) REST_32FPVSRS(0, R4, R7) - /* FP/VSX off again */ - MTMSRD(r6) - SYNC - blr #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* - * Enable use of the FPU, and VSX if possible, for the caller. - */ -_GLOBAL(fp_enable) - mfmsr r3 - ori r3,r3,MSR_FP -#ifdef CONFIG_VSX -BEGIN_FTR_SECTION - oris r3,r3,MSR_VSX@h -END_FTR_SECTION_IFSET(CPU_FTR_VSX) -#endif - SYNC - MTMSRD(r3) - isync /* (not necessary for arch 2.02 and later) */ - blr - -/* * Load state from memory into FP registers including FPSCR. * Assumes the caller has enabled FP in the MSR. */ @@ -136,31 +116,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) SYNC MTMSRD(r5) /* enable use of fpu now */ isync -/* - * For SMP, we don't do lazy FPU switching because it just gets too - * horrendously complex, especially when a task switches from one CPU - * to another. Instead we call giveup_fpu in switch_to. - */ -#ifndef CONFIG_SMP - LOAD_REG_ADDRBASE(r3, last_task_used_math) - toreal(r3) - PPC_LL r4,ADDROFF(last_task_used_math)(r3) - PPC_LCMPI 0,r4,0 - beq 1f - toreal(r4) - addi r4,r4,THREAD /* want last_task_used_math->thread */ - addi r10,r4,THREAD_FPSTATE - SAVE_32FPVSRS(0, R5, R10) - mffs fr0 - stfd fr0,FPSTATE_FPSCR(r10) - PPC_LL r5,PT_REGS(r4) - toreal(r5) - PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) - li r10,MSR_FP|MSR_FE0|MSR_FE1 - andc r4,r4,r10 /* disable FP for previous task */ - PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* CONFIG_SMP */ /* enable use of FP after return */ #ifdef CONFIG_PPC32 mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ @@ -179,36 +134,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) lfd fr0,FPSTATE_FPSCR(r10) MTFSF_L(fr0) REST_32FPVSRS(0, R4, R10) -#ifndef CONFIG_SMP - subi r4,r5,THREAD - fromreal(r4) - PPC_STL r4,ADDROFF(last_task_used_math)(r3) -#endif /* CONFIG_SMP */ /* restore registers and return */ /* we haven't used ctr or xer or lr */ blr /* - * giveup_fpu(tsk) + * __giveup_fpu(tsk) * Disable FP for the task given as the argument, * and save the floating-point registers in its thread_struct. * Enables the FPU for use in the kernel on return. 
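This fpu.S rework strips the MSR-twiddling out of the low-level save/restore paths; enabling and disabling the FPU for kernel use becomes an explicit C-level bracket instead. A minimal sketch of the pattern, matching the fix_alignment() hunks earlier in this patch (the function name here is illustrative only):

        #include <linux/preempt.h>
        #include <asm/switch_to.h>

        static void use_fpu_in_kernel(void)
        {
                preempt_disable();      /* facility state is per-CPU while we hold it */
                enable_kernel_fp();     /* flush user FP state, set MSR_FP */
                /* ... FP (and, with CPU_FTR_VSX, VSX) instructions ... */
                disable_kernel_fp();    /* msr_check_and_clear(MSR_FP) */
                preempt_enable();
        }

The disable_kernel_altivec()/disable_kernel_vsx()/disable_kernel_spe() brackets defined in the switch_to.h hunk above follow the same shape, and booting with ppc_strict_facility_enable is intended to catch facility use outside such a region.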
*/ -_GLOBAL(giveup_fpu) - mfmsr r5 - ori r5,r5,MSR_FP -#ifdef CONFIG_VSX -BEGIN_FTR_SECTION - oris r5,r5,MSR_VSX@h -END_FTR_SECTION_IFSET(CPU_FTR_VSX) -#endif - SYNC_601 - ISYNC_601 - MTMSRD(r5) /* enable use of fpu now */ - SYNC_601 - isync - PPC_LCMPI 0,r3,0 - beqlr- /* if no previous owner, done */ +_GLOBAL(__giveup_fpu) addi r3,r3,THREAD /* want THREAD of task */ PPC_LL r6,THREAD_FPSAVEAREA(r3) PPC_LL r5,PT_REGS(r3) @@ -230,11 +166,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) andc r4,r4,r3 /* disable FP for previous task */ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1: -#ifndef CONFIG_SMP - li r5,0 - LOAD_REG_ADDRBASE(r4,last_task_used_math) - PPC_STL r5,ADDROFF(last_task_used_math)(r4) -#endif /* CONFIG_SMP */ blr /* diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index fffd1f96bb1d..f705171b924b 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -857,29 +857,6 @@ _GLOBAL(load_up_spe) oris r5,r5,MSR_SPE@h mtmsr r5 /* enable use of SPE now */ isync -/* - * For SMP, we don't do lazy SPE switching because it just gets too - * horrendously complex, especially when a task switches from one CPU - * to another. Instead we call giveup_spe in switch_to. - */ -#ifndef CONFIG_SMP - lis r3,last_task_used_spe@ha - lwz r4,last_task_used_spe@l(r3) - cmpi 0,r4,0 - beq 1f - addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ - SAVE_32EVRS(0,r10,r4,THREAD_EVR0) - evxor evr10, evr10, evr10 /* clear out evr10 */ - evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ - li r5,THREAD_ACC - evstddx evr10, r4, r5 /* save off accumulator */ - lwz r5,PT_REGS(r4) - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r10,MSR_SPE@h - andc r4,r4,r10 /* disable SPE for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* !CONFIG_SMP */ /* enable use of SPE after return */ oris r9,r9,MSR_SPE@h mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ @@ -889,10 +866,6 @@ _GLOBAL(load_up_spe) evlddx evr4,r10,r5 evmra evr4,evr4 REST_32EVRS(0,r10,r5,THREAD_EVR0) -#ifndef CONFIG_SMP - subi r4,r5,THREAD - stw r4,last_task_used_spe@l(r3) -#endif /* !CONFIG_SMP */ blr /* @@ -1011,16 +984,10 @@ _GLOBAL(__setup_ehv_ivors) #ifdef CONFIG_SPE /* - * extern void giveup_spe(struct task_struct *prev) + * extern void __giveup_spe(struct task_struct *prev) * */ -_GLOBAL(giveup_spe) - mfmsr r5 - oris r5,r5,MSR_SPE@h - mtmsr r5 /* enable use of SPE now */ - isync - cmpi 0,r3,0 - beqlr- /* if no previous owner, done */ +_GLOBAL(__giveup_spe) addi r3,r3,THREAD /* want THREAD of task */ lwz r5,PT_REGS(r3) cmpi 0,r5,0 @@ -1035,11 +1002,6 @@ _GLOBAL(giveup_spe) andc r4,r4,r3 /* disable SPE for previous task */ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1: -#ifndef CONFIG_SMP - li r5,0 - lis r4,last_task_used_spe@ha - stw r5,last_task_used_spe@l(r4) -#endif /* !CONFIG_SMP */ blr #endif /* CONFIG_SPE */ diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index 112ccf497562..cf4fb5429cf1 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -89,13 +89,6 @@ _GLOBAL(power7_powersave_common) std r0,_LINK(r1) std r0,_NIP(r1) -#ifndef CONFIG_SMP - /* Make sure FPU, VSX etc... 
are flushed as we may lose - * state when going to nap mode - */ - bl discard_lazy_cpu_state -#endif /* CONFIG_SMP */ - /* Hard disable interrupts */ mfmsr r9 rldicl r9,r9,48,1 diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 202963ee013a..41e1607e800c 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount); #endif #ifdef CONFIG_PPC_FPU -EXPORT_SYMBOL(giveup_fpu); EXPORT_SYMBOL(load_fp_state); EXPORT_SYMBOL(store_fp_state); #endif #ifdef CONFIG_ALTIVEC -EXPORT_SYMBOL(giveup_altivec); EXPORT_SYMBOL(load_vr_state); EXPORT_SYMBOL(store_vr_state); #endif @@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state); EXPORT_SYMBOL_GPL(__giveup_vsx); #endif -#ifdef CONFIG_SPE -EXPORT_SYMBOL(giveup_spe); -#endif - #ifdef CONFIG_EPAPR_PARAVIRT EXPORT_SYMBOL(epapr_hypercall_start); #endif diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 646bf4d222c1..dccc87e8fee5 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -67,15 +67,8 @@ extern unsigned long _get_SP(void); -#ifndef CONFIG_SMP -struct task_struct *last_task_used_math = NULL; -struct task_struct *last_task_used_altivec = NULL; -struct task_struct *last_task_used_vsx = NULL; -struct task_struct *last_task_used_spe = NULL; -#endif - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -void giveup_fpu_maybe_transactional(struct task_struct *tsk) +static void check_if_tm_restore_required(struct task_struct *tsk) { /* * If we are saving the current thread's registers, and the @@ -89,34 +82,67 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk) tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; set_thread_flag(TIF_RESTORE_TM); } +} +#else +static inline void check_if_tm_restore_required(struct task_struct *tsk) { } +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + +bool strict_msr_control; +EXPORT_SYMBOL(strict_msr_control); + +static int __init enable_strict_msr_control(char *str) +{ + strict_msr_control = true; + pr_info("Enabling strict facility control\n"); - giveup_fpu(tsk); + return 0; } +early_param("ppc_strict_facility_enable", enable_strict_msr_control); -void giveup_altivec_maybe_transactional(struct task_struct *tsk) +void msr_check_and_set(unsigned long bits) { - /* - * If we are saving the current thread's registers, and the - * thread is in a transactional state, set the TIF_RESTORE_TM - * bit so that we know to restore the registers before - * returning to userspace. 
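The ppc_strict_facility_enable early_param and strict_msr_control flag above control how eagerly facility bits are dropped again. The header half of this is not in these hunks, so the following is an inferred sketch rather than quoted code: msr_check_and_clear() is expected to be a thin inline that only calls the exported __msr_check_and_clear() when strict mode is on, so that the disable side of a bracket costs nothing in normal operation and stale MSR bits are cleaned up lazily at the next context switch or exception return:

        /* assumed shape of the asm/reg.h side */
        extern bool strict_msr_control;
        extern void __msr_check_and_clear(unsigned long bits);

        static inline void msr_check_and_clear(unsigned long bits)
        {
                if (strict_msr_control)
                        __msr_check_and_clear(bits);
        }

This would also explain the export asymmetry in the process.c hunk below: only the double-underscore clear variant carries an EXPORT_SYMBOL(), because modules reach it through inline wrappers such as disable_kernel_fp().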
- */ - if (tsk == current && tsk->thread.regs && - MSR_TM_ACTIVE(tsk->thread.regs->msr) && - !test_thread_flag(TIF_RESTORE_TM)) { - tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; - set_thread_flag(TIF_RESTORE_TM); - } + unsigned long oldmsr = mfmsr(); + unsigned long newmsr; - giveup_altivec(tsk); + newmsr = oldmsr | bits; + +#ifdef CONFIG_VSX + if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) + newmsr |= MSR_VSX; +#endif + + if (oldmsr != newmsr) + mtmsr_isync(newmsr); } -#else -#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk) -#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk) -#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +void __msr_check_and_clear(unsigned long bits) +{ + unsigned long oldmsr = mfmsr(); + unsigned long newmsr; + + newmsr = oldmsr & ~bits; + +#ifdef CONFIG_VSX + if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) + newmsr &= ~MSR_VSX; +#endif + + if (oldmsr != newmsr) + mtmsr_isync(newmsr); +} +EXPORT_SYMBOL(__msr_check_and_clear); #ifdef CONFIG_PPC_FPU +void giveup_fpu(struct task_struct *tsk) +{ + check_if_tm_restore_required(tsk); + + msr_check_and_set(MSR_FP); + __giveup_fpu(tsk); + msr_check_and_clear(MSR_FP); +} +EXPORT_SYMBOL(giveup_fpu); + /* * Make sure the floating-point register state in the * the thread_struct is up to date for task tsk. @@ -134,52 +160,56 @@ void flush_fp_to_thread(struct task_struct *tsk) */ preempt_disable(); if (tsk->thread.regs->msr & MSR_FP) { -#ifdef CONFIG_SMP /* * This should only ever be called for current or * for a stopped child process. Since we save away - * the FP register state on context switch on SMP, + * the FP register state on context switch, * there is something wrong if a stopped child appears * to still have its FP state in the CPU registers. */ BUG_ON(tsk != current); -#endif - giveup_fpu_maybe_transactional(tsk); + giveup_fpu(tsk); } preempt_enable(); } } EXPORT_SYMBOL_GPL(flush_fp_to_thread); -#endif /* CONFIG_PPC_FPU */ void enable_kernel_fp(void) { WARN_ON(preemptible()); -#ifdef CONFIG_SMP - if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) - giveup_fpu_maybe_transactional(current); - else - giveup_fpu(NULL); /* just enables FP for kernel */ -#else - giveup_fpu_maybe_transactional(last_task_used_math); -#endif /* CONFIG_SMP */ + msr_check_and_set(MSR_FP); + + if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { + check_if_tm_restore_required(current); + __giveup_fpu(current); + } } EXPORT_SYMBOL(enable_kernel_fp); +#endif /* CONFIG_PPC_FPU */ #ifdef CONFIG_ALTIVEC +void giveup_altivec(struct task_struct *tsk) +{ + check_if_tm_restore_required(tsk); + + msr_check_and_set(MSR_VEC); + __giveup_altivec(tsk); + msr_check_and_clear(MSR_VEC); +} +EXPORT_SYMBOL(giveup_altivec); + void enable_kernel_altivec(void) { WARN_ON(preemptible()); -#ifdef CONFIG_SMP - if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) - giveup_altivec_maybe_transactional(current); - else - giveup_altivec_notask(); -#else - giveup_altivec_maybe_transactional(last_task_used_altivec); -#endif /* CONFIG_SMP */ + msr_check_and_set(MSR_VEC); + + if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { + check_if_tm_restore_required(current); + __giveup_altivec(current); + } } EXPORT_SYMBOL(enable_kernel_altivec); @@ -192,10 +222,8 @@ void flush_altivec_to_thread(struct task_struct *tsk) if (tsk->thread.regs) { preempt_disable(); if (tsk->thread.regs->msr & MSR_VEC) { -#ifdef CONFIG_SMP BUG_ON(tsk != current); -#endif - giveup_altivec_maybe_transactional(tsk); + 
giveup_altivec(tsk); } preempt_enable(); } @@ -204,37 +232,43 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX -void enable_kernel_vsx(void) +void giveup_vsx(struct task_struct *tsk) { - WARN_ON(preemptible()); + check_if_tm_restore_required(tsk); -#ifdef CONFIG_SMP - if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) - giveup_vsx(current); - else - giveup_vsx(NULL); /* just enable vsx for kernel - force */ -#else - giveup_vsx(last_task_used_vsx); -#endif /* CONFIG_SMP */ + msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); + if (tsk->thread.regs->msr & MSR_FP) + __giveup_fpu(tsk); + if (tsk->thread.regs->msr & MSR_VEC) + __giveup_altivec(tsk); + __giveup_vsx(tsk); + msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); } -EXPORT_SYMBOL(enable_kernel_vsx); +EXPORT_SYMBOL(giveup_vsx); -void giveup_vsx(struct task_struct *tsk) +void enable_kernel_vsx(void) { - giveup_fpu_maybe_transactional(tsk); - giveup_altivec_maybe_transactional(tsk); - __giveup_vsx(tsk); + WARN_ON(preemptible()); + + msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); + + if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) { + check_if_tm_restore_required(current); + if (current->thread.regs->msr & MSR_FP) + __giveup_fpu(current); + if (current->thread.regs->msr & MSR_VEC) + __giveup_altivec(current); + __giveup_vsx(current); + } } -EXPORT_SYMBOL(giveup_vsx); +EXPORT_SYMBOL(enable_kernel_vsx); void flush_vsx_to_thread(struct task_struct *tsk) { if (tsk->thread.regs) { preempt_disable(); if (tsk->thread.regs->msr & MSR_VSX) { -#ifdef CONFIG_SMP BUG_ON(tsk != current); -#endif giveup_vsx(tsk); } preempt_enable(); @@ -244,19 +278,26 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread); #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE +void giveup_spe(struct task_struct *tsk) +{ + check_if_tm_restore_required(tsk); + + msr_check_and_set(MSR_SPE); + __giveup_spe(tsk); + msr_check_and_clear(MSR_SPE); +} +EXPORT_SYMBOL(giveup_spe); void enable_kernel_spe(void) { WARN_ON(preemptible()); -#ifdef CONFIG_SMP - if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) - giveup_spe(current); - else - giveup_spe(NULL); /* just enable SPE for kernel - force */ -#else - giveup_spe(last_task_used_spe); -#endif /* __SMP __ */ + msr_check_and_set(MSR_SPE); + + if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) { + check_if_tm_restore_required(current); + __giveup_spe(current); + } } EXPORT_SYMBOL(enable_kernel_spe); @@ -265,9 +306,7 @@ void flush_spe_to_thread(struct task_struct *tsk) if (tsk->thread.regs) { preempt_disable(); if (tsk->thread.regs->msr & MSR_SPE) { -#ifdef CONFIG_SMP BUG_ON(tsk != current); -#endif tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); giveup_spe(tsk); } @@ -276,31 +315,81 @@ void flush_spe_to_thread(struct task_struct *tsk) } #endif /* CONFIG_SPE */ -#ifndef CONFIG_SMP -/* - * If we are doing lazy switching of CPU state (FP, altivec or SPE), - * and the current task has some state, discard it. 
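All of the giveup_*() wrappers above funnel through msr_check_and_set()/msr_check_and_clear(), which write the MSR via mtmsr_isync(). That helper comes from a part of the series not quoted here (asm/reg.h), so this is an assumed sketch of its shape, built only from accessors that already exist:

        /* write the MSR, then context-synchronise so the facility
         * change takes effect before the next instruction */
        static inline void mtmsr_isync(unsigned long val)
        {
                mtmsr(val);
                isync();
        }

The oldmsr != newmsr comparison guarding that call is the fast path: a second enable_kernel_fp() in a row finds MSR_FP already set and issues no MSR write at all.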
- */ -void discard_lazy_cpu_state(void) +static unsigned long msr_all_available; + +static int __init init_msr_all_available(void) { - preempt_disable(); - if (last_task_used_math == current) - last_task_used_math = NULL; +#ifdef CONFIG_PPC_FPU + msr_all_available |= MSR_FP; +#endif #ifdef CONFIG_ALTIVEC - if (last_task_used_altivec == current) - last_task_used_altivec = NULL; -#endif /* CONFIG_ALTIVEC */ + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + msr_all_available |= MSR_VEC; +#endif #ifdef CONFIG_VSX - if (last_task_used_vsx == current) - last_task_used_vsx = NULL; -#endif /* CONFIG_VSX */ + if (cpu_has_feature(CPU_FTR_VSX)) + msr_all_available |= MSR_VSX; +#endif #ifdef CONFIG_SPE - if (last_task_used_spe == current) - last_task_used_spe = NULL; + if (cpu_has_feature(CPU_FTR_SPE)) + msr_all_available |= MSR_SPE; #endif - preempt_enable(); + + return 0; +} +early_initcall(init_msr_all_available); + +void giveup_all(struct task_struct *tsk) +{ + unsigned long usermsr; + + if (!tsk->thread.regs) + return; + + usermsr = tsk->thread.regs->msr; + + if ((usermsr & msr_all_available) == 0) + return; + + msr_check_and_set(msr_all_available); + +#ifdef CONFIG_PPC_FPU + if (usermsr & MSR_FP) + __giveup_fpu(tsk); +#endif +#ifdef CONFIG_ALTIVEC + if (usermsr & MSR_VEC) + __giveup_altivec(tsk); +#endif +#ifdef CONFIG_VSX + if (usermsr & MSR_VSX) + __giveup_vsx(tsk); +#endif +#ifdef CONFIG_SPE + if (usermsr & MSR_SPE) + __giveup_spe(tsk); +#endif + + msr_check_and_clear(msr_all_available); +} +EXPORT_SYMBOL(giveup_all); + +void flush_all_to_thread(struct task_struct *tsk) +{ + if (tsk->thread.regs) { + preempt_disable(); + BUG_ON(tsk != current); + giveup_all(tsk); + +#ifdef CONFIG_SPE + if (tsk->thread.regs->msr & MSR_SPE) + tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); +#endif + + preempt_enable(); + } } -#endif /* CONFIG_SMP */ +EXPORT_SYMBOL(flush_all_to_thread); #ifdef CONFIG_PPC_ADV_DEBUG_REGS void do_send_trap(struct pt_regs *regs, unsigned long address, @@ -744,13 +833,15 @@ void restore_tm_state(struct pt_regs *regs) msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; if (msr_diff & MSR_FP) { - fp_enable(); + msr_check_and_set(MSR_FP); load_fp_state(&current->thread.fp_state); + msr_check_and_clear(MSR_FP); regs->msr |= current->thread.fpexc_mode; } if (msr_diff & MSR_VEC) { - vec_enable(); + msr_check_and_set(MSR_VEC); load_vr_state(&current->thread.vr_state); + msr_check_and_clear(MSR_VEC); } regs->msr |= msr_diff; } @@ -760,112 +851,87 @@ void restore_tm_state(struct pt_regs *regs) #define __switch_to_tm(prev) #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ -struct task_struct *__switch_to(struct task_struct *prev, - struct task_struct *new) +static inline void save_sprs(struct thread_struct *t) { - struct thread_struct *new_thread, *old_thread; - struct task_struct *last; -#ifdef CONFIG_PPC_BOOK3S_64 - struct ppc64_tlb_batch *batch; +#ifdef CONFIG_ALTIVEC + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + t->vrsave = mfspr(SPRN_VRSAVE); #endif +#ifdef CONFIG_PPC_BOOK3S_64 + if (cpu_has_feature(CPU_FTR_DSCR)) + t->dscr = mfspr(SPRN_DSCR); - WARN_ON(!irqs_disabled()); + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + t->bescr = mfspr(SPRN_BESCR); + t->ebbhr = mfspr(SPRN_EBBHR); + t->ebbrr = mfspr(SPRN_EBBRR); - /* Back up the TAR and DSCR across context switches. - * Note that the TAR is not available for use in the kernel. (To - * provide this, the TAR should be backed up/restored on exception - * entry/exit instead, and be in pt_regs. 
FIXME, this should be in - * pt_regs anyway (for debug).) - * Save the TAR and DSCR here before we do treclaim/trecheckpoint as - * these will change them. - */ - save_early_sprs(&prev->thread); + t->fscr = mfspr(SPRN_FSCR); - __switch_to_tm(prev); + /* + * Note that the TAR is not available for use in the kernel. + * (To provide this, the TAR should be backed up/restored on + * exception entry/exit instead, and be in pt_regs. FIXME, + * this should be in pt_regs anyway (for debug).) + */ + t->tar = mfspr(SPRN_TAR); + } +#endif +} -#ifdef CONFIG_SMP - /* avoid complexity of lazy save/restore of fpu - * by just saving it every time we switch out if - * this task used the fpu during the last quantum. - * - * If it tries to use the fpu again, it'll trap and - * reload its fp regs. So we don't have to do a restore - * every switch, just a save. - * -- Cort - */ - if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) - giveup_fpu(prev); +static inline void restore_sprs(struct thread_struct *old_thread, + struct thread_struct *new_thread) +{ #ifdef CONFIG_ALTIVEC - /* - * If the previous thread used altivec in the last quantum - * (thus changing altivec regs) then save them. - * We used to check the VRSAVE register but not all apps - * set it, so we don't rely on it now (and in fact we need - * to save & restore VSCR even if VRSAVE == 0). -- paulus - * - * On SMP we always save/restore altivec regs just to avoid the - * complexity of changing processors. - * -- Cort - */ - if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) - giveup_altivec(prev); -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_VSX - if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) - /* VMX and FPU registers are already save here */ - __giveup_vsx(prev); -#endif /* CONFIG_VSX */ -#ifdef CONFIG_SPE - /* - * If the previous thread used spe in the last quantum - * (thus changing spe regs) then save them. - * - * On SMP we always save/restore spe regs just to avoid the - * complexity of changing processors. - */ - if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) - giveup_spe(prev); -#endif /* CONFIG_SPE */ + if (cpu_has_feature(CPU_FTR_ALTIVEC) && + old_thread->vrsave != new_thread->vrsave) + mtspr(SPRN_VRSAVE, new_thread->vrsave); +#endif +#ifdef CONFIG_PPC_BOOK3S_64 + if (cpu_has_feature(CPU_FTR_DSCR)) { + u64 dscr = get_paca()->dscr_default; + u64 fscr = old_thread->fscr & ~FSCR_DSCR; -#else /* CONFIG_SMP */ -#ifdef CONFIG_ALTIVEC - /* Avoid the trap. On smp this this never happens since - * we don't set last_task_used_altivec -- Cort - */ - if (new->thread.regs && last_task_used_altivec == new) - new->thread.regs->msr |= MSR_VEC; -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_VSX - if (new->thread.regs && last_task_used_vsx == new) - new->thread.regs->msr |= MSR_VSX; -#endif /* CONFIG_VSX */ -#ifdef CONFIG_SPE - /* Avoid the trap. 
On smp this this never happens since - * we don't set last_task_used_spe - */ - if (new->thread.regs && last_task_used_spe == new) - new->thread.regs->msr |= MSR_SPE; -#endif /* CONFIG_SPE */ + if (new_thread->dscr_inherit) { + dscr = new_thread->dscr; + fscr |= FSCR_DSCR; + } -#endif /* CONFIG_SMP */ + if (old_thread->dscr != dscr) + mtspr(SPRN_DSCR, dscr); -#ifdef CONFIG_PPC_ADV_DEBUG_REGS - switch_booke_debug_regs(&new->thread.debug); -#else -/* - * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would - * schedule DABR - */ -#ifndef CONFIG_HAVE_HW_BREAKPOINT - if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk))) - __set_breakpoint(&new->thread.hw_brk); -#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + if (old_thread->fscr != fscr) + mtspr(SPRN_FSCR, fscr); + } + + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + if (old_thread->bescr != new_thread->bescr) + mtspr(SPRN_BESCR, new_thread->bescr); + if (old_thread->ebbhr != new_thread->ebbhr) + mtspr(SPRN_EBBHR, new_thread->ebbhr); + if (old_thread->ebbrr != new_thread->ebbrr) + mtspr(SPRN_EBBRR, new_thread->ebbrr); + + if (old_thread->tar != new_thread->tar) + mtspr(SPRN_TAR, new_thread->tar); + } #endif +struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *new) +{ + struct thread_struct *new_thread, *old_thread; + struct task_struct *last; +#ifdef CONFIG_PPC_BOOK3S_64 + struct ppc64_tlb_batch *batch; +#endif new_thread = &new->thread; old_thread = &current->thread; + WARN_ON(!irqs_disabled()); + #ifdef CONFIG_PPC64 /* * Collect processor utilization data per process @@ -890,6 +956,30 @@ struct task_struct *__switch_to(struct task_struct *prev, } #endif /* CONFIG_PPC_BOOK3S_64 */ +#ifdef CONFIG_PPC_ADV_DEBUG_REGS + switch_booke_debug_regs(&new->thread.debug); +#else +/* + * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would + * schedule DABR + */ +#ifndef CONFIG_HAVE_HW_BREAKPOINT + if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk))) + __set_breakpoint(&new->thread.hw_brk); +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ +#endif + + /* + * We need to save SPRs before treclaim/trecheckpoint as these will + * change a number of them. + */ + save_sprs(&prev->thread); + + __switch_to_tm(prev); + + /* Save FPU, Altivec, VSX and SPE state */ + giveup_all(prev); + /* * We can't take a PMU exception inside _switch() since there is a * window where the kernel stack SLB and the kernel stack are out @@ -899,6 +989,15 @@ struct task_struct *__switch_to(struct task_struct *prev, tm_recheckpoint_new_task(new); + /* + * Call restore_sprs() before calling _switch(). If we move it after + * _switch() then we miss out on calling it for new tasks. The reason + * for this is we manually create a stack frame for new tasks that + * directly returns through ret_from_fork() or + * ret_from_kernel_thread(). See copy_thread() for details. 
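Condensed from the hunks here and immediately below, the C-level switch-out path is now a short fixed sequence (illustrative extract only; locking and PMU details omitted):

        save_sprs(&prev->thread);               /* before treclaim/trecheckpoint clobber them */
        __switch_to_tm(prev);
        giveup_all(prev);                       /* FP, Altivec, VSX and SPE in one pass */
        tm_recheckpoint_new_task(new);
        restore_sprs(old_thread, new_thread);   /* before _switch(), so brand-new tasks see it */
        last = _switch(old_thread, new_thread);

restore_sprs() writes an SPR only when the old and new values differ, which is what replaces the unconditional mtspr traffic deleted from entry_64.S earlier in this patch.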
+ */ + restore_sprs(old_thread, new_thread); + last = _switch(old_thread, new_thread); #ifdef CONFIG_PPC_BOOK3S_64 @@ -952,10 +1051,12 @@ static void show_instructions(struct pt_regs *regs) printk("\n"); } -static struct regbit { +struct regbit { unsigned long bit; const char *name; -} msr_bits[] = { +}; + +static struct regbit msr_bits[] = { #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) {MSR_SF, "SF"}, {MSR_HV, "HV"}, @@ -985,16 +1086,49 @@ static struct regbit { {0, NULL} }; -static void printbits(unsigned long val, struct regbit *bits) +static void print_bits(unsigned long val, struct regbit *bits, const char *sep) { - const char *sep = ""; + const char *s = ""; - printk("<"); for (; bits->bit; ++bits) if (val & bits->bit) { - printk("%s%s", sep, bits->name); - sep = ","; + printk("%s%s", s, bits->name); + s = sep; } +} + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static struct regbit msr_tm_bits[] = { + {MSR_TS_T, "T"}, + {MSR_TS_S, "S"}, + {MSR_TM, "E"}, + {0, NULL} +}; + +static void print_tm_bits(unsigned long val) +{ +/* + * This only prints something if at least one of the TM bit is set. + * Inside the TM[], the output means: + * E: Enabled (bit 32) + * S: Suspended (bit 33) + * T: Transactional (bit 34) + */ + if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { + printk(",TM["); + print_bits(val, msr_tm_bits, ""); + printk("]"); + } +} +#else +static void print_tm_bits(unsigned long val) {} +#endif + +static void print_msr_bits(unsigned long val) +{ + printk("<"); + print_bits(val, msr_bits, ","); + print_tm_bits(val); printk(">"); } @@ -1019,7 +1153,7 @@ void show_regs(struct pt_regs * regs) printk("REGS: %p TRAP: %04lx %s (%s)\n", regs, regs->trap, print_tainted(), init_utsname()->release); printk("MSR: "REG" ", regs->msr); - printbits(regs->msr, msr_bits); + print_msr_bits(regs->msr); printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); trap = TRAP(regs); if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) @@ -1061,13 +1195,10 @@ void show_regs(struct pt_regs * regs) void exit_thread(void) { - discard_lazy_cpu_state(); } void flush_thread(void) { - discard_lazy_cpu_state(); - #ifdef CONFIG_HAVE_HW_BREAKPOINT flush_ptrace_hw_breakpoint(current); #else /* CONFIG_HAVE_HW_BREAKPOINT */ @@ -1086,10 +1217,7 @@ release_thread(struct task_struct *t) */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - flush_fp_to_thread(src); - flush_altivec_to_thread(src); - flush_vsx_to_thread(src); - flush_spe_to_thread(src); + flush_all_to_thread(src); /* * Flush TM state out so we can copy it. 
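A quick illustration of what the print_msr_bits() refactor above changes in oops output: the flat flag list stays, and transactional-memory state moves into a nested TM[] block. A hypothetical flag combination, with the raw MSR value that precedes it omitted:

        MSR: <SF,VEC,EE,PR,FP,ME,IR,DR,RI,TM[SE]>

Per print_tm_bits(), TM[] is only emitted when one of MSR_TM, MSR_TS_S or MSR_TS_T is set, and the letters follow msr_tm_bits[] order, so a suspended, TM-enabled task shows TM[SE].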
__switch_to_tm() does this * flush but it removes the checkpointed state from the current CPU and @@ -1212,7 +1340,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_DSCR)) { p->thread.dscr_inherit = current->thread.dscr_inherit; - p->thread.dscr = current->thread.dscr; + p->thread.dscr = mfspr(SPRN_DSCR); } if (cpu_has_feature(CPU_FTR_HAS_PPR)) p->thread.ppr = INIT_PPR; @@ -1305,7 +1433,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) regs->msr = MSR_USER32; } #endif - discard_lazy_cpu_state(); #ifdef CONFIG_VSX current->thread.used_vsr = 0; #endif diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 737c0d0b53ac..30a03c03fe73 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -60,6 +60,7 @@ struct pt_regs_offset { #define STR(s) #s /* convert to string */ #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} #define GPR_OFFSET_NAME(num) \ + {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \ {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} #define REG_OFFSET_END {.name = NULL, .offset = 0} diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index ef7c24e84a62..b6aa378aff63 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, * contains valid data */ if (current->thread.used_vsr && ctx_has_vsx_region) { - __giveup_vsx(current); + flush_vsx_to_thread(current); if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; msr |= MSR_VSX; @@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs, * contains valid data */ if (current->thread.used_vsr) { - __giveup_vsx(current); + flush_vsx_to_thread(current); if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; if (msr & MSR_VSX) { @@ -687,15 +687,6 @@ static long restore_user_regs(struct pt_regs *regs, if (sig) regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); - /* - * Do this before updating the thread state in - * current->thread.fpr/vr/evr. That way, if we get preempted - * and another task grabs the FPU/Altivec/SPE, it won't be - * tempted to save the current CPU state into the thread_struct - * and corrupt what we are writing there. - */ - discard_lazy_cpu_state(); - #ifdef CONFIG_ALTIVEC /* * Force the process to reload the altivec registers from @@ -798,15 +789,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, /* Restore the previous little-endian mode */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); - /* - * Do this before updating the thread state in - * current->thread.fpr/vr/evr. That way, if we get preempted - * and another task grabs the FPU/Altivec/SPE, it won't be - * tempted to save the current CPU state into the thread_struct - * and corrupt what we are writing there. - */ - discard_lazy_cpu_state(); - #ifdef CONFIG_ALTIVEC regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index c676ecec0869..25520794aa37 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, * VMX data. 
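One small change above is easy to miss: the ptrace.c GPR_OFFSET_NAME() macro now registers each GPR in the offset table under two names, rN as well as gprN. The lookup side is the existing regs_query_register_offset() in the same file (not shown in this hunk, used by the kprobes/ftrace register-access API), so both spellings now resolve; a hypothetical caller:

        int off;

        off = regs_query_register_offset("gpr3");       /* resolved before this change */
        off = regs_query_register_offset("r3");         /* also resolves after it */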
*/ if (current->thread.used_vsr && ctx_has_vsx_region) { - __giveup_vsx(current); + flush_vsx_to_thread(current); v_regs += ELF_NVRREG; err |= copy_vsx_to_user(v_regs, current); /* set MSR_VSX in the MSR value in the frame to @@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, * VMX data. */ if (current->thread.used_vsr) { - __giveup_vsx(current); + flush_vsx_to_thread(current); v_regs += ELF_NVRREG; tm_v_regs += ELF_NVRREG; @@ -350,15 +350,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, err |= __get_user(set->sig[0], &sc->oldmask); /* - * Do this before updating the thread state in - * current->thread.fpr/vr. That way, if we get preempted - * and another task grabs the FPU/Altivec, it won't be - * tempted to save the current CPU state into the thread_struct - * and corrupt what we are writing there. - */ - discard_lazy_cpu_state(); - - /* * Force reload of FP/VEC. * This has to be done before copying stuff into current->thread.fpr/vr * for the reasons explained in the previous comment. @@ -469,15 +460,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); /* - * Do this before updating the thread state in - * current->thread.fpr/vr. That way, if we get preempted - * and another task grabs the FPU/Altivec, it won't be - * tempted to save the current CPU state into the thread_struct - * and corrupt what we are writing there. - */ - discard_lazy_cpu_state(); - - /* * Force reload of FP/VEC. * This has to be done before copying stuff into current->thread.fpr/vr * for the reasons explained in the previous comment. diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index eae33e10b65f..6669b1752512 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c @@ -20,9 +20,7 @@ void save_processor_state(void) * flush out all the special registers so we don't need * to save them in the snapshot */ - flush_fp_to_thread(current); - flush_altivec_to_thread(current); - flush_spe_to_thread(current); + flush_all_to_thread(current); #ifdef CONFIG_PPC64 hard_irq_disable(); diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c index 2384129f5893..55323a620cfe 100644 --- a/arch/powerpc/kernel/systbl_chk.c +++ b/arch/powerpc/kernel/systbl_chk.c @@ -57,4 +57,4 @@ START_TABLE #include <asm/systbl.h> -END_TABLE __NR_syscalls +END_TABLE NR_syscalls diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh index 19415e7674a5..31b6e7c358ca 100644 --- a/arch/powerpc/kernel/systbl_chk.sh +++ b/arch/powerpc/kernel/systbl_chk.sh @@ -16,7 +16,7 @@ awk 'BEGIN { num = -1; } # Ignore the beginning of the file /^START_TABLE/ { num = 0; next; } /^END_TABLE/ { if (num != $2) { - printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n", + printf "NR_syscalls (%s) is not one more than the last syscall (%s)\n", $2, num - 1; exit(1); } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 37de90f8a845..b6becc795bb5 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1313,13 +1313,6 @@ void nonrecoverable_exception(struct pt_regs *regs) die("nonrecoverable exception", regs, SIGKILL); } -void trace_syscall(struct pt_regs *regs) -{ - printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", - current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], - regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); -} - void 
kernel_fp_unavailable_exception(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index b457bfa28436..def1b8b5e6c1 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -671,7 +671,7 @@ static void __init vdso_setup_syscall_map(void) extern unsigned long sys_ni_syscall; - for (i = 0; i < __NR_syscalls; i++) { + for (i = 0; i < NR_syscalls; i++) { #ifdef CONFIG_PPC64 if (sys_call_table[i*2] != sys_ni_syscall) vdso_data->syscall_map_64[i >> 5] |= diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 59cf5f452879..3745113fcc65 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -61,7 +61,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) addi r3,r3,CFG_SYSCALL_MAP32 cmpli cr0,r4,0 beqlr - li r0,__NR_syscalls + li r0,NR_syscalls stw r0,0(r4) crclr cr0*4+so blr diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S index 2f01c4a0d8a0..184a6ba7f283 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S @@ -62,7 +62,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) cmpli cr0,r4,0 crclr cr0*4+so beqlr - li r0,__NR_syscalls + li r0,NR_syscalls stw r0,0(r4) blr .cfi_endproc diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index f5c80d567d8d..162d0f714941 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -29,24 +29,10 @@ _GLOBAL(do_load_up_transact_altivec) addi r10,r3,THREAD_TRANSACT_VRSTATE REST_32VRS(0,r4,r10) - /* Disable VEC again. */ - MTMSRD(r6) - isync - blr #endif /* - * Enable use of VMX/Altivec for the caller. - */ -_GLOBAL(vec_enable) - mfmsr r3 - oris r3,r3,MSR_VEC@h - MTMSRD(r3) - isync - blr - -/* * Load state from memory into VMX registers including VSCR. * Assumes the caller has enabled VMX in the MSR. */ @@ -84,39 +70,6 @@ _GLOBAL(load_up_altivec) MTMSRD(r5) /* enable use of AltiVec now */ isync -/* - * For SMP, we don't do lazy VMX switching because it just gets too - * horrendously complex, especially when a task switches from one CPU - * to another. Instead we call giveup_altvec in switch_to. - * VRSAVE isn't dealt with here, that is done in the normal context - * switch code. Note that we could rely on vrsave value to eventually - * avoid saving all of the VREGs here... 
- */ -#ifndef CONFIG_SMP - LOAD_REG_ADDRBASE(r3, last_task_used_altivec) - toreal(r3) - PPC_LL r4,ADDROFF(last_task_used_altivec)(r3) - PPC_LCMPI 0,r4,0 - beq 1f - - /* Save VMX state to last_task_used_altivec's THREAD struct */ - toreal(r4) - addi r4,r4,THREAD - addi r6,r4,THREAD_VRSTATE - SAVE_32VRS(0,r5,r6) - mfvscr v0 - li r10,VRSTATE_VSCR - stvx v0,r10,r6 - /* Disable VMX for last_task_used_altivec */ - PPC_LL r5,PT_REGS(r4) - toreal(r5) - PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r10,MSR_VEC@h - andc r4,r4,r10 - PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* CONFIG_SMP */ - /* Hack: if we get an altivec unavailable trap with VRSAVE * set to all zeros, we assume this is a broken application * that fails to set it properly, and thus we switch it to @@ -145,39 +98,15 @@ _GLOBAL(load_up_altivec) lvx v0,r10,r6 mtvscr v0 REST_32VRS(0,r4,r6) -#ifndef CONFIG_SMP - /* Update last_task_used_altivec to 'current' */ - subi r4,r5,THREAD /* Back to 'current' */ - fromreal(r4) - PPC_STL r4,ADDROFF(last_task_used_altivec)(r3) -#endif /* CONFIG_SMP */ /* restore registers and return */ blr -_GLOBAL(giveup_altivec_notask) - mfmsr r3 - andis. r4,r3,MSR_VEC@h - bnelr /* Already enabled? */ - oris r3,r3,MSR_VEC@h - SYNC - MTMSRD(r3) /* enable use of VMX now */ - isync - blr - /* - * giveup_altivec(tsk) + * __giveup_altivec(tsk) * Disable VMX for the task given as the argument, * and save the vector registers in its thread_struct. - * Enables the VMX for use in the kernel on return. */ -_GLOBAL(giveup_altivec) - mfmsr r5 - oris r5,r5,MSR_VEC@h - SYNC - MTMSRD(r5) /* enable use of VMX now */ - isync - PPC_LCMPI 0,r3,0 - beqlr /* if no previous owner, done */ +_GLOBAL(__giveup_altivec) addi r3,r3,THREAD /* want THREAD of task */ PPC_LL r7,THREAD_VRSAVEAREA(r3) PPC_LL r5,PT_REGS(r3) @@ -203,11 +132,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) andc r4,r4,r3 /* disable FP for previous task */ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1: -#ifndef CONFIG_SMP - li r5,0 - LOAD_REG_ADDRBASE(r4,last_task_used_altivec) - PPC_STL r5,ADDROFF(last_task_used_altivec)(r4) -#endif /* CONFIG_SMP */ blr #ifdef CONFIG_VSX @@ -230,20 +154,6 @@ _GLOBAL(load_up_vsx) andis. r5,r12,MSR_VEC@h beql+ load_up_altivec /* skip if already loaded */ -#ifndef CONFIG_SMP - ld r3,last_task_used_vsx@got(r2) - ld r4,0(r3) - cmpdi 0,r4,0 - beq 1f - /* Disable VSX for last_task_used_vsx */ - addi r4,r4,THREAD - ld r5,PT_REGS(r4) - ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r6,MSR_VSX@h - andc r6,r4,r6 - std r6,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* CONFIG_SMP */ ld r4,PACACURRENT(r13) addi r4,r4,THREAD /* Get THREAD */ li r6,1 @@ -251,27 +161,14 @@ _GLOBAL(load_up_vsx) /* enable use of VSX after return */ oris r12,r12,MSR_VSX@h std r12,_MSR(r1) -#ifndef CONFIG_SMP - /* Update last_task_used_vsx to 'current' */ - ld r4,PACACURRENT(r13) - std r4,0(r3) -#endif /* CONFIG_SMP */ b fast_exception_return /* * __giveup_vsx(tsk) * Disable VSX for the task given as the argument. * Does NOT save vsx registers. - * Enables the VSX for use in the kernel on return. 
*/ _GLOBAL(__giveup_vsx) - mfmsr r5 - oris r5,r5,MSR_VSX@h - mtmsrd r5 /* enable use of VSX now */ - isync - - cmpdi 0,r3,0 - beqlr- /* if no previous owner, done */ addi r3,r3,THREAD /* want THREAD of task */ ld r5,PT_REGS(r3) cmpdi 0,r5,0 @@ -281,11 +178,6 @@ _GLOBAL(__giveup_vsx) andc r4,r4,r3 /* disable VSX for previous task */ std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1: -#ifndef CONFIG_SMP - li r5,0 - ld r4,last_task_used_vsx@got(r2) - std r5,0(r4) -#endif /* CONFIG_SMP */ blr #endif /* CONFIG_VSX */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 54b45b73195f..8e694707bc56 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2700,9 +2700,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) goto out; } - flush_fp_to_thread(current); - flush_altivec_to_thread(current); - flush_vsx_to_thread(current); + flush_all_to_thread(current); + vcpu->arch.wqp = &vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index a759d9adb0b6..eab96cfe82fa 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c @@ -1265,6 +1265,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) if (rcomp) kvmppc_set_cr(vcpu, cr); + disable_kernel_fp(); preempt_enable(); return emulated; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 64891b081ad5..a78e0e6bd932 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, preempt_disable(); enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); + disable_kernel_fp(); t->fp_save_area = &vcpu->arch.fp; preempt_enable(); } @@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, preempt_disable(); enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); + disable_kernel_altivec(); t->vr_save_area = &vcpu->arch.vr; preempt_enable(); #endif @@ -788,6 +790,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) preempt_disable(); enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); + disable_kernel_fp(); preempt_enable(); } #ifdef CONFIG_ALTIVEC @@ -795,6 +798,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) preempt_disable(); enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); + disable_kernel_altivec(); preempt_enable(); } #endif @@ -1486,21 +1490,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) goto out; /* interrupts now hard-disabled */ - /* Save FPU state in thread_struct */ - if (current->thread.regs->msr & MSR_FP) - giveup_fpu(current); - -#ifdef CONFIG_ALTIVEC - /* Save Altivec state in thread_struct */ - if (current->thread.regs->msr & MSR_VEC) - giveup_altivec(current); -#endif - -#ifdef CONFIG_VSX - /* Save VSX state in thread_struct */ - if (current->thread.regs->msr & MSR_VSX) - __giveup_vsx(current); -#endif + /* Save FPU, Altivec and VSX state */ + giveup_all(current); /* Preload FPU if it's enabled */ if (kvmppc_get_msr(vcpu) & MSR_FP) diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index fd5875179e5c..778ef86e187e 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -98,6 +98,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu) preempt_disable(); enable_kernel_spe(); kvmppc_save_guest_spe(vcpu); + 
disable_kernel_spe(); vcpu->arch.shadow_msr &= ~MSR_SPE; preempt_enable(); } @@ -107,6 +108,7 @@ static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu) preempt_disable(); enable_kernel_spe(); kvmppc_load_guest_spe(vcpu); + disable_kernel_spe(); vcpu->arch.shadow_msr |= MSR_SPE; preempt_enable(); } @@ -141,6 +143,7 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) if (!(current->thread.regs->msr & MSR_FP)) { enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); + disable_kernel_fp(); current->thread.fp_save_area = &vcpu->arch.fp; current->thread.regs->msr |= MSR_FP; } @@ -182,6 +185,7 @@ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu) if (!(current->thread.regs->msr & MSR_VEC)) { enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); + disable_kernel_altivec(); current->thread.vr_save_area = &vcpu->arch.vr; current->thread.regs->msr |= MSR_VEC; } diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c index ac93a3bd2730..b27e030fc9f8 100644 --- a/arch/powerpc/lib/vmx-helper.c +++ b/arch/powerpc/lib/vmx-helper.c @@ -46,6 +46,7 @@ int enter_vmx_usercopy(void) */ int exit_vmx_usercopy(void) { + disable_kernel_altivec(); pagefault_enable(); preempt_enable(); return 0; @@ -70,6 +71,7 @@ int enter_vmx_copy(void) */ void *exit_vmx_copy(void *dest) { + disable_kernel_altivec(); preempt_enable(); return dest; } diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c index e905f7c2ea7b..07f49f1568e5 100644 --- a/arch/powerpc/lib/xor_vmx.c +++ b/arch/powerpc/lib/xor_vmx.c @@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, v2 += 4; } while (--lines > 0); + disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_2); @@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, v3 += 4; } while (--lines > 0); + disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_3); @@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, v4 += 4; } while (--lines > 0); + disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_4); @@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, v5 += 4; } while (--lines > 0); + disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_5); diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 5810967511d4..31a5d42df8c9 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c @@ -110,10 +110,10 @@ unsigned long __init mmu_mapin_ram(unsigned long top) unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE; pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); - pmd_val(*pmdp++) = val; - pmd_val(*pmdp++) = val; - pmd_val(*pmdp++) = val; - pmd_val(*pmdp++) = val; + *pmdp++ = __pmd(val); + *pmdp++ = __pmd(val); + *pmdp++ = __pmd(val); + *pmdp++ = __pmd(val); v += LARGE_PAGE_SIZE_16M; p += LARGE_PAGE_SIZE_16M; @@ -125,7 +125,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top) unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE; pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); - pmd_val(*pmdp) = val; + *pmdp = __pmd(val); v += LARGE_PAGE_SIZE_4M; p += LARGE_PAGE_SIZE_4M; diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 3eb73a38220d..1ffeda85c086 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -14,10 +14,13 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(CONFIG_WORD_SIZE)e.o 
hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o slb_low.o slb.o $(hash64-y) -obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o -obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ - tlb_hash$(CONFIG_WORD_SIZE).o \ +obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o +obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(CONFIG_WORD_SIZE).o \ mmu_context_hash$(CONFIG_WORD_SIZE).o +ifeq ($(CONFIG_PPC_STD_MMU_64),y) +obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o +obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o +endif obj-$(CONFIG_PPC_ICSWX) += icswx.o obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o obj-$(CONFIG_40x) += 40x_mmu.o diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c new file mode 100644 index 000000000000..e7c04542ba62 --- /dev/null +++ b/arch/powerpc/mm/hash64_4k.c @@ -0,0 +1,123 @@ +/* + * Copyright IBM Corporation, 2015 + * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +#include <linux/mm.h> +#include <asm/machdep.h> +#include <asm/mmu.h> + +int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, + pte_t *ptep, unsigned long trap, unsigned long flags, + int ssize, int subpg_prot) +{ + unsigned long hpte_group; + unsigned long rflags, pa; + unsigned long old_pte, new_pte; + unsigned long vpn, hash, slot; + unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift; + + /* + * atomically mark the linux large page PTE busy and dirty + */ + do { + pte_t pte = READ_ONCE(*ptep); + + old_pte = pte_val(pte); + /* If PTE busy, retry the access */ + if (unlikely(old_pte & _PAGE_BUSY)) + return 0; + /* If PTE permissions don't match, take page fault */ + if (unlikely(access & ~old_pte)) + return 1; + /* + * Try to lock the PTE, add ACCESSED and DIRTY if it was + * a write access. Since this is 4K insert of 64K page size + * also add _PAGE_COMBO + */ + new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE; + if (access & _PAGE_RW) + new_pte |= _PAGE_DIRTY; + } while (old_pte != __cmpxchg_u64((unsigned long *)ptep, + old_pte, new_pte)); + /* + * PP bits. 
_PAGE_USER is already PP bit 0x2, so we only + * need to add in 0x1 if it's a read-only user page + */ + rflags = htab_convert_pte_flags(new_pte); + + if (!cpu_has_feature(CPU_FTR_NOEXECUTE) && + !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); + + vpn = hpt_vpn(ea, vsid, ssize); + if (unlikely(old_pte & _PAGE_HASHPTE)) { + /* + * There MIGHT be an HPTE for this pte + */ + hash = hpt_hash(vpn, shift, ssize); + if (old_pte & _PAGE_F_SECOND) + hash = ~hash; + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT; + + if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K, + MMU_PAGE_4K, ssize, flags) == -1) + old_pte &= ~_PAGE_HPTEFLAGS; + } + + if (likely(!(old_pte & _PAGE_HASHPTE))) { + + pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; + hash = hpt_hash(vpn, shift, ssize); + +repeat: + hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + + /* Insert into the hash table, primary slot */ + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, + MMU_PAGE_4K, MMU_PAGE_4K, ssize); + /* + * Primary is full, try the secondary + */ + if (unlikely(slot == -1)) { + hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, + rflags, HPTE_V_SECONDARY, + MMU_PAGE_4K, MMU_PAGE_4K, ssize); + if (slot == -1) { + if (mftb() & 0x1) + hpte_group = ((hash & htab_hash_mask) * + HPTES_PER_GROUP) & ~0x7UL; + ppc_md.hpte_remove(hpte_group); + /* + * FIXME!! Should be try the group from which we removed ? + */ + goto repeat; + } + } + /* + * Hypervisor failure. Restore old pmd and return -1 + * similar to __hash_page_* + */ + if (unlikely(slot == -2)) { + *ptep = __pte(old_pte); + hash_failure_debug(ea, access, vsid, trap, ssize, + MMU_PAGE_4K, MMU_PAGE_4K, old_pte); + return -1; + } + new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; + new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX); + } + *ptep = __pte(new_pte & ~_PAGE_BUSY); + return 0; +} diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c new file mode 100644 index 000000000000..0762c1e08c88 --- /dev/null +++ b/arch/powerpc/mm/hash64_64k.c @@ -0,0 +1,322 @@ +/* + * Copyright IBM Corporation, 2015 + * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + */ + +#include <linux/mm.h> +#include <asm/machdep.h> +#include <asm/mmu.h> +/* + * index from 0 - 15 + */ +bool __rpte_sub_valid(real_pte_t rpte, unsigned long index) +{ + unsigned long g_idx; + unsigned long ptev = pte_val(rpte.pte); + + g_idx = (ptev & _PAGE_COMBO_VALID) >> _PAGE_F_GIX_SHIFT; + index = index >> 2; + if (g_idx & (0x1 << index)) + return true; + else + return false; +} +/* + * index from 0 - 15 + */ +static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index) +{ + unsigned long g_idx; + + if (!(ptev & _PAGE_COMBO)) + return ptev; + index = index >> 2; + g_idx = 0x1 << index; + + return ptev | (g_idx << _PAGE_F_GIX_SHIFT); +} + +int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, + pte_t *ptep, unsigned long trap, unsigned long flags, + int ssize, int subpg_prot) +{ + real_pte_t rpte; + unsigned long *hidxp; + unsigned long hpte_group; + unsigned int subpg_index; + unsigned long rflags, pa, hidx; + unsigned long old_pte, new_pte, subpg_pte; + unsigned long vpn, hash, slot; + unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift; + + /* + * atomically mark the linux large page PTE busy and dirty + */ + do { + pte_t pte = READ_ONCE(*ptep); + + old_pte = pte_val(pte); + /* If PTE busy, retry the access */ + if (unlikely(old_pte & _PAGE_BUSY)) + return 0; + /* If PTE permissions don't match, take page fault */ + if (unlikely(access & ~old_pte)) + return 1; + /* + * Try to lock the PTE, add ACCESSED and DIRTY if it was + * a write access. Since this is 4K insert of 64K page size + * also add _PAGE_COMBO + */ + new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO; + if (access & _PAGE_RW) + new_pte |= _PAGE_DIRTY; + } while (old_pte != __cmpxchg_u64((unsigned long *)ptep, + old_pte, new_pte)); + /* + * Handle the subpage protection bits + */ + subpg_pte = new_pte & ~subpg_prot; + rflags = htab_convert_pte_flags(subpg_pte); + + if (!cpu_has_feature(CPU_FTR_NOEXECUTE) && + !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { + + /* + * No CPU has hugepages but lacks no execute, so we + * don't need to worry about that case + */ + rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); + } + + subpg_index = (ea & (PAGE_SIZE - 1)) >> shift; + vpn = hpt_vpn(ea, vsid, ssize); + rpte = __real_pte(__pte(old_pte), ptep); + /* + *None of the sub 4k page is hashed + */ + if (!(old_pte & _PAGE_HASHPTE)) + goto htab_insert_hpte; + /* + * Check if the pte was already inserted into the hash table + * as a 64k HW page, and invalidate the 64k HPTE if so. + */ + if (!(old_pte & _PAGE_COMBO)) { + flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags); + old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND; + goto htab_insert_hpte; + } + /* + * Check for sub page valid and update + */ + if (__rpte_sub_valid(rpte, subpg_index)) { + int ret; + + hash = hpt_hash(vpn, shift, ssize); + hidx = __rpte_to_hidx(rpte, subpg_index); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + + ret = ppc_md.hpte_updatepp(slot, rflags, vpn, + MMU_PAGE_4K, MMU_PAGE_4K, + ssize, flags); + /* + *if we failed because typically the HPTE wasn't really here + * we try an insertion. + */ + if (ret == -1) + goto htab_insert_hpte; + + *ptep = __pte(new_pte & ~_PAGE_BUSY); + return 0; + } + +htab_insert_hpte: + /* + * handle _PAGE_4K_PFN case + */ + if (old_pte & _PAGE_4K_PFN) { + /* + * All the sub 4k page have the same + * physical address. 
+ */ + pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT; + } else { + pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; + pa += (subpg_index << shift); + } + hash = hpt_hash(vpn, shift, ssize); +repeat: + hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + + /* Insert into the hash table, primary slot */ + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, + MMU_PAGE_4K, MMU_PAGE_4K, ssize); + /* + * Primary is full, try the secondary + */ + if (unlikely(slot == -1)) { + hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, + rflags, HPTE_V_SECONDARY, + MMU_PAGE_4K, MMU_PAGE_4K, ssize); + if (slot == -1) { + if (mftb() & 0x1) + hpte_group = ((hash & htab_hash_mask) * + HPTES_PER_GROUP) & ~0x7UL; + ppc_md.hpte_remove(hpte_group); + /* + * FIXME!! Should be try the group from which we removed ? + */ + goto repeat; + } + } + /* + * Hypervisor failure. Restore old pmd and return -1 + * similar to __hash_page_* + */ + if (unlikely(slot == -2)) { + *ptep = __pte(old_pte); + hash_failure_debug(ea, access, vsid, trap, ssize, + MMU_PAGE_4K, MMU_PAGE_4K, old_pte); + return -1; + } + /* + * Insert slot number & secondary bit in PTE second half, + * clear _PAGE_BUSY and set appropriate HPTE slot bit + * Since we have _PAGE_BUSY set on ptep, we can be sure + * nobody is undating hidx. + */ + hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); + rpte.hidx &= ~(0xfUL << (subpg_index << 2)); + *hidxp = rpte.hidx | (slot << (subpg_index << 2)); + new_pte = mark_subptegroup_valid(new_pte, subpg_index); + new_pte |= _PAGE_HASHPTE; + /* + * check __real_pte for details on matching smp_rmb() + */ + smp_wmb(); + *ptep = __pte(new_pte & ~_PAGE_BUSY); + return 0; +} + +int __hash_page_64K(unsigned long ea, unsigned long access, + unsigned long vsid, pte_t *ptep, unsigned long trap, + unsigned long flags, int ssize) +{ + + unsigned long hpte_group; + unsigned long rflags, pa; + unsigned long old_pte, new_pte; + unsigned long vpn, hash, slot; + unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift; + + /* + * atomically mark the linux large page PTE busy and dirty + */ + do { + pte_t pte = READ_ONCE(*ptep); + + old_pte = pte_val(pte); + /* If PTE busy, retry the access */ + if (unlikely(old_pte & _PAGE_BUSY)) + return 0; + /* If PTE permissions don't match, take page fault */ + if (unlikely(access & ~old_pte)) + return 1; + /* + * Check if PTE has the cache-inhibit bit set + * If so, bail out and refault as a 4k page + */ + if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) && + unlikely(old_pte & _PAGE_NO_CACHE)) + return 0; + /* + * Try to lock the PTE, add ACCESSED and DIRTY if it was + * a write access. 
Since this is 4K insert of 64K page size + * also add _PAGE_COMBO + */ + new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED; + if (access & _PAGE_RW) + new_pte |= _PAGE_DIRTY; + } while (old_pte != __cmpxchg_u64((unsigned long *)ptep, + old_pte, new_pte)); + + rflags = htab_convert_pte_flags(new_pte); + + if (!cpu_has_feature(CPU_FTR_NOEXECUTE) && + !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); + + vpn = hpt_vpn(ea, vsid, ssize); + if (unlikely(old_pte & _PAGE_HASHPTE)) { + /* + * There MIGHT be an HPTE for this pte + */ + hash = hpt_hash(vpn, shift, ssize); + if (old_pte & _PAGE_F_SECOND) + hash = ~hash; + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT; + + if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K, + MMU_PAGE_64K, ssize, flags) == -1) + old_pte &= ~_PAGE_HPTEFLAGS; + } + + if (likely(!(old_pte & _PAGE_HASHPTE))) { + + pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; + hash = hpt_hash(vpn, shift, ssize); + +repeat: + hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + + /* Insert into the hash table, primary slot */ + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, + MMU_PAGE_64K, MMU_PAGE_64K, ssize); + /* + * Primary is full, try the secondary + */ + if (unlikely(slot == -1)) { + hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, + rflags, HPTE_V_SECONDARY, + MMU_PAGE_64K, MMU_PAGE_64K, ssize); + if (slot == -1) { + if (mftb() & 0x1) + hpte_group = ((hash & htab_hash_mask) * + HPTES_PER_GROUP) & ~0x7UL; + ppc_md.hpte_remove(hpte_group); + /* + * FIXME!! Should be try the group from which we removed ? + */ + goto repeat; + } + } + /* + * Hypervisor failure. Restore old pmd and return -1 + * similar to __hash_page_* + */ + if (unlikely(slot == -2)) { + *ptep = __pte(old_pte); + hash_failure_debug(ea, access, vsid, trap, ssize, + MMU_PAGE_64K, MMU_PAGE_64K, old_pte); + return -1; + } + new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; + new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX); + } + *ptep = __pte(new_pte & ~_PAGE_BUSY); + return 0; +} diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S deleted file mode 100644 index 3b49e3295901..000000000000 --- a/arch/powerpc/mm/hash_low_64.S +++ /dev/null @@ -1,1003 +0,0 @@ -/* - * ppc64 MMU hashtable management routines - * - * (c) Copyright IBM Corp. 2003, 2005 - * - * Maintained by: Benjamin Herrenschmidt - * <benh@kernel.crashing.org> - * - * This file is covered by the GNU Public Licence v2 as - * described in the kernel's COPYING file. 
- */ - -#include <asm/reg.h> -#include <asm/pgtable.h> -#include <asm/mmu.h> -#include <asm/page.h> -#include <asm/types.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> -#include <asm/cputable.h> - - .text - -/* - * Stackframe: - * - * +-> Back chain (SP + 256) - * | General register save area (SP + 112) - * | Parameter save area (SP + 48) - * | TOC save area (SP + 40) - * | link editor doubleword (SP + 32) - * | compiler doubleword (SP + 24) - * | LR save area (SP + 16) - * | CR save area (SP + 8) - * SP ---> +-- Back chain (SP + 0) - */ - -#ifndef CONFIG_PPC_64K_PAGES - -/***************************************************************************** - * * - * 4K SW & 4K HW pages implementation * - * * - *****************************************************************************/ - - -/* - * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, - * pte_t *ptep, unsigned long trap, unsigned long flags, - * int ssize) - * - * Adds a 4K page to the hash table in a segment of 4K pages only - */ - -_GLOBAL(__hash_page_4K) - mflr r0 - std r0,16(r1) - stdu r1,-STACKFRAMESIZE(r1) - /* Save all params that we need after a function call */ - std r6,STK_PARAM(R6)(r1) - std r8,STK_PARAM(R8)(r1) - std r9,STK_PARAM(R9)(r1) - - /* Save non-volatile registers. - * r31 will hold "old PTE" - * r30 is "new PTE" - * r29 is vpn - * r28 is a hash value - * r27 is hashtab mask (maybe dynamic patched instead ?) - */ - std r27,STK_REG(R27)(r1) - std r28,STK_REG(R28)(r1) - std r29,STK_REG(R29)(r1) - std r30,STK_REG(R30)(r1) - std r31,STK_REG(R31)(r1) - - /* Step 1: - * - * Check permissions, atomically mark the linux PTE busy - * and hashed. - */ -1: - ldarx r31,0,r6 - /* Check access rights (access & ~(pte_val(*ptep))) */ - andc. r0,r4,r31 - bne- htab_wrong_access - /* Check if PTE is busy */ - andi. r0,r31,_PAGE_BUSY - /* If so, just bail out and refault if needed. Someone else - * is changing this PTE anyway and might hash it. - */ - bne- htab_bail_ok - - /* Prepare new PTE value (turn access RW into DIRTY, then - * add BUSY,HASHPTE and ACCESSED) - */ - rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */ - or r30,r30,r31 - ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE - /* Write the linux PTE atomically (setting busy) */ - stdcx. r30,0,r6 - bne- 1b - isync - - /* Step 2: - * - * Insert/Update the HPTE in the hash table. At this point, - * r4 (access) is re-useable, we use it for the new HPTE flags - */ - -BEGIN_FTR_SECTION - cmpdi r9,0 /* check segment size */ - bne 3f -END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - /* Calc vpn and put it in r29 */ - sldi r29,r5,SID_SHIFT - VPN_SHIFT - rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) - or r29,r28,r29 - /* - * Calculate hash value for primary slot and store it in r28 - * r3 = va, r5 = vsid - * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) - */ - rldicl r0,r3,64-12,48 - xor r28,r5,r0 /* hash */ - b 4f - -3: /* Calc vpn and put it in r29 */ - sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT - rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) - or r29,r28,r29 - - /* - * calculate hash value for primary slot and - * store it in r28 for 1T segment - * r3 = va, r5 = vsid - */ - sldi r28,r5,25 /* vsid << 25 */ - /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ - rldicl r0,r3,64-12,36 - xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ - xor r28,r28,r0 /* hash */ - - /* Convert linux PTE bits into HW equivalents */ -4: andi. 
r3,r30,0x1fe /* Get basic set of flags */ - xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */ - rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ - rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */ - and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ - andc r0,r30,r0 /* r0 = pte & ~r0 */ - rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ - /* - * Always add "C" bit for perf. Memory coherence is always enabled - */ - ori r3,r3,HPTE_R_C | HPTE_R_M - - /* We eventually do the icache sync here (maybe inline that - * code rather than call a C function...) - */ -BEGIN_FTR_SECTION - mr r4,r30 - mr r5,r7 - bl hash_page_do_lazy_icache -END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) - - /* At this point, r3 contains new PP bits, save them in - * place of "access" in the param area (sic) - */ - std r3,STK_PARAM(R4)(r1) - - /* Get htab_hash_mask */ - ld r4,htab_hash_mask@got(2) - ld r27,0(r4) /* htab_hash_mask -> r27 */ - - /* Check if we may already be in the hashtable, in this case, we - * go to out-of-line code to try to modify the HPTE - */ - andi. r0,r31,_PAGE_HASHPTE - bne htab_modify_pte - -htab_insert_pte: - /* Clear hpte bits in new pte (we also clear BUSY btw) and - * add _PAGE_HASHPTE - */ - lis r0,_PAGE_HPTEFLAGS@h - ori r0,r0,_PAGE_HPTEFLAGS@l - andc r30,r30,r0 - ori r30,r30,_PAGE_HASHPTE - - /* physical address r5 */ - rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT - sldi r5,r5,PAGE_SHIFT - - /* Calculate primary group hash */ - and r0,r28,r27 - rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ - - /* Call ppc_md.hpte_insert */ - ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve vpn */ - li r7,0 /* !bolted, !secondary */ - li r8,MMU_PAGE_4K /* page size */ - li r9,MMU_PAGE_4K /* actual page size */ - ld r10,STK_PARAM(R9)(r1) /* segment size */ -.globl htab_call_hpte_insert1 -htab_call_hpte_insert1: - bl . /* Patched by htab_finish_init() */ - cmpdi 0,r3,0 - bge htab_pte_insert_ok /* Insertion successful */ - cmpdi 0,r3,-2 /* Critical failure */ - beq- htab_pte_insert_failure - - /* Now try secondary slot */ - - /* physical address r5 */ - rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT - sldi r5,r5,PAGE_SHIFT - - /* Calculate secondary group hash */ - andc r0,r27,r28 - rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ - - /* Call ppc_md.hpte_insert */ - ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve vpn */ - li r7,HPTE_V_SECONDARY /* !bolted, secondary */ - li r8,MMU_PAGE_4K /* page size */ - li r9,MMU_PAGE_4K /* actual page size */ - ld r10,STK_PARAM(R9)(r1) /* segment size */ -.globl htab_call_hpte_insert2 -htab_call_hpte_insert2: - bl . /* Patched by htab_finish_init() */ - cmpdi 0,r3,0 - bge+ htab_pte_insert_ok /* Insertion successful */ - cmpdi 0,r3,-2 /* Critical failure */ - beq- htab_pte_insert_failure - - /* Both are full, we need to evict something */ - mftb r0 - /* Pick a random group based on TB */ - andi. r0,r0,1 - mr r5,r28 - bne 2f - not r5,r5 -2: and r0,r5,r27 - rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - /* Call ppc_md.hpte_remove */ -.globl htab_call_hpte_remove -htab_call_hpte_remove: - bl . /* Patched by htab_finish_init() */ - - /* Try all again */ - b htab_insert_pte - -htab_bail_ok: - li r3,0 - b htab_bail - -htab_pte_insert_ok: - /* Insert slot number & secondary bit in PTE */ - rldimi r30,r3,12,63-15 - - /* Write out the PTE with a normal write - * (maybe add eieio may be good still ?) 
- */ -htab_write_out_pte: - ld r6,STK_PARAM(R6)(r1) - std r30,0(r6) - li r3, 0 -htab_bail: - ld r27,STK_REG(R27)(r1) - ld r28,STK_REG(R28)(r1) - ld r29,STK_REG(R29)(r1) - ld r30,STK_REG(R30)(r1) - ld r31,STK_REG(R31)(r1) - addi r1,r1,STACKFRAMESIZE - ld r0,16(r1) - mtlr r0 - blr - -htab_modify_pte: - /* Keep PP bits in r4 and slot idx from the PTE around in r3 */ - mr r4,r3 - rlwinm r3,r31,32-12,29,31 - - /* Secondary group ? if yes, get a inverted hash value */ - mr r5,r28 - andi. r0,r31,_PAGE_SECONDARY - beq 1f - not r5,r5 -1: - /* Calculate proper slot value for ppc_md.hpte_updatepp */ - and r0,r5,r27 - rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - add r3,r0,r3 /* add slot idx */ - - /* Call ppc_md.hpte_updatepp */ - mr r5,r29 /* vpn */ - li r6,MMU_PAGE_4K /* base page size */ - li r7,MMU_PAGE_4K /* actual page size */ - ld r8,STK_PARAM(R9)(r1) /* segment size */ - ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ -.globl htab_call_hpte_updatepp -htab_call_hpte_updatepp: - bl . /* Patched by htab_finish_init() */ - - /* if we failed because typically the HPTE wasn't really here - * we try an insertion. - */ - cmpdi 0,r3,-1 - beq- htab_insert_pte - - /* Clear the BUSY bit and Write out the PTE */ - li r0,_PAGE_BUSY - andc r30,r30,r0 - b htab_write_out_pte - -htab_wrong_access: - /* Bail out clearing reservation */ - stdcx. r31,0,r6 - li r3,1 - b htab_bail - -htab_pte_insert_failure: - /* Bail out restoring old PTE */ - ld r6,STK_PARAM(R6)(r1) - std r31,0(r6) - li r3,-1 - b htab_bail - - -#else /* CONFIG_PPC_64K_PAGES */ - - -/***************************************************************************** - * * - * 64K SW & 4K or 64K HW in a 4K segment pages implementation * - * * - *****************************************************************************/ - -/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, - * pte_t *ptep, unsigned long trap, unsigned local flags, - * int ssize, int subpg_prot) - */ - -/* - * For now, we do NOT implement Admixed pages - */ -_GLOBAL(__hash_page_4K) - mflr r0 - std r0,16(r1) - stdu r1,-STACKFRAMESIZE(r1) - /* Save all params that we need after a function call */ - std r6,STK_PARAM(R6)(r1) - std r8,STK_PARAM(R8)(r1) - std r9,STK_PARAM(R9)(r1) - - /* Save non-volatile registers. - * r31 will hold "old PTE" - * r30 is "new PTE" - * r29 is vpn - * r28 is a hash value - * r27 is hashtab mask (maybe dynamic patched instead ?) - * r26 is the hidx mask - * r25 is the index in combo page - */ - std r25,STK_REG(R25)(r1) - std r26,STK_REG(R26)(r1) - std r27,STK_REG(R27)(r1) - std r28,STK_REG(R28)(r1) - std r29,STK_REG(R29)(r1) - std r30,STK_REG(R30)(r1) - std r31,STK_REG(R31)(r1) - - /* Step 1: - * - * Check permissions, atomically mark the linux PTE busy - * and hashed. - */ -1: - ldarx r31,0,r6 - /* Check access rights (access & ~(pte_val(*ptep))) */ - andc. r0,r4,r31 - bne- htab_wrong_access - /* Check if PTE is busy */ - andi. r0,r31,_PAGE_BUSY - /* If so, just bail out and refault if needed. Someone else - * is changing this PTE anyway and might hash it. - */ - bne- htab_bail_ok - /* Prepare new PTE value (turn access RW into DIRTY, then - * add BUSY and ACCESSED) - */ - rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */ - or r30,r30,r31 - ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED - oris r30,r30,_PAGE_COMBO@h - /* Write the linux PTE atomically (setting busy) */ - stdcx. r30,0,r6 - bne- 1b - isync - - /* Step 2: - * - * Insert/Update the HPTE in the hash table. 
At this point, - * r4 (access) is re-useable, we use it for the new HPTE flags - */ - - /* Load the hidx index */ - rldicl r25,r3,64-12,60 - -BEGIN_FTR_SECTION - cmpdi r9,0 /* check segment size */ - bne 3f -END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - /* Calc vpn and put it in r29 */ - sldi r29,r5,SID_SHIFT - VPN_SHIFT - /* - * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff - * srdi r28,r3,VPN_SHIFT - */ - rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) - or r29,r28,r29 - /* - * Calculate hash value for primary slot and store it in r28 - * r3 = va, r5 = vsid - * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) - */ - rldicl r0,r3,64-12,48 - xor r28,r5,r0 /* hash */ - b 4f - -3: /* Calc vpn and put it in r29 */ - sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT - /* - * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff - * srdi r28,r3,VPN_SHIFT - */ - rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) - or r29,r28,r29 - - /* - * Calculate hash value for primary slot and - * store it in r28 for 1T segment - * r3 = va, r5 = vsid - */ - sldi r28,r5,25 /* vsid << 25 */ - /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ - rldicl r0,r3,64-12,36 - xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ - xor r28,r28,r0 /* hash */ - - /* Convert linux PTE bits into HW equivalents */ -4: -#ifdef CONFIG_PPC_SUBPAGE_PROT - andc r10,r30,r10 - andi. r3,r10,0x1fe /* Get basic set of flags */ - rlwinm r0,r10,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ -#else - andi. r3,r30,0x1fe /* Get basic set of flags */ - rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ -#endif - xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */ - rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */ - and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ - andc r0,r3,r0 /* r0 = pte & ~r0 */ - rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ - /* - * Always add "C" bit for perf. Memory coherence is always enabled - */ - ori r3,r3,HPTE_R_C | HPTE_R_M - - /* We eventually do the icache sync here (maybe inline that - * code rather than call a C function...) - */ -BEGIN_FTR_SECTION - mr r4,r30 - mr r5,r7 - bl hash_page_do_lazy_icache -END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) - - /* At this point, r3 contains new PP bits, save them in - * place of "access" in the param area (sic) - */ - std r3,STK_PARAM(R4)(r1) - - /* Get htab_hash_mask */ - ld r4,htab_hash_mask@got(2) - ld r27,0(r4) /* htab_hash_mask -> r27 */ - - /* Check if we may already be in the hashtable, in this case, we - * go to out-of-line code to try to modify the HPTE. We look for - * the bit at (1 >> (index + 32)) - */ - rldicl. r0,r31,64-12,48 - li r26,0 /* Default hidx */ - beq htab_insert_pte - - /* - * Check if the pte was already inserted into the hash table - * as a 64k HW page, and invalidate the 64k HPTE if so. - */ - andis. r0,r31,_PAGE_COMBO@h - beq htab_inval_old_hpte - - ld r6,STK_PARAM(R6)(r1) - ori r26,r6,PTE_PAGE_HIDX_OFFSET /* Load the hidx mask. */ - ld r26,0(r26) - addi r5,r25,36 /* Check actual HPTE_SUB bit, this */ - rldcr. r0,r31,r5,0 /* must match pgtable.h definition */ - bne htab_modify_pte - -htab_insert_pte: - /* real page number in r5, PTE RPN value + index */ - andis. 
r0,r31,_PAGE_4K_PFN@h - srdi r5,r31,PTE_RPN_SHIFT - bne- htab_special_pfn - sldi r5,r5,PAGE_FACTOR - add r5,r5,r25 -htab_special_pfn: - sldi r5,r5,HW_PAGE_SHIFT - - /* Calculate primary group hash */ - and r0,r28,r27 - rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - - /* Call ppc_md.hpte_insert */ - ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve vpn */ - li r7,0 /* !bolted, !secondary */ - li r8,MMU_PAGE_4K /* page size */ - li r9,MMU_PAGE_4K /* actual page size */ - ld r10,STK_PARAM(R9)(r1) /* segment size */ -.globl htab_call_hpte_insert1 -htab_call_hpte_insert1: - bl . /* patched by htab_finish_init() */ - cmpdi 0,r3,0 - bge htab_pte_insert_ok /* Insertion successful */ - cmpdi 0,r3,-2 /* Critical failure */ - beq- htab_pte_insert_failure - - /* Now try secondary slot */ - - /* real page number in r5, PTE RPN value + index */ - andis. r0,r31,_PAGE_4K_PFN@h - srdi r5,r31,PTE_RPN_SHIFT - bne- 3f - sldi r5,r5,PAGE_FACTOR - add r5,r5,r25 -3: sldi r5,r5,HW_PAGE_SHIFT - - /* Calculate secondary group hash */ - andc r0,r27,r28 - rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ - - /* Call ppc_md.hpte_insert */ - ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve vpn */ - li r7,HPTE_V_SECONDARY /* !bolted, secondary */ - li r8,MMU_PAGE_4K /* page size */ - li r9,MMU_PAGE_4K /* actual page size */ - ld r10,STK_PARAM(R9)(r1) /* segment size */ -.globl htab_call_hpte_insert2 -htab_call_hpte_insert2: - bl . /* patched by htab_finish_init() */ - cmpdi 0,r3,0 - bge+ htab_pte_insert_ok /* Insertion successful */ - cmpdi 0,r3,-2 /* Critical failure */ - beq- htab_pte_insert_failure - - /* Both are full, we need to evict something */ - mftb r0 - /* Pick a random group based on TB */ - andi. r0,r0,1 - mr r5,r28 - bne 2f - not r5,r5 -2: and r0,r5,r27 - rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - /* Call ppc_md.hpte_remove */ -.globl htab_call_hpte_remove -htab_call_hpte_remove: - bl . /* patched by htab_finish_init() */ - - /* Try all again */ - b htab_insert_pte - - /* - * Call out to C code to invalidate an 64k HW HPTE that is - * useless now that the segment has been switched to 4k pages. - */ -htab_inval_old_hpte: - mr r3,r29 /* vpn */ - mr r4,r31 /* PTE.pte */ - li r5,0 /* PTE.hidx */ - li r6,MMU_PAGE_64K /* psize */ - ld r7,STK_PARAM(R9)(r1) /* ssize */ - ld r8,STK_PARAM(R8)(r1) /* flags */ - bl flush_hash_page - /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ - lis r0,_PAGE_HPTE_SUB@h - ori r0,r0,_PAGE_HPTE_SUB@l - andc r30,r30,r0 - b htab_insert_pte - -htab_bail_ok: - li r3,0 - b htab_bail - -htab_pte_insert_ok: - /* Insert slot number & secondary bit in PTE second half, - * clear _PAGE_BUSY and set approriate HPTE slot bit - */ - ld r6,STK_PARAM(R6)(r1) - li r0,_PAGE_BUSY - andc r30,r30,r0 - /* HPTE SUB bit */ - li r0,1 - subfic r5,r25,27 /* Must match bit position in */ - sld r0,r0,r5 /* pgtable.h */ - or r30,r30,r0 - /* hindx */ - sldi r5,r25,2 - sld r3,r3,r5 - li r4,0xf - sld r4,r4,r5 - andc r26,r26,r4 - or r26,r26,r3 - ori r5,r6,PTE_PAGE_HIDX_OFFSET - std r26,0(r5) - lwsync - std r30,0(r6) - li r3, 0 -htab_bail: - ld r25,STK_REG(R25)(r1) - ld r26,STK_REG(R26)(r1) - ld r27,STK_REG(R27)(r1) - ld r28,STK_REG(R28)(r1) - ld r29,STK_REG(R29)(r1) - ld r30,STK_REG(R30)(r1) - ld r31,STK_REG(R31)(r1) - addi r1,r1,STACKFRAMESIZE - ld r0,16(r1) - mtlr r0 - blr - -htab_modify_pte: - /* Keep PP bits in r4 and slot idx from the PTE around in r3 */ - mr r4,r3 - sldi r5,r25,2 - srd r3,r26,r5 - - /* Secondary group ? 
if yes, get a inverted hash value */ - mr r5,r28 - andi. r0,r3,0x8 /* page secondary ? */ - beq 1f - not r5,r5 -1: andi. r3,r3,0x7 /* extract idx alone */ - - /* Calculate proper slot value for ppc_md.hpte_updatepp */ - and r0,r5,r27 - rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - add r3,r0,r3 /* add slot idx */ - - /* Call ppc_md.hpte_updatepp */ - mr r5,r29 /* vpn */ - li r6,MMU_PAGE_4K /* base page size */ - li r7,MMU_PAGE_4K /* actual page size */ - ld r8,STK_PARAM(R9)(r1) /* segment size */ - ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ -.globl htab_call_hpte_updatepp -htab_call_hpte_updatepp: - bl . /* patched by htab_finish_init() */ - - /* if we failed because typically the HPTE wasn't really here - * we try an insertion. - */ - cmpdi 0,r3,-1 - beq- htab_insert_pte - - /* Clear the BUSY bit and Write out the PTE */ - li r0,_PAGE_BUSY - andc r30,r30,r0 - ld r6,STK_PARAM(R6)(r1) - std r30,0(r6) - li r3,0 - b htab_bail - -htab_wrong_access: - /* Bail out clearing reservation */ - stdcx. r31,0,r6 - li r3,1 - b htab_bail - -htab_pte_insert_failure: - /* Bail out restoring old PTE */ - ld r6,STK_PARAM(R6)(r1) - std r31,0(r6) - li r3,-1 - b htab_bail - -#endif /* CONFIG_PPC_64K_PAGES */ - -#ifdef CONFIG_PPC_64K_PAGES - -/***************************************************************************** - * * - * 64K SW & 64K HW in a 64K segment pages implementation * - * * - *****************************************************************************/ - -_GLOBAL(__hash_page_64K) - mflr r0 - std r0,16(r1) - stdu r1,-STACKFRAMESIZE(r1) - /* Save all params that we need after a function call */ - std r6,STK_PARAM(R6)(r1) - std r8,STK_PARAM(R8)(r1) - std r9,STK_PARAM(R9)(r1) - - /* Save non-volatile registers. - * r31 will hold "old PTE" - * r30 is "new PTE" - * r29 is vpn - * r28 is a hash value - * r27 is hashtab mask (maybe dynamic patched instead ?) - */ - std r27,STK_REG(R27)(r1) - std r28,STK_REG(R28)(r1) - std r29,STK_REG(R29)(r1) - std r30,STK_REG(R30)(r1) - std r31,STK_REG(R31)(r1) - - /* Step 1: - * - * Check permissions, atomically mark the linux PTE busy - * and hashed. - */ -1: - ldarx r31,0,r6 - /* Check access rights (access & ~(pte_val(*ptep))) */ - andc. r0,r4,r31 - bne- ht64_wrong_access - /* Check if PTE is busy */ - andi. r0,r31,_PAGE_BUSY - /* If so, just bail out and refault if needed. Someone else - * is changing this PTE anyway and might hash it. - */ - bne- ht64_bail_ok -BEGIN_FTR_SECTION - /* Check if PTE has the cache-inhibit bit set */ - andi. r0,r31,_PAGE_NO_CACHE - /* If so, bail out and refault as a 4k page */ - bne- ht64_bail_ok -END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE) - /* Prepare new PTE value (turn access RW into DIRTY, then - * add BUSY and ACCESSED) - */ - rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */ - or r30,r30,r31 - ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED - /* Write the linux PTE atomically (setting busy) */ - stdcx. r30,0,r6 - bne- 1b - isync - - /* Step 2: - * - * Insert/Update the HPTE in the hash table. 
At this point, - * r4 (access) is re-useable, we use it for the new HPTE flags - */ - -BEGIN_FTR_SECTION - cmpdi r9,0 /* check segment size */ - bne 3f -END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - /* Calc vpn and put it in r29 */ - sldi r29,r5,SID_SHIFT - VPN_SHIFT - rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) - or r29,r28,r29 - - /* Calculate hash value for primary slot and store it in r28 - * r3 = va, r5 = vsid - * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) - */ - rldicl r0,r3,64-16,52 - xor r28,r5,r0 /* hash */ - b 4f - -3: /* Calc vpn and put it in r29 */ - sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT - rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) - or r29,r28,r29 - /* - * calculate hash value for primary slot and - * store it in r28 for 1T segment - * r3 = va, r5 = vsid - */ - sldi r28,r5,25 /* vsid << 25 */ - /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ - rldicl r0,r3,64-16,40 - xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ - xor r28,r28,r0 /* hash */ - - /* Convert linux PTE bits into HW equivalents */ -4: andi. r3,r30,0x1fe /* Get basic set of flags */ - xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */ - rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ - rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */ - and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ - andc r0,r30,r0 /* r0 = pte & ~r0 */ - rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ - /* - * Always add "C" bit for perf. Memory coherence is always enabled - */ - ori r3,r3,HPTE_R_C | HPTE_R_M - - /* We eventually do the icache sync here (maybe inline that - * code rather than call a C function...) - */ -BEGIN_FTR_SECTION - mr r4,r30 - mr r5,r7 - bl hash_page_do_lazy_icache -END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) - - /* At this point, r3 contains new PP bits, save them in - * place of "access" in the param area (sic) - */ - std r3,STK_PARAM(R4)(r1) - - /* Get htab_hash_mask */ - ld r4,htab_hash_mask@got(2) - ld r27,0(r4) /* htab_hash_mask -> r27 */ - - /* Check if we may already be in the hashtable, in this case, we - * go to out-of-line code to try to modify the HPTE - */ - rldicl. r0,r31,64-12,48 - bne ht64_modify_pte - -ht64_insert_pte: - /* Clear hpte bits in new pte (we also clear BUSY btw) and - * add _PAGE_HPTE_SUB0 - */ - lis r0,_PAGE_HPTEFLAGS@h - ori r0,r0,_PAGE_HPTEFLAGS@l - andc r30,r30,r0 -#ifdef CONFIG_PPC_64K_PAGES - oris r30,r30,_PAGE_HPTE_SUB0@h -#else - ori r30,r30,_PAGE_HASHPTE -#endif - /* Phyical address in r5 */ - rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT - sldi r5,r5,PAGE_SHIFT - - /* Calculate primary group hash */ - and r0,r28,r27 - rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - - /* Call ppc_md.hpte_insert */ - ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve vpn */ - li r7,0 /* !bolted, !secondary */ - li r8,MMU_PAGE_64K - li r9,MMU_PAGE_64K /* actual page size */ - ld r10,STK_PARAM(R9)(r1) /* segment size */ -.globl ht64_call_hpte_insert1 -ht64_call_hpte_insert1: - bl . 
/* patched by htab_finish_init() */ - cmpdi 0,r3,0 - bge ht64_pte_insert_ok /* Insertion successful */ - cmpdi 0,r3,-2 /* Critical failure */ - beq- ht64_pte_insert_failure - - /* Now try secondary slot */ - - /* Phyical address in r5 */ - rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT - sldi r5,r5,PAGE_SHIFT - - /* Calculate secondary group hash */ - andc r0,r27,r28 - rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ - - /* Call ppc_md.hpte_insert */ - ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ - mr r4,r29 /* Retrieve vpn */ - li r7,HPTE_V_SECONDARY /* !bolted, secondary */ - li r8,MMU_PAGE_64K - li r9,MMU_PAGE_64K /* actual page size */ - ld r10,STK_PARAM(R9)(r1) /* segment size */ -.globl ht64_call_hpte_insert2 -ht64_call_hpte_insert2: - bl . /* patched by htab_finish_init() */ - cmpdi 0,r3,0 - bge+ ht64_pte_insert_ok /* Insertion successful */ - cmpdi 0,r3,-2 /* Critical failure */ - beq- ht64_pte_insert_failure - - /* Both are full, we need to evict something */ - mftb r0 - /* Pick a random group based on TB */ - andi. r0,r0,1 - mr r5,r28 - bne 2f - not r5,r5 -2: and r0,r5,r27 - rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - /* Call ppc_md.hpte_remove */ -.globl ht64_call_hpte_remove -ht64_call_hpte_remove: - bl . /* patched by htab_finish_init() */ - - /* Try all again */ - b ht64_insert_pte - -ht64_bail_ok: - li r3,0 - b ht64_bail - -ht64_pte_insert_ok: - /* Insert slot number & secondary bit in PTE */ - rldimi r30,r3,12,63-15 - - /* Write out the PTE with a normal write - * (maybe add eieio may be good still ?) - */ -ht64_write_out_pte: - ld r6,STK_PARAM(R6)(r1) - std r30,0(r6) - li r3, 0 -ht64_bail: - ld r27,STK_REG(R27)(r1) - ld r28,STK_REG(R28)(r1) - ld r29,STK_REG(R29)(r1) - ld r30,STK_REG(R30)(r1) - ld r31,STK_REG(R31)(r1) - addi r1,r1,STACKFRAMESIZE - ld r0,16(r1) - mtlr r0 - blr - -ht64_modify_pte: - /* Keep PP bits in r4 and slot idx from the PTE around in r3 */ - mr r4,r3 - rlwinm r3,r31,32-12,29,31 - - /* Secondary group ? if yes, get a inverted hash value */ - mr r5,r28 - andi. r0,r31,_PAGE_F_SECOND - beq 1f - not r5,r5 -1: - /* Calculate proper slot value for ppc_md.hpte_updatepp */ - and r0,r5,r27 - rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ - add r3,r0,r3 /* add slot idx */ - - /* Call ppc_md.hpte_updatepp */ - mr r5,r29 /* vpn */ - li r6,MMU_PAGE_64K /* base page size */ - li r7,MMU_PAGE_64K /* actual page size */ - ld r8,STK_PARAM(R9)(r1) /* segment size */ - ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ -.globl ht64_call_hpte_updatepp -ht64_call_hpte_updatepp: - bl . /* patched by htab_finish_init() */ - - /* if we failed because typically the HPTE wasn't really here - * we try an insertion. - */ - cmpdi 0,r3,-1 - beq- ht64_insert_pte - - /* Clear the BUSY bit and Write out the PTE */ - li r0,_PAGE_BUSY - andc r30,r30,r0 - b ht64_write_out_pte - -ht64_wrong_access: - /* Bail out clearing reservation */ - stdcx. 
r31,0,r6 - li r3,1 - b ht64_bail - -ht64_pte_insert_failure: - /* Bail out restoring old PTE */ - ld r6,STK_PARAM(R6)(r1) - std r31,0(r6) - li r3,-1 - b ht64_bail - - -#endif /* CONFIG_PPC_64K_PAGES */ - - -/***************************************************************************** - * * - * Huge pages implementation is in hugetlbpage.c * - * * - *****************************************************************************/ diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index c8822af10a58..8eaac81347fd 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -429,6 +429,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, local_irq_restore(flags); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE static void native_hugepage_invalidate(unsigned long vsid, unsigned long addr, unsigned char *hpte_slot_array, @@ -482,6 +483,15 @@ static void native_hugepage_invalidate(unsigned long vsid, } local_irq_restore(flags); } +#else +static void native_hugepage_invalidate(unsigned long vsid, + unsigned long addr, + unsigned char *hpte_slot_array, + int psize, int ssize, int local) +{ + WARN(1, "%s called without THP support\n", __func__); +} +#endif static inline int __hpte_actual_psize(unsigned int lp, int psize) { diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7f9616f7c479..4233dcccbaf7 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -159,24 +159,41 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = { }, }; -static unsigned long htab_convert_pte_flags(unsigned long pteflags) +unsigned long htab_convert_pte_flags(unsigned long pteflags) { - unsigned long rflags = pteflags & 0x1fa; + unsigned long rflags = 0; /* _PAGE_EXEC -> NOEXEC */ if ((pteflags & _PAGE_EXEC) == 0) rflags |= HPTE_R_N; - - /* PP bits. PAGE_USER is already PP bit 0x2, so we only - * need to add in 0x1 if it's a read-only user page + /* + * PP bits: + * Linux use slb key 0 for kernel and 1 for user. + * kernel areas are mapped by PP bits 00 + * and and there is no kernel RO (_PAGE_KERNEL_RO). + * User area mapped by 0x2 and read only use by + * 0x3. */ - if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && - (pteflags & _PAGE_DIRTY))) - rflags |= 1; + if (pteflags & _PAGE_USER) { + rflags |= 0x2; + if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY))) + rflags |= 0x1; + } /* * Always add "C" bit for perf. 
Memory coherence is always enabled */ - return rflags | HPTE_R_C | HPTE_R_M; + rflags |= HPTE_R_C | HPTE_R_M; + /* + * Add in WIG bits + */ + if (pteflags & _PAGE_WRITETHRU) + rflags |= HPTE_R_W; + if (pteflags & _PAGE_NO_CACHE) + rflags |= HPTE_R_I; + if (pteflags & _PAGE_GUARDED) + rflags |= HPTE_R_G; + + return rflags; } int htab_bolt_mapping(unsigned long vstart, unsigned long vend, @@ -629,46 +646,6 @@ int remove_section_mapping(unsigned long start, unsigned long end) } #endif /* CONFIG_MEMORY_HOTPLUG */ -extern u32 htab_call_hpte_insert1[]; -extern u32 htab_call_hpte_insert2[]; -extern u32 htab_call_hpte_remove[]; -extern u32 htab_call_hpte_updatepp[]; -extern u32 ht64_call_hpte_insert1[]; -extern u32 ht64_call_hpte_insert2[]; -extern u32 ht64_call_hpte_remove[]; -extern u32 ht64_call_hpte_updatepp[]; - -static void __init htab_finish_init(void) -{ -#ifdef CONFIG_PPC_64K_PAGES - patch_branch(ht64_call_hpte_insert1, - ppc_function_entry(ppc_md.hpte_insert), - BRANCH_SET_LINK); - patch_branch(ht64_call_hpte_insert2, - ppc_function_entry(ppc_md.hpte_insert), - BRANCH_SET_LINK); - patch_branch(ht64_call_hpte_remove, - ppc_function_entry(ppc_md.hpte_remove), - BRANCH_SET_LINK); - patch_branch(ht64_call_hpte_updatepp, - ppc_function_entry(ppc_md.hpte_updatepp), - BRANCH_SET_LINK); -#endif /* CONFIG_PPC_64K_PAGES */ - - patch_branch(htab_call_hpte_insert1, - ppc_function_entry(ppc_md.hpte_insert), - BRANCH_SET_LINK); - patch_branch(htab_call_hpte_insert2, - ppc_function_entry(ppc_md.hpte_insert), - BRANCH_SET_LINK); - patch_branch(htab_call_hpte_remove, - ppc_function_entry(ppc_md.hpte_remove), - BRANCH_SET_LINK); - patch_branch(htab_call_hpte_updatepp, - ppc_function_entry(ppc_md.hpte_updatepp), - BRANCH_SET_LINK); -} - static void __init htab_initialize(void) { unsigned long table; @@ -815,7 +792,6 @@ static void __init htab_initialize(void) mmu_linear_psize, mmu_kernel_ssize)); } - htab_finish_init(); DBG(" <- htab_initialize()\n"); } @@ -1148,9 +1124,10 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, } } +#endif /* CONFIG_PPC_64K_PAGES */ + if (current->mm == mm) check_paca_psize(ea, mm, psize, user_region); -#endif /* CONFIG_PPC_64K_PAGES */ #ifdef CONFIG_PPC_64K_PAGES if (psize == MMU_PAGE_64K) @@ -1203,6 +1180,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap, } EXPORT_SYMBOL_GPL(hash_page); +int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap, + unsigned long dsisr) +{ + unsigned long access = _PAGE_PRESENT; + unsigned long flags = 0; + struct mm_struct *mm = current->mm; + + if (REGION_ID(ea) == VMALLOC_REGION_ID) + mm = &init_mm; + + if (dsisr & DSISR_NOHPTE) + flags |= HPTE_NOHPTE_UPDATE; + + if (dsisr & DSISR_ISSTORE) + access |= _PAGE_RW; + /* + * We need to set the _PAGE_USER bit if MSR_PR is set or if we are + * accessing a userspace segment (even from the kernel). We assume + * kernel addresses always have the high bit set. 
+ */ + if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID)) + access |= _PAGE_USER; + + if (trap == 0x400) + access |= _PAGE_EXEC; + + return hash_page_mm(mm, ea, access, trap, flags); +} + void hash_preload(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap) { diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 4d87122cf6a7..baf1301ded0c 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c @@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, new_pmd |= _PAGE_DIRTY; } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp, old_pmd, new_pmd)); - /* - * PP bits. _PAGE_USER is already PP bit 0x2, so we only - * need to add in 0x1 if it's a read-only user page - */ - rflags = new_pmd & _PAGE_USER; - if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) && - (new_pmd & _PAGE_DIRTY))) - rflags |= 0x1; - /* - * _PAGE_EXEC -> HW_NO_EXEC since it's inverted - */ - rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N); + rflags = htab_convert_pte_flags(new_pmd); #if 0 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { @@ -82,7 +71,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, */ shift = mmu_psize_defs[psize].shift; index = (ea & ~HPAGE_PMD_MASK) >> shift; - BUG_ON(index >= 4096); + BUG_ON(index >= PTE_FRAG_SIZE); vpn = hpt_vpn(ea, vsid, ssize); hpte_slot_array = get_hpte_slot_array(pmdp); @@ -131,13 +120,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; new_pmd |= _PAGE_HASHPTE; - /* Add in WIMG bits */ - rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | - _PAGE_GUARDED)); - /* - * enable the memory coherence always - */ - rflags |= HPTE_R_M; repeat: hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index d94b1af53a93..e2138c7ae70f 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, new_pte |= _PAGE_DIRTY; } while(old_pte != __cmpxchg_u64((unsigned long *)ptep, old_pte, new_pte)); + rflags = htab_convert_pte_flags(new_pte); - rflags = 0x2 | (!(new_pte & _PAGE_RW)); - /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ - rflags |= ((new_pte & _PAGE_EXEC) ? 
0 : HPTE_R_N); sz = ((1UL) << shift); if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) /* No CPU has hugepages but lacks no execute, so we @@ -91,18 +89,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; /* clear HPTE slot informations in new PTE */ -#ifdef CONFIG_PPC_64K_PAGES - new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0; -#else new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; -#endif - /* Add in WIMG bits */ - rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | - _PAGE_COHERENT | _PAGE_GUARDED)); - /* - * enable the memory coherence always - */ - rflags |= HPTE_R_M; slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, mmu_psize, ssize); @@ -127,3 +114,21 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, *ptep = __pte(new_pte & ~_PAGE_BUSY); return 0; } + +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM) +/* + * This enables us to catch the wrong page directory format + * Moved here so that we can use WARN() in the call. + */ +int hugepd_ok(hugepd_t hpd) +{ + bool is_hugepd; + + /* + * We should not find this format in page directory, warn otherwise. + */ + is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); + WARN(is_hugepd, "Found wrong page directory format\n"); + return 0; +} +#endif diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 9833fee493ec..61b8b7ccea4f 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -53,78 +53,6 @@ static unsigned nr_gpages; #define hugepd_none(hpd) ((hpd).pd == 0) -#ifdef CONFIG_PPC_BOOK3S_64 -/* - * At this point we do the placement change only for BOOK3S 64. This would - * possibly work on other subarchs. - */ - -/* - * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have - * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; - * - * Defined in such a way that we can optimize away code block at build time - * if CONFIG_HUGETLB_PAGE=n. - */ -int pmd_huge(pmd_t pmd) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - return ((pmd_val(pmd) & 0x3) != 0x0); -} - -int pud_huge(pud_t pud) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - return ((pud_val(pud) & 0x3) != 0x0); -} - -int pgd_huge(pgd_t pgd) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - return ((pgd_val(pgd) & 0x3) != 0x0); -} - -#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM) -/* - * This enables us to catch the wrong page directory format - * Moved here so that we can use WARN() in the call. - */ -int hugepd_ok(hugepd_t hpd) -{ - bool is_hugepd; - - /* - * We should not find this format in page directory, warn otherwise. 
- */
- is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
- WARN(is_hugepd, "Found wrong page directory format\n");
- return 0;
-}
-#endif
-
-#else
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
-int pgd_huge(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 /* Only called for hugetlbfs pages, hence can ignore THP */
@@ -966,8 +894,8 @@ void flush_dcache_icache_hugepage(struct page *page)
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page, bottom two bits != 00
- * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ * (3) leaf pte for huge page _PAGE_PTE set
+ * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against teardown,
 * we can follow the address down to the the page and take a ref on it.
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d747dd7bc90b..379a6a90644b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -87,11 +87,7 @@ static void pgd_ctor(void *addr)
 static void pmd_ctor(void *addr)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- memset(addr, 0, PMD_TABLE_SIZE * 2);
-#else
 memset(addr, 0, PMD_TABLE_SIZE);
-#endif
 }
 struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 83dfcb55ffef..83dfd7925c72 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -179,6 +179,10 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 */
 VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
 (_PAGE_PRESENT | _PAGE_USER));
+ /*
+ * Add the pte bit when trying to set a pte
+ */
+ pte = __pte(pte_val(pte) | _PAGE_PTE);
 /* Note: mm->context.id might not yet have been assigned as
 * this context might not have been activated yet when this
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e92cb2146b18..ea6bc31debb0 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -359,7 +359,7 @@ struct page *pud_page(pud_t pud)
 struct page *pmd_page(pmd_t pmd)
 {
 if (pmd_trans_huge(pmd) || pmd_huge(pmd))
- return pfn_to_page(pmd_pfn(pmd));
+ return pte_page(pmd_pte(pmd));
 return virt_to_page(pmd_page_vaddr(pmd));
 }
@@ -625,7 +625,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 "1: ldarx %0,0,%3\n\
 andi. %1,%0,%6\n\
 bne- 1b \n\
- ori %1,%0,%4 \n\
+ oris %1,%0,%4@h \n\
 stdcx. %1,0,%3 \n\
 bne- 1b"
 : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
@@ -759,22 +759,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
 {
- pmd_val(pmd) |= pgprot_val(pgprot);
- return pmd;
+ return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
 }
 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
 {
- pmd_t pmd;
- /*
- * For a valid pte, we would have _PAGE_PRESENT always
- * set. We use this to check THP page at pmd level. 
- * leaf pte for huge page, bottom two bits != 00 - */ - pmd_val(pmd) = pfn << PTE_RPN_SHIFT; - pmd_val(pmd) |= _PAGE_THP_HUGE; - pmd = pmd_set_protbits(pmd, pgprot); - return pmd; + unsigned long pmdv; + + pmdv = pfn << PTE_RPN_SHIFT; + return pmd_set_protbits(__pmd(pmdv), pgprot); } pmd_t mk_pmd(struct page *page, pgprot_t pgprot) @@ -784,10 +777,11 @@ pmd_t mk_pmd(struct page *page, pgprot_t pgprot) pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { + unsigned long pmdv; - pmd_val(pmd) &= _HPAGE_CHG_MASK; - pmd = pmd_set_protbits(pmd, newprot); - return pmd; + pmdv = pmd_val(pmd); + pmdv &= _HPAGE_CHG_MASK; + return pmd_set_protbits(__pmd(pmdv), newprot); } /* diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 4ddf769a64e5..9f79004e6d6f 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -326,7 +326,7 @@ static int spu_process_callback(struct spu_context *ctx) spu_ret = -ENOSYS; npc += 4; - if (s.nr_ret < __NR_syscalls) { + if (s.nr_ret < NR_syscalls) { spu_release(ctx); /* do actual system call from here */ spu_ret = spu_sys_callback(&s); diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 6f4f8b060def..981546345033 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -258,13 +258,14 @@ static unsigned int pmac_pic_get_irq(void) #ifdef CONFIG_XMON static struct irqaction xmon_action = { .handler = xmon_irq, - .flags = 0, + .flags = IRQF_NO_THREAD, .name = "NMI - XMON" }; #endif static struct irqaction gatwick_cascade_action = { .handler = gatwick_action, + .flags = IRQF_NO_THREAD, .name = "cascade", }; diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 4ece8e40dd54..e315e704cca7 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -434,7 +434,6 @@ static const struct of_device_id opal_prd_match[] = { static struct platform_driver opal_prd_driver = { .driver = { .name = "opal-prd", - .owner = THIS_MODULE, .of_match_table = opal_prd_match, }, .probe = opal_prd_probe, diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index b7a67e3d2201..477290ad855e 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -315,48 +315,48 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, return 0; } -static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) +static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group) { - unsigned long dword0; - unsigned long lpar_rc; - unsigned long dummy_word1; - unsigned long flags; + long lpar_rc; + unsigned long i, j; + struct { + unsigned long pteh; + unsigned long ptel; + } ptes[4]; - /* Read 1 pte at a time */ - /* Do not need RPN to logical page translation */ - /* No cross CEC PFT access */ - flags = 0; + for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { - lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1); + lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); + if (lpar_rc != H_SUCCESS) + continue; - BUG_ON(lpar_rc != H_SUCCESS); + for (j = 0; j < 4; j++) { + if (HPTE_V_COMPARE(ptes[j].pteh, want_v) && + (ptes[j].pteh & HPTE_V_VALID)) + return i + j; + } + } - return dword0; + return -1; } static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) { - unsigned long hash; - unsigned long i; long slot; - 
unsigned long want_v, hpte_v; + unsigned long hash; + unsigned long want_v; + unsigned long hpte_group; hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); want_v = hpte_encode_avpn(vpn, psize, ssize); /* Bolted entries are always in the primary group */ - slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; - for (i = 0; i < HPTES_PER_GROUP; i++) { - hpte_v = pSeries_lpar_hpte_getword0(slot); - - if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) - /* HPTE matches */ - return slot; - ++slot; - } - - return -1; -} + hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot = __pSeries_lpar_hpte_find(want_v, hpte_group); + if (slot < 0) + return -1; + return hpte_group + slot; +} static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, @@ -396,6 +396,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, BUG_ON(lpar_rc != H_SUCCESS); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need * to make sure that we avoid bouncing the hypervisor tlbie lock. @@ -494,6 +495,15 @@ static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, index, psize, ssize); } +#else +static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, + unsigned long addr, + unsigned char *hpte_slot_array, + int psize, int ssize, int local) +{ + WARN(1, "%s called without THP support\n", __func__); +} +#endif static void pSeries_lpar_hpte_removebolted(unsigned long ea, int psize, int ssize) diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 7a399b4d60a0..c713b349d967 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -313,6 +313,7 @@ static const struct of_device_id axon_ram_device_id[] = { }, {} }; +MODULE_DEVICE_TABLE(of, axon_ram_device_id); static struct platform_driver axon_ram_driver = { .probe = axon_ram_probe, diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index 263af709e536..022c7ab7351a 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -83,10 +83,10 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, preempt_disable(); pagefault_disable(); - enable_kernel_altivec(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + disable_kernel_vsx(); pagefault_enable(); preempt_enable(); @@ -103,9 +103,9 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) } else { preempt_disable(); pagefault_disable(); - enable_kernel_altivec(); enable_kernel_vsx(); aes_p8_encrypt(src, dst, &ctx->enc_key); + disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } @@ -120,9 +120,9 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) } else { preempt_disable(); pagefault_disable(); - enable_kernel_altivec(); enable_kernel_vsx(); aes_p8_decrypt(src, dst, &ctx->dec_key); + disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 0b8fe2ec5315..1881b3f413fa 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -84,10 +84,10 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, preempt_disable(); pagefault_disable(); - enable_kernel_altivec(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret += aes_p8_set_decrypt_key(key, 
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 0b8fe2ec5315..1881b3f413fa 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -84,10 +84,10 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 
@@ -115,7 +115,6 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 
 		blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -129,6 +128,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
 
+		disable_kernel_vsx();
 		pagefault_enable();
 		preempt_enable();
 	}
@@ -156,7 +156,6 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 
 		blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -170,6 +169,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
 
+		disable_kernel_vsx();
 		pagefault_enable();
 		preempt_enable();
 	}
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index ee1306cd8f59..2d58b18acc10 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -81,9 +81,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 
 	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
@@ -100,9 +100,9 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
 	unsigned int nbytes = walk->nbytes;
 
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 
 	crypto_xor(keystream, src, nbytes);
@@ -133,7 +133,6 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 	ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
 					    walk.dst.virt.addr,
@@ -142,6 +141,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 					    AES_BLOCK_SIZE,
 					    &ctx->enc_key,
 					    walk.iv);
+		disable_kernel_vsx();
 		pagefault_enable();
 
 		/* We need to update IV mostly for last bytes/round */
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 2183a2e77641..6c999cb01b80 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -118,10 +118,9 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
-	enable_kernel_fp();
 	gcm_init_p8(ctx->htable, (const u64 *) key);
+	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 	return crypto_shash_setkey(ctx->fallback, key, keylen);
@@ -149,11 +148,10 @@ static int p8_ghash_update(struct shash_desc *desc,
 			       GHASH_DIGEST_SIZE - dctx->bytes);
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
+			disable_kernel_vsx();
 			pagefault_enable();
 			preempt_enable();
 			src += GHASH_DIGEST_SIZE - dctx->bytes;
@@ -164,10 +162,9 @@ static int p8_ghash_update(struct shash_desc *desc,
 		if (len) {
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+			disable_kernel_vsx();
 			pagefault_enable();
 			preempt_enable();
 			src += len;
@@ -195,11 +192,10 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
 				dctx->buffer[i] = 0;
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
+			disable_kernel_vsx();
 			pagefault_enable();
 			preempt_enable();
 			dctx->bytes = 0;
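For reference, the tail handling that p8_aes_ctr_final() performs inside its enable/disable window above is plain CTR: encrypt the counter block once, then XOR only the remaining bytes, exactly what the aes_p8_encrypt()/crypto_xor() pair in the hunk does. A standalone sketch in userspace C, where block_cipher_fn is an assumed stand-in for aes_p8_encrypt() and nothing here is kernel API:

#include <stddef.h>
#include <stdint.h>

typedef void (*block_cipher_fn)(const uint8_t in[16], uint8_t out[16]);

/* Encrypt the final, possibly partial, CTR block: one keystream block
 * from the counter, then XOR just the nbytes that remain. */
static void ctr_final(block_cipher_fn block_cipher,
		      const uint8_t ctrblk[16],
		      const uint8_t *src, uint8_t *dst, size_t nbytes)
{
	uint8_t keystream[16];
	size_t i;

	block_cipher(ctrblk, keystream);	/* E_k(counter) */
	for (i = 0; i < nbytes; i++)		/* tail bytes only */
		dst[i] = src[i] ^ keystream[i];
}

Because only the block-cipher call touches the vector unit, the enable/disable window in the driver can stay this narrow; the XOR runs with the facility already released.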
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 048901a1111a..caaec654d7ea 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -582,6 +582,7 @@ static struct of_device_id rackmeter_match[] = {
 	{ .name = "i2s" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, rackmeter_match);
 
 static struct macio_driver rackmeter_driver = {
 	.driver = {
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index f9512bfa6c3c..01ee736fe0ef 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -425,8 +425,9 @@ static int __init via_pmu_start(void)
 
 	gpio_irq = irq_of_parse_and_map(gpio_node, 0);
 	if (gpio_irq != NO_IRQ) {
-		if (request_irq(gpio_irq, gpio1_interrupt, IRQF_TIMER,
-				"GPIO1 ADB", (void *)0))
+		if (request_irq(gpio_irq, gpio1_interrupt,
+				IRQF_NO_SUSPEND, "GPIO1 ADB",
+				(void *)0))
 			printk(KERN_ERR "pmu: can't get irq %d"
 			       " (GPIO1)\n", gpio_irq);
 		else
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 103baf0e0c5b..a6543aefa299 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 
 	afu = cxl_pci_to_afu(dev);
 
-	get_device(&afu->dev);
 	ctx = cxl_context_alloc();
 	if (IS_ERR(ctx)) {
 		rc = PTR_ERR(ctx);
@@ -61,7 +60,6 @@ err_mapping:
 err_ctx:
 	kfree(ctx);
 err_dev:
-	put_device(&afu->dev);
 	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_context *ctx)
 	if (ctx->status >= STARTED)
 		return -EBUSY;
 
-	put_device(&ctx->afu->dev);
-
 	cxl_context_free(ctx);
 
 	return 0;
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2faa1270d085..6dde7a9d6a7e 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	ctx->pe = i;
 	ctx->elem = &ctx->afu->spa[i];
 	ctx->pe_inserted = false;
+
+	/*
+	 * take a ref on the afu so that it stays alive at-least till
+	 * this context is reclaimed inside reclaim_ctx.
+	 */
+	cxl_afu_get(afu);
 	return 0;
 }
 
@@ -278,6 +284,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
 	if (ctx->irq_bitmap)
 		kfree(ctx->irq_bitmap);
 
+	/* Drop ref to the afu device taken during cxl_context_init */
+	cxl_afu_put(ctx->afu);
+
 	kfree(ctx);
 }
 
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0cfb9c129f27..25ae57fa79b0 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -403,6 +403,18 @@ struct cxl_afu {
 	bool enabled;
 };
 
+/* AFU refcount management */
+static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+
+	return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+
+static inline void cxl_afu_put(struct cxl_afu *afu)
+{
+	put_device(&afu->dev);
+}
+
 
 struct cxl_irq_name {
 	struct list_head list;
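The cxl_afu_get()/cxl_afu_put() helpers added above are thin wrappers around get_device()/put_device() on the AFU's struct device. A hedged usage sketch, not part of the series: do_work_on_afu() and its caller are hypothetical, and only the two helpers and the enabled field come from the hunks:

#include <linux/errno.h>
#include "cxl.h"	/* struct cxl_afu, cxl_afu_get(), cxl_afu_put() */

/* Hypothetical placeholder for real work against the AFU. */
static int do_work_on_afu(struct cxl_afu *afu)
{
	return afu->enabled ? 0 : -EIO;
}

static int do_work_with_afu(struct cxl_afu *afu)
{
	int rc;

	if (!cxl_afu_get(afu))		/* get_device() gave us nothing */
		return -ENODEV;

	rc = do_work_on_afu(afu);	/* afu->dev cannot be freed here */

	cxl_afu_put(afu);		/* every get pairs with a put */
	return rc;
}

Moving the get into cxl_context_init() and the put into reclaim_ctx(), as the context.c hunks do, ties the AFU's lifetime to the context itself rather than to whichever file descriptor happened to open it.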
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 7ccd2998be92..5cc14599837d 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -67,7 +67,13 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
 		spin_unlock(&adapter->afu_list_lock);
 		goto err_put_adapter;
 	}
-	get_device(&afu->dev);
+
+	/*
+	 * taking a ref to the afu so that it doesn't go away
+	 * for rest of the function. This ref is released before
+	 * we return.
+	 */
+	cxl_afu_get(afu);
 	spin_unlock(&adapter->afu_list_lock);
 
 	if (!afu->current_mode)
@@ -90,13 +96,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
 	file->private_data = ctx;
 	cxl_ctx_get();
 
-	/* Our ref on the AFU will now hold the adapter */
-	put_device(&adapter->dev);
-
-	return 0;
+	/* indicate success */
+	rc = 0;
 
 err_put_afu:
-	put_device(&afu->dev);
+	/* release the ref taken earlier */
+	cxl_afu_put(afu);
 err_put_adapter:
 	put_device(&adapter->dev);
 	return rc;
@@ -131,8 +136,6 @@ int afu_release(struct inode *inode, struct file *file)
 		mutex_unlock(&ctx->mapping_lock);
 	}
 
-	put_device(&ctx->afu->dev);
-
 	/*
 	 * At this this point all bottom halfs have finished and we should be
 	 * getting no more IRQs from the hardware for this context. Once it's
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index c241e15cacb1..cbd4331fb45c 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 	mask <<= shift;
 	val <<= shift;
 
-	v = (in_le32(ioaddr) & ~mask) || (val & mask);
+	v = (in_le32(ioaddr) & ~mask) | (val & mask);
 
 	out_le32(ioaddr, v);
 
 	return PCIBIOS_SUCCESSFUL;
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index bec27fce7501..682aae8a1fef 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
 
 	raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
index 8265504de571..08a8b95e3bc1 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
@@ -60,14 +60,6 @@ int dscr_inherit_exec(void)
 	else
 		set_dscr(dscr);
 
-	/*
-	 * XXX: Force a context switch out so that DSCR
-	 * current value is copied into the thread struct
-	 * which is required for the child to inherit the
-	 * changed value.
-	 */
-	sleep(1);
-
 	pid = fork();
 	if (pid == -1) {
 		perror("fork() failed");
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
index 4e414caf7f40..3e5a6d195e9a 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
@@ -40,14 +40,6 @@ int dscr_inherit(void)
 	else
 		set_dscr(dscr);
 
-	/*
-	 * XXX: Force a context switch out so that DSCR
-	 * current value is copied into the thread struct
-	 * which is required for the child to inherit the
-	 * changed value.
-	 */
-	sleep(1);
-
 	pid = fork();
 	if (pid == -1) {
 		perror("fork() failed");
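The one-character cxl_pcie_write_config() fix above deserves spelling out: logical || collapses the read-modify-write result to 0 or 1, while bitwise | actually merges the preserved and updated fields. A standalone demonstration in userspace C, with a constant standing in for the in_le32() read:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t old = 0xaabbccdd;	/* stands in for in_le32(ioaddr) */
	uint32_t mask = 0x0000ff00;	/* byte lane being written */
	uint32_t val = 0x00001100;	/* new value, already shifted */

	uint32_t wrong = (old & ~mask) || (val & mask);	/* boolean! */
	uint32_t right = (old & ~mask) | (val & mask);	/* merge */

	assert(wrong == 1);		/* config space written as 1 */
	assert(right == 0xaabb11dd);	/* untouched lanes preserved */
	return 0;
}

With ||, every sub-word config write would have stored 0 or 1 into the register, clobbering the lanes the mask was meant to preserve.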