Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c               | 21
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c       |  4
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c  |  5
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c         | 31
-rw-r--r--  arch/powerpc/mm/init-common.c         | 13
-rw-r--r--  arch/powerpc/mm/init_64.c             |  3
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c    | 18
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c       |  4
-rw-r--r--  arch/powerpc/mm/tlb-radix.c           |  6
9 files changed, 51 insertions, 54 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6fd30ac7d14a..62a50d6d1053 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(debugger_fault_handler(regs)))
 		goto bail;
 
-	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (!user_mode(regs) && (address >= TASK_SIZE)) {
+	/*
+	 * The kernel should never take an execute fault nor should it
+	 * take a page fault to a kernel address.
+	 */
+	if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
 		rc = SIGSEGV;
 		goto bail;
 	}
@@ -391,20 +394,6 @@ good_area:
 
 	if (is_exec) {
 		/*
-		 * An execution fault + no execute ?
-		 *
-		 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
-		 * deliberately create NX mappings, and use the fault to do the
-		 * cache flush. This is usually handled in hash_page_do_lazy_icache()
-		 * but we could end up here if that races with a concurrent PTE
-		 * update. In that case we need to fall through here to the VMA
-		 * check below.
-		 */
-		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-		    (regs->msr & SRR1_ISI_N_OR_G))
-			goto bad_area;
-
-		/*
 		 * Allow execution from readable areas if the MMU does not
 		 * provide separate controls over reading and executing.
 		 *
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 80334937e14f..67e19a0821be 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
 {
 	int rc = htab_bolt_mapping(start, end, __pa(start),
 				   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
 	return rc;
 }
 
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
 {
 	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
 				     mmu_kernel_ssize);
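
Note: the two hugetlbpage diffs below stop reaching into the hugepd_t.pd field directly and go through the hpd_val()/__hugepd() accessors instead. A minimal sketch of that wrapper pattern, assuming definitions along the lines of the ones in arch/powerpc/include/asm/page.h (the kernel's real definitions may differ in detail):

	/* Opaque wrapper type: the raw word is only touched via helpers. */
	typedef struct { unsigned long pd; } hugepd_t;

	#define hpd_val(x)	((x).pd)		/* read the raw value */
	#define __hugepd(x)	((hugepd_t) { (x) })	/* build from a raw value */

	/* Call sites then read as, e.g.: */
	/*	*hpdp = __hugepd(0);	rather than	hpdp->pd = 0;	*/

Funnelling every store through one constructor keeps the per-platform encodings (Book3S-64, 8xx, FSL Book3E) behind a single point of change instead of open-coded bit fiddling at each assignment.
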
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index d5026f3800b6..37b5f91e381b 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 int hugepd_ok(hugepd_t hpd)
 {
 	bool is_hugepd;
+	unsigned long hpdval;
+
+	hpdval = hpd_val(hpd);
 
 	/*
 	 * We should not find this format in page directory, warn otherwise.
 	 */
-	is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+	is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
 	WARN(is_hugepd, "Found wrong page directory format\n");
 	return 0;
 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 289df38fb7e0..8c3389cbcd12 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
 static unsigned nr_gpages;
 #endif
 
-#define hugepd_none(hpd)	((hpd).pd == 0)
+#define hugepd_none(hpd)	(hpd_val(hpd) == 0)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	for (i = 0; i < num_hugepd; i++, hpdp++) {
 		if (unlikely(!hugepd_none(*hpdp)))
 			break;
-		else
+		else {
 #ifdef CONFIG_PPC_BOOK3S_64
-			hpdp->pd = __pa(new) |
-				   (shift_to_mmu_psize(pshift) << 2);
+			*hpdp = __hugepd(__pa(new) |
+					 (shift_to_mmu_psize(pshift) << 2));
 #elif defined(CONFIG_PPC_8xx)
-			hpdp->pd = __pa(new) |
-				   (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
-				    _PMD_PAGE_512K) |
-				   _PMD_PRESENT;
+			*hpdp = __hugepd(__pa(new) |
+					 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+					  _PMD_PAGE_512K) | _PMD_PRESENT);
 #else
 			/* We use the old format for PPC_FSL_BOOK3E */
-			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+			*hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
 #endif
+		}
 	}
 	/* If we bailed from the for loop early, an error occurred, clean up */
 	if (i < num_hugepd) {
 		for (i = i - 1 ; i >= 0; i--, hpdp--)
-			hpdp->pd = 0;
+			*hpdp = __hugepd(0);
 		kmem_cache_free(cachep, new);
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 		return;
 
 	for (i = 0; i < num_hugepd; i++, hpdp++)
-		hpdp->pd = 0;
+		*hpdp = __hugepd(0);
 
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
 		 * if we have pdshift and shift value same, we don't
 		 * use pgt cache for hugepd.
 		 */
-		if (pdshift > shift) {
+		if (pdshift > shift)
 			pgtable_cache_add(pdshift - shift, NULL);
-			if (!PGT_CACHE(pdshift - shift))
-				panic("hugetlbpage_init(): could not create "
-				      "pgtable cache for %d bit pagesize\n", shift);
-		}
 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
 		else if (!hugepte_cache) {
 			/*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
 	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
 		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
 #endif
-	else
-		panic("%s: Unable to set default huge page size\n", __func__);
-
 	return 0;
 }
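
Note: the hugetlbpage_init() hunks above can drop their after-the-fact PGT_CACHE() checks because pgtable_cache_add() itself now panics when kmem_cache_create() fails, as the init-common.c diff below shows. The shape of the change, as an illustrative before/after (not the literal kernel lines):

	/* before: every caller probed the cache it had just requested */
	pgtable_cache_add(pdshift - shift, NULL);
	if (!PGT_CACHE(pdshift - shift))
		panic("could not create pgtable cache");

	/* after: the failure is caught once, inside pgtable_cache_add() */
	pgtable_cache_add(pdshift - shift, NULL);

A cache that cannot be allocated this early in boot is unrecoverable either way, so centralising the panic loses nothing and deletes several copies of the error handling.
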
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a175cd82ae8c..f2108c40e697 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
 	align = max_t(unsigned long, align, minalign);
 	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
 	new = kmem_cache_create(name, table_size, align, 0, ctor);
+	if (!new)
+		panic("Could not allocate pgtable cache for order %d", shift);
+
 	kfree(name);
 	pgtable_cache[shift - 1] = new;
+
 	pr_debug("Allocated pgtable cache for order %d\n", shift);
 }
 
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
 {
 	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
 
-	if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+	if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
 		pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
 	/*
 	 * In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
 	 */
 	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
 		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
-	if (!PGT_CACHE(PGD_INDEX_SIZE))
-		panic("Couldn't allocate pgd cache");
-	if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
-		panic("Couldn't allocate pmd pgtable caches");
-	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-		panic("Couldn't allocate pud pgtable caches");
 }
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 93abf8a9813d..8e1588021d1c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -347,7 +347,8 @@ early_param("disable_radix", parse_disable_radix);
 void __init mmu_early_init_devtree(void)
 {
 	/* Disable radix mode based on kernel command line. */
-	if (disable_radix)
+	/* We don't yet have the machinery to do radix as a guest. */
+	if (disable_radix || !(mfmsr() & MSR_HV))
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
 	if (early_radix_enabled())
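
Note: the init_64.c hunk above gates radix on hypervisor state: mfmsr() reads the Machine State Register, and MSR_HV (bit 60 on 64-bit PowerPC) is only set when running in hypervisor mode, so a kernel booted as a guest falls back to the hash MMU. A runnable toy model of that gate; the feature-bit value below is made up for illustration:

	#include <stdio.h>

	#define MMU_FTR_TYPE_RADIX	(1ULL << 0)	/* illustrative value only */
	#define MSR_HV			(1ULL << 60)	/* hypervisor-state MSR bit */

	static unsigned long long mmu_features = MMU_FTR_TYPE_RADIX;

	static void early_init_devtree(unsigned long long msr, int disable_radix)
	{
		/* Radix stays off until there is machinery to run it as a guest. */
		if (disable_radix || !(msr & MSR_HV))
			mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}

	int main(void)
	{
		early_init_devtree(0, 0);	/* booted as a guest: HV clear */
		printf("radix: %s\n",
		       (mmu_features & MMU_FTR_TYPE_RADIX) ? "on" : "off");
		return 0;
	}
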
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index ebf9782bacf9..653ff6c74ebe 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
 	else if (mmu_hash_ops.hpte_clear_all)
 		mmu_hash_ops.hpte_clear_all();
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfa53ccc8baf..34f1a0dbc898 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		if (!pmd_present(*pmdp)) {
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 61b79119065f..952713d6cf04 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-	return;
+	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
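
Note: with the pgtable-book3s64.c hunk, create_section_mapping()/remove_section_mapping() become MMU-neutral entry points for memory hotplug: under radix they report -ENODEV for now, and under hash they forward to the renamed hash__* implementations from hash_utils_64.c above. A hypothetical caller (the names around the call are for illustration only) would see the contract as:

	int rc = create_section_mapping(start, start + block_size);
	if (rc == -ENODEV)
		pr_warn("memory hotplug: not supported by this MMU yet\n");

The tlb-radix.c hunks adjust the POWER9 DD1 handling: the local PID-wide flush now always invalidates the ERAT and adds an isync, while the per-VA local flush drops its DD1-only ERAT invalidation.
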