author     Christophe Leroy <christophe.leroy@c-s.fr>  2019-02-21 19:08:51 +0000
committer  Michael Ellerman <mpe@ellerman.id.au>       2019-02-23 21:04:32 +1100
commit     d5f17ee96447736a84bc44ffc4b0dddb1b519222 (patch)
tree       6cd30783636f3e54bdec42c13b3e95af096762c6
parent     powerpc/kconfig: make _etext and data areas alignment configurable on Book3s 32 (diff)
powerpc/8xx: don't disable large TLBs with CONFIG_STRICT_KERNEL_RWX
This patch implements handling of STRICT_KERNEL_RWX with large TLBs
directly in the TLB miss handlers. To do so, _etext and _sinittext are
aligned on 512kB boundaries, and the miss handlers use 512kB pages
instead of 8MB pages for addresses close to the boundaries. The handlers
also set the RO PP flags for addresses below _sinittext.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
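[Editorial note: a rough C model of the boundary handling described above, as read
from the new miss-handler assembly. All helper names here are hypothetical; the
real logic is done branchlessly in head_8xx.S below.]

	#define SZ_512K	0x00080000UL
	#define SZ_8M	0x00800000UL

	/*
	 * Rough model of the ITLB fast path: text addresses at or above the
	 * base of the 8M block containing the 512kB-aligned _etext get 512kB
	 * pages, so the RO/RW boundary can sit on a 512kB edge.
	 */
	static unsigned long itlb_page_size(unsigned long ea, unsigned long etext)
	{
		unsigned long block = etext & ~(SZ_8M - 1);

		return ea >= block ? SZ_512K : SZ_8M;
	}

	/*
	 * Rough model of the DTLB fast path: only the 8M block containing
	 * _sinittext is split into 512kB pages, and every address below
	 * _sinittext is mapped with the RO PP flags.
	 */
	static unsigned long dtlb_page_size(unsigned long pa, unsigned long sinittext)
	{
		unsigned long block = sinittext & ~(SZ_8M - 1);

		return (pa & ~(SZ_8M - 1)) == block ? SZ_512K : SZ_8M;
	}

	static int dtlb_is_ro(unsigned long pa, unsigned long sinittext)
	{
		return pa < sinittext;
	}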
-rw-r--r--  arch/powerpc/Kconfig                          |  2
-rw-r--r--  arch/powerpc/include/asm/nohash/32/mmu-8xx.h  |  3
-rw-r--r--  arch/powerpc/kernel/head_8xx.S                | 54
-rw-r--r--  arch/powerpc/mm/8xx_mmu.c                     | 31
-rw-r--r--  arch/powerpc/mm/init_32.c                     |  2
-rw-r--r--  arch/powerpc/mm/mmu_decl.h                    |  2
6 files changed, 78 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 81df0dbc8a9a..43fa82e409bf 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -735,6 +735,7 @@ config ETEXT_SHIFT
int "_etext shift" if ETEXT_SHIFT_BOOL
range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 17 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
+ default 19 if STRICT_KERNEL_RWX && PPC_8xx
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), IBATs are used to map kernel text.
@@ -755,6 +756,7 @@ config DATA_SHIFT
default 24 if STRICT_KERNEL_RWX && PPC64
range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
+ default 19 if STRICT_KERNEL_RWX && PPC_8xx
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
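[Editorial note: a *_SHIFT config value is just log2 of the resulting alignment;
the new 8xx default of 19 yields the 512kB alignment the miss handlers rely on.
Illustration only, names made up:]

	#define EXAMPLE_ETEXT_SHIFT	19	/* 8xx default with STRICT_KERNEL_RWX */
	#define EXAMPLE_ETEXT_ALIGN	(1UL << EXAMPLE_ETEXT_SHIFT)	/* 0x80000 = 512kB */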
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index b0f764c827c0..0a1a3fc54e54 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -231,9 +231,10 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
}
/* patch sites */
-extern s32 patch__itlbmiss_linmem_top;
+extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
extern s32 patch__fixupdar_linmem_top;
+extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
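[Editorial note: the patch__* symbols are patch sites: each is an s32 holding the
offset from itself to the instruction that gets rewritten at runtime, so resolving
one is a single addition. A sketch, mirroring patch_site_addr() in
arch/powerpc/include/asm/code-patching.h:]

	#include <linux/types.h>

	/* A patch site stores the distance from itself to its instruction. */
	static inline unsigned long example_patch_site_addr(s32 *site)
	{
		return (unsigned long)site + *site;
	}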
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 52c92913e39b..3f2d1afba2d1 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -292,6 +292,17 @@ SystemCall:
*/
EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
+/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
+#ifdef CONFIG_PERF_EVENTS
+ patch_site 0f, patch__dtlbmiss_perf
+0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+ addi r10, r10, 1
+ stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+ mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH1
+ rfi
+#endif
+
. = 0x1100
/*
* For the MPC8xx, this is a software tablewalk to load the instruction
@@ -405,10 +416,20 @@ InstructionTLBMiss:
#ifndef CONFIG_PIN_TLB_TEXT
ITLBMissLinear:
mtcr r11
+#ifdef CONFIG_STRICT_KERNEL_RWX
+ patch_site 0f, patch__itlbmiss_linmem_top8
+
+ mfspr r10, SPRN_SRR0
+0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
+ rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
+ ori r11, r11, MI_PS512K | MI_SVALID
+ rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
+#else
/* Set 8M byte page and mark it valid */
li r11, MI_PS8MEG | MI_SVALID
- mtspr SPRN_MI_TWC, r11
rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
+#endif
+ mtspr SPRN_MI_TWC, r11
ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
@@ -494,16 +515,6 @@ DataStoreTLBMiss:
rfi
patch_site 0b, patch__dtlbmiss_exit_1
-#ifdef CONFIG_PERF_EVENTS
- patch_site 0f, patch__dtlbmiss_perf
-0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
- addi r10, r10, 1
- stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
- mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
- rfi
-#endif
-
DTLBMissIMMR:
mtcr r11
/* Set 512k byte guarded page and mark it valid */
@@ -525,10 +536,29 @@ DTLBMissIMMR:
DTLBMissLinear:
mtcr r11
+ rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
+#ifdef CONFIG_STRICT_KERNEL_RWX
+ patch_site 0f, patch__dtlbmiss_romem_top8
+
+0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
+ rlwinm r11, r11, 0, 0xff800000
+ neg r10, r11
+ or r11, r11, r10
+ rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
+ ori r11, r11, MI_PS512K | MI_SVALID
+ mfspr r10, SPRN_MD_EPN
+ rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
+#else
/* Set 8M byte page and mark it valid */
li r11, MD_PS8MEG | MD_SVALID
+#endif
mtspr SPRN_MD_TWC, r11
- rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
+#ifdef CONFIG_STRICT_KERNEL_RWX
+ patch_site 0f, patch__dtlbmiss_romem_top
+
+0: subis r11, r10, 0
+ rlwimi r10, r11, 11, _PAGE_RO
+#endif
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
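[Editorial note: the new ITLBMissLinear code picks the page size without a branch:
subis computes ea minus the patched boundary, and rlwinm rotates the sign bit of
that difference into the one bit where MI_PS8MEG and MI_PS512K differ. A hedged C
rendering of the same trick, with constant values as defined in mmu-8xx.h:]

	#define MI_SVALID	0x00000001	/* segment entry valid */
	#define MI_PS512K	0x00000004	/* 512kB page size */
	#define MI_PS8MEG	0x0000000c	/* 8MB page size */

	/*
	 * ea below the boundary => (ea - boundary) is negative => its sign
	 * bit, moved into MI_PS8MEG ^ MI_PS512K, upgrades MI_PS512K to
	 * MI_PS8MEG; otherwise the 512kB size is kept. No branch taken.
	 */
	static unsigned int itlb_twc(unsigned int ea, unsigned int boundary)
	{
		unsigned int diff = ea - boundary;
		unsigned int ps = (diff >> 28) & (MI_PS8MEG ^ MI_PS512K);

		return ps | MI_PS512K | MI_SVALID;
	}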
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index ce11cbaa25d8..fe1f6443d57f 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -94,11 +94,20 @@ static void __init mmu_mapin_immr(void)
map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}
-static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
+static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
+static void mmu_patch_addis(s32 *site, long simm)
+{
+ unsigned int instr = *(unsigned int *)patch_site_addr(site);
+
+ instr &= 0xffff0000;
+ instr |= ((unsigned long)simm) >> 16;
+ patch_instruction_site(site, instr);
+}
+
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
unsigned long mapped;
@@ -135,6 +144,26 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return mapped;
}
+void mmu_mark_initmem_nx(void)
+{
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
+ mmu_patch_addis(&patch__itlbmiss_linmem_top8,
+ -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
+ if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
+ mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+ if (CONFIG_DATA_SHIFT < 23)
+ mmu_patch_addis(&patch__dtlbmiss_romem_top8,
+ -__pa(((unsigned long)_sinittext) &
+ ~(LARGE_PAGE_SIZE_8M - 1)));
+ mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
+}
+#endif
+
void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
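[Editorial note: mmu_patch_addis() overwrites the 16-bit immediate of the patched
addis/subis so the instruction adds the high half of -boundary, which modulo 2^32
is the same as subtracting the boundary itself. A small standalone demonstration
of the immediate computation, using a made-up _etext value:]

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t etext = 0xc0560000;	/* hypothetical _etext */
		uint32_t sz8m = 0x00800000;
		uint32_t boundary = etext & ~(sz8m - 1);	/* 0xc0000000 */
		uint32_t simm = -boundary;			/* 0x40000000 */
		uint32_t imm = simm >> 16;			/* 0x4000: patched field */

		/* The patched instruction adds imm << 16 = 0x40000000, and mod
		 * 2^32 that equals subtracting 0xc0000000: r11 = ea - boundary. */
		printf("patched immediate: 0x%04x\n", imm);
		return 0;
	}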
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index bc28995a37ea..41a3513cadc9 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -108,7 +108,7 @@ static void __init MMU_setup(void)
__map_without_bats = 1;
__map_without_ltlbs = 1;
}
- if (strict_kernel_rwx_enabled())
+ if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx))
__map_without_ltlbs = 1;
}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 98fc94affc29..74ff61dabcb1 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -166,7 +166,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif
-#if defined(CONFIG_PPC_BOOK3S_32)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void);
#else