author    Nicholas Piggin <npiggin@gmail.com>  2018-09-15 01:30:48 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2018-09-19 21:59:41 +1000
commit    85376e2a17ec152e76f6a87fcb66332a68926218 (patch)
tree      66f42aa341719834f66ad7a563074840b944e261  /arch/powerpc/mm/slb.c
parent    powerpc/64s/hash: move POWER5 < DD2.1 slbie workaround where it is needed (diff)
powerpc/64s/hash: remove the vmalloc segment from the bolted SLB
Remove the vmalloc segment from bolted SLBEs. It is not required to be
bolted, and it seems to have been added to help pre-load the SLB on
context switch. However, there are now other segments, like the vmemmap
segment and non-zero node memory, that often take misses after a context
switch, so it is better to solve this in a more general way.

A subsequent change will track free SLB entries and use those rather
than round-robin overwriting valid entries, which makes it far less
likely for kernel SLBEs to be evicted after they are installed.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
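For readers following the patch below, here is a minimal sketch of how one
bolted SLB entry is composed and installed. mk_esid_data(), mk_vsid_data(),
SLB_VSID_KERNEL, mmu_psize_defs[] and the slbmte instruction all appear in
slb.c as shown in the diff; the wrapper name install_bolted_slbe() is a
hypothetical helper for illustration only, not part of the patch.

	/*
	 * Hedged sketch, not from the patch: build the VSID and ESID
	 * halves of an SLB entry for a kernel address and bolt it into
	 * the given slot with slbmte.
	 */
	static void install_bolted_slbe(unsigned long ea, enum slb_index index)
	{
		/* segment flags: kernel segment + base page size encoding */
		unsigned long flags = SLB_VSID_KERNEL |
				      mmu_psize_defs[mmu_linear_psize].sllp;
		unsigned long vsid_data = mk_vsid_data(ea, mmu_kernel_ssize, flags);
		unsigned long esid_data = mk_esid_data(ea, mmu_kernel_ssize, index);

		/* slbmte takes the VSID word first, then the ESID word */
		asm volatile("slbmte %0,%1"
			     :: "r" (vsid_data), "r" (esid_data)
			     : "memory");
	}

With the vmalloc slot gone, only the linear mapping and the kernel stack
segment are installed this way at boot and rebolt time, as the diff shows.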
Diffstat
-rw-r--r--  arch/powerpc/mm/slb.c  23
1 file changed, 5 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a9c66feb3c43..4a0b059ad104 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -30,8 +30,7 @@
enum slb_index {
LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
- VMALLOC_INDEX = 1, /* Kernel virtual map (0xd000000000000000) */
- KSTACK_INDEX = 2, /* Kernel stack map */
+ KSTACK_INDEX = 1, /* Kernel stack map */
};
extern void slb_allocate(unsigned long ea);
@@ -133,13 +132,11 @@ static void __slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* and PR KVM appropriately too. */
- unsigned long linear_llp, vmalloc_llp, lflags, vflags;
+ unsigned long linear_llp, lflags;
unsigned long ksp_esid_data, ksp_vsid_data;
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | vmalloc_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
@@ -157,14 +154,10 @@ static void __slb_flush_and_rebolt(void)
* the stack between the slbia and rebolting it. */
asm volatile("isync\n"
"slbia\n"
- /* Slot 1 - first VMALLOC segment */
+ /* Slot 1 - kernel stack */
"slbmte %0,%1\n"
- /* Slot 2 - kernel stack */
- "slbmte %2,%3\n"
"isync"
- :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
- "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)),
- "r"(ksp_vsid_data),
+ :: "r"(ksp_vsid_data),
"r"(ksp_esid_data)
: "memory");
}
@@ -256,10 +249,6 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
void slb_vmalloc_update(void)
{
- unsigned long vflags;
-
- vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
- slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
slb_flush_and_rebolt();
}
@@ -394,7 +383,7 @@ void slb_set_size(u16 size)
void slb_initialize(void)
{
unsigned long linear_llp, vmalloc_llp, io_llp;
- unsigned long lflags, vflags;
+ unsigned long lflags;
static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_llp;
@@ -430,14 +419,12 @@ void slb_initialize(void)
get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | vmalloc_llp;
/* Invalidate the entire SLB (even entry 0) & all the ERATS */
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
- create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
/* For the boot cpu, we're running on the stack in init_thread_union,
* which is in the first segment of the linear mapping, and also