author		Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	2018-06-01 13:54:02 +0530
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-06-03 20:40:38 +1000
commit		a5db5060e0b2e27605df272224bfd470f644d8a5 (patch)
tree		1cfc27d0f084d34cc21978d63aa8e97fb4ba28ff /arch/powerpc/mm/slb.c
parent		powerpc/mm/hugetlb: Update hugetlb related locks (diff)
powerpc/mm/hash: hard disable irq in the SLB insert path
When inserting SLB entries for an EA above 512TB, hard disable interrupts. This makes sure we don't take a PMU interrupt that could touch a user space address via a stack dump while the new entry is being installed.

Also add a comment explaining why we don't need a context synchronizing isync with slbmte here.

Fixes: f384796c4 ("powerpc/mm: Add support for handling > 512TB address in SLB miss")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
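For context, here is a condensed sketch of the pattern the diff below applies. It is not a verbatim copy of the kernel code, and the helper name is invented for illustration. On powerpc, a plain local_irq_disable() only soft-disables interrupts, so a performance monitor (PMU) interrupt can still be delivered and may touch a user space address via a stack dump while the SLB insertion is in progress. hard_irq_disable() clears MSR[EE], which holds the PMU interrupt off until the slbmte has completed.

#include <linux/mmdebug.h>	/* VM_WARN_ON() */
#include <linux/irqflags.h>	/* irqs_disabled() */
#include <asm/hw_irq.h>		/* hard_irq_disable() */

/*
 * Sketch only: an illustrative helper, not the patched insert_slb_entry().
 * Assumes the caller already runs with interrupts (at least soft-)disabled,
 * as the SLB miss path does.
 */
static void sketch_insert_user_slb(unsigned long vsid_data,
				   unsigned long esid_data)
{
	VM_WARN_ON(!irqs_disabled());	/* caller must be soft-disabled */

	/*
	 * Soft-disable is not enough: PMU exceptions are still delivered.
	 * Clearing MSR[EE] via hard_irq_disable() keeps them out while the
	 * new SLB entry is written.
	 */
	hard_irq_disable();

	/*
	 * No isync is needed around this slbmte: the exception we entered
	 * with and the rfid we return with are context synchronizing, and
	 * only user segments are handled here.
	 */
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
		     : "memory");
}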
Diffstat (limited to '')
-rw-r--r--	arch/powerpc/mm/slb.c	13
1 file changed, 13 insertions, 0 deletions
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 2f4b33b24b3b..cb796724a6fc 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -352,6 +352,14 @@ static void insert_slb_entry(unsigned long vsid, unsigned long ea,
 	/*
 	 * We are irq disabled, hence should be safe to access PACA.
 	 */
+	VM_WARN_ON(!irqs_disabled());
+
+	/*
+	 * We can't take a PMU exception in the following code, so hard
+	 * disable interrupts.
+	 */
+	hard_irq_disable();
+
 	index = get_paca()->stab_rr;
 	/*
@@ -369,6 +377,11 @@ static void insert_slb_entry(unsigned long vsid, unsigned long ea,
 		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
 	esid_data = mk_esid_data(ea, ssize, index);
+	/*
+	 * No need for an isync before or after this slbmte. The exception
+	 * we enter with and the rfid we exit with are context synchronizing.
+	 * Also we only handle user segments here.
+	 */
 	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
 		     : "memory");