Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r--  arch/powerpc/kernel/process.c | 810
 1 file changed, 461 insertions(+), 349 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index fad50db9dcf2..67da147fe34d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -34,18 +34,15 @@
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
-#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
-#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
-#include <asm/pgtable.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
@@ -96,7 +93,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
if (tsk == current && tsk->thread.regs &&
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
!test_thread_flag(TIF_RESTORE_TM)) {
- tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
+ regs_set_return_msr(&tsk->thread.ckpt_regs,
+ tsk->thread.regs->msr);
set_thread_flag(TIF_RESTORE_TM);
}
}
@@ -125,13 +123,11 @@ unsigned long notrace msr_check_and_set(unsigned long bits)
newmsr = oldmsr | bits;
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
newmsr |= MSR_VSX;
-#endif
if (oldmsr != newmsr)
- mtmsr_isync(newmsr);
+ newmsr = mtmsr_isync_irqsafe(newmsr);
return newmsr;
}
@@ -145,13 +141,11 @@ void notrace __msr_check_and_clear(unsigned long bits)
newmsr = oldmsr & ~bits;
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
newmsr &= ~MSR_VSX;
-#endif
if (oldmsr != newmsr)
- mtmsr_isync(newmsr);
+ mtmsr_isync_irqsafe(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
@@ -163,11 +157,9 @@ static void __giveup_fpu(struct task_struct *tsk)
save_fpu(tsk);
msr = tsk->thread.regs->msr;
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
-#endif
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_fpu(struct task_struct *tsk)
@@ -236,23 +228,11 @@ void enable_kernel_fp(void)
}
}
EXPORT_SYMBOL(enable_kernel_fp);
-
-static int restore_fp(struct task_struct *tsk)
-{
- if (tsk->thread.load_fp) {
- load_fp_state(&current->thread.fp_state);
- current->thread.load_fp++;
- return 1;
- }
- return 0;
-}
#else
-static int restore_fp(struct task_struct *tsk) { return 0; }
+static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
-#define loadvec(thr) ((thr).load_vec)
-
static void __giveup_altivec(struct task_struct *tsk)
{
unsigned long msr;
@@ -260,11 +240,9 @@ static void __giveup_altivec(struct task_struct *tsk)
save_altivec(tsk);
msr = tsk->thread.regs->msr;
msr &= ~MSR_VEC;
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
-#endif
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_altivec(struct task_struct *tsk)
@@ -318,21 +296,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
-
-static int restore_altivec(struct task_struct *tsk)
-{
- if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
- load_vr_state(&tsk->thread.vr_state);
- tsk->thread.used_vr = 1;
- tsk->thread.load_vec++;
-
- return 1;
- }
- return 0;
-}
-#else
-#define loadvec(thr) 0
-static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -341,7 +304,7 @@ static void __giveup_vsx(struct task_struct *tsk)
unsigned long msr = tsk->thread.regs->msr;
/*
- * We should never be ssetting MSR_VSX without also setting
+ * We should never be setting MSR_VSX without also setting
* MSR_FP and MSR_VEC
*/
WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
@@ -400,18 +363,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
-
-static int restore_vsx(struct task_struct *tsk)
-{
- if (cpu_has_feature(CPU_FTR_VSX)) {
- tsk->thread.used_vsr = 1;
- return 1;
- }
-
- return 0;
-}
-#else
-static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -456,21 +407,14 @@ static unsigned long msr_all_available;
static int __init init_msr_all_available(void)
{
-#ifdef CONFIG_PPC_FPU
- msr_all_available |= MSR_FP;
-#endif
-#ifdef CONFIG_ALTIVEC
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr_all_available |= MSR_FP;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
msr_all_available |= MSR_VEC;
-#endif
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
msr_all_available |= MSR_VSX;
-#endif
-#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE))
msr_all_available |= MSR_SPE;
-#endif
return 0;
}
@@ -494,23 +438,72 @@ void giveup_all(struct task_struct *tsk)
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
-#ifdef CONFIG_PPC_FPU
if (usermsr & MSR_FP)
__giveup_fpu(tsk);
-#endif
-#ifdef CONFIG_ALTIVEC
if (usermsr & MSR_VEC)
__giveup_altivec(tsk);
-#endif
-#ifdef CONFIG_SPE
if (usermsr & MSR_SPE)
__giveup_spe(tsk);
-#endif
msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_FPU
+static bool should_restore_fp(void)
+{
+ if (current->thread.load_fp) {
+ current->thread.load_fp++;
+ return true;
+ }
+ return false;
+}
+
+static void do_restore_fp(void)
+{
+ load_fp_state(&current->thread.fp_state);
+}
+#else
+static bool should_restore_fp(void) { return false; }
+static void do_restore_fp(void) { }
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+static bool should_restore_altivec(void)
+{
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
+ current->thread.load_vec++;
+ return true;
+ }
+ return false;
+}
+
+static void do_restore_altivec(void)
+{
+ load_vr_state(&current->thread.vr_state);
+ current->thread.used_vr = 1;
+}
+#else
+static bool should_restore_altivec(void) { return false; }
+static void do_restore_altivec(void) { }
+#endif /* CONFIG_ALTIVEC */
+
+static bool should_restore_vsx(void)
+{
+ if (cpu_has_feature(CPU_FTR_VSX))
+ return true;
+ return false;
+}
+#ifdef CONFIG_VSX
+static void do_restore_vsx(void)
+{
+ current->thread.used_vsr = 1;
+}
+#else
+static void do_restore_vsx(void) { }
+#endif /* CONFIG_VSX */
+
/*
* The exception exit path calls restore_math() with interrupts hard disabled
* but the soft irq state not "reconciled". ftrace code that calls
@@ -524,33 +517,50 @@ EXPORT_SYMBOL(giveup_all);
void notrace restore_math(struct pt_regs *regs)
{
unsigned long msr;
-
- if (!MSR_TM_ACTIVE(regs->msr) &&
- !current->thread.load_fp && !loadvec(current->thread))
- return;
+ unsigned long new_msr = 0;
msr = regs->msr;
- msr_check_and_set(msr_all_available);
/*
- * Only reload if the bit is not set in the user MSR, the bit BEING set
- * indicates that the registers are hot
+ * new_msr tracks the facilities that are to be restored. Only reload
+ * if the bit is not set in the user MSR (if it is set, the registers
+ * are live for the user thread).
*/
- if ((!(msr & MSR_FP)) && restore_fp(current))
- msr |= MSR_FP | current->thread.fpexc_mode;
+ if ((!(msr & MSR_FP)) && should_restore_fp())
+ new_msr |= MSR_FP;
- if ((!(msr & MSR_VEC)) && restore_altivec(current))
- msr |= MSR_VEC;
+ if ((!(msr & MSR_VEC)) && should_restore_altivec())
+ new_msr |= MSR_VEC;
- if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
- restore_vsx(current)) {
- msr |= MSR_VSX;
+ if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
+ if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
+ new_msr |= MSR_VSX;
}
- msr_check_and_clear(msr_all_available);
+ if (new_msr) {
+ unsigned long fpexc_mode = 0;
+
+ msr_check_and_set(new_msr);
+
+ if (new_msr & MSR_FP) {
+ do_restore_fp();
+
+ // This also covers VSX, because VSX implies FP
+ fpexc_mode = current->thread.fpexc_mode;
+ }
+
+ if (new_msr & MSR_VEC)
+ do_restore_altivec();
- regs->msr = msr;
+ if (new_msr & MSR_VSX)
+ do_restore_vsx();
+
+ msr_check_and_clear(new_msr);
+
+ regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
+ }
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
static void save_all(struct task_struct *tsk)
{
@@ -578,7 +588,6 @@ static void save_all(struct task_struct *tsk)
__giveup_spe(tsk);
msr_check_and_clear(msr_all_available);
- thread_pkey_regs_save(&tsk->thread);
}
void flush_all_to_thread(struct task_struct *tsk)
@@ -611,26 +620,70 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
(void __user *)address);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
-void do_break (struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+
+static void do_break_handler(struct pt_regs *regs)
+{
+ struct arch_hw_breakpoint null_brk = {0};
+ struct arch_hw_breakpoint *info;
+ ppc_inst_t instr = ppc_inst(0);
+ int type = 0;
+ int size = 0;
+ unsigned long ea;
+ int i;
+
+ /*
+ * If the underlying hardware supports only one watchpoint, we know it
+ * caused the exception. 8xx also falls into this category.
+ */
+ if (nr_wp_slots() == 1) {
+ __set_breakpoint(0, &null_brk);
+ current->thread.hw_brk[0] = null_brk;
+ current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
+ return;
+ }
+
+ /* Otherwise find out which DAWR caused exception and disable it. */
+ wp_get_instr_detail(regs, &instr, &type, &size, &ea);
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ info = &current->thread.hw_brk[i];
+ if (!info->address)
+ continue;
+
+ if (wp_check_constraints(regs, instr, ea, type, size, info)) {
+ __set_breakpoint(i, &null_brk);
+ current->thread.hw_brk[i] = null_brk;
+ current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
+ }
+ }
+}
+
+DEFINE_INTERRUPT_HANDLER(do_break)
{
current->thread.trap_nr = TRAP_HWBKPT;
- if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+ if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
11, SIGSEGV) == NOTIFY_STOP)
return;
if (debugger_break_match(regs))
return;
- /* Clear the breakpoint */
- hw_breakpoint_disable();
+ /*
+ * We reach here only when watchpoint exception is generated by ptrace
+ * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set,
+ * watchpoint is already handled by hw_breakpoint_handler() so we don't
+ * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set,
+ * we need to manually handle the watchpoint here.
+ */
+ if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
+ do_break_handler(regs);
/* Deliver the signal to userspace */
- force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
+ force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
+static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
@@ -704,48 +757,52 @@ void switch_booke_debug_regs(struct debug_reg *new_debug)
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
-static void set_breakpoint(struct arch_hw_breakpoint *brk)
+static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
preempt_disable();
- __set_breakpoint(brk);
+ __set_breakpoint(i, brk);
preempt_enable();
}
static void set_debug_reg_defaults(struct thread_struct *thread)
{
- thread->hw_brk.address = 0;
- thread->hw_brk.type = 0;
- thread->hw_brk.len = 0;
- thread->hw_brk.hw_len = 0;
- if (ppc_breakpoint_available())
- set_breakpoint(&thread->hw_brk);
-}
-#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
+ int i;
+ struct arch_hw_breakpoint null_brk = {0};
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
-{
- mtspr(SPRN_DAC1, dabr);
-#ifdef CONFIG_PPC_47x
- isync();
-#endif
- return 0;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ thread->hw_brk[i] = null_brk;
+ if (ppc_breakpoint_available())
+ set_breakpoint(i, &thread->hw_brk[i]);
+ }
}
-#elif defined(CONFIG_PPC_BOOK3S)
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
+
+static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
+ struct arch_hw_breakpoint *b)
{
- mtspr(SPRN_DABR, dabr);
- if (cpu_has_feature(CPU_FTR_DABRX))
- mtspr(SPRN_DABRX, dabrx);
- return 0;
+ if (a->address != b->address)
+ return false;
+ if (a->type != b->type)
+ return false;
+ if (a->len != b->len)
+ return false;
+ /* no need to check hw_len. it's calculated from address and len */
+ return true;
}
-#else
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
+
+static void switch_hw_breakpoint(struct task_struct *new)
{
- return -EINVAL;
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
+ &new->thread.hw_brk[i])))
+ continue;
+
+ __set_breakpoint(i, &new->thread.hw_brk[i]);
+ }
}
-#endif
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
@@ -757,7 +814,19 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
if (ppc_md.set_dabr)
return ppc_md.set_dabr(dabr, dabrx);
- return __set_dabr(dabr, dabrx);
+ if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
+ mtspr(SPRN_DAC1, dabr);
+ if (IS_ENABLED(CONFIG_PPC_47x))
+ isync();
+ return 0;
+ } else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
+ mtspr(SPRN_DABR, dabr);
+ if (cpu_has_feature(CPU_FTR_DABRX))
+ mtspr(SPRN_DABRX, dabrx);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
@@ -765,12 +834,12 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
LCTRL1_CRWF_RW;
unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
- unsigned long start_addr = brk->address & ~HW_BREAKPOINT_ALIGN;
- unsigned long end_addr = (brk->address + brk->len - 1) | HW_BREAKPOINT_ALIGN;
+ unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
+ unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
if (start_addr == 0)
lctrl2 |= LCTRL2_LW0LA_F;
- else if (end_addr == ~0U)
+ else if (end_addr == 0)
lctrl2 |= LCTRL2_LW0LA_E;
else
lctrl2 |= LCTRL2_LW0LA_EandF;
@@ -786,20 +855,20 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
mtspr(SPRN_CMPE, start_addr - 1);
- mtspr(SPRN_CMPF, end_addr + 1);
+ mtspr(SPRN_CMPF, end_addr);
mtspr(SPRN_LCTRL1, lctrl1);
mtspr(SPRN_LCTRL2, lctrl2);
return 0;
}
-void __set_breakpoint(struct arch_hw_breakpoint *brk)
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
- memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
+ memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
if (dawr_enabled())
// Power8 or later
- set_dawr(brk);
+ set_dawr(nr, brk);
else if (IS_ENABLED(CONFIG_PPC_8xx))
set_breakpoint_8xx(brk);
else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
@@ -822,19 +891,6 @@ bool ppc_breakpoint_available(void)
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
-static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
- struct arch_hw_breakpoint *b)
-{
- if (a->address != b->address)
- return false;
- if (a->type != b->type)
- return false;
- if (a->len != b->len)
- return false;
- /* no need to check hw_len. it's calculated from address and len */
- return true;
-}
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_enabled(struct task_struct *tsk)
@@ -1056,12 +1112,13 @@ void restore_tm_state(struct pt_regs *regs)
#endif
restore_math(regs);
- regs->msr |= msr_diff;
+ regs_set_return_msr(regs, regs->msr | msr_diff);
}
-#else
+#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
+void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
@@ -1070,6 +1127,10 @@ static inline void save_sprs(struct thread_struct *t)
if (cpu_has_feature(CPU_FTR_ALTIVEC))
t->vrsave = mfspr(SPRN_VRSAVE);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE))
+ t->spefscr = mfspr(SPRN_SPEFSCR);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR))
t->dscr = mfspr(SPRN_DSCR);
@@ -1090,9 +1151,41 @@ static inline void save_sprs(struct thread_struct *t)
t->tar = mfspr(SPRN_TAR);
}
#endif
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvmppc_save_user_regs(void)
+{
+ unsigned long usermsr;
+
+ if (!current->thread.regs)
+ return;
+
+ usermsr = current->thread.regs->msr;
+
+ if (usermsr & MSR_FP)
+ save_fpu(current);
+
+ if (usermsr & MSR_VEC)
+ save_altivec(current);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (usermsr & MSR_TM) {
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
- thread_pkey_regs_save(t);
+void kvmppc_save_current_sprs(void)
+{
+ save_sprs(&current->thread);
}
+EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
static inline void restore_sprs(struct thread_struct *old_thread,
struct thread_struct *new_thread)
@@ -1102,6 +1195,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
old_thread->vrsave != new_thread->vrsave)
mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE) &&
+ old_thread->spefscr != new_thread->spefscr)
+ mtspr(SPRN_SPEFSCR, new_thread->spefscr);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
@@ -1132,7 +1230,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
mtspr(SPRN_TIDR, new_thread->tidr);
#endif
- thread_pkey_regs_restore(new_thread, old_thread);
}
struct task_struct *__switch_to(struct task_struct *prev,
@@ -1140,7 +1237,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
{
struct thread_struct *new_thread, *old_thread;
struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct ppc64_tlb_batch *batch;
#endif
@@ -1149,7 +1246,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
WARN_ON(!irqs_disabled());
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
@@ -1157,6 +1254,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
__flush_tlb_pending(batch);
batch->active = 0;
}
+
+ /*
+ * On POWER9 the copy-paste buffer can only paste into
+ * foreign real addresses, so unprivileged processes can not
+ * see the data or use it in any way unless they have
+ * foreign real mappings. If the new process has the foreign
+ * real address mappings, we must issue a cp_abort to clear
+ * any state and prevent snooping, corruption or a covert
+ * channel. ISA v3.1 supports paste into local memory.
+ */
+ if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
+ atomic_read(&new->mm->context.vas_windows)))
+ asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1167,8 +1277,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
- __set_breakpoint(&new->thread.hw_brk);
+ switch_hw_breakpoint(new);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -1193,37 +1302,50 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
/*
- * Call restore_sprs() before calling _switch(). If we move it after
- * _switch() then we miss out on calling it for new tasks. The reason
- * for this is we manually create a stack frame for new tasks that
- * directly returns through ret_from_fork() or
+ * Call restore_sprs() and set_return_regs_changed() before calling
+ * _switch(). If we move it after _switch() then we miss out on calling
+ * it for new tasks. The reason for this is we manually create a stack
+ * frame for new tasks that directly returns through ret_from_fork() or
* ret_from_kernel_thread(). See copy_thread() for details.
*/
restore_sprs(old_thread, new_thread);
+ set_return_regs_changed(); /* _switch changes stack (and regs) */
+
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ kuap_assert_locked();
+
last = _switch(old_thread, new_thread);
+ /*
+ * Nothing after _switch will be run for newly created tasks,
+ * because they switch directly to ret_from_fork/ret_from_kernel_thread
+ * etc. Code added here should have a comment explaining why that is
+ * okay.
+ */
+
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ /*
+ * This applies to a process that was context switched while inside
+ * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
+ * deactivated above, before _switch(). This will never be the case
+ * for new tasks.
+ */
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
+#endif
- if (current->thread.regs) {
+ /*
+ * Math facilities are masked out of the child MSR in copy_thread.
+ * A new task does not need to restore_math because it will
+ * demand fault them.
+ */
+ if (current->thread.regs)
restore_math(current->thread.regs);
-
- /*
- * The copy-paste buffer can only store into foreign real
- * addresses, so unprivileged processes can not see the
- * data or use it in any way unless they have foreign real
- * mappings. If the new process has the foreign real address
- * mappings, we must issue a cp_abort to clear any state and
- * prevent snooping, corruption or a covert channel.
- */
- if (current->thread.used_vas)
- asm volatile(PPC_CP_ABORT);
- }
#endif /* CONFIG_PPC_BOOK3S_64 */
return last;
@@ -1234,29 +1356,31 @@ struct task_struct *__switch_to(struct task_struct *prev,
static void show_instructions(struct pt_regs *regs)
{
int i;
+ unsigned long nip = regs->nip;
unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
printk("Instruction dump:");
+ /*
+ * If we were executing with the MMU off for instructions, adjust pc
+ * rather than printing XXXXXXXX.
+ */
+ if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
+ pc = (unsigned long)phys_to_virt(pc);
+ nip = (unsigned long)phys_to_virt(regs->nip);
+ }
+
for (i = 0; i < NR_INSN_TO_PRINT; i++) {
int instr;
if (!(i % 8))
pr_cont("\n");
-#if !defined(CONFIG_BOOKE)
- /* If executing with the IMMU off, adjust pc rather
- * than print XXXXXXXX.
- */
- if (!(regs->msr & MSR_IR))
- pc = (unsigned long)phys_to_virt(pc);
-#endif
-
if (!__kernel_text_address(pc) ||
- probe_kernel_address((const void *)pc, instr)) {
+ get_kernel_nofault(instr, (const void *)pc)) {
pr_cont("XXXXXXXX ");
} else {
- if (regs->nip == pc)
+ if (nip == pc)
pr_cont("<%08x> ", instr);
else
pr_cont("%08x ", instr);
@@ -1287,7 +1411,8 @@ void show_user_instructions(struct pt_regs *regs)
for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
int instr;
- if (probe_user_read(&instr, (void __user *)pc, sizeof(instr))) {
+ if (copy_from_user_nofault(&instr, (void __user *)pc,
+ sizeof(instr))) {
seq_buf_printf(&s, "XXXXXXXX ");
continue;
}
@@ -1384,19 +1509,15 @@ static void print_msr_bits(unsigned long val)
#ifdef CONFIG_PPC64
#define REG "%016lx"
#define REGS_PER_LINE 4
-#define LAST_VOLATILE 13
#else
#define REG "%08lx"
#define REGS_PER_LINE 8
-#define LAST_VOLATILE 12
#endif
-void show_regs(struct pt_regs * regs)
+static void __show_regs(struct pt_regs *regs)
{
int i, trap;
- show_regs_print_info(KERN_DEFAULT);
-
printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
regs->nip, regs->link, regs->ctr);
printk("REGS: %px TRAP: %04lx %s (%s)\n",
@@ -1405,14 +1526,17 @@ void show_regs(struct pt_regs * regs)
print_msr_bits(regs->msr);
pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
- if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
+ if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
pr_cont("CFAR: "REG" ", regs->orig_gpr3);
- if (trap == 0x200 || trap == 0x300 || trap == 0x600)
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
-#else
- pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
-#endif
+ if (trap == INTERRUPT_MACHINE_CHECK ||
+ trap == INTERRUPT_DATA_STORAGE ||
+ trap == INTERRUPT_ALIGNMENT) {
+ if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
+ pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
+ else
+ pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+ }
+
#ifdef CONFIG_PPC64
pr_cont("IRQMASK: %lx ", regs->softe);
#endif
@@ -1425,19 +1549,23 @@ void show_regs(struct pt_regs * regs)
if ((i % REGS_PER_LINE) == 0)
pr_cont("\nGPR%02d: ", i);
pr_cont(REG " ", regs->gpr[i]);
- if (i == LAST_VOLATILE && !FULL_REGS(regs))
- break;
}
pr_cont("\n");
-#ifdef CONFIG_KALLSYMS
/*
* Lookup NIP late so we have the best chance of getting the
* above info out without failing
*/
- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
-#endif
- show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (IS_ENABLED(CONFIG_KALLSYMS)) {
+ printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+ printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
+ }
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ show_regs_print_info(KERN_DEFAULT);
+ __show_regs(regs);
+ show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
if (!user_mode(regs))
show_instructions(regs);
}
@@ -1451,34 +1579,26 @@ void flush_thread(void)
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
-#ifdef CONFIG_PPC_BOOK3S_64
void arch_setup_new_exec(void)
{
- if (radix_enabled())
- return;
- hash__setup_new_exec();
-}
-#endif
-int set_thread_uses_vas(void)
-{
#ifdef CONFIG_PPC_BOOK3S_64
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -EINVAL;
-
- current->thread.used_vas = 1;
-
+ if (!radix_enabled())
+ hash__setup_new_exec();
+#endif
/*
- * Even a process that has no foreign real address mapping can use
- * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
- * to clear any pending COPY and prevent a covert channel.
- *
- * __switch_to() will issue CP_ABORT on future context switches.
+ * If we exec out of a kernel thread then thread.regs will not be
+ * set. Do it now.
*/
- asm volatile(PPC_CP_ABORT);
+ if (!current->thread.regs) {
+ struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
+ current->thread.regs = regs - 1;
+ }
-#endif /* CONFIG_PPC_BOOK3S_64 */
- return 0;
+#ifdef CONFIG_PPC_MEM_KEYS
+ current->thread.regs->amr = default_amr;
+ current->thread.regs->iamr = default_iamr;
+#endif
}
#ifdef CONFIG_PPC64
@@ -1535,11 +1655,6 @@ EXPORT_SYMBOL_GPL(set_thread_tidr);
#endif /* CONFIG_PPC64 */
-void
-release_thread(struct task_struct *t)
-{
-}
-
/*
* this gets called so that we can store coprocessor state into memory and
* copy the current task into the new thread.
@@ -1568,7 +1683,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -1593,56 +1708,63 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
/*
* Copy architecture-specific thread state
*/
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
+ extern void ret_from_fork_scv(void);
extern void ret_from_kernel_thread(void);
void (*f)(void);
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
struct thread_info *ti = task_thread_info(p);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int i;
+#endif
klp_init_thread_info(p);
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(args->fn)) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
/* function */
- if (usp)
- childregs->gpr[14] = ppc_function_entry((void *)usp);
+ if (args->fn)
+ childregs->gpr[14] = ppc_function_entry((void *)args->fn);
#ifdef CONFIG_PPC64
clear_tsk_thread_flag(p, TIF_32BIT);
childregs->softe = IRQS_ENABLED;
#endif
- childregs->gpr[15] = kthread_arg;
+ childregs->gpr[15] = (unsigned long)args->fn_arg;
p->thread.regs = NULL; /* no user register state */
ti->flags |= _TIF_RESTOREALL;
f = ret_from_kernel_thread;
} else {
/* user thread */
struct pt_regs *regs = current_pt_regs();
- CHECK_FULL_REGS(regs);
*childregs = *regs;
if (usp)
childregs->gpr[1] = usp;
p->thread.regs = childregs;
- childregs->gpr[3] = 0; /* Result from fork() */
+ /* 64s sets this in ret_from_fork */
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ childregs->gpr[3] = 0; /* Result from fork() */
if (clone_flags & CLONE_SETTLS) {
-#ifdef CONFIG_PPC64
if (!is_32bit_task())
childregs->gpr[13] = tls;
else
-#endif
childregs->gpr[2] = tls;
}
- f = ret_from_fork;
+ if (trap_is_scv(regs))
+ f = ret_from_fork_scv;
+ else
+ f = ret_from_fork;
}
childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
sp -= STACK_FRAME_OVERHEAD;
@@ -1660,17 +1782,23 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
-#ifdef CONFIG_PPC32
- p->thread.ksp_limit = (unsigned long)end_of_stack(p);
-#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- p->thread.ptrace_bps[0] = NULL;
+ for (i = 0; i < nr_wp_slots(); i++)
+ p->thread.ptrace_bps[i] = NULL;
#endif
+#ifdef CONFIG_PPC_FPU_REGS
p->thread.fp_save_area = NULL;
+#endif
#ifdef CONFIG_ALTIVEC
p->thread.vr_save_area = NULL;
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ p->thread.kuap = KUAP_NONE;
+#endif
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ p->thread.pid = MMU_NO_CONTEXT;
+#endif
setup_ksp_vsid(p, sp);
@@ -1684,6 +1812,16 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
p->thread.tidr = 0;
#endif
+ /*
+ * Run with the current AMR value of the kernel
+ */
+#ifdef CONFIG_PPC_PKEY
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ kregs->amr = AMR_KUAP_BLOCKED;
+
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
+ kregs->iamr = AMR_KUEP_BLOCKED;
+#endif
kregs->nip = ppc_function_entry(f);
return 0;
}
@@ -1698,20 +1836,9 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#ifdef CONFIG_PPC64
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
-#ifdef CONFIG_PPC_BOOK3S_64
- if (!radix_enabled())
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
preload_new_slb_context(start, sp);
#endif
-#endif
-
- /*
- * If we exec out of a kernel thread then thread.regs will not be
- * set. Do it now.
- */
- if (!current->thread.regs) {
- struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
- current->thread.regs = regs - 1;
- }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
@@ -1723,20 +1850,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
tm_reclaim_current(0);
#endif
- memset(regs->gpr, 0, sizeof(regs->gpr));
+ memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
regs->ctr = 0;
regs->link = 0;
regs->xer = 0;
regs->ccr = 0;
regs->gpr[1] = sp;
- /*
- * We have just cleared all the nonvolatile GPRs, so make
- * FULL_REGS(regs) return true. This is necessary to allow
- * ptrace to examine the thread immediately after exec.
- */
- regs->trap &= ~1UL;
-
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
@@ -1781,21 +1901,24 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
}
regs->gpr[2] = toc;
}
- regs->nip = entry;
- regs->msr = MSR_USER64;
+ regs_set_return_ip(regs, entry);
+ regs_set_return_msr(regs, MSR_USER64);
} else {
- regs->nip = start;
regs->gpr[2] = 0;
- regs->msr = MSR_USER32;
+ regs_set_return_ip(regs, start);
+ regs_set_return_msr(regs, MSR_USER32);
}
+
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
current->thread.load_slb = 0;
current->thread.load_fp = 0;
+#ifdef CONFIG_PPC_FPU_REGS
memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
+#endif
#ifdef CONFIG_ALTIVEC
memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
@@ -1816,8 +1939,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfiar = 0;
current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
- thread_pkey_regs_init(&current->thread);
}
EXPORT_SYMBOL(start_thread);
@@ -1833,7 +1954,6 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
* fpexc_mode. fpexc_mode is also used for setting FP exception
* mode (async, precise, disabled) for 'Classic' FP. */
if (val & PR_FP_EXC_SW_ENABLE) {
-#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE)) {
/*
* When the sticky exception bits are set
@@ -1847,16 +1967,15 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
* anyway to restore the prctl settings from
* the saved environment.
*/
+#ifdef CONFIG_SPE
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
tsk->thread.fpexc_mode = val &
(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#endif
return 0;
} else {
return -EINVAL;
}
-#else
- return -EINVAL;
-#endif
}
/* on a CONFIG_SPE this does not hurt us. The bits that
@@ -1867,18 +1986,18 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
- if (regs != NULL && (regs->msr & MSR_FP) != 0)
- regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
- | tsk->thread.fpexc_mode;
+ if (regs != NULL && (regs->msr & MSR_FP) != 0) {
+ regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
+ | tsk->thread.fpexc_mode);
+ }
return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
- unsigned int val;
+ unsigned int val = 0;
- if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
-#ifdef CONFIG_SPE
+ if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
if (cpu_has_feature(CPU_FTR_SPE)) {
/*
* When the sticky exception bits are set
@@ -1892,15 +2011,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
* anyway to restore the prctl settings from
* the saved environment.
*/
+#ifdef CONFIG_SPE
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
val = tsk->thread.fpexc_mode;
+#endif
} else
return -EINVAL;
-#else
- return -EINVAL;
-#endif
- else
+ } else {
val = __unpack_fe01(tsk->thread.fpexc_mode);
+ }
return put_user(val, (unsigned int __user *) adr);
}
@@ -1916,9 +2035,9 @@ int set_endian(struct task_struct *tsk, unsigned int val)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
- regs->msr &= ~MSR_LE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_LE);
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
- regs->msr |= MSR_LE;
+ regs_set_return_msr(regs, regs->msr | MSR_LE);
else
return -EINVAL;
@@ -1976,6 +2095,35 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
return 0;
}
+static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes)
+{
+#ifdef CONFIG_PPC64
+ unsigned long stack_page;
+ unsigned long cpu = task_cpu(p);
+
+ if (!paca_ptrs)
+ return 0;
+
+ stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+# ifdef CONFIG_PPC_BOOK3S_64
+ stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+ stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+# endif
+#endif
+
+ return 0;
+}
+
+
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
@@ -1987,30 +2135,30 @@ int validate_sp(unsigned long sp, struct task_struct *p,
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
- return valid_irq_stack(sp, p, nbytes);
+ if (valid_irq_stack(sp, p, nbytes))
+ return 1;
+
+ return valid_emergency_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
-static unsigned long __get_wchan(struct task_struct *p)
+static unsigned long ___get_wchan(struct task_struct *p)
{
unsigned long ip, sp;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
sp = p->thread.ksp;
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
return 0;
do {
- sp = *(unsigned long *)sp;
+ sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
- p->state == TASK_RUNNING)
+ task_is_running(p))
return 0;
if (count > 0) {
- ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
+ ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
if (!in_sched_functions(ip))
return ip;
}
@@ -2018,14 +2166,14 @@ static unsigned long __get_wchan(struct task_struct *p)
return 0;
}
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
{
unsigned long ret;
if (!try_get_task_stack(p))
return 0;
- ret = __get_wchan(p);
+ ret = ___get_wchan(p);
put_task_stack(p);
@@ -2034,15 +2182,15 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+ unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
int firstframe = 1;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned long ret_addr;
int ftrace_idx = 0;
-#endif
if (tsk == NULL)
tsk = current;
@@ -2053,13 +2201,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
sp = (unsigned long) stack;
if (sp == 0) {
if (tsk == current)
- sp = current_stack_pointer();
+ sp = current_stack_frame();
else
sp = tsk->thread.ksp;
}
lr = 0;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
break;
@@ -2068,13 +2216,12 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ printk("%s["REG"] ["REG"] %pS",
+ loglvl, sp, ip, (void *)ip);
ret_addr = ftrace_graph_ret_addr(current,
&ftrace_idx, ip, stack);
if (ret_addr != ip)
pr_cont(" (%pS)", (void *)ret_addr);
-#endif
if (firstframe)
pr_cont(" (unreliable)");
pr_cont("\n");
@@ -2085,13 +2232,18 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
* See if this is an exception frame.
* We look for the "regshere" marker in the current frame.
*/
- if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
+ if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
&& stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
+
lr = regs->link;
- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
- regs->trap, (void *)regs->nip, (void *)lr);
+ printk("%s--- interrupt: %lx at %pS\n",
+ loglvl, regs->trap, (void *)regs->nip);
+ __show_regs(regs);
+ printk("%s--- interrupt: %lx\n",
+ loglvl, regs->trap);
+
firstframe = 1;
}
@@ -2151,46 +2303,6 @@ void notrace __ppc64_runlatch_off(void)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & ~0xf;
}
-
-static inline unsigned long brk_rnd(void)
-{
- unsigned long rnd = 0;
-
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
- else
- rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
-
- return rnd << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- unsigned long base = mm->brk;
- unsigned long ret;
-
-#ifdef CONFIG_PPC_BOOK3S_64
- /*
- * If we are using 1TB segments and we are allowed to randomise
- * the heap, we can put it above 1TB so it is backed by a 1TB
- * segment. Otherwise the heap will be in the bottom 1TB
- * which always uses 256MB segments and this may result in a
- * performance penalty. We don't need to worry about radix. For
- * radix, mmu_highuser_ssize remains unchanged from 256MB.
- */
- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
-#endif
-
- ret = PAGE_ALIGN(base + brk_rnd());
-
- if (ret < mm->brk)
- return mm->brk;
-
- return ret;
-}
-