Diffstat (limited to 'arch/sparc64/kernel/process.c')
-rw-r--r--   arch/sparc64/kernel/process.c   133
1 file changed, 57 insertions(+), 76 deletions(-)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 059b0d025224..1c7ca2f712d9 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -44,83 +44,61 @@
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
+#include <asm/mmu_context.h>
#include <asm/unistd.h>
+#include <asm/hypervisor.h>
/* #define VERBOSE_SHOWREGS */
-/*
- * Nothing special yet...
- */
-void default_idle(void)
-{
-}
-
-#ifndef CONFIG_SMP
-
-/*
- * the idle loop on a Sparc... ;)
- */
-void cpu_idle(void)
+static void sparc64_yield(void)
{
- /* endless idle loop with no priority at all */
- for (;;) {
- /* If current->work.need_resched is zero we should really
- * set up for a system wakeup event and execute a shutdown
- * instruction.
- *
- * But this requires writing back the contents of the
- * L2 cache etc. so implement this later. -DaveM
- */
- while (!need_resched())
- barrier();
+ if (tlb_type != hypervisor)
+ return;
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- check_pgt_cache();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb__after_clear_bit();
+
+ while (!need_resched()) {
+ unsigned long pstate;
+
+ /* Disable interrupts. */
+ __asm__ __volatile__(
+ "rdpr %%pstate, %0\n\t"
+ "andn %0, %1, %0\n\t"
+ "wrpr %0, %%g0, %%pstate"
+ : "=&r" (pstate)
+ : "i" (PSTATE_IE));
+
+ if (!need_resched())
+ sun4v_cpu_yield();
+
+ /* Re-enable interrupts. */
+ __asm__ __volatile__(
+ "rdpr %%pstate, %0\n\t"
+ "or %0, %1, %0\n\t"
+ "wrpr %0, %%g0, %%pstate"
+ : "=&r" (pstate)
+ : "i" (PSTATE_IE));
}
-}
-#else
+ set_thread_flag(TIF_POLLING_NRFLAG);
+}
-/*
- * the idle loop on a UltraMultiPenguin...
- *
- * TIF_POLLING_NRFLAG is set because we do not sleep the cpu
- * inside of the idler task, so an interrupt is not needed
- * to get a clean fast response.
- *
- * XXX Reverify this assumption... -DaveM
- *
- * Addendum: We do want it to do something for the signal
- * delivery case, we detect that by just seeing
- * if we are trying to send this to an idler or not.
- */
+/* The idle loop on sparc64. */
void cpu_idle(void)
{
- cpuinfo_sparc *cpuinfo = &local_cpu_data();
set_thread_flag(TIF_POLLING_NRFLAG);
while(1) {
if (need_resched()) {
- cpuinfo->idle_volume = 0;
preempt_enable_no_resched();
schedule();
preempt_disable();
- check_pgt_cache();
}
- cpuinfo->idle_volume++;
-
- /* The store ordering is so that IRQ handlers on
- * other cpus see our increasing idleness for the buddy
- * redistribution algorithm. -DaveM
- */
- membar_storeload_storestore();
+ sparc64_yield();
}
}
-#endif
-
extern char reboot_command [];
extern void (*prom_palette)(int);
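The heart of this hunk is the new sparc64_yield(): on sun4v (tlb_type == hypervisor) the idle loop now sleeps in the hypervisor via sun4v_cpu_yield() instead of spinning. The ordering is the interesting part: TIF_POLLING_NRFLAG is cleared first, so a remote wakeup must raise an interrupt rather than rely on this CPU polling; a barrier publishes that; and need_resched() is re-tested with interrupts disabled immediately before yielding, closing the window in which a wakeup could be slept through. A rough, non-authoritative sketch of that same pattern follows, using the real local_irq_disable()/local_irq_enable() helpers but a hypothetical cpu_sleep() standing in for sun4v_cpu_yield() and the PSTATE_IE asm:

	static void idle_yield_sketch(void)
	{
		/* Stop advertising that we poll need_resched(); after this a
		 * cross-CPU wakeup has to interrupt us to get our attention.
		 */
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();	/* order the clear before the tests below */

		while (!need_resched()) {
			local_irq_disable();

			/* Re-test with interrupts masked: a reschedule request
			 * raised after the while() check is caught here instead
			 * of being slept through.
			 */
			if (!need_resched())
				cpu_sleep();	/* hypothetical stand-in for sun4v_cpu_yield() */

			local_irq_enable();	/* service any wakeup interrupt now pending */
		}

		set_thread_flag(TIF_POLLING_NRFLAG);	/* back to polling while runnable */
	}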
@@ -354,6 +332,7 @@ void show_regs(struct pt_regs *regs)
extern long etrap, etraptl1;
#endif
__show_regs(regs);
+#if 0
#ifdef CONFIG_SMP
{
extern void smp_report_regs(void);
@@ -361,6 +340,7 @@ void show_regs(struct pt_regs *regs)
smp_report_regs();
}
#endif
+#endif
#ifdef VERBOSE_SHOWREGS
if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
@@ -433,30 +413,15 @@ void exit_thread(void)
void flush_thread(void)
{
struct thread_info *t = current_thread_info();
+ struct mm_struct *mm;
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
- if (t->task->mm) {
- unsigned long pgd_cache = 0UL;
- if (test_thread_flag(TIF_32BIT)) {
- struct mm_struct *mm = t->task->mm;
- pgd_t *pgd0 = &mm->pgd[0];
- pud_t *pud0 = pud_offset(pgd0, 0);
+ mm = t->task->mm;
+ if (mm)
+ tsb_context_switch(mm);
- if (pud_none(*pud0)) {
- pmd_t *page = pmd_alloc_one(mm, 0);
- pud_set(pud0, page);
- }
- pgd_cache = get_pgd_cache(pgd0);
- }
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (pgd_cache),
- "r" (TSB_REG),
- "i" (ASI_DMMU));
- }
set_thread_wsaved(0);
/* Turn off performance counters if on. */
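Two things happen in the flush_thread() hunk above: the hand-rolled TSB register programming is replaced by a single tsb_context_switch(mm) call, and the surviving _TIF_ABI_PENDING handling keeps its small XOR trick. Because the XOR only runs when _TIF_ABI_PENDING is known to be set, it clears that flag and flips _TIF_32BIT in one step, switching the task's personality after an exec of a different-ABI binary. A tiny worked example with illustrative flag values (the real bit positions live in asm/thread_info.h):

	#include <stdio.h>

	#define DEMO_TIF_32BIT       (1u << 0)	/* illustrative bit positions only */
	#define DEMO_TIF_ABI_PENDING (1u << 1)

	int main(void)
	{
		/* A 32-bit task that just exec'd a 64-bit image: ABI_PENDING is set. */
		unsigned int flags = DEMO_TIF_ABI_PENDING | DEMO_TIF_32BIT;

		if (flags & DEMO_TIF_ABI_PENDING)
			flags ^= (DEMO_TIF_ABI_PENDING | DEMO_TIF_32BIT);

		/* ABI_PENDING is now clear and 32BIT has been toggled off. */
		printf("ABI_PENDING=%u 32BIT=%u\n",
		       !!(flags & DEMO_TIF_ABI_PENDING), !!(flags & DEMO_TIF_32BIT));
		return 0;
	}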
@@ -555,6 +520,18 @@ void synchronize_user_stack(void)
}
}
+static void stack_unaligned(unsigned long sp)
+{
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *) sp;
+ info.si_trapno = 0;
+ force_sig_info(SIGBUS, &info, current);
+}
+
void fault_in_user_windows(void)
{
struct thread_info *t = current_thread_info();
@@ -570,13 +547,17 @@ void fault_in_user_windows(void)
flush_user_windows();
window = get_thread_wsaved();
- if (window != 0) {
+ if (likely(window != 0)) {
window -= 1;
do {
unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
- if (copy_to_user((char __user *)sp, rwin, winsize))
+ if (unlikely(sp & 0x7UL))
+ stack_unaligned(sp);
+
+ if (unlikely(copy_to_user((char __user *)sp,
+ rwin, winsize)))
goto barf;
} while (window--);
}
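The last hunk gives fault_in_user_windows() a way to report a corrupt user stack instead of quietly failing the copy-out: each register window is spilled to the saved %sp (plus the stack bias for 64-bit tasks), and if that address is not 8-byte aligned the task now receives SIGBUS with si_code BUS_ADRALN. A small illustrative program, assuming the usual sparc64 stack bias of 2047 for 64-bit processes (zero for 32-bit tasks); window_spill_addr() and spill_is_aligned() are made-up names for the sketch:

	#include <stdbool.h>
	#include <stdio.h>

	#define SPARC64_STACK_BIAS 2047UL	/* 64-bit ABI keeps %sp biased by 2047 */

	/* Recover the real spill address from a saved %sp, as the kernel's
	 * `t->rwbuf_stkptrs[window] + bias` expression does.
	 */
	static unsigned long window_spill_addr(unsigned long saved_sp, bool is_64bit)
	{
		return saved_sp + (is_64bit ? SPARC64_STACK_BIAS : 0UL);
	}

	/* The new check: the reg_window copy-out demands 8-byte alignment. */
	static bool spill_is_aligned(unsigned long saved_sp, bool is_64bit)
	{
		return (window_spill_addr(saved_sp, is_64bit) & 0x7UL) == 0;
	}

	int main(void)
	{
		/* A sane biased 64-bit %sp: real address 0x7ff0001000 minus the bias. */
		unsigned long good_sp = 0x7ff0001000UL - SPARC64_STACK_BIAS;
		unsigned long bad_sp  = good_sp + 3;	/* misaligned: would now raise SIGBUS */

		printf("good aligned: %d, bad aligned: %d\n",
		       spill_is_aligned(good_sp, true),
		       spill_is_aligned(bad_sp, true));
		return 0;
	}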