 arch/ppc/kernel/process.c | 142 +++++++++++++++++++++++++++++++-----------
 1 file changed, 103 insertions(+), 39 deletions(-)
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 82de66e4db6d..cb1c7b92f8c6 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
}
#endif /* defined(CHECK_STACK) */
-#ifdef CONFIG_ALTIVEC
-int
-dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the floating-point register state in
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
{
- if (regs->msr & MSR_VEC)
- giveup_altivec(current);
- memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+ if (tsk->thread.regs) {
+ /*
+ * We need to disable preemption here because if we didn't,
+ * another process could get scheduled after the regs->msr
+ * test but before we have finished saving the FP registers
+ * to the thread_struct. That process could take over the
+ * FPU, and then when we get scheduled again we would store
+ * bogus values for the remaining FP registers.
+ */
+ preempt_disable();
+ if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+ /*
+ * This should only ever be called for current or
+ * for a stopped child process. Since we save away
+ * the FP register state on context switch on SMP,
+ * there is something wrong if a stopped child appears
+ * to still have its FP state in the CPU registers.
+ */
+ BUG_ON(tsk != current);
+#endif
+ giveup_fpu(current);
+ }
+ preempt_enable();
+ }
+}
+
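Callers are expected to flush before reading the saved FP state. A minimal
sketch of that pattern, with a hypothetical helper name (read_child_fpr is
not part of this patch):

	/* Hypothetical caller: copy a stopped child's FP state out of its
	 * thread_struct.  flush_fp_to_thread() brings the saved copy up to
	 * date first, so we never read half-saved registers.  buf must have
	 * room for all 32 FP registers. */
	static void read_child_fpr(struct task_struct *child, double *buf)
	{
		flush_fp_to_thread(child);
		memcpy(buf, &child->thread.fpr[0], sizeof(child->thread.fpr));
	}
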
+void enable_kernel_fp(void)
+{
+ WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+ if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+ giveup_fpu(current);
+ else
+ giveup_fpu(NULL); /* just enables FP for kernel */
+#else
+ giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
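Usage note: the WARN_ON above means enable_kernel_fp() must be entered with
preemption already disabled. A minimal sketch of the expected bracket
(do_fp_section is a hypothetical name):

	/* Hypothetical in-kernel FP user: preemption stays off for the
	 * whole FP section so no other task can grab the FPU midway. */
	static void do_fp_section(void)
	{
		preempt_disable();
		enable_kernel_fp();	/* claim the FPU for kernel use */
		/* ... FP instructions may run here ... */
		preempt_enable();
	}
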
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+ preempt_disable();
+ if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
+ giveup_fpu(tsk);
+ preempt_enable();
+ memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
return 1;
}
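dump_task_fpu() is the arch hook the ELF coredump code uses to fill the
FP-register note; on ppc it is typically wired up in asm/elf.h along these
lines (an assumption about the header, not something this patch touches):

	#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu((tsk), (elf_fpregs))
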
-void
-enable_kernel_altivec(void)
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
{
WARN_ON(preemptible());
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-int
-dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+/*
+ * Make sure the VMX/Altivec register state in
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
{
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
- /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
- memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+ if (tsk->thread.regs) {
+ preempt_disable();
+ if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+ BUG_ON(tsk != current);
+#endif
+ giveup_altivec(current);
+ }
+ preempt_enable();
+ }
+}
+
+int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
return 1;
}
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
void
enable_kernel_spe(void)
{
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
-#endif /* CONFIG_SPE */
-void
-enable_kernel_fp(void)
+void flush_spe_to_thread(struct task_struct *tsk)
{
- WARN_ON(preemptible());
-
+ if (tsk->thread.regs) {
+ preempt_disable();
+ if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
- giveup_fpu(current);
- else
- giveup_fpu(NULL); /* just enables FP for kernel */
-#else
- giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
+ BUG_ON(tsk != current);
+#endif
+ giveup_spe(current);
+ }
+ preempt_enable();
+ }
}
-EXPORT_SYMBOL(enable_kernel_fp);
-int
-dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
- preempt_disable();
- if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
- giveup_fpu(tsk);
- preempt_enable();
- memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+ /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 u32 words */
+ memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
return 1;
}
+#endif /* CONFIG_SPE */
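The 35-word memcpy in dump_spe() assumes evr[], acc and spefscr sit
contiguously in thread_struct, in that order. A compile-time check that
would document the assumption (illustrative only, not part of this patch):

	/* 32 x u32 evr + u64 acc (2 words) + u32 spefscr = 35 u32 words */
	BUILD_BUG_ON(offsetof(struct thread_struct, spefscr) + sizeof(u32) -
		     offsetof(struct thread_struct, evr) != sizeof(u32) * 35);
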
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
@@ -287,11 +347,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
+#ifdef CONFIG_ALTIVEC
/* Avoid the trap. On smp this never happens since
* we don't set last_task_used_altivec -- Cort
*/
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
+#endif
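For context on the trap being avoided: with MSR_VEC clear, the task's first
vector instruction raises an AltiVec-unavailable exception and the handler
hands the unit over. A rough sketch of that UP protocol (the real handler is
assembly; the helper names here are hypothetical):

	void altivec_unavailable_handler(struct pt_regs *regs)
	{
		if (last_task_used_altivec)
			save_altivec(last_task_used_altivec);	/* hypothetical */
		load_altivec(current);				/* hypothetical */
		last_task_used_altivec = current;
		regs->msr |= MSR_VEC;	/* no further traps until a switch */
	}
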
#ifdef CONFIG_SPE
/* Avoid the trap. On smp this never happens since
* we don't set last_task_used_spe
@@ -482,7 +544,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
last_task_used_spe = NULL;
#endif
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
- current->thread.fpscr = 0;
+ current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
memset(current->thread.vr, 0, sizeof(current->thread.vr));
memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
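The fpscr assignment above gains a .val accessor because the field is no
longer a bare integer. The assumed new shape (the real declaration lives in
the processor header, not this patch) pads it to a doubleword so it can be
moved through an FP register with lfd/stfd:

	struct {
		unsigned int pad;	/* high word of the 64-bit stfd image */
		unsigned int val;	/* the live FPSCR bits */
	} fpscr;
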
@@ -557,14 +619,16 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
-int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
+int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
-int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
+int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);