author		Anton Blanchard <anton@samba.org>	2015-10-29 11:43:59 +1100
committer	Michael Ellerman <mpe@ellerman.id.au>	2015-12-01 13:52:25 +1100
commit		611b0e5c19963374175b39f42117b03ee7573228 (patch)
tree		31a1ae1a8d869e574386650d5ef33e9a200d59be	/arch/powerpc/kernel/process.c
parent		powerpc: Simplify TM restore checks (diff)
powerpc: Create mtmsrd_isync()
mtmsrd_isync() will do an mtmsrd followed by an isync on older processors. On
newer processors we avoid the isync via a feature fixup.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
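For reference, a minimal sketch of the helper this commit introduces (the real
definition lives in arch/powerpc/include/asm/reg.h; the exact use of __MTMSR
and the feature-fixup macro below should be read as an assumption, not the
authoritative source):

/*
 * Sketch of mtmsr_isync(). __MTMSR expands to "mtmsrd" on 64-bit and
 * "mtmsr" on 32-bit. ASM_FTR_IFCLR() emits the first alternative when the
 * CPU feature bit is clear: so older CPUs execute the isync, while CPUs
 * with CPU_FTR_ARCH_206 get it patched to a nop at boot, since they do
 * not need an isync after mtmsrd.
 */
static inline void mtmsr_isync(unsigned long val)
{
	asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
			"r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
}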
Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r--	arch/powerpc/kernel/process.c	| 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ef64219548d5..5bf8ec2597d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -130,7 +130,10 @@ void enable_kernel_fp(void)
 		check_if_tm_restore_required(current);
 		giveup_fpu(current);
 	} else {
-		giveup_fpu(NULL);	/* just enables FP for kernel */
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_FP))
+			mtmsr_isync(oldmsr | MSR_FP);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_fp);
@@ -144,7 +147,10 @@ void enable_kernel_altivec(void)
 		check_if_tm_restore_required(current);
 		giveup_altivec(current);
 	} else {
-		giveup_altivec_notask();
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_VEC))
+			mtmsr_isync(oldmsr | MSR_VEC);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
@@ -173,10 +179,14 @@ void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
 		giveup_vsx(current);
-	else
-		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
+	} else {
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_VSX))
+			mtmsr_isync(oldmsr | MSR_VSX);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
@@ -209,10 +219,14 @@ void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
 		giveup_spe(current);
-	else
-		giveup_spe(NULL);	/* just enable SPE for kernel - force */
+	} else {
+		u64 oldmsr = mfmsr();
+
+		if (!(oldmsr & MSR_SPE))
+			mtmsr_isync(oldmsr | MSR_SPE);
+	}
 }
 EXPORT_SYMBOL(enable_kernel_spe);
 
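All four enable_kernel_*() paths now follow the same pattern: read the MSR
once, and only write it back (with the synchronising mtmsr_isync()) when the
facility bit is not already set, avoiding a redundant and expensive mtmsrd.
Per the WARN_ON(preemptible()) checks above, a caller must be non-preemptible;
a hypothetical usage sketch:

	/* Hypothetical caller sketch: pin to this CPU, enable the facility
	 * for kernel use, and give it up before preemption is re-enabled.
	 */
	preempt_disable();
	enable_kernel_fp();
	/* ... kernel code may use FP registers on this CPU here ... */
	preempt_enable();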