author     Anton Blanchard <anton@samba.org>        2015-10-29 11:44:11 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>    2015-12-02 19:34:41 +1100
commit     d1e1cf2e38def301fde42c1a33f896f974941d7b (patch)
tree       637ae6411bdf02dd8e7e29a57bf416beb52deca4 /arch/powerpc/include
parent     powerpc: Rearrange __switch_to() (diff)
powerpc: clean up asm/switch_to.h
Remove a bunch of unnecessary fallback functions and group things in a
more logical way.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
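As a rough, self-contained sketch of the pattern this commit applies (not kernel code; HAS_WIDGET and the widget_* helpers are invented names standing in for a Kconfig option such as CONFIG_ALTIVEC and its helpers): feature-specific functions are declared only under their configuration option, and empty static-inline fallbacks that are no longer needed are dropped, because the remaining callers are guarded by the same option and a stub would only ever be dead code.

/* Hypothetical stand-alone illustration of the cleanup pattern.
 * HAS_WIDGET plays the role of a Kconfig option; the widget_*
 * helpers are made up for this example only.
 */
#include <stdio.h>

#define HAS_WIDGET 1

#ifdef HAS_WIDGET
/* Helpers exist only when the feature is configured in. */
static void enable_widget(void) { puts("widget enabled"); }
static void giveup_widget(void) { puts("widget state given up"); }
#endif
/* No "#else" block of empty static inline stubs: the callers below
 * sit under the same #ifdef, so fallback definitions would be dead code. */

int main(void)
{
#ifdef HAS_WIDGET
	enable_widget();
	giveup_widget();
#endif
	return 0;
}

Grouping each feature's enable_*/flush_*/giveup_* declarations inside a single #ifdef, as the hunks below do for CONFIG_PPC_FPU, CONFIG_ALTIVEC, CONFIG_VSX and CONFIG_SPE, keeps each feature's interface in one place.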
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 35 ++++++++++-------------------------
1 file changed, 10 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 81d46a433c03..5b268b6be74c 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -14,23 +14,18 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
-struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
-extern void enable_kernel_fp(void);
-extern void enable_kernel_altivec(void);
-extern void enable_kernel_vsx(void);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
+
extern int emulate_altivec(struct pt_regs *);
-extern void __giveup_vsx(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void enable_kernel_spe(void);
-extern void load_up_spe(struct task_struct *);
-extern void giveup_all(struct task_struct *);
+
extern void flush_all_to_thread(struct task_struct *);
-extern void switch_booke_debug_regs(struct debug_reg *new_debug);
+extern void giveup_all(struct task_struct *);
#ifdef CONFIG_PPC_FPU
+extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void __giveup_fpu(struct task_struct *);
@@ -38,14 +33,12 @@ static inline void disable_kernel_fp(void)
{
msr_check_and_clear(MSR_FP);
}
-
#else
static inline void flush_fp_to_thread(struct task_struct *t) { }
-static inline void giveup_fpu(struct task_struct *t) { }
-static inline void __giveup_fpu(struct task_struct *t) { }
#endif
#ifdef CONFIG_ALTIVEC
+extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void __giveup_altivec(struct task_struct *);
@@ -53,25 +46,21 @@ static inline void disable_kernel_altivec(void)
{
msr_check_and_clear(MSR_VEC);
}
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t) { }
-static inline void giveup_altivec(struct task_struct *t) { }
-static inline void __giveup_altivec(struct task_struct *t) { }
#endif
#ifdef CONFIG_VSX
+extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);
+extern void giveup_vsx(struct task_struct *);
+extern void __giveup_vsx(struct task_struct *);
static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
-#else
-static inline void flush_vsx_to_thread(struct task_struct *t)
-{
-}
#endif
#ifdef CONFIG_SPE
+extern void enable_kernel_spe(void);
extern void flush_spe_to_thread(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void __giveup_spe(struct task_struct *);
@@ -79,10 +68,6 @@ static inline void disable_kernel_spe(void)
{
msr_check_and_clear(MSR_SPE);
}
-#else
-static inline void flush_spe_to_thread(struct task_struct *t) { }
-static inline void giveup_spe(struct task_struct *t) { }
-static inline void __giveup_spe(struct task_struct *t) { }
#endif
static inline void clear_task_ebb(struct task_struct *t)