aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2015-04-23 12:31:17 +0200
committerIngo Molnar <mingo@kernel.org>2015-05-19 15:47:24 +0200
commit4540d3faa7c3fca6a6125448861de0e2e485658b (patch)
tree6e5ae8bbd1f488cd98c3928bff4096e893e07ab7
parentx86/fpu: Remove 'struct task_struct' usage from __thread_fpu_end() (diff)
downloadlinux-dev-4540d3faa7c3fca6a6125448861de0e2e485658b.tar.xz
linux-dev-4540d3faa7c3fca6a6125448861de0e2e485658b.zip
x86/fpu: Remove 'struct task_struct' usage from __thread_fpu_begin()
Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/include/asm/fpu-internal.h10
-rw-r--r--arch/x86/kernel/fpu/core.c3
2 files changed, 8 insertions, 5 deletions
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index b1803a656651..44516ad6c890 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -351,11 +351,11 @@ static inline void __thread_fpu_end(struct fpu *fpu)
stts();
}
-static inline void __thread_fpu_begin(struct task_struct *tsk)
+static inline void __thread_fpu_begin(struct fpu *fpu)
{
if (!use_eager_fpu())
clts();
- __thread_set_has_fpu(&tsk->thread.fpu);
+ __thread_set_has_fpu(fpu);
}
static inline void drop_fpu(struct task_struct *tsk)
@@ -451,7 +451,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
fpu.preload = 0;
else
prefetch(new->thread.fpu.state);
- __thread_fpu_begin(new);
+ __thread_fpu_begin(new_fpu);
}
}
return fpu;
@@ -505,9 +505,11 @@ static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
*/
static inline void user_fpu_begin(void)
{
+ struct fpu *fpu = &current->thread.fpu;
+
preempt_disable();
if (!user_has_fpu())
- __thread_fpu_begin(current);
+ __thread_fpu_begin(fpu);
preempt_enable();
}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 4e1f8f1bf493..cf49cd574d32 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -329,6 +329,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
void fpu__restore(void)
{
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
if (!tsk_used_math(tsk)) {
local_irq_enable();
@@ -347,7 +348,7 @@ void fpu__restore(void)
/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
kernel_fpu_disable();
- __thread_fpu_begin(tsk);
+ __thread_fpu_begin(fpu);
if (unlikely(restore_fpu_checking(tsk))) {
fpu_reset_state(tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);