Diffstat (limited to 'arch/x86/kernel/process_64.c')
 arch/x86/kernel/process_64.c | 109 +++++++++++++++++++++++------------------
 1 file changed, 58 insertions(+), 51 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0e0b4288a4b2..ddd4fa718c43 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -60,6 +60,8 @@
#include <asm/unistd_32_ia32.h>
#endif
+#include "process.h"
+
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
@@ -337,24 +339,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
return base;
}
-void x86_fsbase_write_cpu(unsigned long fsbase)
-{
- /*
- * Set the selector to 0 as a notion, that the segment base is
- * overwritten, which will be checked for skipping the segment load
- * during context switch.
- */
- loadseg(FS, 0);
- wrmsrl(MSR_FS_BASE, fsbase);
-}
-
-void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
-{
- /* Set the selector to 0 for the same reason as %fs above. */
- loadseg(GS, 0);
- wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
-}
-
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
unsigned long fsbase;
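The hunk above drops x86_fsbase_write_cpu() and x86_gsbase_write_cpu_inactive() from this file, yet the reworked ARCH_SET_GS code further down still calls x86_gsbase_write_cpu_inactive() and now performs the loadseg(GS, 0) itself. The helpers therefore presumably live elsewhere after this change, reduced to the bare base write. A hedged sketch of that reduced form on the legacy (non-FSGSBASE) path; the real definitions and their location may differ:

/* Hedged sketch: assumed post-move form of the helpers; real code may differ. */
void x86_fsbase_write_cpu(unsigned long fsbase)
{
	/* The caller is now responsible for zeroing the selector if it wants to. */
	wrmsrl(MSR_FS_BASE, fsbase);
}

void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
	/* Likewise: only the inactive (kernel) GS base MSR is written. */
	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
}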
@@ -383,38 +367,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
return gsbase;
}
-int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
- /*
- * Not strictly needed for %fs, but do it for symmetry
- * with %gs
- */
- if (unlikely(fsbase >= TASK_SIZE_MAX))
- return -EPERM;
+ WARN_ON_ONCE(task == current);
- preempt_disable();
task->thread.fsbase = fsbase;
- if (task == current)
- x86_fsbase_write_cpu(fsbase);
- task->thread.fsindex = 0;
- preempt_enable();
-
- return 0;
}
-int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
- if (unlikely(gsbase >= TASK_SIZE_MAX))
- return -EPERM;
+ WARN_ON_ONCE(task == current);
- preempt_disable();
task->thread.gsbase = gsbase;
- if (task == current)
- x86_gsbase_write_cpu_inactive(gsbase);
- task->thread.gsindex = 0;
- preempt_enable();
-
- return 0;
}
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
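Assembled from the hunk above, the two task setters now reduce to a plain store into the thread struct plus a warning if they are misapplied to the current task; the range check and the current-task handling move into do_arch_prctl_64() below:

void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}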
@@ -553,7 +517,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *prev_fpu = &prev->fpu;
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
this_cpu_read(irq_count) != -1);
@@ -617,12 +580,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* Reload sp0. */
update_task_stack(next_p);
- /*
- * Now maybe reload the debug registers and handle I/O bitmaps
- */
- if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
- task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
- __switch_to_xtra(prev_p, next_p, tss);
+ switch_to_extra(prev_p, next_p);
#ifdef CONFIG_XEN_PV
/*
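The open-coded _TIF_WORK_CTXSW check is replaced by switch_to_extra(), provided by the newly included local "process.h", presumably so the 32-bit and 64-bit switch paths can share it. A hedged reconstruction from the removed lines only; the dropped tss local suggests __switch_to_xtra() no longer takes a TSS argument, and the real helper may do more:

/* Sketch of the shared helper presumed to live in arch/x86/kernel/process.h. */
static inline void switch_to_extra(struct task_struct *prev,
				   struct task_struct *next)
{
	/* Now maybe reload the debug registers and handle I/O bitmaps. */
	if (unlikely(task_thread_info(next)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev, next);
}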
@@ -758,11 +716,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
switch (option) {
case ARCH_SET_GS: {
- ret = x86_gsbase_write_task(task, arg2);
+ if (unlikely(arg2 >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ /*
+ * ARCH_SET_GS has always overwritten the index
+ * and the base. Zero is the most sensible value
+ * to put in the index, and is the only value that
+ * makes any sense if FSGSBASE is unavailable.
+ */
+ if (task == current) {
+ loadseg(GS, 0);
+ x86_gsbase_write_cpu_inactive(arg2);
+
+ /*
+ * On non-FSGSBASE systems, save_base_legacy() expects
+ * that we also fill in thread.gsbase.
+ */
+ task->thread.gsbase = arg2;
+
+ } else {
+ task->thread.gsindex = 0;
+ x86_gsbase_write_task(task, arg2);
+ }
+ preempt_enable();
break;
}
case ARCH_SET_FS: {
- ret = x86_fsbase_write_task(task, arg2);
+ /*
+ * Not strictly needed for %fs, but do it for symmetry
+ * with %gs
+ */
+ if (unlikely(arg2 >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ /*
+ * Set the selector to 0 for the same reason
+ * as %gs above.
+ */
+ if (task == current) {
+ loadseg(FS, 0);
+ x86_fsbase_write_cpu(arg2);
+
+ /*
+ * On non-FSGSBASE systems, save_base_legacy() expects
+ * that we also fill in thread.fsbase.
+ */
+ task->thread.fsbase = arg2;
+ } else {
+ task->thread.fsindex = 0;
+ x86_fsbase_write_task(task, arg2);
+ }
+ preempt_enable();
break;
}
case ARCH_GET_FS: {
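The reworked ARCH_SET_FS/ARCH_SET_GS cases keep the user-visible ABI: a base at or above TASK_SIZE_MAX is rejected with -EPERM, otherwise the selector is zeroed and the new base installed. A minimal user-space sketch of that ABI, assuming only the uapi constants from <asm/prctl.h> and calling arch_prctl() through syscall(2):

#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */
#include <sys/syscall.h>	/* SYS_arch_prctl */
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static uint64_t tls_area[64];	/* any user address below TASK_SIZE_MAX */
	unsigned long gsbase = 0;

	if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)tls_area))
		perror("ARCH_SET_GS");

	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase))
		perror("ARCH_GET_GS");

	printf("GS base now %#lx (expected %p)\n", gsbase, (void *)tls_area);
	return 0;
}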