author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-06-25 00:19:24 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-07-08 13:11:11 +0200
commit     478de5a9d691dd0c048ddce62dbec23722515636 (patch)
tree       82d165c8420571925a0d56c92316f10a436f1831 /arch/x86/kernel/process_64.c
parent     x86, 64-bit: __switch_to(): move arch_leave_lazy_cpu_mode() to the right place (diff)
x86: save %fs and %gs before load_TLS() and arch_leave_lazy_cpu_mode()
We must do this because load_TLS() may need to clear %fs and %gs
(e.g. under Xen).

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
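For readers without the surrounding kernel context, the ordering constraint can be sketched in a few lines of plain C. Everything below is a hypothetical, userspace-only model (cpu_fs, fake_load_tls() and the 0x3b selector value are invented for illustration, not real kernel symbols); it only mimics the fact stated above, that a paravirtualized load_TLS() such as xen_load_tls() may clear %fs/%gs, so the selectors have to be saved first:

#include <stdio.h>

/* Simulated %fs selector register; pretend it holds a user TLS selector. */
static unsigned cpu_fs = 0x3b;

/* Models a paravirtualized load_TLS() (e.g. under Xen) that clears %fs
 * while it rewrites the GDT, so the old selector value is gone afterwards. */
static void fake_load_tls(void)
{
	cpu_fs = 0;
}

int main(void)
{
	unsigned fsindex;

	/* Buggy order: load_TLS() first, then save -- records 0. */
	fake_load_tls();
	fsindex = cpu_fs;
	printf("save after load_TLS():  fsindex = %#x\n", fsindex);

	/* Fixed order (what the patch does): save first, then load_TLS(). */
	cpu_fs = 0x3b;			/* reset the simulated register */
	fsindex = cpu_fs;
	fake_load_tls();
	printf("save before load_TLS(): fsindex = %#x\n", fsindex);

	return 0;
}

With the buggy ordering the selector has already been cleared by the time it is read, so the outgoing task's %fs/%gs indexes are recorded as 0; saving them before load_TLS(), as this patch does, preserves the real values.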
Diffstat (limited to 'arch/x86/kernel/process_64.c')
 arch/x86/kernel/process_64.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 488eaca47bd8..db5eb963e4df 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -538,6 +538,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter>5)
@@ -560,6 +561,15 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
+
+	/* We must save %fs and %gs before load_TLS() because
+	 * %fs and %gs may be cleared by load_TLS().
+	 *
+	 * (e.g. xen_load_tls())
+	 */
+	savesegment(fs, fsindex);
+	savesegment(gs, gsindex);
+
 	load_TLS(next, cpu);
 
 	/*
@@ -575,8 +585,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch FS and GS.
 	 */
 	{
-		unsigned fsindex;
-		savesegment(fs, fsindex);
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -594,10 +602,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		if (next->fs)
 			wrmsrl(MSR_FS_BASE, next->fs);
 		prev->fsindex = fsindex;
-	}
-	{
-		unsigned gsindex;
-		savesegment(gs, gsindex);
+
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)