author    Brian Gerst <brgerst@gmail.com>  2009-01-27 12:56:48 +0900
committer Tejun Heo <tj@kernel.org>        2009-01-27 12:56:48 +0900
commit    2697fbd5faf19c84c17441b1752bdcbdcfd1248c (patch)
tree      70bdf58fba94bf9ff94b2b6372662da89add9723 /arch/x86
parent    x86: remove extra barriers from load_gs_base() (diff)
x86: load new GDT after setting up boot cpu per-cpu area
Impact: sync 32 and 64-bit code

Merge load_gs_base() into switch_to_new_gdt().  Load the GDT and per-cpu
state for the boot cpu when its new area is set up.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
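For context, here is a sketch of the merged helper as it reads with this patch applied, pieced together from the common.c hunk below (indentation and the extra comment are approximations, not the literal tree contents). On 32-bit the per-cpu base stays in %fs via __KERNEL_PERCPU; on 64-bit %gs is cleared and the base is written through MSR_GS_BASE, which is what load_gs_base() used to do:

void switch_to_new_gdt(void)
{
        struct desc_ptr gdt_descr;
        int cpu = smp_processor_id();

        /* Point GDTR at this CPU's GDT. */
        gdt_descr.address = (long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);

        /* Reload the per-cpu base */
#ifdef CONFIG_X86_32
        loadsegment(fs, __KERNEL_PERCPU);
#else
        loadsegment(gs, 0);
        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
}

With this in place, the boot cpu reaches the reload path from setup_per_cpu_areas() as soon as its per-cpu area is populated, and again from cpu_init(), which afterwards only needs to clear %fs before loading the IDT.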
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/processor.h  |  5
-rw-r--r--  arch/x86/kernel/cpu/common.c      | 15
-rw-r--r--  arch/x86/kernel/setup_percpu.c    |  6
3 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 794234eba317..befa20b4a68c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -394,11 +394,6 @@ union irq_stack_union {
 
 DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
 DECLARE_PER_CPU(char *, irq_stack_ptr);
-
-static inline void load_gs_base(int cpu)
-{
-        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
-}
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e30c8a282c..0c766b80d915 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -258,12 +258,17 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 void switch_to_new_gdt(void)
 {
         struct desc_ptr gdt_descr;
+        int cpu = smp_processor_id();
 
-        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+        gdt_descr.address = (long)get_cpu_gdt_table(cpu);
         gdt_descr.size = GDT_SIZE - 1;
         load_gdt(&gdt_descr);
+        /* Reload the per-cpu base */
 #ifdef CONFIG_X86_32
-        asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+        loadsegment(fs, __KERNEL_PERCPU);
+#else
+        loadsegment(gs, 0);
+        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 }
@@ -968,10 +973,6 @@ void __cpuinit cpu_init(void)
         struct task_struct *me;
         int i;
 
-        loadsegment(fs, 0);
-        loadsegment(gs, 0);
-        load_gs_base(cpu);
-
 #ifdef CONFIG_NUMA
         if (cpu != 0 && percpu_read(node_number) == 0 &&
             cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -993,6 +994,8 @@
          */
         switch_to_new_gdt();
+        loadsegment(fs, 0);
+
         load_idt((const struct desc_ptr *)&idt_descr);
 
         memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index bcca3a7b3748..4caa78d7cb15 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -113,13 +113,13 @@ void __init setup_per_cpu_areas(void)
                 per_cpu(x86_cpu_to_node_map, cpu) =
                         early_per_cpu_map(x86_cpu_to_node_map, cpu);
 #endif
+#endif
                 /*
                  * Up to this point, the boot CPU has been using .data.init
-                 * area. Reload %gs offset for the boot CPU.
+                 * area. Reload any changed state for the boot CPU.
                  */
                 if (cpu == boot_cpu_id)
-                        load_gs_base(cpu);
-#endif
+                        switch_to_new_gdt();
 
                 DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
         }