Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/acpi/sleep.c        4
-rw-r--r--  arch/x86_64/kernel/apic.c             19
-rw-r--r--  arch/x86_64/kernel/cpufreq/Kconfig     2
-rw-r--r--  arch/x86_64/kernel/e820.c              2
-rw-r--r--  arch/x86_64/kernel/early-quirks.c     15
-rw-r--r--  arch/x86_64/kernel/entry.S             3
-rw-r--r--  arch/x86_64/kernel/functionlist        1
-rw-r--r--  arch/x86_64/kernel/hpet.c            118
-rw-r--r--  arch/x86_64/kernel/i8259.c            54
-rw-r--r--  arch/x86_64/kernel/io_apic.c         442
-rw-r--r--  arch/x86_64/kernel/k8.c                4
-rw-r--r--  arch/x86_64/kernel/mpparse.c           4
-rw-r--r--  arch/x86_64/kernel/nmi.c             135
-rw-r--r--  arch/x86_64/kernel/pci-gart.c          8
-rw-r--r--  arch/x86_64/kernel/process.c          13
-rw-r--r--  arch/x86_64/kernel/smpboot.c           5
-rw-r--r--  arch/x86_64/kernel/time.c              4
-rw-r--r--  arch/x86_64/kernel/tsc.c               7
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S       2
-rw-r--r--  arch/x86_64/kernel/vsyscall.c          2
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c       1
21 files changed, 451 insertions, 394 deletions
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 23178ce6c783..e1548fbe95ae 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -66,8 +66,10 @@ static void init_low_mapping(void)
{
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
+ /* FIXME: We're playing with the current task's page tables here, which
+ * is potentially dangerous on SMP systems.
+ */
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- WARN_ON(num_online_cpus() != 1);
local_flush_tlb();
}
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 723417d924c0..bd3e45d47c37 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -47,6 +47,10 @@ int apic_calibrate_pmtmr __initdata;
int disable_apic_timer __initdata;
+/* Local APIC timer works in C2? */
+int local_apic_timer_c2_ok;
+EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
+
static struct resource *ioapic_resources;
static struct resource lapic_resource = {
.name = "Local APIC",
@@ -930,9 +934,17 @@ EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
void smp_send_timer_broadcast_ipi(void)
{
+ int cpu = smp_processor_id();
cpumask_t mask;
cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
+
+ if (cpu_isset(cpu, mask)) {
+ cpu_clear(cpu, mask);
+ add_pda(apic_timer_irqs, 1);
+ smp_local_timer_interrupt();
+ }
+
if (!cpus_empty(mask)) {
send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
}
@@ -1192,6 +1204,13 @@ static __init int setup_nolapic(char *str)
}
early_param("nolapic", setup_nolapic);
+static int __init parse_lapic_timer_c2_ok(char *arg)
+{
+ local_apic_timer_c2_ok = 1;
+ return 0;
+}
+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
+
static __init int setup_noapictimer(char *str)
{
if (str[0] != ' ' && str[0] != 0)
diff --git a/arch/x86_64/kernel/cpufreq/Kconfig b/arch/x86_64/kernel/cpufreq/Kconfig
index 45a6a1fd14ac..40acb67fb882 100644
--- a/arch/x86_64/kernel/cpufreq/Kconfig
+++ b/arch/x86_64/kernel/cpufreq/Kconfig
@@ -45,7 +45,6 @@ config X86_SPEEDSTEP_CENTRINO
config X86_SPEEDSTEP_CENTRINO_ACPI
bool
depends on X86_SPEEDSTEP_CENTRINO
- default y
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
@@ -76,6 +75,7 @@ config X86_ACPI_CPUFREQ_PROC_INTF
config X86_P4_CLOCKMOD
tristate "Intel Pentium 4 clock modulation"
depends on EMBEDDED
+ select CPU_FREQ_TABLE
help
This adds the clock modulation driver for Intel Pentium 4 / XEON
processors. When enabled it will lower CPU temperature by skipping
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 4651fd22b213..a490fabfcf47 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -662,7 +662,7 @@ static int __init parse_memmap_opt(char *p)
}
early_param("memmap", parse_memmap_opt);
-void finish_e820_parsing(void)
+void __init finish_e820_parsing(void)
{
if (userdef) {
printk(KERN_INFO "user-defined physical RAM map:\n");
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index bd30d138113f..fede55a53995 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -16,7 +16,7 @@
#include <asm/proto.h>
#include <asm/dma.h>
-static void via_bugs(void)
+static void __init via_bugs(void)
{
#ifdef CONFIG_IOMMU
if ((end_pfn > MAX_DMA32_PFN || force_iommu) &&
@@ -30,16 +30,13 @@ static void via_bugs(void)
#ifdef CONFIG_ACPI
-static int nvidia_hpet_detected __initdata;
-
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
- nvidia_hpet_detected = 1;
return 0;
}
#endif
-static void nvidia_bugs(void)
+static void __init nvidia_bugs(void)
{
#ifdef CONFIG_ACPI
/*
@@ -52,9 +49,7 @@ static void nvidia_bugs(void)
if (acpi_use_timer_override)
return;
- nvidia_hpet_detected = 0;
- acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
- if (nvidia_hpet_detected == 0) {
+ if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
@@ -67,7 +62,7 @@ static void nvidia_bugs(void)
}
-static void ati_bugs(void)
+static void __init ati_bugs(void)
{
if (timer_over_8254 == 1) {
timer_over_8254 = 0;
@@ -93,7 +88,7 @@ struct chipset {
void (*f)(void);
};
-static struct chipset early_qrk[] = {
+static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
{ PCI_VENDOR_ID_VIA, via_bugs },
{ PCI_VENDOR_ID_ATI, ati_bugs },
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 9f5dac64aa8f..ed4350ced3d0 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -675,6 +675,9 @@ END(invalidate_interrupt\num)
ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
+ENTRY(irq_move_cleanup_interrupt)
+ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
+END(irq_move_cleanup_interrupt)
#endif
ENTRY(apic_timer_interrupt)
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
index 01fa23580c85..7ae18ec12454 100644
--- a/arch/x86_64/kernel/functionlist
+++ b/arch/x86_64/kernel/functionlist
@@ -514,7 +514,6 @@
*(.text.dentry_open)
*(.text.dentry_iput)
*(.text.bio_alloc)
-*(.text.alloc_skb_from_cache)
*(.text.wait_on_page_bit)
*(.text.vfs_readdir)
*(.text.vfs_lstat)
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c
index 65a0edd71a17..b8286968662d 100644
--- a/arch/x86_64/kernel/hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -12,6 +12,12 @@
#include <asm/timex.h>
#include <asm/hpet.h>
+#define HPET_MASK 0xFFFFFFFF
+#define HPET_SHIFT 22
+
+/* FSEC = 10^-15 NSEC = 10^-9 */
+#define FSEC_PER_NSEC 1000000
+
int nohpet __initdata;
unsigned long hpet_address;
@@ -106,9 +112,31 @@ int hpet_timer_stop_set_go(unsigned long tick)
return 0;
}
+static cycle_t read_hpet(void)
+{
+ return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static cycle_t __vsyscall_fn vread_hpet(void)
+{
+ return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
+struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = (cycle_t)HPET_MASK,
+ .mult = 0, /* set below */
+ .shift = HPET_SHIFT,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vread = vread_hpet,
+};
+
int hpet_arch_init(void)
{
unsigned int id;
+ u64 tmp;
if (!hpet_address)
return -1;
@@ -132,6 +160,22 @@ int hpet_arch_init(void)
hpet_use_timer = (id & HPET_ID_LEGSUP);
+ /*
+ * hpet period is in femtoseconds per cycle
+ * so we need to convert this to ns/cyc units
+ * approximated by mult/2^shift
+ *
+ * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+ * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+ * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+ * (fsec/cyc << shift)/1000000 = mult
+ * (hpet_period << shift)/FSEC_PER_NSEC = mult
+ */
+ tmp = (u64)hpet_period << HPET_SHIFT;
+ do_div(tmp, FSEC_PER_NSEC);
+ clocksource_hpet.mult = (u32)tmp;
+ clocksource_register(&clocksource_hpet);
+
return hpet_timer_stop_set_go(hpet_tick);
}
@@ -147,6 +191,7 @@ int hpet_reenable(void)
#define TICK_COUNT 100000000
#define TICK_MIN 5000
+#define MAX_TRIES 5
/*
* Some platforms take periodic SMI interrupts with 5ms duration. Make sure none
@@ -154,13 +199,15 @@ int hpet_reenable(void)
*/
static void __init read_hpet_tsc(int *hpet, int *tsc)
{
- int tsc1, tsc2, hpet1;
+ int tsc1, tsc2, hpet1, i;
- do {
+ for (i = 0; i < MAX_TRIES; i++) {
tsc1 = get_cycles_sync();
hpet1 = hpet_readl(HPET_COUNTER);
tsc2 = get_cycles_sync();
- } while (tsc2 - tsc1 > TICK_MIN);
+ if (tsc2 - tsc1 <= TICK_MIN)
+ break;
+ }
*hpet = hpet1;
*tsc = tsc2;
}
@@ -444,68 +491,3 @@ static int __init nohpet_setup(char *s)
}
__setup("nohpet", nohpet_setup);
-
-#define HPET_MASK 0xFFFFFFFF
-#define HPET_SHIFT 22
-
-/* FSEC = 10^-15 NSEC = 10^-9 */
-#define FSEC_PER_NSEC 1000000
-
-static void *hpet_ptr;
-
-static cycle_t read_hpet(void)
-{
- return (cycle_t)readl(hpet_ptr);
-}
-
-static cycle_t __vsyscall_fn vread_hpet(void)
-{
- return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
-}
-
-struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = (cycle_t)HPET_MASK,
- .mult = 0, /* set below */
- .shift = HPET_SHIFT,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .vread = vread_hpet,
-};
-
-static int __init init_hpet_clocksource(void)
-{
- unsigned long hpet_period;
- void __iomem *hpet_base;
- u64 tmp;
-
- if (!hpet_address)
- return -ENODEV;
-
- /* calculate the hpet address: */
- hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
- hpet_ptr = hpet_base + HPET_COUNTER;
-
- /* calculate the frequency: */
- hpet_period = readl(hpet_base + HPET_PERIOD);
-
- /*
- * hpet period is in femtoseconds per cycle
- * so we need to convert this to ns/cyc units
- * approximated by mult/2^shift
- *
- * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
- * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
- * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
- * (fsec/cyc << shift)/1000000 = mult
- * (hpet_period << shift)/FSEC_PER_NSEC = mult
- */
- tmp = (u64)hpet_period << HPET_SHIFT;
- do_div(tmp, FSEC_PER_NSEC);
- clocksource_hpet.mult = (u32)tmp;
-
- return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
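
The mult/shift derivation in the hpet_arch_init() hunk above is easy to
sanity-check outside the kernel. A minimal user-space sketch, assuming the
common 14.31818 MHz HPET (hpet_period of roughly 69841279 fs/cycle; an
illustrative value, the kernel reads the real one from the HPET_PERIOD
register):

	/* mult = (fs_per_cyc << shift) / fs_per_ns; the clocksource core
	 * then converts with ns = (cycles * mult) >> shift.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define HPET_SHIFT    22
	#define FSEC_PER_NSEC 1000000

	int main(void)
	{
		uint64_t hpet_period = 69841279;  /* fs per cycle (assumed) */
		uint32_t mult = (uint32_t)((hpet_period << HPET_SHIFT) / FSEC_PER_NSEC);
		uint64_t cycles = 14318180;       /* about one second of ticks */
		uint64_t ns = (cycles * mult) >> HPET_SHIFT;

		printf("mult=%u: %llu cycles -> %llu ns\n", mult,
		       (unsigned long long)cycles, (unsigned long long)ns);
		return 0;
	}

The result lands within a few parts per million of 10^9 ns; the residue is
the rounding inherent in the 22-bit fixed-point approximation.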
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 01e2cf0bdeb1..489426682772 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -45,7 +45,7 @@
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x20-0x2f)
+ * (these are usually mapped to vectors 0x30-0x3f)
*/
/*
@@ -299,7 +299,7 @@ void init_8259A(int auto_eoi)
* outb_p - this has to work on a wide range of PC hardware.
*/
outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
- outb_p(0x20 + 0, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
+ outb_p(IRQ0_VECTOR, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
if (auto_eoi)
outb_p(0x03, 0x21); /* master does Auto EOI */
@@ -307,7 +307,7 @@ void init_8259A(int auto_eoi)
outb_p(0x01, 0x21); /* master expects normal EOI */
outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
- outb_p(0x20 + 8, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
+ outb_p(IRQ8_VECTOR, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
is to be investigated) */
@@ -398,24 +398,24 @@ device_initcall(i8259A_init_sysfs);
static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
- [0 ... FIRST_EXTERNAL_VECTOR - 1] = -1,
- [FIRST_EXTERNAL_VECTOR + 0] = 0,
- [FIRST_EXTERNAL_VECTOR + 1] = 1,
- [FIRST_EXTERNAL_VECTOR + 2] = 2,
- [FIRST_EXTERNAL_VECTOR + 3] = 3,
- [FIRST_EXTERNAL_VECTOR + 4] = 4,
- [FIRST_EXTERNAL_VECTOR + 5] = 5,
- [FIRST_EXTERNAL_VECTOR + 6] = 6,
- [FIRST_EXTERNAL_VECTOR + 7] = 7,
- [FIRST_EXTERNAL_VECTOR + 8] = 8,
- [FIRST_EXTERNAL_VECTOR + 9] = 9,
- [FIRST_EXTERNAL_VECTOR + 10] = 10,
- [FIRST_EXTERNAL_VECTOR + 11] = 11,
- [FIRST_EXTERNAL_VECTOR + 12] = 12,
- [FIRST_EXTERNAL_VECTOR + 13] = 13,
- [FIRST_EXTERNAL_VECTOR + 14] = 14,
- [FIRST_EXTERNAL_VECTOR + 15] = 15,
- [FIRST_EXTERNAL_VECTOR + 16 ... NR_VECTORS - 1] = -1
+ [0 ... IRQ0_VECTOR - 1] = -1,
+ [IRQ0_VECTOR] = 0,
+ [IRQ1_VECTOR] = 1,
+ [IRQ2_VECTOR] = 2,
+ [IRQ3_VECTOR] = 3,
+ [IRQ4_VECTOR] = 4,
+ [IRQ5_VECTOR] = 5,
+ [IRQ6_VECTOR] = 6,
+ [IRQ7_VECTOR] = 7,
+ [IRQ8_VECTOR] = 8,
+ [IRQ9_VECTOR] = 9,
+ [IRQ10_VECTOR] = 10,
+ [IRQ11_VECTOR] = 11,
+ [IRQ12_VECTOR] = 12,
+ [IRQ13_VECTOR] = 13,
+ [IRQ14_VECTOR] = 14,
+ [IRQ15_VECTOR] = 15,
+ [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
};
void __init init_ISA_irqs (void)
@@ -450,6 +450,7 @@ void spurious_interrupt(void);
void error_interrupt(void);
void reschedule_interrupt(void);
void call_function_interrupt(void);
+void irq_move_cleanup_interrupt(void);
void invalidate_interrupt0(void);
void invalidate_interrupt1(void);
void invalidate_interrupt2(void);
@@ -520,12 +521,6 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
/*
- * IRQ0 must be given a fixed assignment and initialized,
- * because it's used before the IO-APIC is set up.
- */
- __get_cpu_var(vector_irq)[FIRST_DEVICE_VECTOR] = 0;
-
- /*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
*/
@@ -543,7 +538,10 @@ void __init init_IRQ(void)
/* IPI for generic function call */
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-#endif
+
+ /* Low priority IPI to cleanup after moving an irq */
+ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+#endif
set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
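
The IRQ0_VECTOR..IRQ15_VECTOR names used in the new vector_irq table come
from a header outside this diffstat (include/asm-x86_64/hw_irq.h). Judging
from the updated comments, which move the ISA interrupts from 0x20-0x2f to
0x30-0x3f, the companion definitions are presumably along these lines:

	/* Presumed hw_irq.h companion change (not shown in this diff): */
	#define FIRST_EXTERNAL_VECTOR	0x20
	#define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR + 0x10)	/* 0x30 */
	#define IRQ1_VECTOR		(IRQ0_VECTOR + 1)
	/* ...one per ISA irq... */
	#define IRQ15_VECTOR		(IRQ0_VECTOR + 15)		/* 0x3f */

This gives each ISA interrupt a named vector and makes initializers like the
vector_irq block above self-describing instead of repeating the
FIRST_EXTERNAL_VECTOR arithmetic at every use.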
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 950682f35766..c6a5bc7e8118 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -36,6 +36,7 @@
#include <acpi/acpi_bus.h>
#endif
+#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
@@ -47,7 +48,35 @@
#include <asm/msidef.h>
#include <asm/hypertransport.h>
-static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result);
+struct irq_cfg {
+ cpumask_t domain;
+ cpumask_t old_domain;
+ unsigned move_cleanup_count;
+ u8 vector;
+ u8 move_in_progress : 1;
+};
+
+/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
+ [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
+ [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
+ [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
+ [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
+ [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
+ [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
+ [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
+ [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
+ [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
+ [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
+ [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
+ [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
+ [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
+ [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
+ [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
+ [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+};
+
+static int assign_irq_vector(int irq, cpumask_t mask);
#define __apicdebuginit __init
@@ -74,7 +103,7 @@ int nr_ioapic_registers[MAX_IO_APICS];
* Rough estimation of how many shared IRQs there are, can
* be changed anytime.
*/
-#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
+#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
/*
@@ -149,11 +178,11 @@ static inline void io_apic_sync(unsigned int apic)
reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
reg ACTION; \
io_apic_modify(entry->apic, reg); \
+ FINAL; \
if (!entry->next) \
break; \
entry = irq_2_pin + entry->next; \
} \
- FINAL; \
}
union entry_union {
@@ -237,21 +266,19 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
+ struct irq_cfg *cfg = irq_cfg + irq;
unsigned long flags;
unsigned int dest;
cpumask_t tmp;
- int vector;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
- tmp = TARGET_CPUS;
-
- cpus_and(mask, tmp, CPU_MASK_ALL);
+ return;
- vector = assign_irq_vector(irq, mask, &tmp);
- if (vector < 0)
+ if (assign_irq_vector(irq, mask))
return;
+ cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
/*
@@ -260,8 +287,8 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
dest = SET_APIC_LOGICAL_ID(dest);
spin_lock_irqsave(&ioapic_lock, flags);
- __target_IO_APIC_irq(irq, dest, vector);
- set_native_irq_info(irq, mask);
+ __target_IO_APIC_irq(irq, dest, cfg->vector);
+ irq_desc[irq].affinity = mask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif
@@ -615,63 +642,7 @@ static int pin_2_irq(int idx, int apic, int pin)
return irq;
}
-static inline int IO_APIC_irq_trigger(int irq)
-{
- int apic, idx, pin;
-
- for (apic = 0; apic < nr_ioapics; apic++) {
- for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
- idx = find_irq_entry(apic,pin,mp_INT);
- if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
- return irq_trigger(idx);
- }
- }
- /*
- * nonexistent IRQs are edge default
- */
- return 0;
-}
-
-/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = {
- [0] = FIRST_EXTERNAL_VECTOR + 0,
- [1] = FIRST_EXTERNAL_VECTOR + 1,
- [2] = FIRST_EXTERNAL_VECTOR + 2,
- [3] = FIRST_EXTERNAL_VECTOR + 3,
- [4] = FIRST_EXTERNAL_VECTOR + 4,
- [5] = FIRST_EXTERNAL_VECTOR + 5,
- [6] = FIRST_EXTERNAL_VECTOR + 6,
- [7] = FIRST_EXTERNAL_VECTOR + 7,
- [8] = FIRST_EXTERNAL_VECTOR + 8,
- [9] = FIRST_EXTERNAL_VECTOR + 9,
- [10] = FIRST_EXTERNAL_VECTOR + 10,
- [11] = FIRST_EXTERNAL_VECTOR + 11,
- [12] = FIRST_EXTERNAL_VECTOR + 12,
- [13] = FIRST_EXTERNAL_VECTOR + 13,
- [14] = FIRST_EXTERNAL_VECTOR + 14,
- [15] = FIRST_EXTERNAL_VECTOR + 15,
-};
-
-static cpumask_t irq_domain[NR_IRQ_VECTORS] __read_mostly = {
- [0] = CPU_MASK_ALL,
- [1] = CPU_MASK_ALL,
- [2] = CPU_MASK_ALL,
- [3] = CPU_MASK_ALL,
- [4] = CPU_MASK_ALL,
- [5] = CPU_MASK_ALL,
- [6] = CPU_MASK_ALL,
- [7] = CPU_MASK_ALL,
- [8] = CPU_MASK_ALL,
- [9] = CPU_MASK_ALL,
- [10] = CPU_MASK_ALL,
- [11] = CPU_MASK_ALL,
- [12] = CPU_MASK_ALL,
- [13] = CPU_MASK_ALL,
- [14] = CPU_MASK_ALL,
- [15] = CPU_MASK_ALL,
-};
-
-static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
+static int __assign_irq_vector(int irq, cpumask_t mask)
{
/*
* NOTE! The local APIC isn't very good at handling
@@ -685,20 +656,25 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
- int old_vector = -1;
+ unsigned int old_vector;
int cpu;
+ struct irq_cfg *cfg;
- BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
+ BUG_ON((unsigned)irq >= NR_IRQS);
+ cfg = &irq_cfg[irq];
/* Only try and allocate irqs on cpus that are present */
cpus_and(mask, mask, cpu_online_map);
- if (irq_vector[irq] > 0)
- old_vector = irq_vector[irq];
- if (old_vector > 0) {
- cpus_and(*result, irq_domain[irq], mask);
- if (!cpus_empty(*result))
- return old_vector;
+ if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+ return -EBUSY;
+
+ old_vector = cfg->vector;
+ if (old_vector) {
+ cpumask_t tmp;
+ cpus_and(tmp, cfg->domain, mask);
+ if (!cpus_empty(tmp))
+ return 0;
}
for_each_cpu_mask(cpu, mask) {
@@ -728,48 +704,47 @@ next:
/* Found one! */
current_vector = vector;
current_offset = offset;
- if (old_vector >= 0) {
- cpumask_t old_mask;
- int old_cpu;
- cpus_and(old_mask, irq_domain[irq], cpu_online_map);
- for_each_cpu_mask(old_cpu, old_mask)
- per_cpu(vector_irq, old_cpu)[old_vector] = -1;
+ if (old_vector) {
+ cfg->move_in_progress = 1;
+ cfg->old_domain = cfg->domain;
}
for_each_cpu_mask(new_cpu, new_mask)
per_cpu(vector_irq, new_cpu)[vector] = irq;
- irq_vector[irq] = vector;
- irq_domain[irq] = domain;
- cpus_and(*result, domain, mask);
- return vector;
+ cfg->vector = vector;
+ cfg->domain = domain;
+ return 0;
}
return -ENOSPC;
}
-static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
+static int assign_irq_vector(int irq, cpumask_t mask)
{
- int vector;
+ int err;
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
- vector = __assign_irq_vector(irq, mask, result);
+ err = __assign_irq_vector(irq, mask);
spin_unlock_irqrestore(&vector_lock, flags);
- return vector;
+ return err;
}
static void __clear_irq_vector(int irq)
{
+ struct irq_cfg *cfg;
cpumask_t mask;
int cpu, vector;
- BUG_ON(!irq_vector[irq]);
+ BUG_ON((unsigned)irq >= NR_IRQS);
+ cfg = &irq_cfg[irq];
+ BUG_ON(!cfg->vector);
- vector = irq_vector[irq];
- cpus_and(mask, irq_domain[irq], cpu_online_map);
+ vector = cfg->vector;
+ cpus_and(mask, cfg->domain, cpu_online_map);
for_each_cpu_mask(cpu, mask)
per_cpu(vector_irq, cpu)[vector] = -1;
- irq_vector[irq] = 0;
- irq_domain[irq] = CPU_MASK_NONE;
+ cfg->vector = 0;
+ cfg->domain = CPU_MASK_NONE;
}
void __setup_vector_irq(int cpu)
@@ -779,10 +754,10 @@ void __setup_vector_irq(int cpu)
int irq, vector;
/* Mark the inuse vectors */
- for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
- if (!cpu_isset(cpu, irq_domain[irq]))
+ for (irq = 0; irq < NR_IRQS; ++irq) {
+ if (!cpu_isset(cpu, irq_cfg[irq].domain))
continue;
- vector = irq_vector[irq];
+ vector = irq_cfg[irq].vector;
per_cpu(vector_irq, cpu)[vector] = irq;
}
/* Mark the free vectors */
@@ -790,36 +765,45 @@ void __setup_vector_irq(int cpu)
irq = per_cpu(vector_irq, cpu)[vector];
if (irq < 0)
continue;
- if (!cpu_isset(cpu, irq_domain[irq]))
+ if (!cpu_isset(cpu, irq_cfg[irq].domain))
per_cpu(vector_irq, cpu)[vector] = -1;
}
}
-extern void (*interrupt[NR_IRQS])(void);
-
static struct irq_chip ioapic_chip;
-#define IOAPIC_AUTO -1
-#define IOAPIC_EDGE 0
-#define IOAPIC_LEVEL 1
-
-static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+static void ioapic_register_intr(int irq, unsigned long trigger)
{
- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
- trigger == IOAPIC_LEVEL)
+ if (trigger)
set_irq_chip_and_handler_name(irq, &ioapic_chip,
handle_fasteoi_irq, "fasteoi");
else
set_irq_chip_and_handler_name(irq, &ioapic_chip,
handle_edge_irq, "edge");
}
-static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
+
+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
+ int trigger, int polarity)
{
+ struct irq_cfg *cfg = irq_cfg + irq;
struct IO_APIC_route_entry entry;
- int vector;
- unsigned long flags;
+ cpumask_t mask;
+
+ if (!IO_APIC_IRQ(irq))
+ return;
+
+ mask = TARGET_CPUS;
+ if (assign_irq_vector(irq, mask))
+ return;
+
+ cpus_and(mask, cfg->domain, mask);
+ apic_printk(APIC_VERBOSE,KERN_DEBUG
+ "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
+ "IRQ %d Mode:%i Active:%i)\n",
+ apic, mp_ioapics[apic].mpc_apicid, pin, cfg->vector,
+ irq, trigger, polarity);
/*
* add it to the IO-APIC irq-routing table:
@@ -828,41 +812,23 @@ static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
+ entry.dest = cpu_mask_to_apicid(mask);
entry.mask = 0; /* enable IRQ */
- entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
-
- entry.trigger = irq_trigger(idx);
- entry.polarity = irq_polarity(idx);
+ entry.trigger = trigger;
+ entry.polarity = polarity;
+ entry.vector = cfg->vector;
- if (irq_trigger(idx)) {
- entry.trigger = 1;
+ /* Mask level triggered irqs.
+ * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
+ */
+ if (trigger)
entry.mask = 1;
- entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
- }
- if (!apic && !IO_APIC_IRQ(irq))
- return;
-
- if (IO_APIC_IRQ(irq)) {
- cpumask_t mask;
- vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
- if (vector < 0)
- return;
-
- entry.dest = cpu_mask_to_apicid(mask);
- entry.vector = vector;
-
- ioapic_register_intr(irq, vector, IOAPIC_AUTO);
- if (!apic && (irq < 16))
- disable_8259A_irq(irq);
- }
+ ioapic_register_intr(irq, trigger);
+ if (irq < 16)
+ disable_8259A_irq(irq);
ioapic_write_entry(apic, pin, entry);
-
- spin_lock_irqsave(&ioapic_lock, flags);
- set_native_irq_info(irq, TARGET_CPUS);
- spin_unlock_irqrestore(&ioapic_lock, flags);
-
}
static void __init setup_IO_APIC_irqs(void)
@@ -887,8 +853,8 @@ static void __init setup_IO_APIC_irqs(void)
irq = pin_2_irq(idx, apic, pin);
add_pin_to_irq(irq, apic, pin);
- setup_IO_APIC_irq(apic, pin, idx, irq);
-
+ setup_IO_APIC_irq(apic, pin, irq,
+ irq_trigger(idx), irq_polarity(idx));
}
}
@@ -1373,16 +1339,15 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
static int ioapic_retrigger_irq(unsigned int irq)
{
+ struct irq_cfg *cfg = &irq_cfg[irq];
cpumask_t mask;
- unsigned vector;
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
- vector = irq_vector[irq];
cpus_clear(mask);
- cpu_set(first_cpu(irq_domain[irq]), mask);
+ cpu_set(first_cpu(cfg->domain), mask);
- send_IPI_mask(mask, vector);
+ send_IPI_mask(mask, cfg->vector);
spin_unlock_irqrestore(&vector_lock, flags);
return 1;
@@ -1397,8 +1362,68 @@ static int ioapic_retrigger_irq(unsigned int irq)
* races.
*/
+#ifdef CONFIG_SMP
+asmlinkage void smp_irq_move_cleanup_interrupt(void)
+{
+ unsigned vector, me;
+ ack_APIC_irq();
+ exit_idle();
+ irq_enter();
+
+ me = smp_processor_id();
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+ unsigned int irq;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ irq = __get_cpu_var(vector_irq)[vector];
+ if (irq >= NR_IRQS)
+ continue;
+
+ desc = irq_desc + irq;
+ cfg = irq_cfg + irq;
+ spin_lock(&desc->lock);
+ if (!cfg->move_cleanup_count)
+ goto unlock;
+
+ if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+ goto unlock;
+
+ __get_cpu_var(vector_irq)[vector] = -1;
+ cfg->move_cleanup_count--;
+unlock:
+ spin_unlock(&desc->lock);
+ }
+
+ irq_exit();
+}
+
+static void irq_complete_move(unsigned int irq)
+{
+ struct irq_cfg *cfg = irq_cfg + irq;
+ unsigned vector, me;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ vector = ~get_irq_regs()->orig_rax;
+ me = smp_processor_id();
+ if ((vector == cfg->vector) &&
+ cpu_isset(smp_processor_id(), cfg->domain)) {
+ cpumask_t cleanup_mask;
+
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ cfg->move_in_progress = 0;
+ }
+}
+#else
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
static void ack_apic_edge(unsigned int irq)
{
+ irq_complete_move(irq);
move_native_irq(irq);
ack_APIC_irq();
}
@@ -1407,6 +1432,7 @@ static void ack_apic_level(unsigned int irq)
{
int do_unmask_irq = 0;
+ irq_complete_move(irq);
#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
/* If we are moving the irq we need to mask it */
if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
@@ -1457,7 +1483,7 @@ static inline void init_IO_APIC_traps(void)
*/
for (irq = 0; irq < NR_IRQS ; irq++) {
int tmp = irq;
- if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
+ if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) {
/*
* Hmm.. We don't have an entry for this,
* so default to an old-fashioned 8259
@@ -1596,15 +1622,14 @@ static inline void unlock_ExtINT_logic(void)
*/
static inline void check_timer(void)
{
+ struct irq_cfg *cfg = irq_cfg + 0;
int apic1, pin1, apic2, pin2;
- int vector;
- cpumask_t mask;
/*
* get/set the timer IRQ vector:
*/
disable_8259A_irq(0);
- vector = assign_irq_vector(0, TARGET_CPUS, &mask);
+ assign_irq_vector(0, TARGET_CPUS);
/*
* Subtle, code in do_timer_interrupt() expects an AEOI
@@ -1624,7 +1649,7 @@ static inline void check_timer(void)
apic2 = ioapic_i8259.apic;
apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
- vector, apic1, pin1, apic2, pin2);
+ cfg->vector, apic1, pin1, apic2, pin2);
if (pin1 != -1) {
/*
@@ -1655,7 +1680,7 @@ static inline void check_timer(void)
/*
* legacy devices should be connected to IO APIC #0
*/
- setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
+ setup_ExtINT_IRQ0_pin(apic2, pin2, cfg->vector);
if (timer_irq_works()) {
apic_printk(APIC_VERBOSE," works.\n");
nmi_watchdog_default();
@@ -1680,14 +1705,14 @@ static inline void check_timer(void)
disable_8259A_irq(0);
irq_desc[0].chip = &lapic_irq_type;
- apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
+ apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
enable_8259A_irq(0);
if (timer_irq_works()) {
apic_printk(APIC_VERBOSE," works.\n");
return;
}
- apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_VERBOSE," failed.\n");
apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
@@ -1834,19 +1859,16 @@ int create_irq(void)
/* Allocate an unused irq */
int irq;
int new;
- int vector = 0;
unsigned long flags;
- cpumask_t mask;
irq = -ENOSPC;
spin_lock_irqsave(&vector_lock, flags);
for (new = (NR_IRQS - 1); new >= 0; new--) {
if (platform_legacy_irq(new))
continue;
- if (irq_vector[new] != 0)
+ if (irq_cfg[new].vector != 0)
continue;
- vector = __assign_irq_vector(new, TARGET_CPUS, &mask);
- if (likely(vector > 0))
+ if (__assign_irq_vector(new, TARGET_CPUS) == 0)
irq = new;
break;
}
@@ -1875,12 +1897,15 @@ void destroy_irq(unsigned int irq)
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
- int vector;
+ struct irq_cfg *cfg = irq_cfg + irq;
+ int err;
unsigned dest;
cpumask_t tmp;
- vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
- if (vector >= 0) {
+ tmp = TARGET_CPUS;
+ err = assign_irq_vector(irq, tmp);
+ if (!err) {
+ cpus_and(tmp, cfg->domain, tmp);
dest = cpu_mask_to_apicid(tmp);
msg->address_hi = MSI_ADDR_BASE_HI;
@@ -1900,40 +1925,38 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
((INT_DELIVERY_MODE != dest_LowestPrio) ?
MSI_DATA_DELIVERY_FIXED:
MSI_DATA_DELIVERY_LOWPRI) |
- MSI_DATA_VECTOR(vector);
+ MSI_DATA_VECTOR(cfg->vector);
}
- return vector;
+ return err;
}
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
+ struct irq_cfg *cfg = irq_cfg + irq;
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
- int vector;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
- tmp = TARGET_CPUS;
-
- cpus_and(mask, tmp, CPU_MASK_ALL);
+ return;
- vector = assign_irq_vector(irq, mask, &tmp);
- if (vector < 0)
+ if (assign_irq_vector(irq, mask))
return;
+ cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
read_msi_msg(irq, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(vector);
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
write_msi_msg(irq, &msg);
- set_native_irq_info(irq, mask);
+ irq_desc[irq].affinity = mask;
}
#endif /* CONFIG_SMP */
@@ -2004,24 +2027,22 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
+ struct irq_cfg *cfg = irq_cfg + irq;
unsigned int dest;
cpumask_t tmp;
- int vector;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
- tmp = TARGET_CPUS;
-
- cpus_and(mask, tmp, CPU_MASK_ALL);
+ return;
- vector = assign_irq_vector(irq, mask, &tmp);
- if (vector < 0)
+ if (assign_irq_vector(irq, mask))
return;
+ cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
- target_ht_irq(irq, dest, vector);
- set_native_irq_info(irq, mask);
+ target_ht_irq(irq, dest, cfg->vector);
+ irq_desc[irq].affinity = mask;
}
#endif
@@ -2038,14 +2059,17 @@ static struct irq_chip ht_irq_chip = {
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
- int vector;
+ struct irq_cfg *cfg = irq_cfg + irq;
+ int err;
cpumask_t tmp;
- vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
- if (vector >= 0) {
+ tmp = TARGET_CPUS;
+ err = assign_irq_vector(irq, tmp);
+ if (!err) {
struct ht_irq_msg msg;
unsigned dest;
+ cpus_and(tmp, cfg->domain, tmp);
dest = cpu_mask_to_apicid(tmp);
msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
@@ -2053,7 +2077,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
msg.address_lo =
HT_IRQ_LOW_BASE |
HT_IRQ_LOW_DEST_ID(dest) |
- HT_IRQ_LOW_VECTOR(vector) |
+ HT_IRQ_LOW_VECTOR(cfg->vector) |
((INT_DEST_MODE == 0) ?
HT_IRQ_LOW_DM_PHYSICAL :
HT_IRQ_LOW_DM_LOGICAL) |
@@ -2068,7 +2092,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
set_irq_chip_and_handler_name(irq, &ht_irq_chip,
handle_edge_irq, "edge");
}
- return vector;
+ return err;
}
#endif /* CONFIG_HT_IRQ */
@@ -2095,11 +2119,6 @@ int __init io_apic_get_redir_entries (int ioapic)
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
- struct IO_APIC_route_entry entry;
- unsigned long flags;
- int vector;
- cpumask_t mask;
-
if (!IO_APIC_IRQ(irq)) {
apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
ioapic);
@@ -2112,42 +2131,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
if (irq >= 16)
add_pin_to_irq(irq, ioapic, pin);
-
- vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
- if (vector < 0)
- return vector;
-
- /*
- * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
- * Note that we mask (disable) IRQs now -- these get enabled when the
- * corresponding device driver registers for this IRQ.
- */
-
- memset(&entry,0,sizeof(entry));
-
- entry.delivery_mode = INT_DELIVERY_MODE;
- entry.dest_mode = INT_DEST_MODE;
- entry.dest = cpu_mask_to_apicid(mask);
- entry.trigger = triggering;
- entry.polarity = polarity;
- entry.mask = 1; /* Disabled (masked) */
- entry.vector = vector & 0xff;
-
- apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
- "IRQ %d Mode:%i Active:%i)\n", ioapic,
- mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
- triggering, polarity);
-
- ioapic_register_intr(irq, entry.vector, triggering);
-
- if (!ioapic && (irq < 16))
- disable_8259A_irq(irq);
-
- ioapic_write_entry(ioapic, pin, entry);
-
- spin_lock_irqsave(&ioapic_lock, flags);
- set_native_irq_info(irq, TARGET_CPUS);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
return 0;
}
@@ -2179,8 +2163,10 @@ void __init setup_ioapic_dest(void)
* when you have too many devices, because at that time only boot
* cpu is online.
*/
- if(!irq_vector[irq])
- setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
+ if (!irq_cfg[irq].vector)
+ setup_IO_APIC_irq(ioapic, pin, irq,
+ irq_trigger(irq_entry),
+ irq_polarity(irq_entry));
else
set_ioapic_affinity_irq(irq, TARGET_CPUS);
}
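
Only fragments of the vector search inside __assign_irq_vector() are visible
in the hunks above (most of the loop body is unchanged context). For
orientation, a small user-space model of the strategy it implements: step by
8 so consecutive allocations land in different priority classes, wrap to a
fresh offset once the device range is exhausted, and never hand out the
int80 syscall vector. Constants and names are illustrative, not copied from
the kernel headers:

	#include <stdio.h>

	#define FIRST_DEVICE_VECTOR 0x41	/* assumed */
	#define FIRST_SYSTEM_VECTOR 0xef	/* assumed */
	#define IA32_SYSCALL_VECTOR 0x80

	static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
	static char used[256];

	static int alloc_vector(void)
	{
		int vector = current_vector, offset = current_offset;

		for (;;) {
			vector += 8;
			if (vector >= FIRST_SYSTEM_VECTOR) {
				/* range exhausted: restart from the next offset */
				offset = (offset + 1) % 8;
				vector = FIRST_DEVICE_VECTOR + offset;
			}
			if (vector == current_vector)
				return -1;	/* wrapped around: nothing free */
			if (vector == IA32_SYSCALL_VECTOR || used[vector])
				continue;
			used[vector] = 1;
			current_vector = vector;
			current_offset = offset;
			return vector;
		}
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 5; i++)
			printf("allocation %d -> vector 0x%02x\n", i, alloc_vector());
		return 0;
	}

In the kernel the same search also records which cpus each vector is claimed
on; the patch above keeps that bookkeeping in cfg->domain/cfg->old_domain of
struct irq_cfg instead of the parallel irq_vector[]/irq_domain[] arrays it
removes.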
diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c
index 6416682d33d0..bc11b32e8b4d 100644
--- a/arch/x86_64/kernel/k8.c
+++ b/arch/x86_64/kernel/k8.c
@@ -61,8 +61,8 @@ int cache_k8_northbridges(void)
dev = NULL;
i = 0;
while ((dev = next_k8_northbridge(dev)) != NULL) {
- k8_northbridges[i++] = dev;
- pci_read_config_dword(dev, 0x9c, &flush_words[i]);
+ k8_northbridges[i] = dev;
+ pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
}
k8_northbridges[i] = NULL;
return 0;
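
The k8.c fix above repairs a post-increment sitting between two paired
writes: k8_northbridges[i] and flush_words[i] were meant to describe the
same device, but bumping i in the first statement pushed every flush_words
entry one slot ahead and left slot 0 unwritten. A standalone illustration of
the shape of the bug, with made-up data:

	#include <stdio.h>

	int main(void)
	{
		int src[3] = { 11, 22, 33 };
		int dev[4] = { 0 }, flush[4] = { 0 };
		int i, j;

		/* Buggy shape: the second write sees the incremented index */
		i = 0;
		for (j = 0; j < 3; j++) {
			dev[i++] = src[j];
			flush[i] = src[j];	/* one past dev's slot; flush[0] stays 0 */
		}
		printf("buggy: flush[0]=%d flush[1]=%d\n", flush[0], flush[1]);

		/* Fixed shape, as in the hunk: increment on the last use */
		i = 0;
		for (j = 0; j < 3; j++) {
			dev[i] = src[j];
			flush[i++] = src[j];
		}
		printf("fixed: flush[0]=%d flush[1]=%d\n", flush[0], flush[1]);
		return 0;
	}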
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 50dd8bef850e..455aa0b932f0 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -60,9 +60,9 @@ unsigned long mp_lapic_addr = 0;
/* Processor that is doing the boot up */
unsigned int boot_cpu_id = -1U;
/* Internal processor count */
-unsigned int num_processors __initdata = 0;
+unsigned int num_processors __cpuinitdata = 0;
-unsigned disabled_cpus __initdata;
+unsigned disabled_cpus __cpuinitdata;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 486f4c61a948..dfab9f167366 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -39,15 +39,17 @@ int panic_on_unrecovered_nmi;
* different subsystems this reservation system just tries to coordinate
* things a little
*/
-static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
-static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
* offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
*/
#define NMI_MAX_COUNTER_BITS 66
+#define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS)
+
+static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* nmi_active:
* >0: the lapic NMI watchdog is active, but can be disabled
@@ -108,64 +110,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
+ int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
/* checks the an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
+ int cpu;
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1;
return 0;
}
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+ clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_perfctr_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_perfctr_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu)
+ __release_perfctr_nmi(cpu, msr);
+}
+
+static int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1;
return 0;
}
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+ clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_evntsel_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_evntsel_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu) {
+ __release_evntsel_nmi(cpu, msr);
+ }
}
static __cpuinit inline int nmi_known_cpu(void)
@@ -187,10 +253,7 @@ void nmi_watchdog_default(void)
{
if (nmi_watchdog != NMI_DEFAULT)
return;
- if (nmi_known_cpu())
- nmi_watchdog = NMI_LOCAL_APIC;
- else
- nmi_watchdog = NMI_IO_APIC;
+ nmi_watchdog = NMI_NONE;
}
static int endflag __initdata = 0;
@@ -256,7 +319,7 @@ int __init check_nmi_watchdog (void)
for (cpu = 0; cpu < NR_CPUS; cpu++)
counts[cpu] = cpu_pda(cpu)->__nmi_count;
local_irq_enable();
- mdelay((10*1000)/nmi_hz); // wait 10 ticks
+ mdelay((20*1000)/nmi_hz); // wait 20 ticks
for_each_online_cpu(cpu) {
if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
@@ -475,10 +538,10 @@ static int setup_k7_watchdog(void)
perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
/* Simulator may not support it */
@@ -504,9 +567,9 @@ static int setup_k7_watchdog(void)
wd->check_bit = 1ULL<<63;
return 1;
fail2:
- release_evntsel_nmi(evntsel_msr);
+ __release_evntsel_nmi(-1, evntsel_msr);
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -517,8 +580,8 @@ static void stop_k7_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
@@ -584,10 +647,10 @@ static int setup_p4_watchdog(void)
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
}
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -612,7 +675,7 @@ static int setup_p4_watchdog(void)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -624,8 +687,8 @@ static void stop_p4_watchdog(void)
wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -653,10 +716,10 @@ static int setup_intel_arch_watchdog(void)
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
@@ -683,7 +746,7 @@ static int setup_intel_arch_watchdog(void)
wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -707,8 +770,8 @@ static void stop_intel_arch_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
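
reserve_perfctr_nmi() and reserve_evntsel_nmi() above build an
all-or-nothing reservation out of the per-cpu primitives: claim the counter
bit on every possible cpu, and on the first failure release everything
claimed so far. A compact single-threaded model of that rollback pattern
(the kernel does the bit claims with atomic test_and_set_bit on per-cpu
bitmaps):

	#include <stdio.h>

	#define NCPUS 4

	static unsigned long owner[NCPUS];

	static int __reserve(int cpu, int counter)
	{
		unsigned long bit = 1UL << counter;

		if (owner[cpu] & bit)
			return 0;	/* already taken on this cpu */
		owner[cpu] |= bit;
		return 1;
	}

	static void __release(int cpu, int counter)
	{
		owner[cpu] &= ~(1UL << counter);
	}

	static int reserve(int counter)
	{
		int cpu, i;

		for (cpu = 0; cpu < NCPUS; cpu++) {
			if (!__reserve(cpu, counter)) {
				for (i = 0; i < cpu; i++)	/* roll back */
					__release(i, counter);
				return 0;
			}
		}
		return 1;
	}

	int main(void)
	{
		owner[2] = 1UL << 3;	/* pretend cpu 2 already owns counter 3 */
		printf("reserve(3) = %d (fails, rolls back cpus 0-1)\n", reserve(3));
		printf("reserve(1) = %d (succeeds on all cpus)\n", reserve(1));
		return 0;
	}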
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 030eb3753358..0bae862e9a55 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -519,7 +519,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
if (!gatt)
- panic("Cannot allocate GATT table");
+ panic("Cannot allocate GATT table");
+ if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
+ panic("Could not set GART PTEs to uncacheable pages");
+ global_flush_tlb();
+
memset(gatt, 0, gatt_size);
agp_gatt_table = gatt;
@@ -675,7 +679,7 @@ void __init gart_iommu_init(void)
dma_ops = &gart_dma_ops;
}
-void gart_parse_options(char *p)
+void __init gart_parse_options(char *p)
{
int arg;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index cbbc6adc1a92..d8d5ccc245c8 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -382,14 +382,17 @@ void exit_thread(void)
void flush_thread(void)
{
struct task_struct *tsk = current;
- struct thread_info *t = current_thread_info();
- if (t->flags & _TIF_ABI_PENDING) {
- t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
- if (t->flags & _TIF_IA32)
+ if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+ clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+ clear_tsk_thread_flag(tsk, TIF_IA32);
+ } else {
+ set_tsk_thread_flag(tsk, TIF_IA32);
current_thread_info()->status |= TS_COMPAT;
+ }
}
- t->flags &= ~_TIF_DEBUG;
+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
tsk->thread.debugreg0 = 0;
tsk->thread.debugreg1 = 0;
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 35443729aad8..cd4643a37022 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -923,8 +923,9 @@ void __init smp_prepare_boot_cpu(void)
*/
int __cpuinit __cpu_up(unsigned int cpu)
{
- int err;
int apicid = cpu_present_to_apicid(cpu);
+ unsigned long flags;
+ int err;
WARN_ON(irqs_disabled());
@@ -958,7 +959,9 @@ int __cpuinit __cpu_up(unsigned int cpu)
/*
* Make sure and check TSC sync:
*/
+ local_irq_save(flags);
check_tsc_sync_source(cpu);
+ local_irq_restore(flags);
while (!cpu_isset(cpu, cpu_online_map))
cpu_relax();
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index a87c51705620..75d73a9aa9ff 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -358,6 +358,8 @@ void __init time_init(void)
set_cyc2ns_scale(cpu_khz);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
+ init_tsc_clocksource();
+
setup_irq(0, &irq0);
}
@@ -418,7 +420,7 @@ static struct sysdev_class timer_sysclass = {
set_kset_name("timer"),
};
-/* XXX this driverfs stuff should probably go elsewhere later -john */
+/* XXX this sysfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
.id = 0,
.cls = &timer_sysclass,
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index 895831865019..1a0edbbffaa0 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -210,7 +210,7 @@ void mark_tsc_unstable(void)
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-static int __init init_tsc_clocksource(void)
+void __init init_tsc_clocksource(void)
{
if (!notsc) {
clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
@@ -218,9 +218,6 @@ static int __init init_tsc_clocksource(void)
if (check_tsc_unstable())
clocksource_tsc.rating = 0;
- return clocksource_register(&clocksource_tsc);
+ clocksource_register(&clocksource_tsc);
}
- return 0;
}
-
-module_init(init_tsc_clocksource);
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b73212c0a550..5176ecf006ee 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -13,7 +13,7 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
-_proxy_pda = 0;
+_proxy_pda = 1;
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(7); /* RWE */
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 180ff919eaf9..b43c698cf7d3 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -112,7 +112,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
vread = __vsyscall_gtod_data.clock.vread;
if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
- gettimeofday(tv,0);
+ gettimeofday(tv,NULL);
return;
}
now = vread();
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 0dffae69f4ad..77c25b307635 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -59,3 +59,4 @@ EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(init_level4_pgt);
EXPORT_SYMBOL(load_gs_index);
+EXPORT_SYMBOL(_proxy_pda);