Diffstat (limited to 'arch/x86_64/kernel/io_apic.c')
-rw-r--r--  arch/x86_64/kernel/io_apic.c  |  90
1 file changed, 60 insertions(+), 30 deletions(-)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 771bcf77daf2..fe429e5d6b29 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -57,13 +57,13 @@ static int no_timer_check;
static int disable_timer_pin_1 __initdata;
-int timer_over_8254 __initdata = 0;
+int timer_over_8254 __initdata = 1;
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -612,15 +612,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- static struct {
- int vector;
- int offset;
- } pos[NR_CPUS] = { [ 0 ... NR_CPUS - 1] = {FIRST_DEVICE_VECTOR, 0} };
+ static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
int old_vector = -1;
int cpu;
BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
+ /* Only try to allocate irqs on cpus that are online */
+ cpus_and(mask, mask, cpu_online_map);
+
if (irq_vector[irq] > 0)
old_vector = irq_vector[irq];
if (old_vector > 0) {
@@ -630,15 +630,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
}
for_each_cpu_mask(cpu, mask) {
- cpumask_t domain;
- int first, new_cpu;
+ cpumask_t domain, new_mask;
+ int new_cpu;
int vector, offset;
domain = vector_allocation_domain(cpu);
- first = first_cpu(domain);
+ cpus_and(new_mask, domain, cpu_online_map);
- vector = pos[first].vector;
- offset = pos[first].offset;
+ vector = current_vector;
+ offset = current_offset;
next:
vector += 8;
if (vector >= FIRST_SYSTEM_VECTOR) {
@@ -646,24 +646,24 @@ next:
offset = (offset + 1) % 8;
vector = FIRST_DEVICE_VECTOR + offset;
}
- if (unlikely(pos[first].vector == vector))
+ if (unlikely(current_vector == vector))
continue;
if (vector == IA32_SYSCALL_VECTOR)
goto next;
- for_each_cpu_mask(new_cpu, domain)
- if (per_cpu(vector_irq, cpu)[vector] != -1)
+ for_each_cpu_mask(new_cpu, new_mask)
+ if (per_cpu(vector_irq, new_cpu)[vector] != -1)
goto next;
/* Found one! */
- for_each_cpu_mask(new_cpu, domain) {
- pos[cpu].vector = vector;
- pos[cpu].offset = offset;
- }
+ current_vector = vector;
+ current_offset = offset;
if (old_vector >= 0) {
+ cpumask_t old_mask;
int old_cpu;
- for_each_cpu_mask(old_cpu, domain)
+ cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+ for_each_cpu_mask(old_cpu, old_mask)
per_cpu(vector_irq, old_cpu)[old_vector] = -1;
}
- for_each_cpu_mask(new_cpu, domain)
+ for_each_cpu_mask(new_cpu, new_mask)
per_cpu(vector_irq, new_cpu)[vector] = irq;
irq_vector[irq] = vector;
irq_domain[irq] = domain;
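
The three hunks above replace the per-cpu pos[] bookkeeping with a single global cursor (current_vector/current_offset) and restrict both the search and the old-vector teardown to CPUs in cpu_online_map. The search walks vectors in strides of 8, rotating the starting offset each time it wraps at FIRST_SYSTEM_VECTOR, and refuses to hand out the int 0x80 gate. The standalone sketch below only prints that candidate order; the constants are assumptions chosen to mirror typical x86-64 values of this era, not taken from the patch.

#include <stdio.h>

/* Assumed values for illustration; the real definitions live in asm/hw_irq.h. */
#define FIRST_DEVICE_VECTOR	0x31
#define FIRST_SYSTEM_VECTOR	0xef
#define IA32_SYSCALL_VECTOR	0x80

int main(void)
{
	int vector = FIRST_DEVICE_VECTOR, offset = 0;
	int i;

	for (i = 0; i < 30; i++) {
next:
		vector += 8;		/* stride of 8 spreads vectors across priority levels */
		if (vector >= FIRST_SYSTEM_VECTOR) {
			offset = (offset + 1) % 8;	/* wrapped: restart at the next offset */
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;	/* never allocate the int 0x80 gate */
		printf("candidate %2d: 0x%02x\n", i, vector);
	}
	return 0;
}
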
@@ -684,6 +684,32 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
return vector;
}
+void __setup_vector_irq(int cpu)
+{
+ /* Initialize vector_irq on a new cpu */
+ /* This function must be called with vector_lock held */
+ unsigned long flags;
+ int irq, vector;
+
+
+ /* Mark the in-use vectors */
+ for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+ if (!cpu_isset(cpu, irq_domain[irq]))
+ continue;
+ vector = irq_vector[irq];
+ per_cpu(vector_irq, cpu)[vector] = irq;
+ }
+ /* Mark the free vectors */
+ for (vector = 0; vector < NR_VECTORS; ++vector) {
+ irq = per_cpu(vector_irq, cpu)[vector];
+ if (irq < 0)
+ continue;
+ if (!cpu_isset(cpu, irq_domain[irq]))
+ per_cpu(vector_irq, cpu)[vector] = -1;
+ }
+}
+
+
extern void (*interrupt[NR_IRQS])(void);
static struct irq_chip ioapic_chip;
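
The new __setup_vector_irq() is why vector_lock stops being static in the first hunk: a CPU being brought online must fill in its per-cpu vector_irq[] table while holding the same lock the allocator takes. The caller is not part of this file; the sketch below is a guess at the shape of the bring-up hook (the name sketch_bring_cpu_online is invented), assuming the CPU is added to cpu_online_map inside the same critical section so the allocator never sees a half-initialized CPU.

#include <linux/spinlock.h>
#include <linux/cpumask.h>

extern spinlock_t vector_lock;			/* made non-static in the first hunk */
extern void __setup_vector_irq(int cpu);

/* Hypothetical bring-up hook, for illustration only. */
static void sketch_bring_cpu_online(int cpu)
{
	spin_lock(&vector_lock);
	__setup_vector_irq(cpu);		/* copy existing irq -> vector bindings */
	cpu_set(cpu, cpu_online_map);		/* allocator may now target this cpu */
	spin_unlock(&vector_lock);
}
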
@@ -696,11 +722,11 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
- set_irq_chip_and_handler(irq, &ioapic_chip,
- handle_fasteoi_irq);
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_fasteoi_irq, "fasteoi");
else
- set_irq_chip_and_handler(irq, &ioapic_chip,
- handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_edge_irq, "edge");
}
static void __init setup_IO_APIC_irqs(void)
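
From here on the file switches every registration to the _name variants, giving the generic IRQ layer a human-readable flow label ("edge", "fasteoi") to report alongside the chip name, e.g. in /proc/interrupts. A hypothetical driver-side equivalent, shown only for the signature:

#include <linux/irq.h>

/* Hypothetical helper, not part of the patch. */
static void sketch_register_edge(unsigned int irq, struct irq_chip *chip)
{
	set_irq_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
}
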
@@ -806,7 +832,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
- set_irq_chip_and_handler(0, &ioapic_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
/*
* Add it to the IO-APIC irq-routing table:
@@ -1255,12 +1281,15 @@ static int ioapic_retrigger_irq(unsigned int irq)
{
cpumask_t mask;
unsigned vector;
+ unsigned long flags;
+ spin_lock_irqsave(&vector_lock, flags);
vector = irq_vector[irq];
cpus_clear(mask);
- cpu_set(vector >> 8, mask);
+ cpu_set(first_cpu(irq_domain[irq]), mask);
- send_IPI_mask(mask, vector & 0xff);
+ send_IPI_mask(mask, vector);
+ spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
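
The retrigger fix also documents the old encoding: irq_vector[] used to pack the target CPU above the low byte (hence the removed >> 8 and & 0xff), whereas it now holds the bare vector and the destination CPU set lives in irq_domain[]. Taking vector_lock here keeps irq_vector[irq] and irq_domain[irq] consistent while the IPI is assembled. Purely as an illustration of the retired format (helper names invented):

/* Illustration only: how the removed lines decoded the old packed value. */
static inline int old_packed_cpu(unsigned int v)	{ return v >> 8; }
static inline int old_packed_vector(unsigned int v)	{ return v & 0xff; }
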
@@ -1839,7 +1868,7 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
return 0;
}
@@ -1897,7 +1926,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
}
#endif
-static struct hw_interrupt_type ht_irq_chip = {
+static struct irq_chip ht_irq_chip = {
.name = "PCI-HT",
.mask = mask_ht_irq,
.unmask = unmask_ht_irq,
@@ -1936,7 +1965,8 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
write_ht_irq_low(irq, low);
write_ht_irq_high(irq, high);
- set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+ handle_edge_irq, "edge");
}
return vector;
}