Diffstat:
 arch/i386/kernel/irq.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index f3a9c78c4a24..6cb529f60dcc 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -42,8 +42,8 @@ union irq_ctx {
u32 stack[THREAD_SIZE/sizeof(u32)];
};
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
/*
@@ -53,13 +53,19 @@ static union irq_ctx *softirq_ctx[NR_CPUS];
*/
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
- /* high bits used in ret_from_ code */
- int irq = regs->orig_eax & 0xff;
+ /* high bit used in ret_from_ code */
+ int irq = ~regs->orig_eax;
#ifdef CONFIG_4KSTACKS
union irq_ctx *curctx, *irqctx;
u32 *isp;
#endif
+ if (unlikely((unsigned)irq >= NR_IRQS)) {
+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+ __FUNCTION__, irq);
+ BUG();
+ }
+
irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
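The hunk above relies on the entry code storing the negated vector number in orig_eax, so do_IRQ() recovers it with ~regs->orig_eax (the high bit then marks the frame as an interrupt for the ret_from_ path, per the comment), and a single unsigned compare rejects anything outside 0..NR_IRQS-1. A minimal standalone sketch of that encoding and bounds check; encode_orig_eax/decode_irq and the NR_IRQS value here are illustrative stand-ins, not kernel interfaces:

#include <assert.h>

#define NR_IRQS 16	/* stand-in; the real value is arch- and config-specific */

/* Illustrative helpers: the entry stub saves ~irq, do_IRQ() undoes it. */
static long encode_orig_eax(int irq)  { return ~(long)irq; }
static int decode_irq(long orig_eax)  { return ~orig_eax; }

int main(void)
{
	long orig_eax = encode_orig_eax(9);	/* as saved by the entry code */
	int irq = decode_irq(orig_eax);		/* as recovered in do_IRQ() */

	assert(orig_eax < 0);			/* high bit set: interrupt frame */
	assert(irq == 9);

	/* One unsigned compare catches both negative and too-large values,
	 * mirroring the (unsigned)irq >= NR_IRQS check in the patch. */
	assert((unsigned)irq < NR_IRQS);
	return 0;
}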
@@ -76,6 +82,10 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
}
#endif
+ if (!irq_desc[irq].handle_irq) {
+ __do_IRQ(irq, regs);
+ goto out_exit;
+ }
#ifdef CONFIG_4KSTACKS
curctx = (union irq_ctx *) current_thread_info();
@@ -95,6 +105,14 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
irqctx->tinfo.task = curctx->tinfo.task;
irqctx->tinfo.previous_esp = current_stack_pointer;
+ /*
+ * Copy the softirq bits in preempt_count so that the
+ * softirq checks work in the hardirq context.
+ */
+ irqctx->tinfo.preempt_count =
+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
asm volatile(
" xchgl %%ebx,%%esp \n"
" call __do_IRQ \n"
@@ -107,6 +125,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
#endif
__do_IRQ(irq, regs);
+out_exit:
irq_exit();
return 1;
@@ -147,7 +166,7 @@ void irq_ctx_init(int cpu)
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
- irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
+ irqctx->tinfo.preempt_count = 0;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
softirq_ctx[cpu] = irqctx;
@@ -192,6 +211,10 @@ asmlinkage void do_softirq(void)
: "0"(isp)
: "memory", "cc", "edx", "ecx", "eax"
);
+ /*
+ * Shouldn't happen, we returned above if in_interrupt():
+ */
+ WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
@@ -219,7 +242,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
+ seq_printf(p, "CPU%-8d",j);
seq_putc(p, '\n');
}
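The header change to "CPU%-8d" left-justifies each CPU number in an 8-character field, so a header column is 11 characters wide ("CPU" plus 8) and lines up with the 11-character "%10u " counter columns further down, even once CPU numbers reach two digits. A quick standalone illustration of those two width specifiers; the sample counts are made up:

#include <stdio.h>

int main(void)
{
	unsigned counts[3] = { 120934, 7, 48210 };	/* made-up per-CPU counts */
	int cpu;

	/* Header: "CPU%-8d" -> 3 + 8 = 11 characters per column. */
	for (cpu = 0; cpu < 3; cpu++)
		printf("CPU%-8d", cpu);
	printf("\n");

	/* Counters: "%10u " -> 10 + 1 = 11 characters, so the columns align. */
	for (cpu = 0; cpu < 3; cpu++)
		printf("%10u ", counts[cpu]);
	printf("\n");
	return 0;
}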
@@ -235,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -277,13 +300,13 @@ void fixup_irqs(cpumask_t map)
if (irq == 2)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
}
- if (irq_desc[irq].handler->set_affinity)
- irq_desc[irq].handler->set_affinity(irq, mask);
+ if (irq_desc[irq].chip->set_affinity)
+ irq_desc[irq].chip->set_affinity(irq, mask);
else if (irq_desc[irq].action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
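In the last hunk, fixup_irqs() now reads each IRQ's affinity from irq_desc[irq].affinity and reprograms it through the renamed irq_desc[irq].chip->set_affinity hook; when the old affinity no longer intersects the surviving CPU map it prints "Breaking affinity" and falls back to the whole map. A rough standalone sketch of that decision, with plain unsigned-long bitmasks standing in for cpumask_t, cpus_and() and the any_online_cpu() emptiness test:

#include <stdio.h>

/* Plain bitmasks stand in for cpumask_t; bit n set == CPU n allowed/online. */
static unsigned long fixup_affinity(unsigned long affinity,
				    unsigned long online_map)
{
	unsigned long mask = affinity & online_map;	/* cpus_and() */

	if (!mask) {			/* no online CPU left in the old mask */
		printf("Breaking affinity\n");
		mask = online_map;	/* fall back to all surviving CPUs */
	}
	return mask;
}

int main(void)
{
	/* IRQ was bound to CPU3 only, but only CPUs 0-2 remain online. */
	printf("new mask: %#lx\n", fixup_affinity(1UL << 3, 0x7UL));
	return 0;
}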