path: root/kernel/irq
author     Thomas Gleixner <tglx@linutronix.de>            2014-08-29 13:39:37 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-09-01 13:48:43 +0200
commit     c7bd3ec0531aa636ad57ed9f27e637cbd247e64a (patch)
tree       fa2397fb34888b9aff338d178982048cdae29aa1 /kernel/irq
parent     genirq: Distangle edge handler entry (diff)
download   linux-dev-c7bd3ec0531aa636ad57ed9f27e637cbd247e64a.tar.xz
           linux-dev-c7bd3ec0531aa636ad57ed9f27e637cbd247e64a.zip
genirq: Create helper for flow handler entry check
All flow handlers - except the per cpu ones - check for an interrupt in
progress and an eventual concurrent polling on another cpu. Create a
helper function for the repeated code pattern.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
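In essence, the patch folds the open-coded two-step check that every non-percpu flow handler repeated into a single predicate. A condensed sketch of the pattern, drawn from the diff below (irq_may_run() is the new helper; the surrounding identifiers are pre-existing kernel code, so this fragment is illustrative rather than compilable on its own):

/* The helper introduced by this patch (kernel/irq/chip.c). */
static bool irq_may_run(struct irq_desc *desc)
{
        /* Not in progress anywhere else: the handler may run. */
        if (!irqd_irq_inprogress(&desc->irq_data))
                return true;
        /*
         * In progress: only proceed if this is a poll on another
         * CPU that irq_check_poll() can wait out.
         */
        return irq_check_poll(desc);
}

/* Before: each flow handler open-coded the same check ... */
if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
        if (!irq_check_poll(desc))
                goto out_unlock;

/* ... after: a single call to the helper. */
if (!irq_may_run(desc))
        goto out_unlock;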
Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/chip.c   48
1 file changed, 20 insertions(+), 28 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f10c2e58a786..6baf86085571 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -342,6 +342,13 @@ static bool irq_check_poll(struct irq_desc *desc)
        return irq_wait_for_poll(desc);
}

+static bool irq_may_run(struct irq_desc *desc)
+{
+       if (!irqd_irq_inprogress(&desc->irq_data))
+               return true;
+       return irq_check_poll(desc);
+}
+
/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:  the interrupt number
@@ -359,9 +366,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

-       if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-               if (!irq_check_poll(desc))
-                       goto out_unlock;
+       if (!irq_may_run(desc))
+               goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
@@ -412,9 +418,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

-       if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-               if (!irq_check_poll(desc))
-                       goto out_unlock;
+       if (!irq_may_run(desc))
+               goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
@@ -485,9 +490,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
        raw_spin_lock(&desc->lock);

-       if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-               if (!irq_check_poll(desc))
-                       goto out;
+       if (!irq_may_run(desc))
+               goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
@@ -541,16 +545,10 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

-       /*
-        * If the handler is currently running, mark it pending,
-        * handle the necessary masking and go out
-        */
-       if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
-               if (!irq_check_poll(desc)) {
-                       desc->istate |= IRQS_PENDING;
-                       mask_ack_irq(desc);
-                       goto out_unlock;
-               }
+       if (!irq_may_run(desc)) {
+               desc->istate |= IRQS_PENDING;
+               mask_ack_irq(desc);
+               goto out_unlock;
        }

        /*
@@ -612,15 +610,9 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

-       /*
-        * If the handler is currently running, mark it pending,
-        * handle the necessary masking and go out
-        */
-       if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
-               if (!irq_check_poll(desc)) {
-                       desc->istate |= IRQS_PENDING;
-                       goto out_eoi;
-               }
+       if (!irq_may_run(desc)) {
+               desc->istate |= IRQS_PENDING;
+               goto out_eoi;
        }

        /*