path: root/arch/powerpc/kernel/irq.c
author     Sebastien Dugue <sebastien.dugue@bull.net>  2008-09-04 22:37:07 +1000
committer  Paul Mackerras <paulus@samba.org>  2008-09-15 11:08:44 -0700
commit     967e012ef306e99cfddcd7423f37414e6b568361 (patch)
tree       b265dc79fcc5d46d8397a01bd310cabfa3a2e28e /arch/powerpc/kernel/irq.c
parent     powerpc: Rename PTE_SIZE to HPTE_SIZE (diff)
powerpc: Separate the irq radix tree insertion and lookup
irq_radix_revmap() currently serves two purposes, irq mapping lookup and insertion, which happen in interrupt and process context respectively. Separate the function into its two components, one for lookup only and one for insertion only. Fix the only user of the revmap tree (XICS) to use the new functions.

Also, insert into the radix tree, at tree initialization time, those irqs that were requested before the tree was initialized.

Mutual exclusion between the tree initialization and readers/writers is handled via a state variable (revmap_trees_allocated), set to 1 when the tree has been initialized and set to 2 after the already requested irqs have been inserted into the tree by the init path. This state is checked before any reader or writer access, just like we used to check for tree.gfp_mask != 0 before.

Finally, now that we are no longer inserting nodes into the radix tree in interrupt context, turn the GFP_ATOMIC allocations into GFP_KERNEL ones.

Signed-off-by: Sebastien Dugue <sebastien.dugue@bull.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
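The publication scheme described above can be illustrated outside the kernel with a minimal userspace C sketch: a state variable is only advanced after the data it guards has been made visible, and readers fall back to a slow path until the final stage is reached. C11 release/acquire atomics stand in for the patch's smp_wmb()/smp_rmb() plus plain stores and loads; the table, slow_lookup() and stage numbering below are invented for the example and are not taken from the kernel sources.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_ENTRIES 16

/* 0: table not allocated, 1: table allocated, 2: table fully populated */
static atomic_int state;
static int *table;                      /* stands in for the per-host radix tree */

static int slow_lookup(int hwirq)       /* stands in for the slow fallback path */
{
        return hwirq + 100;
}

static void late_init(void)             /* analogue of the late init path */
{
        table = calloc(NR_ENTRIES, sizeof(*table));

        /* Publish the allocation before advancing the state (smp_wmb analogue). */
        atomic_store_explicit(&state, 1, memory_order_release);

        /* Insert the entries that "existed" before the table was set up. */
        for (int i = 0; i < NR_ENTRIES; i++)
                table[i] = slow_lookup(i);

        /* Publish the contents before advancing to the final stage. */
        atomic_store_explicit(&state, 2, memory_order_release);
}

static int lookup(int hwirq)
{
        /* Readers use the fallback until stage 2 is reached (smp_rmb analogue). */
        if (atomic_load_explicit(&state, memory_order_acquire) < 2)
                return slow_lookup(hwirq);
        return table[hwirq];
}

int main(void)
{
        printf("before init: %d\n", lookup(3));  /* takes the fallback path */
        late_init();
        printf("after init:  %d\n", lookup(3));  /* served from the table */
        return 0;
}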
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--  arch/powerpc/kernel/irq.c  |  97
1 file changed, 76 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d972decf0324..2656924415da 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,6 +441,7 @@ static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
static unsigned int irq_radix_writer;
+static unsigned int revmap_trees_allocated;
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
@@ -821,8 +822,12 @@ void irq_dispose_mapping(unsigned int virq)
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
- /* Check if radix tree allocated yet */
- if (host->revmap_data.tree.gfp_mask == 0)
+ /*
+ * Check if the radix tree has been allocated yet; if not, there is
+ * nothing to remove.
+ */
+ smp_rmb();
+ if (revmap_trees_allocated < 1)
break;
irq_radix_wrlock(&flags);
radix_tree_delete(&host->revmap_data.tree, hwirq);
@@ -875,43 +880,62 @@ unsigned int irq_find_mapping(struct irq_host *host,
EXPORT_SYMBOL_GPL(irq_find_mapping);
-unsigned int irq_radix_revmap(struct irq_host *host,
- irq_hw_number_t hwirq)
+unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+ irq_hw_number_t hwirq)
{
- struct radix_tree_root *tree;
struct irq_map_entry *ptr;
unsigned int virq;
unsigned long flags;
WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
- /* Check if the radix tree exist yet. We test the value of
- * the gfp_mask for that. Sneaky but saves another int in the
- * structure. If not, we fallback to slow mode
+ /*
+ * Check if the radix tree exists and has been initialized.
+ * If not, we fall back to slow mode
*/
- tree = &host->revmap_data.tree;
- if (tree->gfp_mask == 0)
+ if (revmap_trees_allocated < 2)
return irq_find_mapping(host, hwirq);
/* Now try to resolve */
irq_radix_rdlock(&flags);
- ptr = radix_tree_lookup(tree, hwirq);
+ ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
irq_radix_rdunlock(flags);
- /* Found it, return */
- if (ptr) {
+ /*
+ * If found in the radix tree, then fine.
+ * Else fall back to linear lookup - this should not happen in practice
+ * as it means that we failed to insert the node in the radix tree.
+ */
+ if (ptr)
virq = ptr - irq_map;
- return virq;
- }
+ else
+ virq = irq_find_mapping(host, hwirq);
+
+ return virq;
+}
+
+void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ unsigned long flags;
+
+ WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
+
+ /*
+ * Check if the radix tree exists yet.
+ * If not, then the irq will be inserted into the tree when it gets
+ * initialized.
+ */
+ smp_rmb();
+ if (revmap_trees_allocated < 1)
+ return;
- /* If not there, try to insert it */
- virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ) {
irq_radix_wrlock(&flags);
- radix_tree_insert(tree, hwirq, &irq_map[virq]);
+ radix_tree_insert(&host->revmap_data.tree, hwirq,
+ &irq_map[virq]);
irq_radix_wrunlock(flags);
}
- return virq;
}
unsigned int irq_linear_revmap(struct irq_host *host,
@@ -1021,14 +1045,45 @@ static int irq_late_init(void)
{
struct irq_host *h;
unsigned long flags;
+ unsigned int i;
- irq_radix_wrlock(&flags);
+ /*
+ * No mutual exclusion with respect to accessors of the tree is needed
+ * here as the synchronization is done via the state variable
+ * revmap_trees_allocated.
+ */
list_for_each_entry(h, &irq_hosts, link) {
if (h->revmap_type == IRQ_HOST_MAP_TREE)
- INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
+ INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
+ }
+
+ /*
+ * Make sure the radix tree initializations are visible before setting
+ * the flag
+ */
+ smp_wmb();
+ revmap_trees_allocated = 1;
+
+ /*
+ * Insert the reverse mapping for those interrupts already present
+ * in irq_map[].
+ */
+ irq_radix_wrlock(&flags);
+ for (i = 0; i < irq_virq_count; i++) {
+ if (irq_map[i].host &&
+ (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
+ radix_tree_insert(&irq_map[i].host->revmap_data.tree,
+ irq_map[i].hwirq, &irq_map[i]);
}
irq_radix_wrunlock(flags);
+ /*
+ * Make sure the radix tree insertions are visible before setting
+ * the flag
+ */
+ smp_wmb();
+ revmap_trees_allocated = 2;
+
return 0;
}
arch_initcall(irq_late_init);
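For callers, the practical effect of the split is that the interrupt-time path only ever calls the lookup variant, while the insertion happens once, in process context, when the mapping is created. The XICS conversion itself is not part of this hunk; the schematic below only sketches how a host driver would use the two new functions, and the names example_get_irq/example_host_map are placeholders, not the real XICS code.

/* Interrupt path: resolve hwirq to virq, lookup only, safe in interrupt context. */
static unsigned int example_get_irq(struct irq_host *host, irq_hw_number_t hwirq)
{
        return irq_radix_revmap_lookup(host, hwirq);
}

/* Mapping path (process context): record the reverse mapping when the virq is created. */
static int example_host_map(struct irq_host *host, unsigned int virq,
                            irq_hw_number_t hwirq)
{
        irq_radix_revmap_insert(host, virq, hwirq);
        /* ... set up the chip and handler for virq as before ... */
        return 0;
}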