author     Ian Munsie <imunsie@au1.ibm.com>        2014-10-08 19:54:53 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>   2014-10-08 20:15:42 +1100
commit     b0345bbc6d0980e63b9730b487d4f4ccb8e5cd12 (patch)
tree       82aba747d4a0c8161378f43caafb2b35ad6537bc /arch/powerpc
parent     powerpc/cell: Make spu_flush_all_slbs() generic (diff)
powerpc/msi: Improve IRQ bitmap allocator
Currently msi_bitmap_alloc_hwirqs() will round up any IRQ allocation request
to the nearest power of 2, e.g. ask for 5 IRQs and you'll get 8. This wastes
a lot of IRQs, which can be a scarce resource.

For cxl we may require multiple IRQs for every context that is attached to
the accelerator. There may be 1000s of contexts attached, hence we can
easily run out of IRQs, especially if we are needlessly wasting them.

This changes msi_bitmap_alloc_hwirqs() to allocate only the required number
of IRQs, avoiding this wastage. It keeps the natural alignment requirement,
though.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
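To make the wastage concrete: get_count_order(5) is 3, so the old code
grabbed a full 2^3 = 8-bit region to satisfy a 5-IRQ request, while the new
code keeps the 8-bit alignment of the starting offset but marks only 5 bits
used. Below is a minimal userspace sketch of the new strategy, not the
kernel code: count_order() and find_zero_area() are simplified stand-ins
for the kernel's get_count_order() and bitmap_find_next_zero_area().

#include <stdio.h>

#define NBITS 64
static unsigned char bitmap[NBITS];	/* one byte per bit, for clarity */

/* Smallest order such that (1 << order) >= num, like get_count_order() */
static int count_order(int num)
{
	int order = 0;

	while ((1 << order) < num)
		order++;
	return order;
}

/* Linear search for `num` clear bits starting on an aligned boundary */
static int find_zero_area(int num, int align_mask)
{
	for (int start = 0; start + num <= NBITS;
	     start = (start + align_mask + 1) & ~align_mask) {
		int i;

		for (i = 0; i < num; i++)
			if (bitmap[start + i])
				break;
		if (i == num)
			return start;
	}
	return -1;
}

int main(void)
{
	int num = 5;
	int order = count_order(num);	/* 3, so alignment is 8 */
	int offset = find_zero_area(num, (1 << order) - 1);

	if (offset < 0)
		return 1;

	/* Old scheme: a full 2^order region; new scheme: only num bits */
	for (int i = 0; i < num; i++)
		bitmap[offset + i] = 1;

	printf("requested %d IRQs: old scheme used %d bits, new uses %d at offset %d\n",
	       num, 1 << order, num, offset);
	return 0;
}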
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/sysdev/msi_bitmap.c	36
1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index a7c7a9fc7530..0c75214b6f92 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -20,32 +20,37 @@ int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
 	int offset, order = get_count_order(num);
 
 	spin_lock_irqsave(&bmp->lock, flags);
-	/*
-	 * This is fast, but stricter than we need. We might want to add
-	 * a fallback routine which does a linear search with no alignment.
-	 */
-	offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order);
+
+	offset = bitmap_find_next_zero_area(bmp->bitmap, bmp->irq_count, 0,
+					    num, (1 << order) - 1);
+	if (offset > bmp->irq_count)
+		goto err;
+
+	bitmap_set(bmp->bitmap, offset, num);
 	spin_unlock_irqrestore(&bmp->lock, flags);
 
-	pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n",
-		 num, order, offset);
+	pr_debug("msi_bitmap: allocated 0x%x at offset 0x%x\n", num, offset);
 
 	return offset;
+err:
+	spin_unlock_irqrestore(&bmp->lock, flags);
+	return -ENOMEM;
 }
+EXPORT_SYMBOL(msi_bitmap_alloc_hwirqs);
 
 void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
 			    unsigned int num)
 {
 	unsigned long flags;
-	int order = get_count_order(num);
 
-	pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n",
-		 num, order, offset);
+	pr_debug("msi_bitmap: freeing 0x%x at offset 0x%x\n",
+		 num, offset);
 
 	spin_lock_irqsave(&bmp->lock, flags);
-	bitmap_release_region(bmp->bitmap, offset, order);
+	bitmap_clear(bmp->bitmap, offset, num);
 	spin_unlock_irqrestore(&bmp->lock, flags);
 }
+EXPORT_SYMBOL(msi_bitmap_free_hwirqs);
 
 void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq)
 {
@@ -180,6 +185,15 @@ static void __init test_basics(void)
 	msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
 	check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2);
 
+	/* Check we get a naturally aligned offset */
+	check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0);
+	check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0);
+	check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0);
+	check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0);
+	check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0);
+	check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0);
+	check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0);
+
 	msi_bitmap_free(&bmp);
 
 	/* Clients may check bitmap == NULL for "not-allocated" */
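The new test cases encode the alignment rule that was retained: a request
for num IRQs lands on the next power-of-2 boundary >= num while consuming
only num bits, so e.g. a 9-IRQ allocation starts on a 16-IRQ boundary but
no longer burns the 7 extra IRQs the old allocator would have. A standalone
sketch (hypothetical userspace code, not part of the patch) checking the
same (num, alignment) pairs as the tests above:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* (num, expected alignment) pairs from test_basics() above */
	int cases[][2] = { {2, 2}, {4, 4}, {8, 8}, {9, 16},
			   {3, 4}, {7, 8}, {121, 128} };

	for (int i = 0; i < 7; i++) {
		int num = cases[i][0];
		int align = 1;		/* next power of 2 >= num */

		while (align < num)
			align <<= 1;
		assert(align == cases[i][1]);
		printf("num=%3d -> aligned to %3d, %3d bits saved vs. old\n",
		       num, align, align - num);
	}
	return 0;
}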