author	Robin Murphy <robin.murphy@arm.com>	2015-12-18 17:01:48 +0000
committer	Joerg Roedel <jroedel@suse.de>	2015-12-28 17:06:26 +0100
commit	0a9afeda8057bcedc997278db193914d32c4003b (patch)
tree	00815ca32b682f312fb40c12ba9ede8e7cfde363 /drivers/iommu/dma-iommu.c
parent	iommu/dma: Add some missing #includes (diff)
iommu/dma: Avoid unlikely high-order allocations
Doug reports that the equivalent page allocator on 32-bit ARM exhibits particularly pathological behaviour under memory pressure when fragmentation is high, where allocating a 4MB buffer takes tens of seconds and the number of calls to alloc_pages() is over 9000! [1]

We can drastically improve that situation without losing the other benefits of high-order allocations when they would succeed, by assuming memory pressure is relatively constant over the course of an allocation, and not retrying allocations at orders we know to have failed before. This way, the best-case behaviour remains unchanged, and in the worst case we should see at most a dozen or so (MAX_ORDER - 1) failed attempts before falling back to single pages for the remainder of the buffer.

[1]: http://lists.infradead.org/pipermail/linux-arm-kernel/2015-December/394660.html

Reported-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
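To make the strategy concrete, here is a minimal, self-contained userspace sketch of the same idea; it is not the kernel code itself, and everything in it is a hypothetical stand-in: try_alloc_order() plays the role of alloc_pages(), MAX_ORDER_SKETCH the role of MAX_ORDER, fls_sketch() the role of __fls(), and the simulated failure of orders above 3 stands in for real fragmentation.

/*
 * Minimal userspace sketch of the allocation strategy: start at the
 * highest useful order, and once an order has failed, never retry it
 * for the remainder of the buffer. Hypothetical stand-ins throughout.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER_SKETCH 10	/* stand-in for the kernel's MAX_ORDER */

/* Hypothetical allocator: pretend fragmentation makes orders above 3 fail. */
static void *try_alloc_order(unsigned int order)
{
	if (order > 3)
		return NULL;
	return malloc((size_t)4096 << order);
}

/* Index of the highest set bit, like the kernel's __fls(). */
static unsigned int fls_sketch(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned int total = 1024;	/* pages requested */
	unsigned int count = total;		/* pages still needed */
	unsigned int order = MAX_ORDER_SKETCH;
	unsigned int attempts = 0;

	while (count) {
		unsigned int cap = fls_sketch(count);
		void *chunk = NULL;

		/*
		 * 'order' persists across iterations, so an order that
		 * failed once is never attempted again: each pass resumes
		 * from wherever the previous chunk succeeded.
		 */
		for (order = order < cap ? order : cap; order > 0; order--) {
			attempts++;
			chunk = try_alloc_order(order);
			if (chunk)
				break;
		}
		if (!chunk) {
			attempts++;
			chunk = try_alloc_order(0);	/* single-page fallback */
			if (!chunk)
				return 1;		/* genuinely out of memory */
		}
		count -= 1u << order;
		free(chunk);	/* a real caller would keep the pages */
	}
	printf("%u pages' worth allocated in %u attempts\n", total, attempts);
	return 0;
}

With the simulated pressure above, the first pass burns through the failing orders exactly once, and every later pass succeeds on its first attempt; resetting 'order' inside the loop instead would repeat the failing attempts for every chunk, which is the pathology the patch removes.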
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--	drivers/iommu/dma-iommu.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4168668f5dd4..2e7417f98116 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -194,6 +194,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 {
 	struct page **pages;
 	unsigned int i = 0, array_size = count * sizeof(*pages);
+	unsigned int order = MAX_ORDER;
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, GFP_KERNEL);
@@ -207,14 +208,15 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 	while (count) {
 		struct page *page = NULL;
-		int j, order = __fls(count);
+		int j;
 
 		/*
 		 * Higher-order allocations are a convenience rather
 		 * than a necessity, hence using __GFP_NORETRY until
 		 * falling back to single-page allocations.
		 */
-		for (order = min(order, MAX_ORDER); order > 0; order--) {
+		for (order = min_t(unsigned int, order, __fls(count));
+		     order > 0; order--) {
 			page = alloc_pages(gfp | __GFP_NORETRY, order);
 			if (!page)
 				continue;
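The heart of the change is that 'order' is now declared once, outside the while (count) loop, so its value persists from one chunk to the next: each retry loop starts at min_t(unsigned int, order, __fls(count)) rather than restarting from __fls(count). An order that has failed once under the prevailing memory pressure is therefore never tried again, bounding the total number of failed alloc_pages() calls at roughly MAX_ORDER - 1 before the loop settles into single-page allocations.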