author     Linus Torvalds <torvalds@linux-foundation.org>  2014-11-03 21:01:04 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-11-03 21:01:04 -0800
commit     f3ed88a6bce69942b44fa7927a92cd52011881d7 (patch)
tree       98e58b314bcb0097294a81ef31306579cea752e1 /mm
parent     Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client (diff)
parent     mm, cma: make parameters order consistent in func declaration and definition (diff)
Merge branch 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull CMA and DMA-mapping fixes from Marek Szyprowski:
 "This contains important fixes for recently introduced highmem support
  for default contiguous memory region used for dma-mapping subsystem"

* 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  mm, cma: make parameters order consistent in func declaration and definition
  mm: cma: Use %pa to print physical addresses
  mm: cma: Ensure that reservations never cross the low/high mem boundary
  mm: cma: Always consider a 0 base address reservation as dynamic
  mm: cma: Don't crash on allocation if CMA area can't be activated
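One of the merged fixes ("mm: cma: Use %pa to print physical addresses") replaces hand-rolled (unsigned long) casts with the kernel's %pa printk specifier, which formats a phys_addr_t at the native width of the architecture and takes a pointer to the value rather than the value itself. A minimal sketch of the idiom follows; the helper name show_cma_region() is made up for illustration and is not part of the patch:

    #include <linux/printk.h>
    #include <linux/types.h>

    /*
     * Illustrative only: %pa expects the *address* of a phys_addr_t,
     * which is why the converted pr_debug()/pr_err() calls in the diff
     * below pass &size, &base, etc. instead of casting to unsigned long.
     */
    static void __init show_cma_region(phys_addr_t base, phys_addr_t size)
    {
    	pr_info("cma: region base %pa, size %pa\n", &base, &size);
    }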
Diffstat (limited to 'mm')
-rw-r--r--  mm/cma.c | 68
1 file changed, 44 insertions(+), 24 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 963bc4add9af..fde706e1284f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -124,6 +124,7 @@ static int __init cma_activate_area(struct cma *cma)
err:
kfree(cma->bitmap);
+ cma->count = 0;
return -EINVAL;
}
@@ -217,9 +218,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t highmem_start = __pa(high_memory);
int ret = 0;
- pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
- __func__, (unsigned long)size, (unsigned long)base,
- (unsigned long)limit, (unsigned long)alignment);
+ pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
+ __func__, &size, &base, &limit, &alignment);
if (cma_area_count == ARRAY_SIZE(cma_areas)) {
pr_err("Not enough slots for CMA reserved regions!\n");
@@ -244,52 +244,72 @@ int __init cma_declare_contiguous(phys_addr_t base,
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);
+ if (!base)
+ fixed = false;
+
/* size should be aligned with order_per_bit */
if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
return -EINVAL;
/*
- * adjust limit to avoid crossing low/high memory boundary for
- * automatically allocated regions
+ * If allocating at a fixed base the request region must not cross the
+ * low/high memory boundary.
*/
- if (((limit == 0 || limit > memblock_end) &&
- (memblock_end - size < highmem_start &&
- memblock_end > highmem_start)) ||
- (!fixed && limit > highmem_start && limit - size < highmem_start)) {
- limit = highmem_start;
- }
-
- if (fixed && base < highmem_start && base+size > highmem_start) {
+ if (fixed && base < highmem_start && base + size > highmem_start) {
ret = -EINVAL;
- pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
- (unsigned long)base, (unsigned long)highmem_start);
+ pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+ &base, &highmem_start);
goto err;
}
+ /*
+ * If the limit is unspecified or above the memblock end, its effective
+ * value will be the memblock end. Set it explicitly to simplify further
+ * checks.
+ */
+ if (limit == 0 || limit > memblock_end)
+ limit = memblock_end;
+
/* Reserve memory */
- if (base && fixed) {
+ if (fixed) {
if (memblock_is_region_reserved(base, size) ||
memblock_reserve(base, size) < 0) {
ret = -EBUSY;
goto err;
}
} else {
- phys_addr_t addr = memblock_alloc_range(size, alignment, base,
- limit);
+ phys_addr_t addr = 0;
+
+ /*
+ * All pages in the reserved area must come from the same zone.
+ * If the requested region crosses the low/high memory boundary,
+ * try allocating from high memory first and fall back to low
+ * memory in case of failure.
+ */
+ if (base < highmem_start && limit > highmem_start) {
+ addr = memblock_alloc_range(size, alignment,
+ highmem_start, limit);
+ limit = highmem_start;
+ }
+
if (!addr) {
- ret = -ENOMEM;
- goto err;
- } else {
- base = addr;
+ addr = memblock_alloc_range(size, alignment, base,
+ limit);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto err;
+ }
}
+
+ base = addr;
}
ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
if (ret)
goto err;
- pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
- (unsigned long)base);
+ pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
+ &base);
return 0;
err:
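For readers skimming the third hunk: when the requested window straddles the low/high memory boundary, the rewritten allocation path first tries to place the whole reservation in highmem and only then falls back to the low-memory part of the window, so the resulting area never spans both zones. A condensed, non-authoritative sketch of that control flow (alloc_range() is a stand-in for memblock_alloc_range(), and the surrounding error labels are omitted):

    /*
     * Sketch of the fallback order introduced above.  All pages of a
     * CMA area must come from a single zone, so a request window that
     * crosses the low/high memory boundary is tried entirely in
     * highmem first, then restricted to the lowmem part on failure.
     */
    phys_addr_t addr = 0;

    if (base < highmem_start && limit > highmem_start) {
    	addr = alloc_range(size, alignment, highmem_start, limit);
    	limit = highmem_start;	/* fall back to lowmem only */
    }
    if (!addr)
    	addr = alloc_range(size, alignment, base, limit);
    if (!addr)
    	return -ENOMEM;
    base = addr;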