From 11a5aa32562e1e90ab2d0278748fe0fbecda1cbe Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 25 Nov 2013 21:52:25 +0000
Subject: ARM: dma-mapping: check DMA mask against available memory

Some buses have negative offsets, which causes the DMA mask checks to
falsely fail.  Fix this by using the actual amount of memory fitted in
the system.

Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 79f8b39801a8..f6b6bfa88ecf 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,6 +9,7 @@
  *
  *  DMA uncached mapping support.
  */
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
@@ -162,6 +163,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
         u64 mask = (u64)DMA_BIT_MASK(32);

         if (dev) {
+                unsigned long max_dma_pfn;
+
                 mask = dev->coherent_dma_mask;

                 /*
@@ -173,6 +176,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
                         return 0;
                 }

+                max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
                 /*
                  * If the mask allows for more memory than we can address,
                  * and we actually have that much memory, then fail the
@@ -180,7 +185,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
                  */
                 if (sizeof(mask) != sizeof(dma_addr_t) &&
                     mask > (dma_addr_t)~0 &&
-                    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+                    dma_to_pfn(dev, ~0) > max_dma_pfn) {
                         dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
                                  mask);
                         dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
@@ -192,7 +197,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
                  * fits within the allowable addresses which we can
                  * allocate.
                  */
-                if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+                if (dma_to_pfn(dev, mask) < max_dma_pfn) {
                         dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
                                  mask,
                                  dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
--
cgit v1.2.3-59-g8ed1b
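To see why clamping to the memory actually fitted matters, here is a small stand-alone model of the second check above. This is not the kernel code: dma_to_pfn() is reduced to a fixed bus offset, and the bus offset, RAM size and zone limit are invented for the example (RAM starts at physical 0 but is visible to devices at bus address 0x80000000, so the bus-to-physical translation applies a negative offset).

    /*
     * Toy model, not kernel code: all values are invented.  A 32-bit DMA
     * mask reaches every page of the 1 GiB of fitted RAM, yet the old
     * comparison against the zone limit alone rejected it.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define BUS_OFFSET  0x80000000ULL   /* bus address = physical address + BUS_OFFSET */

    static unsigned long dma_to_pfn(uint64_t bus_addr)
    {
        return (unsigned long)((bus_addr - BUS_OFFSET) >> PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long max_pfn = 0x40000;               /* 1 GiB of RAM fitted */
        unsigned long arm_dma_pfn_limit = 0xfffff;     /* no ZONE_DMA: 4 GiB limit */
        uint64_t mask = 0xffffffffULL;                 /* 32-bit coherent DMA mask */
        unsigned long max_dma_pfn = max_pfn < arm_dma_pfn_limit ?
                                    max_pfn : arm_dma_pfn_limit;

        printf("mask reaches pfn %#lx, RAM ends at pfn %#lx\n",
               dma_to_pfn(mask), max_pfn);
        /* old test: compare against the zone limit only, a spurious failure */
        printf("old check rejects mask: %d\n", dma_to_pfn(mask) < arm_dma_pfn_limit);
        /* new test: compare against memory actually present, passes */
        printf("new check rejects mask: %d\n", dma_to_pfn(mask) < max_dma_pfn);
        return 0;
    }
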
From d8aa712c30148ba26fd89a5dc14de95d4c375184 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 28 Nov 2013 21:43:40 +0000
Subject: ARM: fix booting low-vectors machines

Commit f6f91b0d9fd9 (ARM: allow kuser helpers to be removed from the
vector page) required two pages for the vectors code.  Although the
code setting up the initial page tables was updated, the code which
allocates page tables for new processes wasn't, neither was the code
which tears down the mappings.  Fix this.

Fixes: f6f91b0d9fd9 ("ARM: allow kuser helpers to be removed from the vector page")
Signed-off-by: Russell King
Cc: <stable@vger.kernel.org>
---
 arch/arm/include/asm/pgtable.h | 2 +-
 arch/arm/mm/mmap.c             | 2 +-
 arch/arm/mm/pgd.c              | 3 ++-
 3 files changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index be956dbf6bae..1571d126e9dd 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
  * mapping to be mapped at.  This is particularly important for
  * non-high vector CPUs.
  */
-#define FIRST_USER_ADDRESS      PAGE_SIZE
+#define FIRST_USER_ADDRESS      (PAGE_SIZE * 2)

 /*
  * Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index d27158c38eb0..5e85ed371364 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,

         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
         info.length = len;
-        info.low_limit = PAGE_SIZE;
+        info.low_limit = FIRST_USER_ADDRESS;
         info.high_limit = mm->mmap_base;
         info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
         info.align_offset = pgoff << PAGE_SHIFT;
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 0acb089d0f70..1046b373d1ae 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
                 init_pud = pud_offset(init_pgd, 0);
                 init_pmd = pmd_offset(init_pud, 0);
                 init_pte = pte_offset_map(init_pmd, 0);
-                set_pte_ext(new_pte, *init_pte, 0);
+                set_pte_ext(new_pte + 0, init_pte[0], 0);
+                set_pte_ext(new_pte + 1, init_pte[1], 0);
                 pte_unmap(init_pte);
                 pte_unmap(new_pte);
         }
--
cgit v1.2.3-59-g8ed1b

From 9f28cde0bc643186d32744ce69d12222e8588fe7 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 6 Dec 2013 12:30:42 +0000
Subject: ARM: another fix for the DMA mapping checks

Peter reports that OMAP audio broke with the recent fix for these
checks, caused by OMAP audio using a 64-bit DMA mask.  We should allow
64-bit DMA masks even with 32-bit dma_addr_t if we can be sure the
amount of RAM we have won't allow the 32-bit dma_addr_t to overflow.
Unfortunately, the checks to detect overflow were not correct.

Tested-by: Peter Ujfalusi
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 91 +++++++++++++++++++++--------------------------
 1 file changed, 40 insertions(+), 51 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f6b6bfa88ecf..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);

+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+        unsigned long max_dma_pfn;
+
+        /*
+         * If the mask allows for more memory than we can address,
+         * and we actually have that much memory, then we must
+         * indicate that DMA to this device is not supported.
+         */
+        if (sizeof(mask) != sizeof(dma_addr_t) &&
+            mask > (dma_addr_t)~0 &&
+            dma_to_pfn(dev, ~0) < max_pfn) {
+                if (warn) {
+                        dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+                                 mask);
+                        dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+                }
+                return 0;
+        }
+
+        max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+        /*
+         * Translate the device's DMA mask to a PFN limit.  This
+         * PFN number includes the page which we can DMA to.
+         */
+        if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+                if (warn)
+                        dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+                                 mask,
+                                 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+                                 max_dma_pfn + 1);
+                return 0;
+        }
+
+        return 1;
+}
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
         u64 mask = (u64)DMA_BIT_MASK(32);

         if (dev) {
-                unsigned long max_dma_pfn;
-
                 mask = dev->coherent_dma_mask;

                 /*
@@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
                         return 0;
                 }

-                max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
-                /*
-                 * If the mask allows for more memory than we can address,
-                 * and we actually have that much memory, then fail the
-                 * allocation.
-                 */
-                if (sizeof(mask) != sizeof(dma_addr_t) &&
-                    mask > (dma_addr_t)~0 &&
-                    dma_to_pfn(dev, ~0) > max_dma_pfn) {
-                        dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
-                                 mask);
-                        dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
-                        return 0;
-                }
-
-                /*
-                 * Now check that the mask, when translated to a PFN,
-                 * fits within the allowable addresses which we can
-                 * allocate.
-                 */
-                if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-                        dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-                                 mask,
-                                 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-                                 arm_dma_pfn_limit + 1);
+                if (!__dma_supported(dev, mask, true))
                         return 0;
-                }
         }

         return mask;
@@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-        unsigned long limit;
-
-        /*
-         * If the mask allows for more memory than we can address,
-         * and we actually have that much memory, then we must
-         * indicate that DMA to this device is not supported.
-         */
-        if (sizeof(mask) != sizeof(dma_addr_t) &&
-            mask > (dma_addr_t)~0 &&
-            dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
-                return 0;
-
-        /*
-         * Translate the device's DMA mask to a PFN limit.  This
-         * PFN number includes the page which we can DMA to.
-         */
-        limit = dma_to_pfn(dev, mask);
-
-        if (limit < arm_dma_pfn_limit)
-                return 0;
-
-        return 1;
+        return __dma_supported(dev, mask, false);
 }
 EXPORT_SYMBOL(dma_supported);
--
cgit v1.2.3-59-g8ed1b
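The consolidated __dma_supported() above performs two tests; the one that matters for the OMAP regression is the overflow test: a mask wider than dma_addr_t is only a problem if the fitted RAM extends beyond what dma_addr_t itself can reach. The sketch below is a stand-alone model rather than the kernel code; the 32-bit dma_addr_t typedef, the identity bus translation and both RAM sizes are assumptions chosen for illustration.

    /*
     * Toy model, not kernel code.  dma_addr_t is forced to 32 bits to
     * mimic a non-LPAE build; the bus translation is the identity.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef uint32_t dma_addr_t;

    static unsigned long dma_to_pfn(dma_addr_t addr)
    {
        return addr >> PAGE_SHIFT;
    }

    /* mirrors the overflow test: wide mask AND more RAM than dma_addr_t spans */
    static bool mask_overflows(uint64_t mask, unsigned long max_pfn)
    {
        return sizeof(mask) != sizeof(dma_addr_t) &&
               mask > (dma_addr_t)~0 &&
               dma_to_pfn((dma_addr_t)~0) < max_pfn;
    }

    int main(void)
    {
        uint64_t mask = ~0ULL;                           /* 64-bit mask, as OMAP audio used */
        unsigned long ram_2g = 0x80000000ULL >> PAGE_SHIFT;
        unsigned long ram_8g = 0x200000000ULL >> PAGE_SHIFT;

        /* 2 GiB of RAM sits below the 4 GiB dma_addr_t ceiling: mask is acceptable */
        printf("2 GiB RAM, 64-bit mask rejected: %d\n", mask_overflows(mask, ram_2g));
        /* 8 GiB of RAM does not: the mask must be refused */
        printf("8 GiB RAM, 64-bit mask rejected: %d\n", mask_overflows(mask, ram_8g));
        return 0;
    }
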
From 787b0d5c1ca7ff24feb6f92e4c7f4410ee7d81a8 Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Mon, 2 Dec 2013 20:29:12 +0100
Subject: ARM: 7908/1: mm: Fix the arm_dma_limit calculation

Current code is using PHYS_OFFSET to calculate the arm_dma_limit which
will lead to wrong calculations in cases where PHYS_OFFSET is updated
runtime.

So fix the code by using __pv_phys_offset instead of PHYS_OFFSET.

Cc: Catalin Marinas
Cc: Nicolas Pitre
Signed-off-by: Santosh Shilimkar
Signed-off-by: Russell King
---
 arch/arm/mm/init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106ee5fe..1f7b19a47060 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 #ifdef CONFIG_ZONE_DMA
         if (mdesc->dma_zone_size) {
                 arm_dma_zone_size = mdesc->dma_zone_size;
-                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+                arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
         } else
                 arm_dma_limit = 0xffffffff;
         arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
--
cgit v1.2.3-59-g8ed1b
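The one-line change above matters because arm_dma_limit is an absolute physical address: if it is derived from an offset value that no longer matches where RAM actually starts, the DMA zone boundary ends up in the wrong place. The arithmetic below is only an illustration with invented numbers, contrasting a stale offset with the value discovered at runtime (what the patch reads from __pv_phys_offset).

    /*
     * Illustration only: the offsets and the 256 MiB DMA zone size are
     * invented.  The point is simply that limit = offset + size - 1 is
     * wrong whenever a stale offset is used.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t stale_phys_offset   = 0x00000000;  /* offset before the runtime update */
        uint64_t runtime_phys_offset = 0x80000000;  /* RAM really starts at 2 GiB */
        uint64_t dma_zone_size       = 0x10000000;  /* 256 MiB DMA zone from the machine desc */

        printf("limit from stale offset:   %#llx\n",
               (unsigned long long)(stale_phys_offset + dma_zone_size - 1));
        printf("limit from runtime offset: %#llx\n",
               (unsigned long long)(runtime_phys_offset + dma_zone_size - 1));
        return 0;
    }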