path: root/arch/arc/mm/dma.c
author    Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>  2018-07-30 19:26:36 +0300
committer Vineet Gupta <vgupta@synopsys.com>              2018-09-04 13:21:38 -0700
commit    dd45210b6dd4f1512eafcc41774154ebb762360f (patch)
tree      7ec04d741bc790b2bafec213a8527883229cfaaf /arch/arc/mm/dma.c
parent    ARC: IOC: panic if both IOC and ZONE_HIGHMEM enabled (diff)
ARC: don't check for HIGHMEM pages in arch_dma_alloc
The __GFP_HIGHMEM flag is cleared by the upper layer functions (in
include/linux/dma-mapping.h), so __GFP_HIGHMEM is never set in the gfp
argument passed to arch_dma_alloc and alloc_pages will never return a
highmem page here. Get rid of the highmem page handling and clean up the
arch_dma_alloc and arch_dma_free functions.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
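For reference, the upper-layer masking the message relies on lives in the
generic dma_alloc_attrs() wrapper in include/linux/dma-mapping.h of this
era. The sketch below is abbreviated: only the gfp-masking line is quoted
from that header, and the direct call into arch_dma_alloc() stands in for
the real dispatch through dma_map_ops and the dma-noncoherent layer.

/*
 * Rough sketch (not part of this patch) of the generic allocator entry
 * point.  The gfp masking is the behaviour the commit message refers to;
 * the call into the arch hook is simplified here.
 */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	/* ...which eventually reaches arch_dma_alloc() on ARC... */
	return arch_dma_alloc(dev, size, dma_handle, flag, attrs);
}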
Diffstat (limited to 'arch/arc/mm/dma.c')
 arch/arc/mm/dma.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index c0b49399225d..c75d5c3470e3 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -24,30 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	phys_addr_t paddr;
 	void *kvaddr;
-	int need_coh = 1, need_kvaddr = 0;
+	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
+
+	/*
+	 * __GFP_HIGHMEM flag is cleared by upper layer functions
+	 * (in include/linux/dma-mapping.h) so we should never get a
+	 * __GFP_HIGHMEM here.
+	 */
+	BUG_ON(gfp & __GFP_HIGHMEM);
 
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
-		need_coh = 0;
-
-	/*
-	 * - A coherent buffer needs MMU mapping to enforce non-cachability
-	 * - A highmem page needs a virtual handle (hence MMU mapping)
-	 *   independent of cachability
-	 */
-	if (PageHighMem(page) || need_coh)
-		need_kvaddr = 1;
-
 	/* This is linear addr (0x8000_0000 based) */
 	paddr = page_to_phys(page);
 
 	*dma_handle = paddr;
 
-	/* This is kernel Virtual address (0x7000_0000 based) */
-	if (need_kvaddr) {
+	/*
+	 * A coherent buffer needs MMU mapping to enforce non-cachability.
+	 * kvaddr is kernel Virtual address (0x7000_0000 based).
+	 */
+	if (need_coh) {
 		kvaddr = ioremap_nocache(paddr, size);
 		if (kvaddr == NULL) {
 			__free_pages(page, order);
@@ -78,11 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 {
 	phys_addr_t paddr = dma_handle;
 	struct page *page = virt_to_page(paddr);
-	int is_non_coh = 1;
-
-	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT);
 
-	if (PageHighMem(page) || !is_non_coh)
+	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
 		iounmap((void __force __iomem *)vaddr);
 
 	__free_pages(page, get_order(size));
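Putting the second hunk together, arch_dma_free() after this patch reads as
below. The continuation of the prototype is not visible in the hunk header
above and is filled in from the standard arch_dma_free() declaration, so
treat that line as assumed.

/* Resulting function, reconstructed from the hunk above; the second
 * prototype line (dma_handle/attrs parameters) is assumed. */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	/* only consistent buffers were ioremap'ed by arch_dma_alloc() */
	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}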