path: root/kernel/dma/remap.c
author    Christoph Hellwig <hch@lst.de>    2018-11-04 17:38:39 +0100
committer Christoph Hellwig <hch@lst.de>    2018-12-01 18:07:14 +0100
commit    bfd56cd605219d90b210a5377fca31a644efe95c (patch)
tree      9033596e9dd81a21fa0c99a12d0f3fe869684cdc /kernel/dma/remap.c
parent    dma-mapping: move the arm64 noncoherent alloc/free support to common code (diff)
dma-mapping: support highmem in the generic remap allocator
By using __dma_direct_alloc_pages we can deal entirely with struct page instead of having to derive a kernel virtual address.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
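For context, a minimal sketch of the lowmem assumption the old code relied on (not part of the patch; the helper name is hypothetical). virt_to_page() and phys_to_virt() are only meaningful for memory in the kernel's linear map, so an allocator that hands back a kernel virtual address cannot describe highmem pages. Handing back a struct page can, because the remap path then creates the kernel mapping explicitly (the allocation below goes through dma_common_contiguous_remap() and the free path undoes it with vunmap()).

/*
 * Sketch only: obtaining a usable kernel address from a struct page
 * whether or not it lives in highmem. A highmem page has no
 * linear-map alias, so it must be mapped explicitly.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_one_page(struct page *page)
{
        if (PageHighMem(page))
                /* No linear-map alias; create a mapping explicitly. */
                return vmap(&page, 1, VM_MAP, PAGE_KERNEL);

        /* Lowmem pages always have a linear-map address. */
        return page_address(page);
}

A mapping created with vmap() is later released with vunmap(), which is exactly what arch_dma_free() does in the diff below.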
Diffstat (limited to 'kernel/dma/remap.c')
-rw-r--r--  kernel/dma/remap.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index b32bb08f96ae..dcc82dd668f8 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -196,7 +196,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flags, unsigned long attrs)
 {
 	struct page *page = NULL;
-	void *ret, *kaddr;
+	void *ret;
 
 	size = PAGE_ALIGN(size);
 
@@ -208,10 +208,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		return ret;
 	}
 
-	kaddr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
-	if (!kaddr)
+	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
+	if (!page)
 		return NULL;
-	page = virt_to_page(kaddr);
 
 	/* remove any dirty cache lines on the kernel alias */
 	arch_dma_prep_coherent(page, size);
@@ -221,7 +220,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret)
-		dma_direct_free_pages(dev, size, kaddr, *dma_handle, attrs);
+		__dma_direct_free_pages(dev, size, page);
 	return ret;
 }
 
@@ -229,10 +228,11 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
-		void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
+		phys_addr_t phys = dma_to_phys(dev, dma_handle);
+		struct page *page = pfn_to_page(__phys_to_pfn(phys));
 
 		vunmap(vaddr);
-		dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
+		__dma_direct_free_pages(dev, size, page);
 	}
 }
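The free path mirrors the allocation change: once the buffer may sit in highmem, its physical address need not have a lowmem alias, so phys_to_virt() on it is not meaningful, while translating the DMA handle to a pfn and then to a struct page works in either case. A minimal sketch of that translation (helper name hypothetical, not part of the patch; it uses the same calls the hunk above introduces):

/*
 * Sketch only: recover the struct page backing a coherent buffer from
 * its DMA handle. Valid for lowmem and highmem alike, unlike the old
 * phys_to_virt() approach, which presumes a linear-map address exists.
 */
#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/pfn.h>

static struct page *remap_page_from_handle(struct device *dev,
                                            dma_addr_t dma_handle)
{
        phys_addr_t phys = dma_to_phys(dev, dma_handle);

        return pfn_to_page(__phys_to_pfn(phys));
}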