From fbd989b1d73e3b3565dad5227a581e6f456c895f Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Wed, 30 Oct 2013 01:21:27 +0000
Subject: arm: make SWIOTLB available

IOMMU_HELPER is needed because SWIOTLB calls iommu_is_span_boundary,
provided by lib/iommu_helper.c.

Signed-off-by: Stefano Stabellini
Reviewed-by: Konrad Rzeszutek Wilk
CC: will.deacon@arm.com
Acked-by: Russell King

Changes in v9:
- remove unneeded include asm/cacheflush.h;
- just return 0 if !dev->dma_mask in dma_capable.

Changes in v8:
- use __phys_to_pfn and __pfn_to_phys.

Changes in v7:
- dma_mark_clean: empty implementation;
- in dma_capable use coherent_dma_mask if dma_mask hasn't been allocated.

Changes in v6:
- check for dev->dma_mask being NULL in dma_capable.

Changes in v5:
- implement dma_mark_clean using dmac_flush_range.

Changes in v3:
- dma_capable: do not treat dma_mask as a limit;
- remove SWIOTLB dependency on NEED_SG_DMA_LENGTH.
---
 arch/arm/Kconfig                   |  6 ++++++
 arch/arm/include/asm/dma-mapping.h | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)
(limited to 'arch')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3f7714d8d2d2..180c1321dbfb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1872,6 +1872,12 @@ config CC_STACKPROTECTOR
 	  neutralized via a kernel panic.
 	  This feature requires gcc version 4.2 or above.
 
+config SWIOTLB
+	def_bool y
+
+config IOMMU_HELPER
+	def_bool SWIOTLB
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b951503..1ad2c171054b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -86,6 +86,39 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	unsigned int offset = paddr & ~PAGE_MASK;
+	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+	unsigned int offset = dev_addr & ~PAGE_MASK;
+	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	u64 limit, mask;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	mask = *dev->dma_mask;
+
+	limit = (mask + 1) & ~mask;
+	if (limit && size > limit)
+		return 0;
+
+	if ((addr | (addr + size - 1)) & ~mask)
+		return 0;
+
+	return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-- 
cgit v1.2.3-59-g8ed1b
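
The new phys_to_dma()/dma_to_phys() helpers strip the sub-page offset before the pfn translation and add it back afterwards, so the round trip preserves unaligned addresses. The stand-alone C sketch below illustrates that offset handling only; the identity pfn_to_dma()/dma_to_pfn() bus mapping, the PAGE_SHIFT value and the main() harness are illustrative assumptions, not part of the patch.

/* Stand-alone sketch of the offset handling in phys_to_dma()/dma_to_phys().
 * The identity bus mapping below is an assumption; a real SoC may apply a
 * per-device offset inside pfn_to_dma()/dma_to_pfn(). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                        /* illustrative 4K pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static uint64_t pfn_to_dma(uint64_t pfn) { return pfn << PAGE_SHIFT; } /* identity bus mapping */
static uint64_t dma_to_pfn(uint64_t dma) { return dma >> PAGE_SHIFT; }
static uint64_t phys_to_pfn(uint64_t p)  { return p >> PAGE_SHIFT; }
static uint64_t pfn_to_phys(uint64_t n)  { return n << PAGE_SHIFT; }

static uint64_t phys_to_dma(uint64_t paddr)
{
	uint64_t offset = paddr & ~PAGE_MASK;    /* bits below the page boundary */
	return pfn_to_dma(phys_to_pfn(paddr)) + offset;
}

static uint64_t dma_to_phys(uint64_t dev_addr)
{
	uint64_t offset = dev_addr & ~PAGE_MASK;
	return pfn_to_phys(dma_to_pfn(dev_addr)) + offset;
}

int main(void)
{
	uint64_t paddr = 0x80001234ULL;          /* arbitrary, not page aligned */
	printf("round trip ok: %d\n", dma_to_phys(phys_to_dma(paddr)) == paddr);
	return 0;
}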
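
The dma_capable() check that SWIOTLB relies on can be exercised outside the kernel as well. The user-space harness below is a sketch under assumptions: the dma_capable_check() name, the 32-bit example mask and the test addresses are made up for illustration, and only the mask arithmetic mirrors the patch. A buffer qualifies only if its first and last byte both fall under the device's DMA mask, and its size does not exceed the contiguous low portion of the mask.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the dma_capable() logic added by the patch; not kernel code. */
static bool dma_capable_check(const uint64_t *dma_mask, uint64_t addr, uint64_t size)
{
	uint64_t limit, mask;

	if (!dma_mask)                           /* no mask allocated: not capable */
		return false;

	mask = *dma_mask;

	/* 2^(lowest clear bit of the mask): the largest size that fits in
	 * the contiguous low part of the mask. */
	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return false;

	/* First and last byte must both fall under the mask. */
	if ((addr | (addr + size - 1)) & ~mask)
		return false;

	return true;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;         /* typical 32-bit DMA mask */

	printf("%d\n", dma_capable_check(&mask32, 0xfffff000ULL, 0x1000)); /* 1: fits under mask */
	printf("%d\n", dma_capable_check(&mask32, 0xfffff000ULL, 0x2000)); /* 0: end crosses 4 GiB */
	printf("%d\n", dma_capable_check(NULL,    0x1000ULL,     0x1000)); /* 0: no mask allocated */
	return 0;
}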