Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r-- drivers/xen/swiotlb-xen.c | 84
1 file changed, 16 insertions(+), 68 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index adcabd9473eb..58c9365fa217 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -28,6 +28,7 @@
#include <linux/memblock.h>
#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
@@ -391,6 +392,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
+ phys = map;
dev_addr = xen_phys_to_bus(map);
/*
@@ -402,14 +404,9 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
return DMA_MAPPING_ERROR;
}
- page = pfn_to_page(map >> PAGE_SHIFT);
- offset = map & ~PAGE_MASK;
done:
- /*
- * we are not interested in the dma_addr returned by xen_dma_map_page,
- * only in the potential cache flushes executed by the function.
- */
- xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
return dev_addr;
}
@@ -421,35 +418,29 @@ done:
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
BUG_ON(dir == DMA_NONE);
- xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
+ if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
phys_addr_t paddr = xen_bus_to_phys(dma_addr);
- xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
+ if (!dev_is_dma_coherent(dev))
+ xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -464,7 +455,8 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
- xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
+ if (!dev_is_dma_coherent(dev))
+ xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
}
/*
@@ -481,7 +473,8 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+ xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+ dir, attrs);
}
@@ -547,51 +540,6 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
-/*
- * Create userspace mapping for the DMA-coherent memory.
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
-#ifdef CONFIG_ARM
- if (xen_get_dma_ops(dev)->mmap)
- return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
- dma_addr, size, attrs);
-#endif
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-/*
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
-{
-#ifdef CONFIG_ARM
- if (xen_get_dma_ops(dev)->get_sgtable) {
-#if 0
- /*
- * This check verifies that the page belongs to the current domain and
- * is not one mapped from another domain.
- * This check is for debug only, and should not go to production build
- */
- unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
- BUG_ON (!page_is_ram(bfn));
-#endif
- return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
- handle, size, attrs);
- }
-#endif
- return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
-}
-
const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
@@ -604,6 +552,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.map_page = xen_swiotlb_map_page,
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
- .mmap = xen_swiotlb_dma_mmap,
- .get_sgtable = xen_swiotlb_get_sgtable,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
};
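
Note (illustrative, not part of the patch): after this change, cache maintenance for non-coherent devices is done directly in xen_swiotlb_map_page()/xen_swiotlb_unmap_page() via xen_dma_sync_for_device()/xen_dma_sync_for_cpu(), gated on dev_is_dma_coherent() and DMA_ATTR_SKIP_CPU_SYNC, and the custom mmap/get_sgtable callbacks are replaced by the generic dma_common_mmap()/dma_common_get_sgtable(). Drivers keep using the ordinary DMA API; the sketch below is a hypothetical caller, not code from this commit:

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver-side caller: map one page for a device-to-memory
 * transfer and unmap it afterwards.  On Xen the device's dma_map_ops is
 * xen_swiotlb_dma_ops, so dma_map_page()/dma_unmap_page() end up in the
 * functions modified above, which now perform the CPU cache syncs
 * themselves when !dev_is_dma_coherent(dev).
 */
static int example_dma_from_device(struct device *dev, struct page *page,
				   size_t len)
{
	dma_addr_t addr;

	addr = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program the device to DMA into 'addr' and wait for completion ... */

	/*
	 * Unmap: for non-coherent devices this invalidates CPU caches so the
	 * CPU sees what the device wrote (xen_dma_sync_for_cpu above).
	 */
	dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}

The DMA_ATTR_SKIP_CPU_SYNC check matters for callers that batch their own maintenance through dma_sync_single_for_cpu()/dma_sync_single_for_device(), which map to the sync_single callbacks in the ops table above.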