Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--  drivers/xen/swiotlb-xen.c | 126
1 file changed, 30 insertions(+), 96 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index cfbe46785a3b..58c9365fa217 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -28,6 +28,7 @@
#include <linux/memblock.h>
#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
@@ -83,34 +84,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
return xen_phys_to_bus(virt_to_phys(address));
}
-static int check_pages_physically_contiguous(unsigned long xen_pfn,
- unsigned int offset,
- size_t length)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
- unsigned long next_bfn;
- int i;
- int nr_pages;
+ unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
next_bfn = pfn_to_bfn(xen_pfn);
- nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
- for (i = 1; i < nr_pages; i++) {
+ for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
- return 0;
- }
- return 1;
-}
+ return 1;
-static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
-{
- unsigned long xen_pfn = XEN_PFN_DOWN(p);
- unsigned int offset = p & ~XEN_PAGE_MASK;
-
- if (offset + size <= XEN_PAGE_SIZE)
- return 0;
- if (check_pages_physically_contiguous(xen_pfn, offset, size))
- return 0;
- return 1;
+ return 0;
}
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
@@ -338,6 +323,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
+ SetPageXenRemapped(virt_to_page(ret));
}
memset(ret, 0, size);
return ret;
@@ -361,8 +347,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (((dev_addr + size - 1 <= dma_mask)) ||
- range_straddles_page_boundary(phys, size))
+ if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+ range_straddles_page_boundary(phys, size)) &&
+ TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
@@ -400,30 +387,26 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
- attrs);
+ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
+ size, size, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
+ phys = map;
dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
*/
if (unlikely(!dma_capable(dev, dev_addr, size))) {
- swiotlb_tbl_unmap_single(dev, map, size, dir,
+ swiotlb_tbl_unmap_single(dev, map, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
- page = pfn_to_page(map >> PAGE_SHIFT);
- offset = map & ~PAGE_MASK;
done:
- /*
- * we are not interested in the dma_addr returned by xen_dma_map_page,
- * only in the potential cache flushes executed by the function.
- */
- xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
return dev_addr;
}
@@ -435,26 +418,19 @@ done:
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
BUG_ON(dir == DMA_NONE);
- xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
+ if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
-}
-
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}
static void
@@ -463,7 +439,8 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
{
phys_addr_t paddr = xen_bus_to_phys(dma_addr);
- xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
+ if (!dev_is_dma_coherent(dev))
+ xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -478,7 +455,8 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
- xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
+ if (!dev_is_dma_coherent(dev))
+ xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
}
/*
@@ -495,7 +473,8 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+ xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+ dir, attrs);
}
@@ -561,51 +540,6 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
-/*
- * Create userspace mapping for the DMA-coherent memory.
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
-#ifdef CONFIG_ARM
- if (xen_get_dma_ops(dev)->mmap)
- return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
- dma_addr, size, attrs);
-#endif
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-/*
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
-{
-#ifdef CONFIG_ARM
- if (xen_get_dma_ops(dev)->get_sgtable) {
-#if 0
- /*
- * This check verifies that the page belongs to the current domain and
- * is not one mapped from another domain.
- * This check is for debug only, and should not go to production build
- */
- unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
- BUG_ON (!page_is_ram(bfn));
-#endif
- return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
- handle, size, attrs);
- }
-#endif
- return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
-}
-
const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
@@ -618,6 +552,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.map_page = xen_swiotlb_map_page,
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
- .mmap = xen_swiotlb_dma_mmap,
- .get_sgtable = xen_swiotlb_get_sgtable,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
};
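
For reference, below is a minimal user-space C sketch of the logic behind the consolidated range_straddles_page_boundary() shown above: a buffer "straddles" when the machine frames backing its Xen pages are not contiguous. The XEN_PAGE_SIZE value, the demo_pfn_to_bfn() mapping and the main() harness are illustrative assumptions for the sketch, not kernel code.

/*
 * Standalone sketch (not part of the patch) of the contiguity check
 * performed by the new range_straddles_page_boundary().
 */
#include <stdio.h>
#include <stddef.h>

#define XEN_PAGE_SHIFT	12
#define XEN_PAGE_SIZE	(1UL << XEN_PAGE_SHIFT)

/* Toy pseudo-physical -> machine frame mapping: frame 3 is remapped. */
static unsigned long demo_pfn_to_bfn(unsigned long pfn)
{
	return (pfn == 3) ? 100 : pfn;
}

static int range_straddles_page_boundary(unsigned long p, size_t size)
{
	unsigned long xen_pfn = p >> XEN_PAGE_SHIFT;
	unsigned long next_bfn = demo_pfn_to_bfn(xen_pfn);
	size_t offset = p & (XEN_PAGE_SIZE - 1);
	unsigned int nr_pages = (offset + size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;
	unsigned int i;

	/* Every following Xen page must map to the next machine frame. */
	for (i = 1; i < nr_pages; i++)
		if (demo_pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;
	return 0;
}

int main(void)
{
	/* Fits inside one Xen page: never straddles. */
	printf("%d\n", range_straddles_page_boundary(0x1100, 0x200));  /* 0 */
	/* Crosses pages 1->2, both contiguous in machine memory. */
	printf("%d\n", range_straddles_page_boundary(0x1800, 0x1000)); /* 0 */
	/* Crosses pages 2->3, page 3 is remapped: straddles. */
	printf("%d\n", range_straddles_page_boundary(0x2800, 0x1000)); /* 1 */
	return 0;
}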