author    Linus Torvalds <torvalds@linux-foundation.org>  2017-07-06 19:20:54 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-07-06 19:20:54 -0700
commit    f72e24a1240b78f421649c4d88f5c24ab1c896a1 (patch)
tree      90bed3bf33ae0abf5636dafcc3eda3cc354612b0 /drivers
parent    Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux (diff)
parent    ARM: dma-mapping: Remove traces of NOMMU code (diff)
Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
 "This is the first pull request for the new dma-mapping subsystem.

  In this new subsystem we'll try to properly maintain all the generic
  code related to dma-mapping, and will further consolidate arch code
  into common helpers.

  This pull request contains:

   - removal of the DMA_ERROR_CODE macro, replacing it with calls to
     ->mapping_error so that the dma_map_ops instances are more self
     contained and can be shared across architectures (me)

   - removal of the ->set_dma_mask method, which duplicates the
     ->dma_capable one in terms of functionality, but requires more
     duplicate code

   - various updates for the coherent dma pool and related arm code
     (Vladimir)

   - various smaller cleanups (me)"

* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
  ARM: dma-mapping: Remove traces of NOMMU code
  ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
  ARM: NOMMU: Introduce dma operations for noMMU
  drivers: dma-mapping: allow dma_common_mmap() for NOMMU
  drivers: dma-coherent: Introduce default DMA pool
  drivers: dma-coherent: Account dma_pfn_offset when used with device tree
  dma: Take into account dma_pfn_offset
  dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
  dma-mapping: remove dmam_free_noncoherent
  crypto: qat - avoid an uninitialized variable warning
  au1100fb: remove a bogus dma_free_nonconsistent call
  MAINTAINERS: add entry for dma mapping helpers
  powerpc: merge __dma_set_mask into dma_set_mask
  dma-mapping: remove the set_dma_mask method
  powerpc/cell: use the dma_supported method for ops switching
  powerpc/cell: clean up fixed mapping dma_ops initialization
  tile: remove dma_supported and mapping_error methods
  xen-swiotlb: remove xen_swiotlb_set_dma_mask
  arm: implement ->dma_supported instead of ->set_dma_mask
  mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
  ...
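A note on the headline change: with DMA_ERROR_CODE gone, drivers can no
longer compare a returned handle against a global sentinel and must go
through dma_mapping_error(), which dispatches to the dma_map_ops
instance's ->mapping_error method. A minimal sketch of the consumer
side (the buffer and function names are illustrative, not from this
series):

#include <linux/dma-mapping.h>

/* Map a driver buffer and check the result the portable way. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Formerly: if (*handle == DMA_ERROR_CODE) -- no longer compiles. */
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}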
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/dma-coherent.c                74
-rw-r--r--  drivers/base/dma-mapping.c                 60
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c   40
-rw-r--r--  drivers/dma/ioat/init.c                    24
-rw-r--r--  drivers/firmware/tegra/ivc.c                4
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.c          2
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c         5
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.h         1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c      4
-rw-r--r--  drivers/iommu/amd_iommu.c                  20
-rw-r--r--  drivers/iommu/dma-iommu.c                  18
-rw-r--r--  drivers/iommu/intel-iommu.c                 3
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c        159
-rw-r--r--  drivers/video/fbdev/au1100fb.c              4
-rw-r--r--  drivers/video/fbdev/au1200fb.c              5
-rw-r--r--  drivers/xen/swiotlb-xen.c                 113
16 files changed, 280 insertions, 256 deletions
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 640a7e63c453..2ae24c28e70c 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -16,8 +16,27 @@ struct dma_coherent_mem {
int flags;
unsigned long *bitmap;
spinlock_t spinlock;
+ bool use_dev_dma_pfn_offset;
};
+static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
+
+static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
+{
+ if (dev && dev->dma_mem)
+ return dev->dma_mem;
+ return dma_coherent_default_memory;
+}
+
+static inline dma_addr_t dma_get_device_base(struct device *dev,
+ struct dma_coherent_mem * mem)
+{
+ if (mem->use_dev_dma_pfn_offset)
+ return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
+ else
+ return mem->device_base;
+}
+
static bool dma_init_coherent_memory(
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
struct dma_coherent_mem **mem)
@@ -83,6 +102,9 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
static int dma_assign_coherent_memory(struct device *dev,
struct dma_coherent_mem *mem)
{
+ if (!dev)
+ return -ENODEV;
+
if (dev->dma_mem)
return -EBUSY;
@@ -133,7 +155,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
return ERR_PTR(-EINVAL);
spin_lock_irqsave(&mem->spinlock, flags);
- pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+ pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
spin_unlock_irqrestore(&mem->spinlock, flags);
@@ -161,15 +183,12 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret)
{
- struct dma_coherent_mem *mem;
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
int order = get_order(size);
unsigned long flags;
int pageno;
int dma_memory_map;
- if (!dev)
- return 0;
- mem = dev->dma_mem;
if (!mem)
return 0;
@@ -186,7 +205,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
/*
* Memory was found in the per-device area.
*/
- *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
*ret = mem->virt_base + (pageno << PAGE_SHIFT);
dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
@@ -223,7 +242,7 @@ EXPORT_SYMBOL(dma_alloc_from_coherent);
*/
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
@@ -257,7 +276,7 @@ EXPORT_SYMBOL(dma_release_from_coherent);
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
@@ -287,6 +306,8 @@ EXPORT_SYMBOL(dma_mmap_from_coherent);
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
+static struct reserved_mem *dma_reserved_default_memory __initdata;
+
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
struct dma_coherent_mem *mem = rmem->priv;
@@ -299,6 +320,7 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return -ENODEV;
}
+ mem->use_dev_dma_pfn_offset = true;
rmem->priv = mem;
dma_assign_coherent_memory(dev, mem);
return 0;
@@ -307,7 +329,8 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
static void rmem_dma_device_release(struct reserved_mem *rmem,
struct device *dev)
{
- dev->dma_mem = NULL;
+ if (dev)
+ dev->dma_mem = NULL;
}
static const struct reserved_mem_ops rmem_dma_ops = {
@@ -327,6 +350,12 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
pr_err("Reserved memory: regions without no-map are not yet supported\n");
return -EINVAL;
}
+
+ if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
+ WARN(dma_reserved_default_memory,
+ "Reserved memory: region for default DMA coherent area is redefined\n");
+ dma_reserved_default_memory = rmem;
+ }
#endif
rmem->ops = &rmem_dma_ops;
@@ -334,5 +363,32 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return 0;
}
+
+static int __init dma_init_reserved_memory(void)
+{
+ const struct reserved_mem_ops *ops;
+ int ret;
+
+ if (!dma_reserved_default_memory)
+ return -ENOMEM;
+
+ ops = dma_reserved_default_memory->ops;
+
+ /*
+ * We rely on rmem_dma_device_init() not propagating an error from
+ * dma_assign_coherent_memory() for a NULL device.
+ */
+ ret = ops->device_init(dma_reserved_default_memory, NULL);
+
+ if (!ret) {
+ dma_coherent_default_memory = dma_reserved_default_memory->priv;
+ pr_info("DMA: default coherent area is set\n");
+ }
+
+ return ret;
+}
+
+core_initcall(dma_init_reserved_memory);
+
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
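What the new default pool buys: dma_alloc_from_coherent() and friends
now fall back to a global region (declared via the "linux,dma-default"
reserved-memory property) when a device has no pool of its own, and
dma_get_device_base() folds dev->dma_pfn_offset into the handle so it
is correct on buses with a fixed DMA offset. For reference, a
per-device pool of the kind these helpers carve from is declared like
this (addresses and size are placeholders, assuming a 1 MiB region and
the flags-returning API of this kernel generation):

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_declare_pool(struct device *dev)
{
	int ret;

	/* Back dma_alloc_coherent() for this device with a fixed region. */
	ret = dma_declare_coherent_memory(dev, 0x30000000 /* CPU phys */,
					  0x30000000 /* device addr */,
					  SZ_1M, DMA_MEMORY_MAP);
	if (!(ret & DMA_MEMORY_MAP))
		return -ENOMEM;

	return 0;
}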
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 9dbef4d1baa4..5096755d185e 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -22,20 +22,15 @@ struct dma_devres {
size_t size;
void *vaddr;
dma_addr_t dma_handle;
+ unsigned long attrs;
};
-static void dmam_coherent_release(struct device *dev, void *res)
+static void dmam_release(struct device *dev, void *res)
{
struct dma_devres *this = res;
- dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
-}
-
-static void dmam_noncoherent_release(struct device *dev, void *res)
-{
- struct dma_devres *this = res;
-
- dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
+ dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
+ this->attrs);
}
static int dmam_match(struct device *dev, void *res, void *match_data)
@@ -69,7 +64,7 @@ void *dmam_alloc_coherent(struct device *dev, size_t size,
struct dma_devres *dr;
void *vaddr;
- dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
+ dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
if (!dr)
return NULL;
@@ -104,35 +99,35 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
struct dma_devres match_data = { size, vaddr, dma_handle };
dma_free_coherent(dev, size, vaddr, dma_handle);
- WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
- &match_data));
+ WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
+ * dmam_alloc_attrs - Managed dma_alloc_attrs()
* @dev: Device to allocate non_coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
+ * @attrs: Flags in the DMA_ATTR_* namespace.
*
- * Managed dma_alloc_noncoherent(). Memory allocated using this
- * function will be automatically released on driver detach.
+ * Managed dma_alloc_attrs(). Memory allocated using this function will be
+ * automatically released on driver detach.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
-void *dmam_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
struct dma_devres *dr;
void *vaddr;
- dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
+ dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
if (!dr)
return NULL;
- vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
+ vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
if (!vaddr) {
devres_free(dr);
return NULL;
@@ -141,32 +136,13 @@ void *dmam_alloc_noncoherent(struct device *dev, size_t size,
dr->vaddr = vaddr;
dr->dma_handle = *dma_handle;
dr->size = size;
+ dr->attrs = attrs;
devres_add(dev, dr);
return vaddr;
}
-EXPORT_SYMBOL(dmam_alloc_noncoherent);
-
-/**
- * dmam_free_coherent - Managed dma_free_noncoherent()
- * @dev: Device to free noncoherent memory for
- * @size: Size of allocation
- * @vaddr: Virtual address of the memory to free
- * @dma_handle: DMA handle of the memory to free
- *
- * Managed dma_free_noncoherent().
- */
-void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- struct dma_devres match_data = { size, vaddr, dma_handle };
-
- dma_free_noncoherent(dev, size, vaddr, dma_handle);
- WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match,
- &match_data));
-}
-EXPORT_SYMBOL(dmam_free_noncoherent);
+EXPORT_SYMBOL(dmam_alloc_attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
@@ -251,7 +227,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int ret = -ENXIO;
-#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
+#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
@@ -268,7 +244,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
}
-#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
return ret;
}
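The dmam_* consolidation above leaves a single devres release path,
keyed on the attrs stored at allocation time. Callers of the removed
dmam_alloc_noncoherent()/dmam_free_noncoherent() pair migrate as in the
au1200fb hunk further down; a sketch, with a hypothetical probe routine
and PAGE_SIZE standing in for a real length:

#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *vaddr;

	/* Non-consistent memory is now requested via an attr, not an API. */
	vaddr = dmam_alloc_attrs(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL,
				 DMA_ATTR_NON_CONSISTENT);
	if (!vaddr)
		return -ENOMEM;

	/* Released automatically on driver detach via dmam_release(). */
	return 0;
}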
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 5b5efcc52cb5..baffae817259 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -686,7 +686,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, blp)))
- goto err;
+ goto err_in;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -699,7 +699,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
DMA_BIDIRECTIONAL);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
- goto err;
+ goto err_in;
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
@@ -717,10 +717,10 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
- goto err;
+ goto err_in;
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err;
+ goto err_out;
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -732,7 +732,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
sg->length,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
- goto err;
+ goto err_out;
bufers[y].len = sg->length;
sg_nctr++;
}
@@ -747,9 +747,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
qat_req->buf.sz_out = 0;
}
return 0;
-err:
- dev_err(dev, "Failed to map buf for dma\n");
- sg_nctr = 0;
+
+err_out:
+ n = sg_nents(sglout);
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+ dma_unmap_single(dev, buflout->bufers[i].addr,
+ buflout->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+ kfree(buflout);
+
+err_in:
+ n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
@@ -759,17 +770,8 @@ err:
if (!dma_mapping_error(dev, blp))
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
- if (sgl != sglout && buflout) {
- n = sg_nents(sglout);
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, buflout->bufers[i].addr))
- dma_unmap_single(dev, buflout->bufers[i].addr,
- buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
- kfree(buflout);
- }
+
+ dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
}
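The qat rework above replaces a single catch-all err: label, which
depended on sentinel-style checks of every handle, with staged labels
so each exit path undoes only what was actually set up before the
failure. The shape of the idiom, reduced to placeholder resources:

#include <linux/slab.h>

/* Stand-in for the step that can fail after both buffers exist. */
static int example_map_step(void *in, void *out)
{
	return -ENOMEM; /* pretend the mapping failed */
}

static int example_setup(void)
{
	void *in, *out;
	int ret = -ENOMEM;

	in = kzalloc(64, GFP_KERNEL);
	if (!in)
		return ret;

	out = kzalloc(64, GFP_KERNEL);
	if (!out)
		goto err_in;		/* only "in" to undo */

	if (example_map_step(in, out))
		goto err_out;		/* undo both, newest first */

	return 0;

err_out:
	kfree(out);
err_in:
	kfree(in);
	return ret;
}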
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 6ad4384b3fa8..ed8ed1192775 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -839,8 +839,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
goto free_resources;
}
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
@@ -910,8 +908,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
xor_val_result = 1;
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
@@ -965,8 +961,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR_VAL;
xor_val_result = 0;
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
@@ -1017,18 +1011,14 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
goto free_resources;
dma_unmap:
if (op == IOAT_OP_XOR) {
- if (dest_dma != DMA_ERROR_CODE)
- dma_unmap_page(dev, dest_dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- if (dma_srcs[i] != DMA_ERROR_CODE)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ while (--i >= 0)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
} else if (op == IOAT_OP_XOR_VAL) {
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- if (dma_srcs[i] != DMA_ERROR_CODE)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ while (--i >= 0)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
}
free_resources:
dma->device_free_chan_resources(dma_chan);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index 29ecfd815320..a01461d63f68 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -646,12 +646,12 @@ int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
if (peer) {
ivc->rx.phys = dma_map_single(peer, rx, queue_size,
DMA_BIDIRECTIONAL);
- if (ivc->rx.phys == DMA_ERROR_CODE)
+ if (dma_mapping_error(peer, ivc->rx.phys))
return -ENOMEM;
ivc->tx.phys = dma_map_single(peer, tx, queue_size,
DMA_BIDIRECTIONAL);
- if (ivc->tx.phys == DMA_ERROR_CODE) {
+ if (dma_mapping_error(peer, ivc->tx.phys)) {
dma_unmap_single(peer, ivc->rx.phys, queue_size,
DMA_BIDIRECTIONAL);
return -ENOMEM;
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 2a7eb6817c36..92e6b08ea64a 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -133,7 +133,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
}
/* Framebuffer objects must have a valid device address for scanout */
- if (obj->dev_addr == DMA_ERROR_CODE) {
+ if (!obj->mapped) {
ret = -EINVAL;
goto err_unref;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index d6c2a5d190eb..a76ca21d063b 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -175,6 +175,7 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->phys_addr = obj->linear->start;
obj->dev_addr = obj->linear->start;
+ obj->mapped = true;
}
DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
@@ -205,7 +206,6 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
return NULL;
drm_gem_private_object_init(dev, &obj->obj, size);
- obj->dev_addr = DMA_ERROR_CODE;
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
@@ -229,8 +229,6 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
return NULL;
}
- obj->dev_addr = DMA_ERROR_CODE;
-
mapping = obj->obj.filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
@@ -610,5 +608,6 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
return -EINVAL;
}
dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+ dobj->mapped = true;
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index b88d2b9853c7..6e524e0676bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -16,6 +16,7 @@ struct armada_gem_object {
void *addr;
phys_addr_t phys_addr;
resource_size_t dev_addr;
+ bool mapped;
struct drm_mm_node *linear; /* for linear backed */
struct page *page; /* for page backed */
struct sg_table *sgt; /* for imported */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c77a5aced81a..d48fd7c918f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -181,8 +181,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- if (index >= MAX_FB_BUFFER)
- return DMA_ERROR_CODE;
+ if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
+ return 0;
return exynos_fb->dma_addr[index];
}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 5c9759ed22ca..f16d0f26ee24 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -54,6 +54,8 @@
#include "amd_iommu_types.h"
#include "irq_remapping.h"
+#define AMD_IOMMU_MAPPING_ERROR 0
+
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
#define LOOP_TIMEOUT 100000
@@ -2394,7 +2396,7 @@ static dma_addr_t __map_single(struct device *dev,
paddr &= PAGE_MASK;
address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (address == DMA_ERROR_CODE)
+ if (address == AMD_IOMMU_MAPPING_ERROR)
goto out;
prot = dir2prot(direction);
@@ -2431,7 +2433,7 @@ out_unmap:
dma_ops_free_iova(dma_dom, address, pages);
- return DMA_ERROR_CODE;
+ return AMD_IOMMU_MAPPING_ERROR;
}
/*
@@ -2483,7 +2485,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
if (PTR_ERR(domain) == -EINVAL)
return (dma_addr_t)paddr;
else if (IS_ERR(domain))
- return DMA_ERROR_CODE;
+ return AMD_IOMMU_MAPPING_ERROR;
dma_mask = *dev->dma_mask;
dma_dom = to_dma_ops_domain(domain);
@@ -2560,7 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
npages = sg_num_pages(dev, sglist, nelems);
address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (address == DMA_ERROR_CODE)
+ if (address == AMD_IOMMU_MAPPING_ERROR)
goto out_err;
prot = dir2prot(direction);
@@ -2683,7 +2685,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
size, DMA_BIDIRECTIONAL, dma_mask);
- if (*dma_addr == DMA_ERROR_CODE)
+ if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
goto out_free;
return page_address(page);
@@ -2729,9 +2731,16 @@ free_mem:
*/
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
+ if (!x86_dma_supported(dev, mask))
+ return 0;
return check_device(dev);
}
+static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == AMD_IOMMU_MAPPING_ERROR;
+}
+
static const struct dma_map_ops amd_iommu_dma_ops = {
.alloc = alloc_coherent,
.free = free_coherent,
@@ -2740,6 +2749,7 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
.map_sg = map_sg,
.unmap_sg = unmap_sg,
.dma_supported = amd_iommu_dma_supported,
+ .mapping_error = amd_iommu_mapping_error,
};
static int init_reserved_iova_ranges(void)
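With the global macro gone, each dma_map_ops instance now defines its
own failure sentinel and reports it via ->mapping_error, exactly as
AMD_IOMMU_MAPPING_ERROR does above (0 is safe there because the IOVA
allocator never hands out address 0). A stripped-down template of the
pattern, with illustrative names:

#include <linux/dma-mapping.h>

#define EXAMPLE_MAPPING_ERROR	0	/* never a valid handle here */

static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == EXAMPLE_MAPPING_ERROR;
}

static const struct dma_map_ops example_dma_ops = {
	/* .map_page, .map_sg, ... elided */
	.mapping_error	= example_mapping_error,
};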
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 62618e77bedc..9403336f1fa6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -31,6 +31,8 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#define IOMMU_MAPPING_ERROR 0
+
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
@@ -500,7 +502,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
{
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
- *handle = DMA_ERROR_CODE;
+ *handle = IOMMU_MAPPING_ERROR;
}
/**
@@ -533,7 +535,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t iova;
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
- *handle = DMA_ERROR_CODE;
+ *handle = IOMMU_MAPPING_ERROR;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
@@ -627,11 +629,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
return iova + iova_off;
}
@@ -671,7 +673,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
s->offset += s_iova_off;
s->length = s_length;
- sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_address(s) = IOMMU_MAPPING_ERROR;
sg_dma_len(s) = 0;
/*
@@ -714,11 +716,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_ERROR_CODE)
+ if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
- sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_address(s) = IOMMU_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
}
@@ -836,7 +838,7 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return dma_addr == DMA_ERROR_CODE;
+ return dma_addr == IOMMU_MAPPING_ERROR;
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8500deda9175..1e95475883cd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3981,6 +3981,9 @@ struct dma_map_ops intel_dma_ops = {
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.mapping_error = intel_mapping_error,
+#ifdef CONFIG_X86
+ .dma_supported = x86_dma_supported,
+#endif
};
static inline int iommu_domain_cache_init(void)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3e0a695537e2..d17c2b03f580 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -469,56 +469,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
-static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
-{
- int i;
- struct device *dev = &adapter->vdev->dev;
-
- if (adapter->buffer_list_addr != NULL) {
- if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
- dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
- DMA_BIDIRECTIONAL);
- adapter->buffer_list_dma = DMA_ERROR_CODE;
- }
- free_page((unsigned long)adapter->buffer_list_addr);
- adapter->buffer_list_addr = NULL;
- }
-
- if (adapter->filter_list_addr != NULL) {
- if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
- dma_unmap_single(dev, adapter->filter_list_dma, 4096,
- DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = DMA_ERROR_CODE;
- }
- free_page((unsigned long)adapter->filter_list_addr);
- adapter->filter_list_addr = NULL;
- }
-
- if (adapter->rx_queue.queue_addr != NULL) {
- dma_free_coherent(dev, adapter->rx_queue.queue_len,
- adapter->rx_queue.queue_addr,
- adapter->rx_queue.queue_dma);
- adapter->rx_queue.queue_addr = NULL;
- }
-
- for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
- if (adapter->rx_buff_pool[i].active)
- ibmveth_free_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
-
- if (adapter->bounce_buffer != NULL) {
- if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->bounce_buffer_dma,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- DMA_BIDIRECTIONAL);
- adapter->bounce_buffer_dma = DMA_ERROR_CODE;
- }
- kfree(adapter->bounce_buffer);
- adapter->bounce_buffer = NULL;
- }
-}
-
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -575,14 +525,17 @@ static int ibmveth_open(struct net_device *netdev)
for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
+ rc = -ENOMEM;
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
- adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ if (!adapter->buffer_list_addr) {
+ netdev_err(netdev, "unable to allocate list pages\n");
+ goto out;
+ }
- if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
- netdev_err(netdev, "unable to allocate filter or buffer list "
- "pages\n");
- rc = -ENOMEM;
- goto err_out;
+ adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ if (!adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter pages\n");
+ goto out_free_buffer_list;
}
dev = &adapter->vdev->dev;
@@ -592,22 +545,21 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.queue_addr =
dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
&adapter->rx_queue.queue_dma, GFP_KERNEL);
- if (!adapter->rx_queue.queue_addr) {
- rc = -ENOMEM;
- goto err_out;
- }
+ if (!adapter->rx_queue.queue_addr)
+ goto out_free_filter_list;
adapter->buffer_list_dma = dma_map_single(dev,
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ netdev_err(netdev, "unable to map buffer list pages\n");
+ goto out_free_queue_mem;
+ }
+
adapter->filter_list_dma = dma_map_single(dev,
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-
- if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
- (dma_mapping_error(dev, adapter->filter_list_dma))) {
- netdev_err(netdev, "unable to map filter or buffer list "
- "pages\n");
- rc = -ENOMEM;
- goto err_out;
+ if (dma_mapping_error(dev, adapter->filter_list_dma)) {
+ netdev_err(netdev, "unable to map filter list pages\n");
+ goto out_unmap_buffer_list;
}
adapter->rx_queue.index = 0;
@@ -638,7 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
rxq_desc.desc,
mac_address);
rc = -ENONET;
- goto err_out;
+ goto out_unmap_filter_list;
}
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -648,7 +600,7 @@ static int ibmveth_open(struct net_device *netdev)
netdev_err(netdev, "unable to alloc pool\n");
adapter->rx_buff_pool[i].active = 0;
rc = -ENOMEM;
- goto err_out;
+ goto out_free_buffer_pools;
}
}
@@ -662,22 +614,21 @@ static int ibmveth_open(struct net_device *netdev)
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
- goto err_out;
+ goto out_free_buffer_pools;
}
+ rc = -ENOMEM;
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- rc = -ENOMEM;
- goto err_out_free_irq;
- }
+ if (!adapter->bounce_buffer)
+ goto out_free_irq;
+
adapter->bounce_buffer_dma =
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
netdev_err(netdev, "unable to map bounce buffer\n");
- rc = -ENOMEM;
- goto err_out_free_irq;
+ goto out_free_bounce_buffer;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -689,10 +640,31 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
-err_out_free_irq:
+out_free_bounce_buffer:
+ kfree(adapter->bounce_buffer);
+out_free_irq:
free_irq(netdev->irq, netdev);
-err_out:
- ibmveth_cleanup(adapter);
+out_free_buffer_pools:
+ while (--i >= 0) {
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+ }
+out_unmap_filter_list:
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+out_unmap_buffer_list:
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+out_free_queue_mem:
+ dma_free_coherent(dev, adapter->rx_queue.queue_len,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_dma);
+out_free_filter_list:
+ free_page((unsigned long)adapter->filter_list_addr);
+out_free_buffer_list:
+ free_page((unsigned long)adapter->buffer_list_addr);
+out:
napi_disable(&adapter->napi);
return rc;
}
@@ -700,7 +672,9 @@ err_out:
static int ibmveth_close(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
long lpar_rc;
+ int i;
netdev_dbg(netdev, "close starting\n");
@@ -724,7 +698,27 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_update_rx_no_buffer(adapter);
- ibmveth_cleanup(adapter);
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ free_page((unsigned long)adapter->buffer_list_addr);
+
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ free_page((unsigned long)adapter->filter_list_addr);
+
+ dma_free_coherent(dev, adapter->rx_queue.queue_len,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_dma);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ kfree(adapter->bounce_buffer);
netdev_dbg(netdev, "close complete\n");
@@ -1719,11 +1713,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
-
- adapter->buffer_list_dma = DMA_ERROR_CODE;
- adapter->filter_list_dma = DMA_ERROR_CODE;
- adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-
netdev_dbg(netdev, "registering netdev...\n");
ibmveth_set_features(netdev, netdev->features);
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 35df2c1a8a63..8de42f617d16 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -532,10 +532,6 @@ failed:
clk_disable_unprepare(fbdev->lcdclk);
clk_put(fbdev->lcdclk);
}
- if (fbdev->fb_mem) {
- dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem,
- fbdev->fb_phys);
- }
if (fbdev->info.cmap.len != 0) {
fb_dealloc_cmap(&fbdev->info.cmap);
}
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 6c2b2ca4a909..5f04b4096c42 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1694,9 +1694,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
/* Allocate the framebuffer to the maximum screen size */
fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
- fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
+ fbdev->fb_mem = dmam_alloc_attrs(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
- &fbdev->fb_phys, GFP_KERNEL);
+ &fbdev->fb_phys, GFP_KERNEL,
+ DMA_ATTR_NON_CONSISTENT);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8dab0d3dc172..82fc54f8eb77 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -67,6 +67,8 @@ static unsigned long dma_alloc_coherent_mask(struct device *dev,
}
#endif
+#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0)
+
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
@@ -295,7 +297,8 @@ error:
free_pages((unsigned long)xen_io_tlb_start, order);
return rc;
}
-void *
+
+static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
@@ -346,9 +349,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
memset(ret, 0, size);
return ret;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
-void
+static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr, unsigned long attrs)
{
@@ -369,8 +371,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
-
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
@@ -379,7 +379,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
* Once the device is given the dma address, the device owns this memory until
* either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
*/
-dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
@@ -412,7 +412,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
attrs);
if (map == SWIOTLB_MAP_ERROR)
- return DMA_ERROR_CODE;
+ return XEN_SWIOTLB_ERROR_CODE;
dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
@@ -427,9 +427,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
- return DMA_ERROR_CODE;
+ return XEN_SWIOTLB_ERROR_CODE;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -467,13 +466,12 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
dma_mark_clean(phys_to_virt(paddr), size);
}
-void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
/*
* Make physical memory consistent for a single streaming mode DMA translation
@@ -516,7 +514,6 @@ xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
{
xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);
void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -524,7 +521,25 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
{
xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
+
+/*
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
+ */
+static void
+xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(dir == DMA_NONE);
+
+ for_each_sg(sgl, sg, nelems, i)
+ xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+
+}
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -542,7 +557,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
* Device ownership issues as mentioned above for xen_swiotlb_map_page are the
* same here.
*/
-int
+static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
unsigned long attrs)
@@ -599,27 +614,6 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
}
return nelems;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
-
-/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i)
- xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
-
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
@@ -641,21 +635,19 @@ xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sg), dir, target);
}
-void
+static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);
-void
+static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
/*
* Return whether the given device DMA address mask can be supported
@@ -663,31 +655,18 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
-int
+static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
-
-int
-xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
/*
* Create userspace mapping for the DMA-coherent memory.
* This function should be called with the pages from the current domain only,
* passing pages mapped from other domains would lead to memory corruption.
*/
-int
+static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
@@ -699,13 +678,12 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
#endif
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
/*
* This function should be called with the pages from the current domain only,
* passing pages mapped from other domains would lead to memory corruption.
*/
-int
+static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
@@ -727,4 +705,25 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
#endif
return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
+
+static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == XEN_SWIOTLB_ERROR_CODE;
+}
+
+const struct dma_map_ops xen_swiotlb_dma_ops = {
+ .alloc = xen_swiotlb_alloc_coherent,
+ .free = xen_swiotlb_free_coherent,
+ .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = xen_swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+ .map_sg = xen_swiotlb_map_sg_attrs,
+ .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+ .map_page = xen_swiotlb_map_page,
+ .unmap_page = xen_swiotlb_unmap_page,
+ .dma_supported = xen_swiotlb_dma_supported,
+ .mmap = xen_swiotlb_dma_mmap,
+ .get_sgtable = xen_swiotlb_get_sgtable,
+ .mapping_error = xen_swiotlb_mapping_error,
+};
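
Since every entry point above is now static, the ops table is the only
thing xen-swiotlb exposes; architecture setup code attaches it
wholesale instead of linking against the individual exported symbols.
Roughly (the hook below is a sketch, not the exact Xen init path):

#include <linux/dma-mapping.h>

extern const struct dma_map_ops xen_swiotlb_dma_ops;

static void example_attach_xen_ops(struct device *dev)
{
	/* Route all DMA API calls for this device through xen-swiotlb. */
	set_dma_ops(dev, &xen_swiotlb_dma_ops);
}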