-rw-r--r--  Documentation/DMA-API.txt | 4
-rw-r--r--  arch/arm/common/dmabounce.c | 2
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c | 5
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c | 2
-rw-r--r--  arch/mips/mm/dma-default.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_pciex.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spider-pci.c | 2
-rw-r--r--  arch/powerpc/platforms/iseries/mf.c | 2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 2
-rw-r--r--  arch/x86/kernel/pci-dma.c | 27
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 3
-rw-r--r--  arch/x86/kernel/pci-nommu.c | 14
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c | 2
-rw-r--r--  drivers/firewire/fw-iso.c | 2
-rw-r--r--  drivers/firewire/fw-ohci.c | 2
-rw-r--r--  drivers/firewire/fw-sbp2.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 2
-rw-r--r--  drivers/media/dvb/pluto2/pluto2.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 4
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 4
-rw-r--r--  drivers/net/bnx2x_main.c | 4
-rw-r--r--  drivers/net/cxgb3/sge.c | 2
-rw-r--r--  drivers/net/e100.c | 2
-rw-r--r--  drivers/net/e1000e/ethtool.c | 4
-rw-r--r--  drivers/net/e1000e/netdev.c | 11
-rw-r--r--  drivers/net/ibmveth.c | 38
-rw-r--r--  drivers/net/iseries_veth.c | 4
-rw-r--r--  drivers/net/mlx4/eq.c | 2
-rw-r--r--  drivers/net/pasemi_mac.c | 6
-rw-r--r--  drivers/net/qla3xxx.c | 12
-rw-r--r--  drivers/net/s2io.c | 48
-rw-r--r--  drivers/net/sfc/rx.c | 4
-rw-r--r--  drivers/net/sfc/tx.c | 7
-rw-r--r--  drivers/net/spider_net.c | 4
-rw-r--r--  drivers/net/tc35815.c | 4
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 2
-rw-r--r--  drivers/spi/atmel_spi.c | 4
-rw-r--r--  drivers/spi/au1550_spi.c | 6
-rw-r--r--  drivers/spi/omap2_mcspi.c | 4
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 4
-rw-r--r--  drivers/spi/spi_imx.c | 6
-rw-r--r--  include/asm-alpha/dma-mapping.h | 6
-rw-r--r--  include/asm-alpha/pci.h | 2
-rw-r--r--  include/asm-arm/dma-mapping.h | 2
-rw-r--r--  include/asm-avr32/dma-mapping.h | 2
-rw-r--r--  include/asm-cris/dma-mapping.h | 2
-rw-r--r--  include/asm-frv/dma-mapping.h | 2
-rw-r--r--  include/asm-generic/dma-mapping-broken.h | 2
-rw-r--r--  include/asm-generic/dma-mapping.h | 4
-rw-r--r--  include/asm-generic/pci-dma-compat.h | 4
-rw-r--r--  include/asm-ia64/machvec.h | 2
-rw-r--r--  include/asm-m68k/dma-mapping.h | 2
-rw-r--r--  include/asm-mips/dma-mapping.h | 2
-rw-r--r--  include/asm-mn10300/dma-mapping.h | 2
-rw-r--r--  include/asm-parisc/dma-mapping.h | 2
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 2
-rw-r--r--  include/asm-sh/dma-mapping.h | 2
-rw-r--r--  include/asm-sparc/dma-mapping_64.h | 2
-rw-r--r--  include/asm-sparc/pci_32.h | 3
-rw-r--r--  include/asm-sparc/pci_64.h | 5
-rw-r--r--  include/asm-x86/device.h | 3
-rw-r--r--  include/asm-x86/dma-mapping.h | 99
-rw-r--r--  include/asm-x86/swiotlb.h | 2
-rw-r--r--  include/asm-xtensa/dma-mapping.h | 2
-rw-r--r--  include/linux/i2o.h | 2
-rw-r--r--  include/linux/ssb/ssb.h | 4
-rw-r--r--  include/rdma/ib_verbs.h | 2
-rw-r--r--  lib/swiotlb.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 3
76 files changed, 256 insertions, 210 deletions
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 80d150458c80..d8b63d164e41 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -298,10 +298,10 @@ recommended that you never use these unless you really know what the
cache width is.
int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)
In some circumstances dma_map_single and dma_map_page will fail to create
a mapping. A driver can check for these errors by testing the returned
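With the changed interface, a driver passes the same device it used for the mapping when it checks the result. A minimal sketch of the new calling convention, assuming a hypothetical driver with a struct device *dev, a buffer buf and a length len (not part of this patch):
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		/* Mapping failed: do not hand "handle" to the hardware;
		 * nothing was mapped, so nothing needs to be unmapped. */
		return -ENOMEM;
	}
The PCI wrapper takes the pci_dev instead, e.g. pci_dma_mapping_error(pdev, handle) after pci_map_single().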
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index dd2947342604..69130f365904 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -280,7 +280,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
/*
* Trying to unmap an invalid mapping
*/
- if (dma_mapping_error(dma_addr)) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Trying to unmap invalid mapping\n");
return;
}
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 1c44ec2a1d58..88b6e6f3fd88 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -186,9 +186,10 @@ hwsw_dma_supported (struct device *dev, u64 mask)
}
int
-hwsw_dma_mapping_error (dma_addr_t dma_addr)
+hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr);
+ return hwiommu_dma_mapping_error(dev, dma_addr) ||
+ swiotlb_dma_mapping_error(dev, dma_addr);
}
EXPORT_SYMBOL(hwsw_dma_mapping_error);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 34421aed1e2a..4956be40d7b5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2147,7 +2147,7 @@ sba_dma_supported (struct device *dev, u64 mask)
}
int
-sba_dma_mapping_error (dma_addr_t dma_addr)
+sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 52175af299a0..53ebb6484495 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -350,7 +350,7 @@ void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
-int sn_dma_mapping_error(dma_addr_t dma_addr)
+int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index ae39dd88b9aa..891312f8e5a6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -348,7 +348,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
EXPORT_SYMBOL(dma_sync_sg_for_device);
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 0e04f8fb152a..3e7e0f1568ef 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -281,7 +281,7 @@ static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dummy_page_da)) {
+ if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
pr_err("PCIEX:Map dummy page failed.\n");
kfree(dummy_page_va);
return -1;
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 418b605ac35a..5122ec145271 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -111,7 +111,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dummy_page_da)) {
+ if (dma_mapping_error(phb->parent, dummy_page_da)) {
pr_err("SPIDER-IOWA:Map dummy page filed.\n");
kfree(dummy_page_va);
return -1;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 1dc7295746da..731d7b157749 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -871,7 +871,7 @@ static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
count = 256 - off;
dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_addr))
+ if (dma_mapping_error(NULL, dma_addr))
return -ENOMEM;
memset(page, 0, off + count);
memset(&vsp_cmd, 0, sizeof(vsp_cmd));
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 19e7fc7c2c4f..1eb86be93d7a 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -544,7 +544,7 @@ error:
return ret;
}
-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
.alloc_coherent = calgary_alloc_coherent,
.map_single = calgary_map_single,
.unmap_single = calgary_unmap_single,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index cbecb05551bb..37544123896d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
static int forbid_dac __read_mostly;
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;
@@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
int dma_supported(struct device *dev, u64 mask)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask)
}
#endif
- if (dma_ops->dma_supported)
- return dma_ops->dma_supported(dev, mask);
+ if (ops->dma_supported)
+ return ops->dma_supported(dev, mask);
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
@@ -367,6 +369,7 @@ void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
void *memory = NULL;
struct page *page;
unsigned long dma_mask = 0;
@@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* Let low level make its own zone decisions */
gfp &= ~(GFP_DMA32|GFP_DMA);
- if (dma_ops->alloc_coherent)
- return dma_ops->alloc_coherent(dev, size,
+ if (ops->alloc_coherent)
+ return ops->alloc_coherent(dev, size,
dma_handle, gfp);
return NULL;
}
@@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
}
}
- if (dma_ops->alloc_coherent) {
+ if (ops->alloc_coherent) {
free_pages((unsigned long)memory, get_order(size));
gfp &= ~(GFP_DMA|GFP_DMA32);
- return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+ return ops->alloc_coherent(dev, size, dma_handle, gfp);
}
- if (dma_ops->map_simple) {
- *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+ if (ops->map_simple) {
+ *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
size,
PCI_DMA_BIDIRECTIONAL);
if (*dma_handle != bad_dma_address)
@@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t bus)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
int order = get_order(size);
WARN_ON(irqs_disabled()); /* for portability */
if (dma_release_coherent(dev, order, vaddr))
return;
- if (dma_ops->unmap_single)
- dma_ops->unmap_single(dev, bus, size, 0);
+ if (ops->unmap_single)
+ ops->unmap_single(dev, bus, size, 0);
free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..744126e64950 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
extern int agp_amd64_init(void);
-static const struct dma_mapping_ops gart_dma_ops = {
- .mapping_error = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
.map_single = gart_map_single,
.map_simple = gart_map_simple,
.unmap_single = gart_unmap_single,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff3..3f91f71cdc3e 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
return nents;
}
-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
- return 0;
-#else
- return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
.map_single = nommu_map_single,
.map_sg = nommu_map_sg,
- .mapping_error = nommu_mapping_error,
.is_phys = 1,
};
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c20..c4ce0332759e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
}
-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
.alloc_coherent = swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index bcbe794a3ea5..e14c03dc0065 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
address = dma_map_page(card->device, buffer->pages[i],
0, PAGE_SIZE, direction);
- if (dma_mapping_error(address)) {
+ if (dma_mapping_error(card->device, address)) {
__free_page(buffer->pages[i]);
goto out_pages;
}
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 333b12544dd1..566672e0bcff 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -953,7 +953,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
payload_bus =
dma_map_single(ohci->card.device, packet->payload,
packet->payload_length, DMA_TO_DEVICE);
- if (dma_mapping_error(payload_bus)) {
+ if (dma_mapping_error(ohci->card.device, payload_bus)) {
packet->ack = RCODE_SEND_ERROR;
return -1;
}
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 53fc5a641e6d..aaff50ebba1d 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
orb->response_bus =
dma_map_single(device->card->device, &orb->response,
sizeof(orb->response), DMA_FROM_DEVICE);
- if (dma_mapping_error(orb->response_bus))
+ if (dma_mapping_error(device->card->device, orb->response_bus))
goto fail_mapping_response;
orb->request.response.high = 0;
@@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
- if (dma_mapping_error(orb->base.request_bus))
+ if (dma_mapping_error(device->card->device, orb->base.request_bus))
goto fail_mapping_request;
sbp2_send_orb(&orb->base, lu, node_id, generation,
@@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
orb->page_table_bus =
dma_map_single(device->card->device, orb->page_table,
sizeof(orb->page_table), DMA_TO_DEVICE);
- if (dma_mapping_error(orb->page_table_bus))
+ if (dma_mapping_error(device->card->device, orb->page_table_bus))
goto fail_page_table;
/*
@@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
- if (dma_mapping_error(orb->base.request_bus))
+ if (dma_mapping_error(device->card->device, orb->base.request_bus))
goto out;
sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index eaba03273e4f..284c9bca517e 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -698,7 +698,7 @@ retry:
addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
tx->map_len, DMA_TO_DEVICE);
- if (dma_mapping_error(addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, addr)) {
ret = -EIO;
goto unlock;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 86e016916cd1..82d9a0b5ca2f 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dma_addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
ret = -ENOMEM;
goto free_unmap;
}
@@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
pages[j], 0, flen, DMA_TO_DEVICE);
unsigned long fofs = addr & ~PAGE_MASK;
- if (dma_mapping_error(dma_addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
ret = -ENOMEM;
goto done;
}
@@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
if (page) {
dma_addr = dma_map_page(&dd->pcidev->dev,
page, 0, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
ret = -ENOMEM;
goto free_pbc;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 4e36aa7cb3d2..cc6858f0b65b 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
return -ENOMEM;
dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+ if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
__free_page(dev->eq_table.icm_page);
return -ENOMEM;
}
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1360403b88b6..a9653c63f4db 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto)
pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
- return pci_dma_mapping_error(pluto->dma_addr);
+ return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
}
static void pluto_dma_unmap(struct pluto *pluto)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c3a5db72ddd7..5f95e10229b5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->align_addr = dma_map_single(mmc_dev(host->mmc),
host->align_buffer, 128 * 4, direction);
- if (dma_mapping_error(host->align_addr))
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
goto fail;
BUG_ON(host->align_addr & 0x3);
@@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->adma_addr = dma_map_single(mmc_dev(host->mmc),
host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
- if (dma_mapping_error(host->align_addr))
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
goto unmap_entries;
BUG_ON(host->adma_addr & 0x3);
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472..18d3eeb7eab2 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(d)) {
+ if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
goto err;
}
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(d)) {
+ if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
goto err;
}
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6d..c7cc760a1777 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(mapping))) {
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
return -ENOMEM;
}
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(mapping))) {
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
return -ENOMEM;
}
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e6..1b0861d73ab7 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
dma_addr_t mapping;
mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(mapping)))
+ if (unlikely(pci_dma_mapping_error(pdev, mapping)))
return -ENOMEM;
pci_unmap_addr_set(sd, dma_addr, mapping);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312..19d32a227be1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rx->dma_addr)) {
+ if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0..9350564065e7 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].dma =
pci_map_single(pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+ if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->buffer_info[i].dma =
pci_map_single(pdev, skb->data, 2048,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+ if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9c0f56b3c518..d13677899767 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -195,7 +195,7 @@ map_skb:
buffer_info->dma = pci_map_single(pdev, skb->data,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page->page,
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(ps_page->dma)) {
+ if (pci_dma_mapping_error(pdev, ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
buffer_info->dma = pci_map_single(pdev, skb->data,
adapter->rx_ps_bsize0,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
skb->data + offset,
size,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
adapter->tx_dma_failed++;
return -1;
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
offset,
size,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)) {
dev_err(&adapter->pdev->dev,
"TX DMA page map failed\n");
adapter->tx_dma_failed++;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e5a6e2e84540..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_addr))
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto failure;
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
@@ -294,7 +294,7 @@ failure:
pool->consumer_index = pool->size - 1;
else
pool->consumer_index--;
- if (!dma_mapping_error(dma_addr))
+ if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
int i;
+ struct device *dev = &adapter->vdev->dev;
if(adapter->buffer_list_addr != NULL) {
- if(!dma_mapping_error(adapter->buffer_list_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->buffer_list_dma, 4096,
+ if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
adapter->buffer_list_dma = DMA_ERROR_CODE;
}
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
if(adapter->filter_list_addr != NULL) {
- if(!dma_mapping_error(adapter->filter_list_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->filter_list_dma, 4096,
+ if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
adapter->filter_list_dma = DMA_ERROR_CODE;
}
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
if(adapter->rx_queue.queue_addr != NULL) {
- if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
+ if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+ dma_unmap_single(dev,
adapter->rx_queue.queue_dma,
adapter->rx_queue.queue_len,
DMA_BIDIRECTIONAL);
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
int rc;
union ibmveth_buf_desc rxq_desc;
int i;
+ struct device *dev;
ibmveth_debug_printk("open starting\n");
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
return -ENOMEM;
}
- adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+ dev = &adapter->vdev->dev;
+
+ adapter->buffer_list_dma = dma_map_single(dev,
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->filter_list_dma = dma_map_single(dev,
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_dma = dma_map_single(dev,
adapter->rx_queue.queue_addr,
adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
- if((dma_mapping_error(adapter->buffer_list_dma) ) ||
- (dma_mapping_error(adapter->filter_list_dma)) ||
- (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+ (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+ (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
ibmveth_error_printk("unable to map filter or buffer list pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->bounce_buffer_dma =
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+ if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
ibmveth_error_printk("unable to map bounce buffer\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
buf[1] = 0;
}
- if (dma_mapping_error(data_dma_addr)) {
+ if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
ibmveth_error_printk("tx: unable to map xmit buffer\n");
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdf..c46864d626b2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
msg->data.addr[0] = dma_map_single(port->dev, skb->data,
skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(msg->data.addr[0]))
+ if (dma_mapping_error(port->dev, msg->data.addr[0]))
goto recycle_and_drop;
msg->dev = port->dev;
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
dma_address = msg->data.addr[0];
dma_length = msg->data.len[0];
- if (!dma_mapping_error(dma_address))
+ if (!dma_mapping_error(msg->dev, dma_address))
dma_unmap_single(msg->dev, dma_address, dma_length,
DMA_TO_DEVICE);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index ea3a09aaa844..7df928d3a3d8 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
return -ENOMEM;
priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+ if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
__free_page(priv->eq_table.icm_page);
return -ENOMEM;
}
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f..edc0fd588985 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(dma))) {
+ if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
dev_kfree_skb_irq(info->skb);
break;
}
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
PCI_DMA_TODEVICE);
map_size[0] = skb_headlen(skb);
- if (dma_mapping_error(map[0]))
+ if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
goto out_err_nolock;
for (i = 0; i < nfrags; i++) {
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
map_size[i+1] = frag->size;
- if (dma_mapping_error(map[i+1])) {
+ if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
nfrags = i;
goto out_err_nolock;
}
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb..e82b37bbd6c3 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
qdev->lrg_buffer_len -
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
*/
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
sizeof(struct oal),
PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
qdev->ndev->name, err);
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf048..86d77d05190a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic)
* Return Value:
* SUCCESS on success or an appropriate -ve value on failure.
*/
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+ int from_card_up)
{
struct sk_buff *skb;
struct RxD_t *rxdp;
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
rxdp1->Buffer0_ptr = pci_map_single
(ring->pdev, skb->data, size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp1->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 =
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
rxdp3->Buffer0_ptr =
pci_map_single(ring->pdev, ba->ba_0,
BUF0_LEN, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer0_ptr))
goto pci_map_failed;
} else
pci_dma_sync_single_for_device(ring->pdev,
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
(ring->pdev, skb->data, ring->mtu + 4,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer2_ptr))
goto pci_map_failed;
if (from_card_up) {
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error
- (rxdp3->Buffer1_ptr)) {
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer1_ptr)) {
pci_unmap_single
(ring->pdev,
(dma_addr_t)(unsigned long)
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp)
}
}
-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
{
- if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+ if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
}
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
return 0;
pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(nic, ring);
if (pkts_processed < budget_org) {
netif_rx_complete(dev, napi);
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
for (i = 0; i < config->rx_ring_num; i++) {
ring = &mac_control->rings[i];
ring_pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(nic, ring);
pkts_processed += ring_pkts_processed;
budget -= ring_pkts_processed;
if (budget <= 0)
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev)
rx_intr_handler(&mac_control->rings[i], 0);
for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+ if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+ -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
break;
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
fifo->ufo_in_band_v,
sizeof(u64), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp++;
}
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp->Host_Control = (unsigned long) skb;
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
netif_rx_schedule(dev, &ring->napi);
} else {
rx_intr_handler(ring, 0);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(sp, ring);
}
return IRQ_HANDLED;
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
*/
if (!config->napi) {
for (i = 0; i < config->rx_ring_num; i++)
- s2io_chk_rx_buffers(&mac_control->rings[i]);
+ s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
}
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single( sp->pdev, (*skb)->data,
size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+ if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
goto memalloc_failed;
rxdp->Host_Control = (unsigned long) (*skb);
}
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single(sp->pdev, (*skb)->data,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+ if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
goto memalloc_failed;
rxdp3->Buffer0_ptr = *temp0 =
pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer0_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4, PCI_DMA_FROMDEVICE);
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
rxdp3->Buffer1_ptr = *temp1 =
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer1_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp)
for (i = 0; i < config->rx_ring_num; i++) {
mac_control->rings[i].mtu = dev->mtu;
- ret = fill_rx_buffers(&mac_control->rings[i], 1);
+ ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 601b001437c0..0d27dd39bc09 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
rx_buf->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
dev_kfree_skb_any(rx_buf->skb);
rx_buf->skb = NULL;
return -EIO;
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
0, efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
__free_pages(rx_buf->page, efx->rx_buffer_order);
rx_buf->page = NULL;
return -EIO;
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5cdd082ab8f6..5e8374ab28ee 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
/* Process all fragments */
while (1) {
- if (unlikely(pci_dma_mapping_error(dma_addr)))
+ if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
goto pci_err;
/* Store fields for marking in the per-fragment final
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
TSOH_BUFFER(tsoh), header_len,
PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+ tsoh->dma_addr))) {
kfree(tsoh);
return NULL;
}
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
len, PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+ if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
st->ifc.unmap_len = len;
st->ifc.len = len;
st->ifc.dma_addr = st->ifc.unmap_addr;
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 00aa0b108cb9..b6435d0d71f9 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
/* iommu-map the skb */
buf = pci_map_single(card->pdev, descr->skb->data,
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buf)) {
+ if (pci_dma_mapping_error(card->pdev, buf)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
if (netif_msg_rx_err(card) && net_ratelimit())
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
unsigned long flags;
buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buf)) {
+ if (pci_dma_mapping_error(card->pdev, buf)) {
if (netif_msg_tx_err(card) && net_ratelimit())
dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
"Dropping packet\n", skb->data, skb->len);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index a645e5028c14..8487ace9d2e3 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
return NULL;
*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(*dma_handle)) {
+ if (pci_dma_mapping_error(hwdev, *dma_handle)) {
free_page((unsigned long)buf);
return NULL;
}
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
return NULL;
*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(*dma_handle)) {
+ if (pci_dma_mapping_error(hwdev, *dma_handle)) {
dev_kfree_skb_any(skb);
return NULL;
}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 217d506527a9..d9769c527346 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
bf->skb = skb;
bf->skbaddr = pci_map_single(sc->pdev,
skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
+ if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
bf->skb = NULL;
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
"skbaddr %llx\n", skb, skb->data, skb->len,
(unsigned long long)bf->skbaddr);
- if (pci_dma_mapping_error(bf->skbaddr)) {
+ if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
ATH5K_ERR(sc, "beacon DMA mapping failed\n");
return -EIO;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c4a7c06793c5..61f8fdea2d96 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3525,7 +3525,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
crq->msg_token = dma_map_single(dev, crq->msgs,
PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(crq->msg_token))
+ if (dma_mapping_error(dev, crq->msg_token))
goto map_failed;
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
@@ -3618,7 +3618,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
async_q->size * sizeof(*async_q->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(async_q->msg_token)) {
+ if (dma_mapping_error(dev, async_q->msg_token)) {
dev_err(dev, "Failed to map async queue\n");
goto free_async_crq;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 20000ec79b04..6b24b9cdb04c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(req->buffer)) {
+ if (dma_mapping_error(hostdata->dev, req->buffer)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"Unable to map request_buffer for "
@@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(host_config->buffer)) {
+ if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"dma_mapping error getting host config\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 3b9514c8f1f1..2e13ec00172a 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(queue->msg_token))
+ if (dma_mapping_error(target->dev, queue->msg_token))
goto map_failed;
err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 182146100dc1..462a8574dad9 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(queue->msg_token))
+ if (dma_mapping_error(hostdata->dev, queue->msg_token))
goto map_failed;
gather_partition_info();
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index e81d59d78910..0c7165660853 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
xfer->tx_dma = dma_map_single(dev,
(void *) xfer->tx_buf, xfer->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(xfer->tx_dma))
+ if (dma_mapping_error(dev, xfer->tx_dma))
return -ENOMEM;
}
if (xfer->rx_buf) {
xfer->rx_dma = dma_map_single(dev,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(xfer->rx_dma)) {
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
if (xfer->tx_buf)
dma_unmap_single(dev,
xfer->tx_dma, xfer->len,
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 9149689c79d9..87b73e0169c5 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -334,7 +334,7 @@ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
hw->dma_rx_tmpbuf_size = size;
hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
size, DMA_FROM_DEVICE);
- if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) {
+ if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
kfree(hw->dma_rx_tmpbuf);
hw->dma_rx_tmpbuf = 0;
hw->dma_rx_tmpbuf_size = 0;
@@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
dma_rx_addr = dma_map_single(hw->dev,
(void *)t->rx_buf,
t->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_rx_addr))
+ if (dma_mapping_error(hw->dev, dma_rx_addr))
dev_err(hw->dev, "rx dma map error\n");
}
} else {
@@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
dma_tx_addr = dma_map_single(hw->dev,
(void *)t->tx_buf,
t->len, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_tx_addr))
+ if (dma_mapping_error(hw->dev, dma_tx_addr))
dev_err(hw->dev, "tx dma map error\n");
}
} else {
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index b1cc148036c1..f6f987bb71ca 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
if (tx_buf != NULL) {
t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(t->tx_dma)) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
'T', len);
return -EINVAL;
@@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
if (rx_buf != NULL) {
t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(t->rx_dma)) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
'R', len);
if (tx_buf != NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 0c452c46ab07..067299d6d192 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
drv_data->rx_map_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(drv_data->rx_dma))
+ if (dma_mapping_error(dev, drv_data->rx_dma))
return 0;
/* Stream map the tx buffer */
@@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
drv_data->tx_map_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(drv_data->tx_dma)) {
+ if (dma_mapping_error(dev, drv_data->tx_dma)) {
dma_unmap_single(dev, drv_data->rx_dma,
drv_data->rx_map_len, DMA_FROM_DEVICE);
return 0;
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 54ac7bea5f8c..6fb77fcc4971 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf,
drv_data->tx_map_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(drv_data->tx_dma))
+ if (dma_mapping_error(dev, drv_data->tx_dma))
return -1;
drv_data->tx_dma_needs_unmap = 1;
@@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf,
drv_data->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(drv_data->rx_dma))
+ if (dma_mapping_error(dev, drv_data->rx_dma))
return -1;
drv_data->rx_dma_needs_unmap = 1;
}
@@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf,
drv_data->tx_map_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(drv_data->tx_dma)) {
+ if (dma_mapping_error(dev, drv_data->tx_dma)) {
if (drv_data->rx_dma) {
dma_unmap_single(dev,
drv_data->rx_dma,
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index db351d1296f4..a5801ae02e4b 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -24,8 +24,8 @@
pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
#define dma_supported(dev, mask) \
pci_dma_supported(alpha_gendev_to_pci(dev), mask)
-#define dma_mapping_error(addr) \
- pci_dma_mapping_error(addr)
+#define dma_mapping_error(dev, addr) \
+ pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr)
#else /* no PCI - no IOMMU. */
@@ -45,7 +45,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
#define dma_unmap_page(dev, addr, size, dir) ((void)0)
#define dma_unmap_sg(dev, sg, nents, dir) ((void)0)
-#define dma_mapping_error(addr) (0)
+#define dma_mapping_error(dev, addr) (0)
#endif /* !CONFIG_PCI */
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index d31fd49ff79a..2a14302c17a3 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -106,7 +106,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
/* Test for pci_map_single or pci_map_page having generated an error. */
static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
return dma_addr == 0;
}
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index e99406a7bece..f41335ba6337 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -56,7 +56,7 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == ~0;
}
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 57dc672bab8e..0399359ab5d8 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -35,7 +35,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
/*
* dma_map_single can't fail as it is implemented now.
*/
-static inline int dma_mapping_error(dma_addr_t addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
{
return 0;
}
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index edc8d1bfaae2..cb2fb25ff8d9 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -120,7 +120,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
}
static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index 2e8966ca030d..b2898877c07b 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -126,7 +126,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
}
static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index e2468f894d2a..82cd0cb1c3fe 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
#define dma_sync_sg_for_device dma_sync_sg_for_cpu
extern int
-dma_mapping_error(dma_addr_t dma_addr);
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern int
dma_supported(struct device *dev, u64 mask);
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 783ab9944d70..189486c3f92e 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
}
static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return pci_dma_mapping_error(dma_addr);
+ return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
}
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h
index 25c10e96b2b7..37b3706226e7 100644
--- a/include/asm-generic/pci-dma-compat.h
+++ b/include/asm-generic/pci-dma-compat.h
@@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
}
static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
- return dma_mapping_error(dma_addr);
+ return dma_mapping_error(&pdev->dev, dma_addr);
}
#endif
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index 0721a5e8271e..a6d50c77b6bf 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -54,7 +54,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
typedef int ia64_mv_dma_supported (struct device *, u64);
typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index a26cdeb46a57..91f7944333d4 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -84,7 +84,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *s
{
}
-static inline int dma_mapping_error(dma_addr_t handle)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
{
return 0;
}
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index 230b3f1b69b1..c64afb40cd06 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -42,7 +42,7 @@ extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction);
extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(dma_addr_t dma_addr);
+extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern int dma_supported(struct device *dev, u64 mask);
static inline int
diff --git a/include/asm-mn10300/dma-mapping.h b/include/asm-mn10300/dma-mapping.h
index 7c882fca9ec8..ccae8f6c6326 100644
--- a/include/asm-mn10300/dma-mapping.h
+++ b/include/asm-mn10300/dma-mapping.h
@@ -182,7 +182,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index c6c0e9ff6bde..53af696f23d2 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -248,6 +248,6 @@ void * sba_get_iommu(struct parisc_device *dev);
#endif
/* At the moment, we panic on error for IOMMU resource exhaustion */
-#define dma_mapping_error(x) 0
+#define dma_mapping_error(dev, x) 0
#endif
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 74c549780987..c7ca45f97dd2 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -415,7 +415,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
return (dma_addr == DMA_ERROR_CODE);
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 22cc419389fe..6c0b8a2de143 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -171,7 +171,7 @@ static inline int dma_get_cache_alignment(void)
return L1_CACHE_BYTES;
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == 0;
}
diff --git a/include/asm-sparc/dma-mapping_64.h b/include/asm-sparc/dma-mapping_64.h
index 38cbec76a33f..bfa64f9702d5 100644
--- a/include/asm-sparc/dma-mapping_64.h
+++ b/include/asm-sparc/dma-mapping_64.h
@@ -135,7 +135,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
/* No flushing needed to sync cpu writes to the device. */
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return (dma_addr == DMA_ERROR_CODE);
}
diff --git a/include/asm-sparc/pci_32.h b/include/asm-sparc/pci_32.h
index b93b6c79e08f..0ee949d220c0 100644
--- a/include/asm-sparc/pci_32.h
+++ b/include/asm-sparc/pci_32.h
@@ -154,7 +154,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+ dma_addr_t dma_addr)
{
return (dma_addr == PCI_DMA_ERROR_CODE);
}
diff --git a/include/asm-sparc/pci_64.h b/include/asm-sparc/pci_64.h
index f59f2571295b..4f79a54948f6 100644
--- a/include/asm-sparc/pci_64.h
+++ b/include/asm-sparc/pci_64.h
@@ -140,9 +140,10 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
#define PCI64_ADDR_BASE 0xfffc000000000000UL
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+ dma_addr_t dma_addr)
{
- return dma_mapping_error(dma_addr);
+ return dma_mapping_error(&pdev->dev, dma_addr);
}
#ifdef CONFIG_PCI
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..3c034f48fdb0 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
#ifdef CONFIG_ACPI
void *acpi_handle;
#endif
+#ifdef CONFIG_X86_64
+struct dma_mapping_ops *dma_ops;
+#endif
#ifdef CONFIG_DMAR
void *iommu; /* hook for IOMMU specific extension */
#endif
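
On x86_64 the archdata now carries an optional per-device dma_mapping_ops pointer, which get_dma_ops() below prefers over the global dma_ops. A hedged sketch of how an IOMMU backend might attach per-device ops at device setup time (the example_* names and the attach hook are assumptions; only dev->archdata.dma_ops comes from this patch):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Assumed ops table, populated by the real backend elsewhere. */
	static struct dma_mapping_ops example_iommu_ops;

	static void example_attach_iommu_ops(struct pci_dev *pdev)
	{
		/* Devices behind this IOMMU get their own ops; everything
		 * else keeps falling back to the global dma_ops. */
		pdev->dev.archdata.dma_ops = &example_iommu_ops;
	}
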
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c2ddd3d1b883..0eaa9bf6011f 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
extern int force_iommu;
struct dma_mapping_ops {
- int (*mapping_error)(dma_addr_t dma_addr);
+ int (*mapping_error)(struct device *dev,
+ dma_addr_t dma_addr);
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
int is_phys;
};
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
- if (dma_ops->mapping_error)
- return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+ return dma_ops;
+#else
+ if (unlikely(!dev) || !dev->archdata.dma_ops)
+ return dma_ops;
+ else
+ return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+ return 0;
+#else
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
+#endif
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -83,44 +102,53 @@ static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+ return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->unmap_single)
- dma_ops->unmap_single(dev, addr, size, direction);
+ if (ops->unmap_single)
+ ops->unmap_single(dev, addr, size, direction);
}
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_sg(hwdev, sg, nents, direction);
+ return ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->unmap_sg)
- dma_ops->unmap_sg(hwdev, sg, nents, direction);
+ if (ops->unmap_sg)
+ ops->unmap_sg(hwdev, sg, nents, direction);
}
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_cpu)
- dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
- direction);
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
@@ -128,10 +156,11 @@ static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_device)
- dma_ops->sync_single_for_device(hwdev, dma_handle, size,
- direction);
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
@@ -139,11 +168,12 @@ static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_cpu)
- dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
- size, direction);
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+ BUG_ON(!valid_dma_direction(direction));
+ if (ops->sync_single_range_for_cpu)
+ ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+ size, direction);
flush_write_buffers();
}
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
int direction)
{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_device)
- dma_ops->sync_single_range_for_device(hwdev, dma_handle,
- offset, size, direction);
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+ BUG_ON(!valid_dma_direction(direction));
+ if (ops->sync_single_range_for_device)
+ ops->sync_single_range_for_device(hwdev, dma_handle,
+ offset, size, direction);
flush_write_buffers();
}
@@ -164,9 +195,11 @@ static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_cpu)
- dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+ if (ops->sync_sg_for_cpu)
+ ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
}
@@ -174,9 +207,11 @@ static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_device)
- dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+ if (ops->sync_sg_for_device)
+ ops->sync_sg_for_device(hwdev, sg, nelems, direction);
flush_write_buffers();
}
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(dev, page_to_phys(page)+offset,
- size, direction);
+ return ops->map_single(dev, page_to_phys(page) + offset,
+ size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
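
With every inline helper resolving its table through get_dma_ops(), a backend only needs to populate a dma_mapping_ops structure, and the device pointer now reaches its mapping_error hook. A sketch of such a backend (the example_* names and the per-device-sentinel idea are illustrative, not part of this patch):

	#include <linux/dma-mapping.h>

	static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		/* With dev in hand a backend could keep a per-device error
		 * sentinel; here we simply mirror the global check. */
		return dma_addr == bad_dma_address;
	}

	static struct dma_mapping_ops example_dma_ops = {
		.mapping_error	= example_mapping_error,
		/* .map_single, .map_sg, ... supplied by the real backend */
	};
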
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index c706a7442633..2730b351afcf 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index 3c7d537dd15d..51882ae3db4d 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -139,7 +139,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 7d51cbca49ab..75ae6d8aba4f 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
}
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
- if (!dma_mapping_error(dma_addr)) {
+ if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
*mptr++ = cpu_to_le32(0x7C020002);
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 4bf8cade9dbc..e530026eedf7 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
- return pci_dma_mapping_error(addr);
+ return pci_dma_mapping_error(dev->bus->host_pci, addr);
case SSB_BUSTYPE_SSB:
- return dma_mapping_error(addr);
+ return dma_mapping_error(dev->dev, addr);
default:
__ssb_dma_not_implemented(dev);
}
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 90b529f7a154..936e333e7ce5 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1590,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
if (dev->dma_ops)
return dev->dma_ops->mapping_error(dev, dma_addr);
- return dma_mapping_error(dma_addr);
+ return dma_mapping_error(dev->dma_device, dma_addr);
}
/**
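
ib_dma_mapping_error() still consults the device's own dma_ops first (for software RDMA devices) and otherwise forwards to the underlying dma_device; the svc_rdma hunk below calls dma_mapping_error() on that dma_device directly rather than going through the wrapper. A rough ULP-side sketch (the helper name and error code are illustrative):

	#include <rdma/ib_verbs.h>

	static int example_ib_map(struct ib_device *ibdev, void *buf,
				  size_t len, u64 *out)
	{
		u64 addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);

		if (ib_dma_mapping_error(ibdev, addr))
			return -ENOMEM;

		*out = addr;
		return 0;
	}
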
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8cc..977edbdbc1de 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
dma_addr_t handle;
handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
- if (swiotlb_dma_mapping_error(handle))
+ if (swiotlb_dma_mapping_error(hwdev, handle))
return NULL;
ret = bus_to_virt(handle);
@@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
}
int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
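
swiotlb's error sentinel is the bus address of its overflow buffer: when bounce space is exhausted, the mapping lands there, and the now device-aware check recognises that address. A small sketch of the check from a caller's side (the helper name is illustrative; hwdev is accepted for signature parity with dma_mapping_ops even though this implementation ignores it):

	#include <asm/swiotlb.h>
	#include <linux/dma-mapping.h>

	static int example_bounce_map(struct device *hwdev, void *ptr,
				      size_t size, dma_addr_t *handle)
	{
		*handle = swiotlb_map_single(hwdev, ptr, size, DMA_TO_DEVICE);

		/* On exhaustion the mapping points at io_tlb_overflow_buffer,
		 * which swiotlb_dma_mapping_error() treats as failure. */
		if (swiotlb_dma_mapping_error(hwdev, *handle))
			return -ENOMEM;

		return 0;
	}
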
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index a19b22b452a3..84d328329d98 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -169,7 +169,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
(void *)
vec->sge[xdr_sge_no].iov_base + sge_off,
sge_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(sge[sge_no].addr))
+ if (dma_mapping_error(xprt->sc_cm_id->device->dma_device,
+ sge[sge_no].addr))
goto err;
sge_off = 0;
sge_no++;