Diffstat:
 arch/x86_64/kernel/pci-dma.c | 59 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 52 insertions(+), 7 deletions(-)
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index af035ede70cd..9c44f4f2433d 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <asm/io.h>
#include <asm/proto.h>
+#include <asm/calgary.h>
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);
@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly= 0;
#endif
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly = 0;
+
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
struct device fallback_dev = {
.bus_id = "fallback device",
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_32BIT_MASK,
.dma_mask = &fallback_dev.coherent_dma_mask,
};
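
Note: the bare 0xffffffff masks replaced throughout this patch map to named
constants from <linux/dma-mapping.h>. Their values are simply the all-ones
mask for the stated width, roughly:

	#define DMA_40BIT_MASK	0x000000ffffffffffULL
	#define DMA_32BIT_MASK	0x00000000ffffffffULL
	#define DMA_24BIT_MASK	0x0000000000ffffffULL

The new iommu_detected flag is meant to be set by whichever hardware IOMMU
probe fires first, so that the software fall-backs (swiotlb, nommu) can stand
down later in boot.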
@@ -54,6 +58,10 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
else
#endif
node = numa_node_id();
+
+	if (node < first_node(node_online_map))
+		node = first_node(node_online_map);
+
page = alloc_pages_node(node, gfp, order);
return page ? page_address(page) : NULL;
}
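
The clamp added above guards against a device whose NUMA affinity resolves
below the first online node (for example -1, meaning "no affinity reported"),
so alloc_pages_node() is always handed a valid online node. A minimal
stand-alone sketch of the same guard; sanitize_node() is a hypothetical name,
not part of this patch:

	/* Hypothetical helper: map "no affinity"/out-of-range node ids onto
	 * the first online node before calling the page allocator. */
	static int sanitize_node(int node)
	{
		if (node < first_node(node_online_map))
			node = first_node(node_online_map);
		return node;
	}

	/* e.g.:  page = alloc_pages_node(sanitize_node(node), gfp, order); */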
@@ -73,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
dev = &fallback_dev;
dma_mask = dev->coherent_dma_mask;
if (dma_mask == 0)
- dma_mask = 0xffffffff;
+ dma_mask = DMA_32BIT_MASK;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
@@ -86,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
larger than 16MB and in this case we have a chance of
finding fitting memory in the next higher zone first. If
not retry with true GFP_DMA. -AK */
- if (dma_mask <= 0xffffffff)
+ if (dma_mask <= DMA_32BIT_MASK)
gfp |= GFP_DMA32;
again:
@@ -107,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* Don't use the 16MB ZONE_DMA unless absolutely
needed. It's better to use remapping first. */
- if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
+ if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
goto again;
}
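
The comparisons above now use DMA_32BIT_MASK, but the allocation strategy is
unchanged: masks of 32 bits or less are served from ZONE_DMA32 first, and the
scarce 16MB ZONE_DMA is only tried when the returned pages land above the
mask. A condensed sketch of that control flow (not the exact kernel code;
dma_alloc_pages() is the helper defined earlier in this file):

	void *alloc_coherent_sketch(struct device *dev, size_t size,
				    dma_addr_t *handle, gfp_t gfp)
	{
		u64 mask = dev->coherent_dma_mask ? : DMA_32BIT_MASK;
		void *memory;

		if (mask <= DMA_32BIT_MASK)
			gfp |= GFP_DMA32;	/* prefer the 4GB zone first */
	again:
		memory = dma_alloc_pages(dev, gfp, get_order(size));
		if (memory == NULL)
			return NULL;

		if (virt_to_bus(memory) + size > mask) {
			free_pages((unsigned long)memory, get_order(size));
			/* Result too high for the mask: retry from ZONE_DMA. */
			if (mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}
			return NULL;
		}
		*handle = virt_to_bus(memory);
		return memory;
	}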
@@ -170,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask)
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
- if (mask < 0x00ffffff)
+ if (mask < DMA_24BIT_MASK)
return 0;
/* Tell the device to use SAC when IOMMU force is on. This
@@ -185,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask)
SAC for these. Assume all masks <= 40 bits are of this
type. Normally this doesn't make any difference, but gives
more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
+ if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
return 0;
}
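
dma_supported() is what dma_set_mask()/pci_set_dma_mask() consult before
accepting a driver's mask, so rejecting masks of 40 bits and up while
iommu_sac_force is set steers such drivers back to a 32-bit (single address
cycle) mask that the IOMMU can remap. A sketch of the driver-side negotiation
that ends up here; mydev_setup_dma() is illustrative, not from the tree:

	static int mydev_setup_dma(struct pci_dev *pdev)
	{
		/* Ask for full 64-bit DMA first ... */
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
			return 0;
		/* ... and fall back to a SAC mask if dma_supported()
		 * rejected it, e.g. because iommu_sac_force is set. */
		return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	}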
@@ -262,7 +270,7 @@ __init int iommu_setup(char *p)
swiotlb = 1;
#endif
-#ifdef CONFIG_GART_IOMMU
+#ifdef CONFIG_IOMMU
gart_parse_options(p);
#endif
@@ -272,3 +280,40 @@ __init int iommu_setup(char *p)
}
return 1;
}
+__setup("iommu=", iommu_setup);
+
+void __init pci_iommu_alloc(void)
+{
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+#ifdef CONFIG_IOMMU
+	iommu_hole_init();
+#endif
+
+#ifdef CONFIG_CALGARY_IOMMU
+	detect_calgary();
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
+static int __init pci_iommu_init(void)
+{
+#ifdef CONFIG_CALGARY_IOMMU
+	calgary_iommu_init();
+#endif
+
+#ifdef CONFIG_IOMMU
+	gart_iommu_init();
+#endif
+
+	no_iommu_init();
+	return 0;
+}
+
+/* Must execute after PCI subsystem */
+fs_initcall(pci_iommu_init);
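
Taken together, the new hooks split IOMMU bring-up into two stages:
pci_iommu_alloc() runs early in boot so each IOMMU can detect its hardware and
reserve aperture or bounce-buffer memory, while pci_iommu_init() runs as an
fs_initcall, after the PCI subsystem is up, to install the winning
implementation's DMA operations; no_iommu_init() at the end is the catch-all
and is expected to back off if another backend already installed its ops.
A sketch of the detect/init protocol a backend would follow (the my_iommu_*
names and probe_hardware() are illustrative, not from the tree; dma_ops is
assumed to be the arch's global struct dma_mapping_ops pointer):

	static struct dma_mapping_ops my_iommu_dma_ops;	/* callbacks omitted */
	static int my_iommu_present;

	void __init detect_my_iommu(void)	/* called from pci_iommu_alloc() */
	{
		if (!probe_hardware())		/* assumed hardware probe */
			return;
		my_iommu_present = 1;
		iommu_detected = 1;		/* lets swiotlb/nommu stand down */
	}

	int __init my_iommu_init(void)		/* called from pci_iommu_init() */
	{
		if (!my_iommu_present)
			return -ENODEV;
		dma_ops = &my_iommu_dma_ops;	/* install this IOMMU's mapping ops */
		return 0;
	}

The ordering matters: a hardware IOMMU must get the chance to set
iommu_detected before pci_swiotlb_init() decides whether the system needs
software bounce buffering.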