Diffstat (limited to 'arch/powerpc/kernel/iommu.c')

 arch/powerpc/kernel/iommu.c | 54 +++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 43 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 4eba60a32890..ba0694071728 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -23,7 +23,6 @@
  */

-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/slab.h>
@@ -38,6 +37,7 @@
 #include <asm/iommu.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
+#include <asm/kdump.h>

 #define DBG(...)
@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
  * Build a iommu_table structure. This contains a bit map which
  * is used to manage allocation of the tce space.
  */
-struct iommu_table *iommu_init_table(struct iommu_table *tbl)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
 	unsigned long sz;
 	static int welcomed = 0;
+	struct page *page;

 	/* Set aside 1/4 of the table for large allocations. */
 	tbl->it_halfpoint = tbl->it_size * 3 / 4;
@@ -429,18 +430,47 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
 	/* number of bytes needed for the bitmap */
 	sz = (tbl->it_size + 7) >> 3;

-	tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
-	if (!tbl->it_map)
+	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+	if (!page)
 		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
-
+	tbl->it_map = page_address(page);
 	memset(tbl->it_map, 0, sz);

 	tbl->it_hint = 0;
 	tbl->it_largehint = tbl->it_halfpoint;
 	spin_lock_init(&tbl->it_lock);

+#ifdef CONFIG_CRASH_DUMP
+	if (ppc_md.tce_get) {
+		unsigned long index, tceval;
+		unsigned long tcecount = 0;
+
+		/*
+		 * Reserve the existing mappings left by the first kernel.
+		 */
+		for (index = 0; index < tbl->it_size; index++) {
+			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+			/*
+			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
+			 */
+			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
+				__set_bit(index, tbl->it_map);
+				tcecount++;
+			}
+		}
+		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
+			printk(KERN_WARNING "TCE table is full; ");
+			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
+				KDUMP_MIN_TCE_ENTRIES);
+			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
+				index < tbl->it_size; index++)
+				__clear_bit(index, tbl->it_map);
+		}
+	}
+#else
 	/* Clear the hardware table in case firmware left allocations in it */
 	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+#endif

 	if (!welcomed) {
 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
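
[Editor's note] The CONFIG_CRASH_DUMP branch above is the heart of the kdump change: a kdump kernel must not hand out TCEs that the crashed kernel's devices may still be DMAing through, so rather than wiping the table it reads every entry back through ppc_md.tce_get() and pre-marks the live ones in the allocation bitmap, reclaiming the tail of the table only when fewer than KDUMP_MIN_TCE_ENTRIES slots remain free. A minimal userspace sketch of that reservation policy follows; TABLE_SIZE, FREED_TCE and MIN_FREE are stand-ins for tbl->it_size, the JS20 "freed" pattern and KDUMP_MIN_TCE_ENTRIES, and the bit helpers stand in for the kernel's __set_bit()/__clear_bit(). Illustration only, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE	32
#define MIN_FREE	8			/* stand-in for KDUMP_MIN_TCE_ENTRIES */
#define FREED_TCE	0x7fffffffffffffffUL	/* "freed" sentinel seen on JS20 */

static unsigned char bitmap[(TABLE_SIZE + 7) / 8];

static void set_bit_(unsigned long i)   { bitmap[i / 8] |= (unsigned char)(1u << (i % 8)); }
static void clear_bit_(unsigned long i) { bitmap[i / 8] &= (unsigned char)~(1u << (i % 8)); }

int main(void)
{
	uint64_t tce[TABLE_SIZE];
	unsigned long index, reserved = 0;

	/* Pretend the first kernel left most entries live. */
	for (index = 0; index < TABLE_SIZE; index++)
		tce[index] = (index < 28) ? 0x1000 + index : FREED_TCE;

	/* Reserve every entry that still holds a live mapping. */
	for (index = 0; index < TABLE_SIZE; index++) {
		if (tce[index] && tce[index] != FREED_TCE) {
			set_bit_(index);
			reserved++;
		}
	}

	/* Not enough free slots left for the kdump boot: reclaim the tail. */
	if (TABLE_SIZE - reserved < MIN_FREE) {
		printf("TCE table is full; freeing %d entries for the kdump boot\n",
		       MIN_FREE);
		for (index = TABLE_SIZE - MIN_FREE; index < TABLE_SIZE; index++)
			clear_bit_(index);
	}
	return 0;
}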
@@ -536,11 +566,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
+		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
 	unsigned int npages, order;
+	struct page *page;

 	size = PAGE_ALIGN(size);
 	npages = size >> PAGE_SHIFT;
@@ -560,9 +591,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 		return NULL;

 	/* Alloc enough pages (and possibly more) */
-	ret = (void *)__get_free_pages(flag, order);
-	if (!ret)
+	page = alloc_pages_node(node, flag, order);
+	if (!page)
 		return NULL;
+	ret = page_address(page);
 	memset(ret, 0, size);

 	/* Set up tces to cover the allocated range */
@@ -570,9 +602,9 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 			mask >> PAGE_SHIFT, order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
-		ret = NULL;
-	} else
-		*dma_handle = mapping;
+		return NULL;
+	}
+	*dma_handle = mapping;

 	return ret;
 }
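
[Editor's note] Taken together with the iommu_init_table() hunk, the pattern of this patch is consistent: __get_free_pages() becomes alloc_pages_node() followed by page_address(), so both the allocation bitmap and coherent DMA buffers can be placed on the NUMA node the device sits on, with callers passing that node in through the new nid/node parameters. The error path is also tidied from an if/else into early returns. As a rough userspace analogue of node-targeted allocation (not the kernel API), here is a sketch using libnuma; the hard-coded node number is an assumption, where the kernel derives it from the device:

#include <numa.h>	/* build with: gcc demo.c -lnuma */
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t sz = 1 << 20;
	int node = 0;	/* assumed; the kernel patch gets this from the device's node */
	void *buf;

	if (numa_available() < 0) {
		fprintf(stderr, "no NUMA support on this system\n");
		return 1;
	}

	/* Rough analogue of alloc_pages_node(nid, ...): prefer a given node. */
	buf = numa_alloc_onnode(sz, node);
	if (!buf)
		return 1;
	memset(buf, 0, sz);	/* mirrors the memset() after page_address() */

	printf("allocated %zu bytes preferring node %d\n", sz, node);
	numa_free(buf, sz);
	return 0;
}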