Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--   arch/ia64/mm/contig.c       6
-rw-r--r--   arch/ia64/mm/discontig.c   10
-rw-r--r--   arch/ia64/mm/fault.c       26
-rw-r--r--   arch/ia64/mm/init.c        23
-rw-r--r--   arch/ia64/mm/ioremap.c      2
-rw-r--r--   arch/ia64/mm/tlb.c          6
6 files changed, 23 insertions(+), 50 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index d29fb6b9fa33..5b00dc3898e1 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -108,7 +108,6 @@ setup_per_cpu_areas(void)
struct pcpu_group_info *gi;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
- int rc;
ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
if (!ai)
@@ -134,10 +133,7 @@ setup_per_cpu_areas(void)
ai->atom_size = PAGE_SIZE;
ai->alloc_size = PERCPU_PAGE_SIZE;
- rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
- if (rc)
- panic("failed to setup percpu area (err=%d)", rc);
-
+ pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
pcpu_free_alloc_info(ai);
}
#else
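The rc variable and its check can go because pcpu_setup_first_chunk() was made void upstream and now panics internally on failure; no early-boot caller could do anything else anyway. A minimal sketch of the resulting caller pattern, with the sizing details from the hunk above elided:

	struct pcpu_alloc_info *ai;

	/* One group covering every possible CPU (the contiguous case). */
	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");

	/* ... fill in static/reserved/dynamic sizes, atom_size, cpu_map ... */

	/* Panics itself on failure, so there is no return value to test. */
	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);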
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 05490dd073e6..4f33f6e7e206 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -186,7 +186,7 @@ void __init setup_per_cpu_areas(void)
unsigned long base_offset;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
- int node, prev_node, unit, nr_units, rc;
+ int node, prev_node, unit, nr_units;
ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
if (!ai)
@@ -245,10 +245,7 @@ void __init setup_per_cpu_areas(void)
gi->cpu_map = &cpu_map[unit];
}
- rc = pcpu_setup_first_chunk(ai, base);
- if (rc)
- panic("failed to setup percpu area (err=%d)", rc);
-
+ pcpu_setup_first_chunk(ai, base);
pcpu_free_alloc_info(ai);
}
#endif
@@ -396,8 +393,7 @@ static void __meminit scatter_node_data(void)
*
* Each node's per-node area has a copy of the global pg_data_t list, so
* we copy that to each node here, as well as setting the per-cpu pointer
- * to the local node data structure. The active_cpus field of the per-node
- * structure gets setup by the platform_cpu_init() function later.
+ * to the local node data structure.
*/
static void __init initialize_pernode_data(void)
{
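The same void pcpu_setup_first_chunk() change applies on the NUMA path. The trimmed comment belongs to initialize_pernode_data(); the dropped sentence referred to platform_cpu_init(), which no longer exists now that the ia64 machine-vector layer is gone. What the surviving comment describes, condensed into a sketch (helper and field names follow this file, but treat the exact shapes as illustrative):

	static void __init initialize_pernode_data(void)
	{
		int cpu, node;

		/* Copy the global pg_data_t list into each node's area. */
		scatter_node_data();

		/* Point each CPU's per-cpu pointer at its local node data. */
		for_each_possible_early_cpu(cpu) {
			node = node_cpuid[cpu].nid;
			per_cpu(ia64_cpu_info, cpu).node_data =
				mem_data[node].node_data;
		}
	}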
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 5baeb022f474..c2f299fe9e04 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -21,28 +21,6 @@
extern int die(char *, struct pt_regs *, long);
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
- int ret = 0;
-
- if (!user_mode(regs)) {
- /* kprobe_running() needs smp_processor_id() */
- preempt_disable();
- if (kprobe_running() && kprobe_fault_handler(regs, trap))
- ret = 1;
- preempt_enable();
- }
-
- return ret;
-}
-#else
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
- return 0;
-}
-#endif
-
/*
* Return TRUE if ADDRESS points at a page in the kernel's mapped segment
* (inside region 5, on ia64) and that page is present.
@@ -116,7 +94,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* This is to handle the kprobes on user space access instructions
*/
- if (notify_page_fault(regs, TRAP_BRKPT))
+ if (kprobe_page_fault(regs, TRAP_BRKPT))
return;
if (user_mode(regs))
@@ -249,7 +227,7 @@ retry:
}
if (user_mode(regs)) {
force_sig_fault(signal, code, (void __user *) address,
- 0, __ISR_VALID, isr, current);
+ 0, __ISR_VALID, isr);
return;
}
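Both fault.c hunks track cross-arch API consolidation: the deleted notify_page_fault() duplicated logic that every architecture carried, now centralized as kprobe_page_fault() in linux/kprobes.h, and force_sig_fault() lost its task argument because it always operates on current. A minimal sketch of the early-bailout pattern a fault handler uses (the handler name and trap plumbing here are illustrative):

	#include <linux/kprobes.h>

	static int example_fault_entry(struct pt_regs *regs, unsigned int trap)
	{
		/*
		 * A kernel-mode fault while a kprobe is active is handed
		 * to the kprobe fault handler; kprobe_page_fault() wraps
		 * the user_mode()/kprobe_running() checks the deleted
		 * helper open-coded.
		 */
		if (kprobe_page_fault(regs, trap))
			return 1;

		/* ... regular page-fault processing continues here ... */
		return 0;
	}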
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d28e29103bdb..bf9df2625bc8 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/dma-noncoherent.h>
+#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
@@ -23,10 +24,10 @@
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
+#include <linux/swiotlb.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
@@ -63,11 +64,10 @@ __ia64_sync_icache_dcache (pte_t pte)
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+ flush_icache_range(addr, addr + page_size(page));
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
-#ifdef CONFIG_SWIOTLB
/*
* Since DMA is i-cache coherent, any (complete) pages that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
@@ -82,7 +82,6 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
} while (++pfn <= PHYS_PFN(paddr + size - 1));
}
-#endif
inline void
ia64_set_rbs_bot (void)
@@ -632,13 +631,17 @@ mem_init (void)
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
-#ifdef CONFIG_PCI
/*
- * This needs to be called _after_ the command line has been parsed but _before_
- * any drivers that may need the PCI DMA interface are initialized or bootmem has
- * been freed.
+ * This needs to be called _after_ the command line has been parsed but
+ * _before_ any drivers that may need the PCI DMA interface are
+ * initialized or bootmem has been freed.
*/
- platform_dma_init();
+#ifdef CONFIG_INTEL_IOMMU
+ detect_intel_iommu();
+ if (!iommu_detected)
+#endif
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(1);
#endif
#ifdef CONFIG_FLATMEM
@@ -681,7 +684,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap)
{
@@ -693,4 +695,3 @@ void arch_remove_memory(int nid, u64 start, u64 size,
__remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
-#endif
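Two of the init.c hunks swap ia64-local constructs for generic ones: platform_dma_init() becomes direct IOMMU detection with a swiotlb fallback, and the icache flush adopts page_size(), the linux/mm.h helper that handles compound pages. The helper amounts to exactly the expression it replaces:

	/* From linux/mm.h: byte size of a page, compound pages included. */
	static inline unsigned long page_size(struct page *page)
	{
		return PAGE_SIZE << compound_order(page);
	}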
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 5e3e7b1fdac5..0c0de2c4ec69 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -42,7 +42,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
/*
* For things in kern_memmap, we must use the same attribute
* as the rest of the kernel. For more details, see
- * Documentation/ia64/aliasing.txt.
+ * Documentation/ia64/aliasing.rst.
*/
attr = kern_mem_attribute(phys_addr, size);
if (attr & EFI_MEMORY_WB)
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 0714df1b7854..72cc568bc841 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -245,7 +245,8 @@ resetsema:
spinaphore_init(&ptcg_sem, max_purges);
}
-void
+#ifdef CONFIG_SMP
+static void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits)
{
@@ -282,6 +283,7 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
activate_context(active_mm);
}
}
+#endif /* CONFIG_SMP */
void
local_flush_tlb_all (void)
@@ -332,7 +334,7 @@ __flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
preempt_disable();
#ifdef CONFIG_SMP
if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
- platform_global_tlb_purge(mm, start, end, nbits);
+ ia64_global_tlb_purge(mm, start, end, nbits);
preempt_enable();
return;
}
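The tlb.c hunks finish the machine-vector removal on the TLB side: once platform_global_tlb_purge() is gone, ia64_global_tlb_purge() has a single caller, __flush_tlb_range() in the same file, so it can become static and SMP-only. Condensed, the dispatch the final hunk lands in looks like this (the local-purge loop is elided and nbits is assumed computed earlier in the function):

	preempt_disable();
	#ifdef CONFIG_SMP
		/* mm may be live on another CPU: broadcast purge. */
		if (mm != current->active_mm ||
		    cpumask_weight(mm_cpumask(mm)) != 1) {
			ia64_global_tlb_purge(mm, start, end, nbits);
			preempt_enable();
			return;
		}
	#endif
		/* Single-CPU case: loop ptc.l over [start, end) locally. */
	preempt_enable();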