Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/Makefile        |  2
 arch/sh/mm/consistent.c    |  2
 arch/sh/mm/fault.c         | 14
 arch/sh/mm/init.c          | 10
 arch/sh/mm/ioremap.c       | 55
 arch/sh/mm/ioremap.h       | 23
 arch/sh/mm/ioremap_fixed.c |  1
 arch/sh/mm/pgtable.c       |  7
 8 files changed, 85 insertions(+), 29 deletions(-)
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 487da0ff03b3..f69ddc70b146 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -43,5 +43,3 @@ obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
GCOV_PROFILE_pmb.o := n
-
-ccflags-y := -Werror
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 3169a343a5ab..0de206c1acfe 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -57,8 +57,6 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
return -ENOMEM;
}
- memset(buf, 0, memsize);
-
r->flags = IORESOURCE_MEM;
r->start = dma_handle;
r->end = r->start + memsize - 1;
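
The memset() dropped above is redundant rather than behavioral: buf in this
function comes from dma_alloc_coherent(), which has returned zeroed memory
since the v5.0-era dma-mapping rework. A minimal sketch of the call site,
assuming the local names used in this function:

	buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* no explicit memset(buf, 0, memsize) needed: already zeroed */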
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index fbe1f2fe9a8c..88a1f453d73e 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -208,13 +208,12 @@ show_fault_oops(struct pt_regs *regs, unsigned long address)
if (!oops_may_print())
return;
- printk(KERN_ALERT "PC:");
pr_alert("BUG: unable to handle kernel %s at %08lx\n",
address < PAGE_SIZE ? "NULL pointer dereference"
: "paging request",
address);
pr_alert("PC:");
- printk_address(regs->pc, 1, KERN_ALERT);
+ printk_address(regs->pc, 1);
show_pte(NULL, address);
}
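
Two cleanups meet in this hunk: the stray printk(KERN_ALERT "PC:") that made
"PC:" print twice is dropped, and printk_address() loses its loglevel argument
to match its two-parameter form (address, reliable-flag). Illustrative console
output, derived from the pr_alert() format strings above (the symbol name and
offsets are made up):

	BUG: unable to handle kernel NULL pointer dereference at 00000000
	PC: some_function+0x12/0x40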
@@ -482,22 +481,13 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
if (mm_fault_error(regs, error_code, address, fault))
return;
if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
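
The deleted maj_flt/min_flt block is not lost: passing regs to
handle_mm_fault() is the v5.9 interface change that centralizes page fault
accounting in generic mm code. A rough sketch of what the core now does on
the arch's behalf, modeled on mm/memory.c's mm_account_fault() (names and
details approximated, not copied):

	static void account_fault_sketch(struct pt_regs *regs,
					 unsigned long address, vm_fault_t ret)
	{
		if (ret & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
	}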
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 613de8096335..4735176ab811 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -29,6 +29,7 @@
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
+#include "ioremap.h"
pgd_t swapper_pg_dir[PTRS_PER_PGD];
@@ -425,15 +426,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret;
}
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 addr)
-{
- /* Node 0 for now.. */
- return 0;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
-
void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap)
{
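
The NUMA stub can go because generic mm gained a __weak default for
memory_add_physaddr_to_nid() with the same "node 0" behavior (also v5.9).
A sketch of the fallback this arch now inherits; the exact message wording
is an assumption:

	/* generic fallback, roughly as in mm/memory_hotplug.c */
	int __weak memory_add_physaddr_to_nid(u64 start)
	{
		pr_info_once("Unknown node for memory at 0x%llx, assuming node 0\n",
			     start);
		return 0;
	}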
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index f6d02246d665..21342581144d 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -18,12 +18,59 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
+#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
+#include "ioremap.h"
+
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment. This is true for P1
+ * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+ phys_addr_t last_addr = offset + size - 1;
+
+ /*
+ * For P1 and P2 space this is trivial, as everything is already
+ * mapped. Uncached accesses for P1 addresses are done through P2.
+ * In the P3 case or for addresses outside of the 29-bit space,
+ * mapping must be done by the PMB or by using page tables.
+ */
+ if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+ u64 flags = pgprot_val(prot);
+
+ /*
+ * Anything using the legacy PTEA space attributes needs
+ * to be kicked down to page table mappings.
+ */
+ if (unlikely(flags & _PAGE_PCC_MASK))
+ return NULL;
+ if (unlikely(flags & _PAGE_CACHABLE))
+ return (void __iomem *)P1SEGADDR(offset);
+
+ return (void __iomem *)P2SEGADDR(offset);
+ }
+
+ /* P4 above the store queues is always mapped. */
+ if (unlikely(offset >= P3_ADDR_MAX))
+ return (void __iomem *)P4SEGADDR(offset);
+
+ return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot) NULL
+#endif /* CONFIG_29BIT */
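
For readers not steeped in SH segmentation: P1/P2/P4 "mappings" are pure
address arithmetic, which is why __ioremap_29bit() can return without ever
touching a page table. Roughly, per <asm/addrspace.h> (segment bases from the
classic 29-bit layout; the _SKETCH names are illustrative, not the real
macros):

	#define P1SEGADDR_SKETCH(a) /* cached   */ \
		(((unsigned long)(a) & 0x1fffffff) | 0x80000000)
	#define P2SEGADDR_SKETCH(a) /* uncached */ \
		(((unsigned long)(a) & 0x1fffffff) | 0xa0000000)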
/*
* Remap an arbitrary physical address space into the kernel virtual
@@ -42,6 +89,14 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
unsigned long offset, last_addr, addr, orig_addr;
void __iomem *mapped;
+ mapped = __ioremap_trapped(phys_addr, size);
+ if (mapped)
+ return mapped;
+
+ mapped = __ioremap_29bit(phys_addr, size, pgprot);
+ if (mapped)
+ return mapped;
+
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
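
With the two early returns in place, __ioremap_caller() now tries the cheap
paths in order: trapped I/O emulation, then the 29-bit segment shortcut, and
only then the PMB/page-table path below. A hypothetical caller is unaffected
either way (address, size, and register offset here are made up):

	void __iomem *regs = ioremap(0x1f000000, 0x100);
	if (!regs)
		return -ENOMEM;
	__raw_writel(1, regs + 0x10);	/* illustrative register poke */
	iounmap(regs);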
diff --git a/arch/sh/mm/ioremap.h b/arch/sh/mm/ioremap.h
new file mode 100644
index 000000000000..f2544e721a35
--- /dev/null
+++ b/arch/sh/mm/ioremap.h
@@ -0,0 +1,23 @@
+#ifndef _SH_MM_IOREMAP_H
+#define _SH_MM_IOREMAP_H 1
+
+#ifdef CONFIG_IOREMAP_FIXED
+void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
+int iounmap_fixed(void __iomem *);
+void ioremap_fixed_init(void);
+#else
+static inline void __iomem *
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
+{
+ BUG();
+ return NULL;
+}
+static inline void ioremap_fixed_init(void)
+{
+}
+static inline int iounmap_fixed(void __iomem *addr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_IOREMAP_FIXED */
+#endif /* _SH_MM_IOREMAP_H */
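
The !CONFIG_IOREMAP_FIXED stubs are deliberately asymmetric: ioremap_fixed()
BUG()s because reaching it without the feature is a programming error, while
iounmap_fixed() just reports -EINVAL so an unmap path can treat it as "not
mine" and fall through. A sketch of that caller shape, assuming the usual
iounmap() structure on this arch:

	/* try the fixed-slot path first; -EINVAL means keep looking */
	if (iounmap_fixed(addr) == 0)
		return;
	/* ...fall through to the page-table/PMB unmap path... */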
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index aab3f82856bb..136113bcac25 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -23,6 +23,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
+#include "ioremap.h"
struct ioremap_map {
void __iomem *addr;
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index 5c8f9247c3c2..cf7ce4b57359 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -2,8 +2,6 @@
#include <linux/mm.h>
#include <linux/slab.h>
-#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
-
static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
@@ -13,6 +11,7 @@ void pgd_ctor(void *x)
{
pgd_t *pgd = x;
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
memcpy(pgd + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
@@ -32,7 +31,7 @@ void pgtable_cache_init(void)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
+ return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -48,7 +47,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
+ return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
}
void pmd_free(struct mm_struct *mm, pmd_t *pmd)
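
The PGALLOC_GFP removal splits into two cases because only the pgd cache has
a constructor: slab objects with a ctor must not be allocated with __GFP_ZERO
(the zeroing would clobber what the ctor set up, and newer kernels warn about
the combination), so pgd_ctor() now clears the user half itself while the
ctor-less pmd cache keeps requesting pre-zeroed objects. A compressed sketch
of the resulting pairing, with cache-creation arguments approximated rather
than copied from pgtable_cache_init():

	/* pgd: ctor initializes, so plain GFP_KERNEL at alloc time */
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * sizeof(pgd_t),
				       0, SLAB_PANIC, pgd_ctor);
	/* pmd: no ctor, so __GFP_ZERO at alloc time is fine */
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * sizeof(pmd_t),
				       0, SLAB_PANIC, NULL);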