Diffstat:
 arch/sh/include/asm/io.h   |  23
 arch/sh/include/asm/mmu.h  |  31
 arch/sh/mm/ioremap.c       |  70
 arch/sh/mm/ioremap_fixed.c |  11
 arch/sh/mm/pmb.c           | 256
 5 files changed, 223 insertions(+), 168 deletions(-)
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 7dab7b23a5ec..f689554e17c1 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -291,21 +291,21 @@ unsigned long long poke_real_address_q(unsigned long long addr,
* doesn't exist, so everything must go through page tables.
*/
#ifdef CONFIG_MMU
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);
static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
static inline void __iomem *
-__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
- unsigned long last_addr = offset + size - 1;
+ phys_addr_t last_addr = offset + size - 1;
/*
* For P1 and P2 space this is trivial, as everything is already
@@ -329,7 +329,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
}
static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
void __iomem *ret;
@@ -349,35 +349,32 @@ __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
-static inline void __iomem *
-ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}
static inline void __iomem *
-ioremap_cache(unsigned long offset, unsigned long size)
+ioremap_cache(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL);
}
#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
-ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif
#ifdef CONFIG_IOREMAP_FIXED
-extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
- unsigned long, pgprot_t);
+extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
- unsigned long size, pgprot_t prot)
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
BUG();
return NULL;
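
The io.h hunks widen every physical-address parameter from unsigned long (or resource_size_t) to phys_addr_t and drop the now-unused offset argument from ioremap_fixed(). A minimal usage sketch of the reworked ioremap() prototype follows; the device base, size, and register offset are made up and not part of this patch:

    /*
     * Illustrative only: a hypothetical driver mapping a small register
     * block through the phys_addr_t-based ioremap(). All constants are
     * invented for the example.
     */
    #include <linux/kernel.h>
    #include <linux/io.h>

    #define DEMO_REGS_PHYS   ((phys_addr_t)0xfe400000)   /* hypothetical */
    #define DEMO_REGS_SIZE   0x100
    #define DEMO_REG_STATUS  0x04                        /* hypothetical */

    static int demo_probe(void)
    {
            void __iomem *regs;

            /* the offset argument is now phys_addr_t; callers need no change */
            regs = ioremap(DEMO_REGS_PHYS, DEMO_REGS_SIZE);
            if (!regs)
                    return -ENOMEM;

            pr_info("demo status: %08x\n", readl(regs + DEMO_REG_STATUS));

            iounmap(regs);
            return 0;
    }
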
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 15a05b615ba7..19fe84550b49 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -55,19 +55,29 @@ typedef struct {
#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
-long pmb_remap(unsigned long virt, unsigned long phys,
- unsigned long size, pgprot_t prot);
-void pmb_unmap(unsigned long addr);
-void pmb_init(void);
bool __in_29bit_mode(void);
+
+void pmb_init(void);
+int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
+ unsigned long size, pgprot_t prot);
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+ pgprot_t prot, void *caller);
+int pmb_unmap(void __iomem *addr);
+
#else
-static inline long pmb_remap(unsigned long virt, unsigned long phys,
- unsigned long size, pgprot_t prot)
+
+static inline void __iomem *
+pmb_remap_caller(phys_addr_t phys, unsigned long size,
+ pgprot_t prot, void *caller)
+{
+ return NULL;
+}
+
+static inline int pmb_unmap(void __iomem *addr)
{
return -EINVAL;
}
-#define pmb_unmap(addr) do { } while (0)
#define pmb_init(addr) do { } while (0)
#ifdef CONFIG_29BIT
@@ -77,6 +87,13 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
#endif
#endif /* CONFIG_PMB */
+
+static inline void __iomem *
+pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
+{
+ return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __MMU_H */
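
The mmu.h hunks replace the old long-returning pmb_remap() with pmb_remap_caller(), which hands back a void __iomem * (or an ERR_PTR() / NULL), plus an always-available inline pmb_remap() wrapper that records the call site. A minimal sketch of the calling convention, mirroring the check __ioremap_caller() performs below; demo_map_large() is a hypothetical helper, not part of this patch:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <asm/mmu.h>
    #include <asm/pgtable.h>

    static void __iomem *demo_map_large(phys_addr_t phys, unsigned long size)
    {
            void __iomem *vaddr;

            vaddr = pmb_remap(phys, size, PAGE_KERNEL_NOCACHE);
            if (vaddr && !IS_ERR(vaddr))
                    return vaddr;   /* mapped through pre-faulted PMB entries */

            /* NULL (no PMB coverage) or ERR_PTR(): fall back to page tables */
            return ioremap(phys, size);
    }
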
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index c68d2d7d00a9..1ab2385ecefe 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -34,11 +34,12 @@
* caller shouldn't need to know that small detail.
*/
void __iomem * __init_refok
-__ioremap_caller(unsigned long phys_addr, unsigned long size,
+__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
pgprot_t pgprot, void *caller)
{
struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr;
+ void __iomem *mapped;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -46,6 +47,20 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
return NULL;
/*
+ * If we can't yet use the regular approach, go the fixmap route.
+ */
+ if (!mem_init_done)
+ return ioremap_fixed(phys_addr, size, pgprot);
+
+ /*
+ * First try to remap through the PMB.
+ * PMB entries are all pre-faulted.
+ */
+ mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
+ if (mapped && !IS_ERR(mapped))
+ return mapped;
+
+ /*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
@@ -53,12 +68,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
size = PAGE_ALIGN(last_addr+1) - phys_addr;
/*
- * If we can't yet use the regular approach, go the fixmap route.
- */
- if (!mem_init_done)
- return ioremap_fixed(phys_addr, offset, size, pgprot);
-
- /*
* Ok, go for it..
*/
area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -67,33 +76,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
area->phys_addr = phys_addr;
orig_addr = addr = (unsigned long)area->addr;
-#ifdef CONFIG_PMB
- /*
- * First try to remap through the PMB once a valid VMA has been
- * established. Smaller allocations (or the rest of the size
- * remaining after a PMB mapping due to the size not being
- * perfectly aligned on a PMB size boundary) are then mapped
- * through the UTLB using conventional page tables.
- *
- * PMB entries are all pre-faulted.
- */
- if (unlikely(phys_addr >= P1SEG)) {
- unsigned long mapped;
-
- mapped = pmb_remap(addr, phys_addr, size, pgprot);
- if (likely(mapped)) {
- addr += mapped;
- phys_addr += mapped;
- size -= mapped;
- }
+ if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+ vunmap((void *)orig_addr);
+ return NULL;
}
-#endif
-
- if (likely(size))
- if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
- vunmap((void *)orig_addr);
- return NULL;
- }
return (void __iomem *)(offset + (char *)orig_addr);
}
@@ -133,23 +119,11 @@ void __iounmap(void __iomem *addr)
if (iounmap_fixed(addr) == 0)
return;
-#ifdef CONFIG_PMB
/*
- * Purge any PMB entries that may have been established for this
- * mapping, then proceed with conventional VMA teardown.
- *
- * XXX: Note that due to the way that remove_vm_area() does
- * matching of the resultant VMA, we aren't able to fast-forward
- * the address past the PMB space until the end of the VMA where
- * the page tables reside. As such, unmap_vm_area() will be
- * forced to linearly scan over the area until it finds the page
- * tables where PTEs that need to be unmapped actually reside,
- * which is far from optimal. Perhaps we need to use a separate
- * VMA for the PMB mappings?
- * -- PFM.
+ * If the PMB handled it, there's nothing else to do.
*/
- pmb_unmap(vaddr);
-#endif
+ if (pmb_unmap(addr) == 0)
+ return;
p = remove_vm_area((void *)(vaddr & PAGE_MASK));
if (!p) {
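
In ioremap.c, __ioremap_caller() now tries the fixmap path before mem_init_done, then a PMB mapping, and only then conventional page tables, while __iounmap() lets each layer claim the address in the same order before tearing down the VM area. A condensed, hedged restatement of that teardown order (not the literal kernel code, and omitting the early 29-bit/P1SEG checks):

    #include <linux/io.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <asm/mmu.h>

    static void iounmap_order_sketch(void __iomem *addr)
    {
            unsigned long vaddr = (unsigned long __force)addr;
            struct vm_struct *p;

            if (iounmap_fixed(addr) == 0)   /* claimed by a fixmap slot */
                    return;

            if (pmb_unmap(addr) == 0)       /* claimed by the PMB */
                    return;

            /* conventional VM-area teardown, as in the hunk above */
            p = remove_vm_area((void *)(vaddr & PAGE_MASK));
            if (p)
                    kfree(p);
    }
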
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 0b78b1e20ef1..7f682e5dafcf 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -45,14 +45,21 @@ void __init ioremap_fixed_init(void)
}
void __init __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
- unsigned long size, pgprot_t prot)
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
enum fixed_addresses idx0, idx;
struct ioremap_map *map;
unsigned int nrpages;
+ unsigned long offset;
int i, slot;
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(phys_addr + size) - phys_addr;
+
slot = -1;
for (i = 0; i < FIX_N_IOREMAPS; i++) {
map = &ioremap_maps[i];
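
ioremap_fixed() now does its own page-alignment bookkeeping, since __ioremap_caller() calls it before performing any alignment of its own. A small sketch of that arithmetic with made-up numbers; the commented values assume 4 KiB pages:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static void demo_fixed_alignment(void)
    {
            phys_addr_t phys_addr = 0x1f000124;     /* hypothetical, unaligned */
            unsigned long size = 0x30;
            unsigned long offset;

            offset = phys_addr & ~PAGE_MASK;                        /* 0x124 */
            phys_addr &= PAGE_MASK;                                 /* 0x1f000000 */
            size = PAGE_ALIGN(phys_addr + size) - phys_addr;        /* 0x1000 */

            pr_info("base=%#llx offset=%#lx size=%#lx\n",
                    (unsigned long long)phys_addr, offset, size);
    }
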
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 35b364f931ea..9a516b89839a 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,6 +23,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -51,6 +52,16 @@ struct pmb_entry {
struct pmb_entry *link;
};
+static struct {
+ unsigned long size;
+ int flag;
+} pmb_sizes[] = {
+ { .size = SZ_512M, .flag = PMB_SZ_512M, },
+ { .size = SZ_128M, .flag = PMB_SZ_128M, },
+ { .size = SZ_64M, .flag = PMB_SZ_64M, },
+ { .size = SZ_16M, .flag = PMB_SZ_16M, },
+};
+
static void pmb_unmap_entry(struct pmb_entry *, int depth);
static DEFINE_RWLOCK(pmb_rwlock);
@@ -72,6 +83,88 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
return mk_pmb_entry(entry) | PMB_DATA;
}
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+ return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+ unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+ flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+ flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ flags |= PMB_C;
+#endif
+
+ return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+ unsigned long pmb_flags = 0;
+ u64 flags = pgprot_val(prot);
+
+ if (flags & _PAGE_CACHABLE)
+ pmb_flags |= PMB_C;
+ if (flags & _PAGE_WT)
+ pmb_flags |= PMB_WT | PMB_UB;
+
+ return pmb_flags;
+}
+
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+ return (b->vpn == (a->vpn + a->size)) &&
+ (b->ppn == (a->ppn + a->size)) &&
+ (b->flags == a->flags);
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return true;
+
+ return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+ return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+ return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return pmb_sizes[i].flag;
+
+ return 0;
+}
+
static int pmb_alloc_entry(void)
{
int pos;
@@ -139,33 +232,13 @@ static void pmb_free(struct pmb_entry *pmbe)
}
/*
- * Ensure that the PMB entries match our cache configuration.
- *
- * When we are in 32-bit address extended mode, CCR.CB becomes
- * invalid, so care must be taken to manually adjust cacheable
- * translations.
- */
-static __always_inline unsigned long pmb_cache_flags(void)
-{
- unsigned long flags = 0;
-
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- flags |= PMB_C | PMB_WT | PMB_UB;
-#elif defined(CONFIG_CACHE_WRITEBACK)
- flags |= PMB_C;
-#endif
-
- return flags;
-}
-
-/*
* Must be run uncached.
*/
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
- writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
- writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
- mk_pmb_data(pmbe->entry));
+ /* Set V-bit */
+ __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
+ __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -193,39 +266,56 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
spin_unlock_irqrestore(&pmbe->lock, flags);
}
-static struct {
- unsigned long size;
- int flag;
-} pmb_sizes[] = {
- { .size = SZ_512M, .flag = PMB_SZ_512M, },
- { .size = SZ_128M, .flag = PMB_SZ_128M, },
- { .size = SZ_64M, .flag = PMB_SZ_64M, },
- { .size = SZ_16M, .flag = PMB_SZ_16M, },
-};
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size, pgprot_t prot)
+{
+ return 0;
+}
-long pmb_remap(unsigned long vaddr, unsigned long phys,
- unsigned long size, pgprot_t prot)
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+ pgprot_t prot, void *caller)
{
struct pmb_entry *pmbp, *pmbe;
- unsigned long wanted;
- int pmb_flags, i;
- long err;
- u64 flags;
+ unsigned long pmb_flags;
+ int i, mapped;
+ unsigned long orig_addr, vaddr;
+ phys_addr_t offset, last_addr;
+ phys_addr_t align_mask;
+ unsigned long aligned;
+ struct vm_struct *area;
- flags = pgprot_val(prot);
+ /*
+ * Small mappings need to go through the TLB.
+ */
+ if (size < SZ_16M)
+ return ERR_PTR(-EINVAL);
+ if (!pmb_prot_valid(prot))
+ return ERR_PTR(-EINVAL);
- pmb_flags = PMB_WT | PMB_UB;
+ pmbp = NULL;
+ pmb_flags = pgprot_to_pmb_flags(prot);
+ mapped = 0;
- /* Convert typical pgprot value to the PMB equivalent */
- if (flags & _PAGE_CACHABLE) {
- pmb_flags |= PMB_C;
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (size >= pmb_sizes[i].size)
+ break;
- if ((flags & _PAGE_WT) == 0)
- pmb_flags &= ~(PMB_WT | PMB_UB);
- }
+ last_addr = phys + size;
+ align_mask = ~(pmb_sizes[i].size - 1);
+ offset = phys & ~align_mask;
+ phys &= align_mask;
+ aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
- pmbp = NULL;
- wanted = size;
+ area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
+ P3SEG, caller);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys;
+ orig_addr = vaddr = (unsigned long)area->addr;
+
+ if (!pmb_addr_valid(vaddr, aligned))
+ return ERR_PTR(-EFAULT);
again:
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
@@ -237,19 +327,19 @@ again:
pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
PMB_NO_ENTRY);
if (IS_ERR(pmbe)) {
- err = PTR_ERR(pmbe);
- goto out;
+ pmb_unmap_entry(pmbp, mapped);
+ return pmbe;
}
spin_lock_irqsave(&pmbe->lock, flags);
+ pmbe->size = pmb_sizes[i].size;
+
__set_pmb_entry(pmbe);
- phys += pmb_sizes[i].size;
- vaddr += pmb_sizes[i].size;
- size -= pmb_sizes[i].size;
-
- pmbe->size = pmb_sizes[i].size;
+ phys += pmbe->size;
+ vaddr += pmbe->size;
+ size -= pmbe->size;
/*
* Link adjacent entries that span multiple PMB entries
@@ -269,6 +359,7 @@ again:
* pmb_sizes[i].size again.
*/
i--;
+ mapped++;
spin_unlock_irqrestore(&pmbe->lock, flags);
}
@@ -276,61 +367,35 @@ again:
if (size >= SZ_16M)
goto again;
- return wanted - size;
-
-out:
- pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
-
- return err;
+ return (void __iomem *)(offset + (char *)orig_addr);
}
-void pmb_unmap(unsigned long addr)
+int pmb_unmap(void __iomem *addr)
{
struct pmb_entry *pmbe = NULL;
- int i;
+ unsigned long vaddr = (unsigned long __force)addr;
+ int i, found = 0;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
if (test_bit(i, pmb_map)) {
pmbe = &pmb_entry_list[i];
- if (pmbe->vpn == addr)
+ if (pmbe->vpn == vaddr) {
+ found = 1;
break;
+ }
}
}
read_unlock(&pmb_rwlock);
- pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
-}
-
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
-{
- return (b->vpn == (a->vpn + a->size)) &&
- (b->ppn == (a->ppn + a->size)) &&
- (b->flags == a->flags);
-}
-
-static bool pmb_size_valid(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return true;
-
- return false;
-}
-
-static int pmb_size_to_flags(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return pmb_sizes[i].flag;
+ if (found) {
+ pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+ return 0;
+ }
- return 0;
+ return -EINVAL;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
@@ -368,11 +433,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
write_unlock_irqrestore(&pmb_rwlock, flags);
}
-static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
- return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
-}
-
static void __init pmb_notify(void)
{
int i;
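
The pmb_remap_caller() hunk above sizes each request by picking the largest supported PMB entry size that does not exceed it, then aligns the physical base down and the reserved span up to that granularity. A worked sketch of that selection, using a local table that mirrors pmb_sizes[] and made-up numbers in the comments:

    #include <linux/kernel.h>
    #include <asm/sizes.h>

    static void demo_pmb_sizing(void)
    {
            static const unsigned long sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };
            phys_addr_t phys = 0x10020000;          /* hypothetical, 128 KiB into a 64M window */
            unsigned long size = SZ_64M + SZ_16M;   /* 80 MiB requested */
            phys_addr_t last_addr, offset, align_mask;
            unsigned long aligned;
            int i;

            /* largest supported entry size that the request can still fill */
            for (i = 0; i < ARRAY_SIZE(sizes); i++)
                    if (size >= sizes[i])
                            break;                  /* i == 2: SZ_64M */

            last_addr  = phys + size;               /* 0x15020000 */
            align_mask = ~(phys_addr_t)(sizes[i] - 1);
            offset     = phys & ~align_mask;        /* 0x00020000 */
            phys      &= align_mask;                /* 0x10000000 */
            aligned    = ALIGN(last_addr, sizes[i]) - phys; /* 0x08000000 reserved */

            pr_info("entry=%luM base=%#llx span=%#lx offset=%#llx\n",
                    sizes[i] >> 20, (unsigned long long)phys, aligned,
                    (unsigned long long)offset);
    }
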