Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/abort-ev6.S      |   6
 arch/arm/mm/abort-ev7.S      |   6
 arch/arm/mm/alignment.c      |   3
 arch/arm/mm/cache-l2x0.c     | 121
 arch/arm/mm/dma-mapping.c    | 210
 arch/arm/mm/flush.c          |  15
 arch/arm/mm/idmap.c          |   2
 arch/arm/mm/init.c           |   7
 arch/arm/mm/mmu.c            |   4
 arch/arm/mm/proc-macros.S    |   2
 arch/arm/mm/proc-v7-3level.S |   5
 arch/arm/mm/proc-v7.S        |   2
12 files changed, 210 insertions(+), 173 deletions(-)
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 3815a8262af0..8c48c5c22a33 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -17,12 +17,6 @@
*/
.align 5
ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_V6
- sub r1, sp, #4 @ Get unused stack location
- strex r0, r1, [r1] @ Clear the exclusive monitor
-#elif defined(CONFIG_CPU_32v6K)
- clrex
-#endif
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
/*
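The CLREX removed here guarded the CPU's local exclusive monitor: a data
abort taken between a load-exclusive and its paired store-exclusive can
leave the monitor in an UNPREDICTABLE state. A minimal sketch of the kind
of code that depends on the monitor, assuming an ARMv6K+ target and
GCC-style inline asm (illustrative only, not part of the patch):

    static inline void atomic_inc_sketch(int *counter)
    {
            int tmp, failed;

            __asm__ __volatile__(
            "1:     ldrex   %0, [%2]\n"     /* load, set exclusive monitor */
            "       add     %0, %0, #1\n"
            "       strex   %1, %0, [%2]\n" /* store only if still exclusive */
            "       teq     %1, #0\n"
            "       bne     1b\n"           /* monitor was lost: retry */
            : "=&r" (tmp), "=&r" (failed)
            : "r" (counter)
            : "cc", "memory");
    }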
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 703375277ba6..4812ad054214 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -13,12 +13,6 @@
*/
.align 5
ENTRY(v7_early_abort)
- /*
- * The effects of data aborts on the exclusive access monitor are
- * UNPREDICTABLE. Do a CLREX to clear the state
- */
- clrex
-
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 0c1ab49e5f7b..83792f4324ea 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -41,6 +41,7 @@
* This code is not portable to processors with late data abort handling.
*/
#define CODING_BITS(i) (i & 0x0e000000)
+#define COND_BITS(i) (i & 0xf0000000)
#define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */
#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
@@ -821,6 +822,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
break;
case 0x04000000: /* ldr or str immediate */
+ if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
+ goto bad;
offset.un = OFFSET_BITS(instr);
handler = do_alignment_ldrstr;
break;
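A condition field of 0xf puts an instruction in the unconditional encoding
space, where the NEON element/structure loads and stores (VLDn/VSTn) share
the 0x04000000 coding bits with "ldr/str immediate"; the handler must
therefore check the condition bits before decoding. A hypothetical,
standalone sketch of that check (the test encodings are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CODING_BITS(i)  ((i) & 0x0e000000)
    #define COND_BITS(i)    ((i) & 0xf0000000)

    static int is_ldrstr_imm(uint32_t instr)
    {
            if (CODING_BITS(instr) != 0x04000000)
                    return 0;
            if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn/VSTn */
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", is_ldrstr_imm(0xe5912000)); /* ldr r2, [r1] -> 1 */
            printf("%d\n", is_ldrstr_imm(0xf4000a0f)); /* VSTn space   -> 0 */
            return 0;
    }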
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 5f2c988a06ac..55f9d6e0cc88 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
+#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -945,6 +946,98 @@ static int l2_wt_override;
* pass it through the device tree */
static u32 cache_id_part_number_from_dt;
+/**
+ * l2x0_cache_size_of_parse() - read cache size parameters from DT
+ * @np: the device tree node for the l2 cache
+ * @aux_val: pointer to machine-supplied auxiliary register value, to
+ * be augmented by the call (bits to be set to 1)
+ * @aux_mask: pointer to machine-supplied auxiliary register mask, to
+ * be augmented by the call (bits to be set to 0)
+ * @associativity: variable to return the calculated associativity in
+ * @max_way_size: the maximum size in bytes for the cache ways
+ */
+static void __init l2x0_cache_size_of_parse(const struct device_node *np,
+ u32 *aux_val, u32 *aux_mask,
+ u32 *associativity,
+ u32 max_way_size)
+{
+ u32 mask = 0, val = 0;
+ u32 cache_size = 0, sets = 0;
+ u32 way_size_bits = 1;
+ u32 way_size = 0;
+ u32 block_size = 0;
+ u32 line_size = 0;
+
+ of_property_read_u32(np, "cache-size", &cache_size);
+ of_property_read_u32(np, "cache-sets", &sets);
+ of_property_read_u32(np, "cache-block-size", &block_size);
+ of_property_read_u32(np, "cache-line-size", &line_size);
+
+ if (!cache_size || !sets)
+ return;
+
+ /* All these L2 caches use the same size for lines and blocks */
+ if (!line_size) {
+ if (block_size) {
+ /* If line size is not given, it is assumed equal to block size */
+ line_size = block_size;
+ } else {
+ /* Fall back to known size */
+ pr_warn("L2C OF: no cache block/line size given: "
+ "falling back to default size %d bytes\n",
+ CACHE_LINE_SIZE);
+ line_size = CACHE_LINE_SIZE;
+ }
+ }
+
+ if (line_size != CACHE_LINE_SIZE)
+ pr_warn("L2C OF: DT supplied line size %d bytes does "
+ "not match hardware line size of %d bytes\n",
+ line_size,
+ CACHE_LINE_SIZE);
+
+ /*
+ * Since:
+ * set size = cache size / sets
+ * ways = cache size / (sets * line size)
+ * way size = cache size / (cache size / (sets * line size))
+ * way size = sets * line size
+ * associativity = ways = cache size / way size
+ */
+ way_size = sets * line_size;
+ *associativity = cache_size / way_size;
+
+ if (way_size > max_way_size) {
+ pr_err("L2C OF: set size %dKB is too large\n", way_size);
+ return;
+ }
+
+ pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
+ cache_size, cache_size >> 10);
+ pr_info("L2C OF: override line size: %d bytes\n", line_size);
+ pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
+ way_size, way_size >> 10);
+ pr_info("L2C OF: override associativity: %d\n", *associativity);
+
+ /*
+ * Calculate the way size field, bits [19:17] of the auxiliary
+ * control register: 512KB -> 6, 256KB -> 5, ... 16KB -> 1
+ */
+ way_size_bits = ilog2(way_size >> 10) - 3;
+ if (way_size_bits < 1 || way_size_bits > 6) {
+ pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
+ way_size);
+ return;
+ }
+
+ mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
+ val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
+
+ *aux_val &= ~mask;
+ *aux_val |= val;
+ *aux_mask &= ~mask;
+}
+
static void __init l2x0_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
@@ -952,6 +1045,7 @@ static void __init l2x0_of_parse(const struct device_node *np,
u32 tag = 0;
u32 dirty = 0;
u32 val = 0, mask = 0;
+ u32 assoc;
of_property_read_u32(np, "arm,tag-latency", &tag);
if (tag) {
@@ -974,6 +1068,15 @@ static void __init l2x0_of_parse(const struct device_node *np,
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
}
+ l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+ if (assoc > 8) {
+ pr_err("l2x0 of: cache setting yield too high associativity\n");
+ pr_err("l2x0 of: %d calculated, max 8\n", assoc);
+ } else {
+ mask |= L2X0_AUX_CTRL_ASSOC_MASK;
+ val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
+ }
+
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
@@ -1021,6 +1124,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 data[3] = { 0, 0, 0 };
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
+ u32 assoc;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
@@ -1047,6 +1151,23 @@ static void __init l2c310_of_parse(const struct device_node *np,
writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
l2x0_base + L310_ADDR_FILTER_START);
}
+
+ l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+ switch (assoc) {
+ case 16:
+ *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+ *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
+ *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+ break;
+ case 8:
+ *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+ *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+ break;
+ default:
+ pr_err("PL310 OF: cache setting yield illegal associativity\n");
+ pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
+ break;
+ }
}
static const struct l2c_init_data of_l2c310_data __initconst = {
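The arithmetic above reduces to way_size = sets * line_size and
associativity = cache_size / way_size, with the auxiliary control
register's way size field (bits [19:17]) encoded as ilog2(way size in
KB) - 3. A standalone sketch with purely illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int ilog2_u32(uint32_t v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            uint32_t cache_size = 512 * 1024; /* DT "cache-size" */
            uint32_t sets = 1024;             /* DT "cache-sets" */
            uint32_t line_size = 32;          /* DT "cache-line-size" */

            uint32_t way_size = sets * line_size;          /* 32 KiB */
            uint32_t assoc = cache_size / way_size;        /* 16 ways */
            uint32_t bits = ilog2_u32(way_size >> 10) - 3; /* 2 */

            printf("way size %u, assoc %u, way size bits %u\n",
                   way_size, assoc, bits);
            return 0;
    }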
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7a996aaa061e..c245d903927f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -12,6 +12,7 @@
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -298,57 +299,29 @@ static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
{
- struct vm_struct *area;
- unsigned long addr;
-
/*
* DMA allocation can be mapped to user space, so let's
* set VM_USERMAP flags too.
*/
- area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
- caller);
- if (!area)
- return NULL;
- addr = (unsigned long)area->addr;
- area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
- if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
- vunmap((void *)addr);
- return NULL;
- }
- return (void *)addr;
+ return dma_common_contiguous_remap(page, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ prot, caller);
}
static void __dma_free_remap(void *cpu_addr, size_t size)
{
- unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
- struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || (area->flags & flags) != flags) {
- WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
- return;
- }
- unmap_kernel_range((unsigned long)cpu_addr, size);
- vunmap(cpu_addr);
+ dma_common_free_remap(cpu_addr, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+static struct gen_pool *atomic_pool;
-struct dma_pool {
- size_t size;
- spinlock_t lock;
- unsigned long *bitmap;
- unsigned long nr_pages;
- void *vaddr;
- struct page **pages;
-};
-
-static struct dma_pool atomic_pool = {
- .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
-};
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
- atomic_pool.size = memparse(p, &p);
+ atomic_pool_size = memparse(p, &p);
return 0;
}
early_param("coherent_pool", early_coherent_pool);
@@ -358,14 +331,14 @@ void __init init_dma_coherent_pool_size(unsigned long size)
/*
* Catch any attempt to set the pool size too late.
*/
- BUG_ON(atomic_pool.vaddr);
+ BUG_ON(atomic_pool);
/*
* Set architecture specific coherent pool size only if
* it has not been changed by kernel command line parameter.
*/
- if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
- atomic_pool.size = size;
+ if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+ atomic_pool_size = size;
}
/*
@@ -373,52 +346,44 @@ void __init init_dma_coherent_pool_size(unsigned long size)
*/
static int __init atomic_pool_init(void)
{
- struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
gfp_t gfp = GFP_KERNEL | GFP_DMA;
- unsigned long nr_pages = pool->size >> PAGE_SHIFT;
- unsigned long *bitmap;
struct page *page;
- struct page **pages;
void *ptr;
- int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
- bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!bitmap)
- goto no_bitmap;
-
- pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- goto no_pages;
+ atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!atomic_pool)
+ goto out;
if (dev_get_cma_area(NULL))
- ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
- atomic_pool_init);
+ ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+ &page, atomic_pool_init);
else
- ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
- atomic_pool_init);
+ ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
+ &page, atomic_pool_init);
if (ptr) {
- int i;
-
- for (i = 0; i < nr_pages; i++)
- pages[i] = page + i;
-
- spin_lock_init(&pool->lock);
- pool->vaddr = ptr;
- pool->pages = pages;
- pool->bitmap = bitmap;
- pool->nr_pages = nr_pages;
- pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
- (unsigned)pool->size / 1024);
+ int ret;
+
+ ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
+ page_to_phys(page),
+ atomic_pool_size, -1);
+ if (ret)
+ goto destroy_genpool;
+
+ gen_pool_set_algo(atomic_pool,
+ gen_pool_first_fit_order_align,
+ (void *)PAGE_SHIFT);
+ pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
+ atomic_pool_size / 1024);
return 0;
}
- kfree(pages);
-no_pages:
- kfree(bitmap);
-no_bitmap:
- pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
- (unsigned)pool->size / 1024);
+destroy_genpool:
+ gen_pool_destroy(atomic_pool);
+ atomic_pool = NULL;
+out:
+ pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
+ atomic_pool_size / 1024);
return -ENOMEM;
}
/*
@@ -522,76 +487,36 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
- struct dma_pool *pool = &atomic_pool;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned int pageno;
- unsigned long flags;
+ unsigned long val;
void *ptr = NULL;
- unsigned long align_mask;
- if (!pool->vaddr) {
+ if (!atomic_pool) {
WARN(1, "coherent pool not initialised!\n");
return NULL;
}
- /*
- * Align the region allocation - allocations from pool are rather
- * small, so align them to their order in pages, minimum is a page
- * size. This helps reduce fragmentation of the DMA space.
- */
- align_mask = (1 << get_order(size)) - 1;
-
- spin_lock_irqsave(&pool->lock, flags);
- pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
- 0, count, align_mask);
- if (pageno < pool->nr_pages) {
- bitmap_set(pool->bitmap, pageno, count);
- ptr = pool->vaddr + PAGE_SIZE * pageno;
- *ret_page = pool->pages[pageno];
- } else {
- pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
- "Please increase it with coherent_pool= kernel parameter!\n",
- (unsigned)pool->size / 1024);
+ val = gen_pool_alloc(atomic_pool, size);
+ if (val) {
+ phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+ *ret_page = phys_to_page(phys);
+ ptr = (void *)val;
}
- spin_unlock_irqrestore(&pool->lock, flags);
return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
- struct dma_pool *pool = &atomic_pool;
- void *end = start + size;
- void *pool_start = pool->vaddr;
- void *pool_end = pool->vaddr + pool->size;
-
- if (start < pool_start || start >= pool_end)
- return false;
-
- if (end <= pool_end)
- return true;
-
- WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
- start, end - 1, pool_start, pool_end - 1);
-
- return false;
+ return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}
static int __free_from_pool(void *start, size_t size)
{
- struct dma_pool *pool = &atomic_pool;
- unsigned long pageno, count;
- unsigned long flags;
-
if (!__in_atomic_pool(start, size))
return 0;
- pageno = (start - pool->vaddr) >> PAGE_SHIFT;
- count = size >> PAGE_SHIFT;
-
- spin_lock_irqsave(&pool->lock, flags);
- bitmap_clear(pool->bitmap, pageno, count);
- spin_unlock_irqrestore(&pool->lock, flags);
+ gen_pool_free(atomic_pool, (unsigned long)start, size);
return 1;
}
@@ -1271,29 +1196,8 @@ static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
{
- unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct vm_struct *area;
- unsigned long p;
-
- area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
- caller);
- if (!area)
- return NULL;
-
- area->pages = pages;
- area->nr_pages = nr_pages;
- p = (unsigned long)area->addr;
-
- for (i = 0; i < nr_pages; i++) {
- phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
- if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
- goto err;
- p += PAGE_SIZE;
- }
- return area->addr;
-err:
- unmap_kernel_range((unsigned long)area->addr, size);
- vunmap(area->addr);
+ return dma_common_pages_remap(pages, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
return NULL;
}
@@ -1355,11 +1259,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
static struct page **__atomic_get_pages(void *addr)
{
- struct dma_pool *pool = &atomic_pool;
- struct page **pages = pool->pages;
- int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+ struct page *page;
+ phys_addr_t phys;
+
+ phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+ page = phys_to_page(phys);
- return pages + offs;
+ return (struct page **)page;
}
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
@@ -1501,8 +1407,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
- unmap_kernel_range((unsigned long)cpu_addr, size);
- vunmap(cpu_addr);
+ dma_common_free_remap(cpu_addr, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
__iommu_remove_mapping(dev, handle, size);
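The rework drops the hand-rolled bitmap allocator in favour of
lib/genalloc. A condensed sketch of the gen_pool calls the patch relies
on, in the order it uses them (the helper names here are invented for
illustration and error handling is trimmed):

    #include <linux/genalloc.h>

    static struct gen_pool *pool;

    static int pool_sketch_init(void *vaddr, phys_addr_t phys, size_t size)
    {
            /* one-page minimum allocation order, any NUMA node */
            pool = gen_pool_create(PAGE_SHIFT, -1);
            if (!pool)
                    return -ENOMEM;

            /* hand the preallocated region to the pool, virt and phys */
            if (gen_pool_add_virt(pool, (unsigned long)vaddr, phys,
                                  size, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* mirror the old behaviour: align chunks to their order */
            gen_pool_set_algo(pool, gen_pool_first_fit_order_align,
                              (void *)PAGE_SHIFT);
            return 0;
    }

    static void *pool_sketch_alloc(size_t size, phys_addr_t *phys)
    {
            unsigned long va = gen_pool_alloc(pool, size);

            if (!va)
                    return NULL;
            *phys = gen_pool_virt_to_phys(pool, va);
            return (void *)va;
    }

    static void pool_sketch_free(void *va, size_t size)
    {
            gen_pool_free(pool, (unsigned long)va, size);
    }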
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 43d54f5b26b9..265b836b3bd1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -400,3 +400,18 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
*/
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = pmd_mksplitting(*pmdp);
+ VM_BUG_ON(address & ~PMD_MASK);
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+
+ /* dummy IPI to serialise against fast_gup */
+ kick_all_cpus_sync();
+}
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
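kick_all_cpus_sync() issues a no-op IPI to every online CPU and waits for
completion. Since fast_gup walks page tables with interrupts disabled, the
round trip guarantees that any walk which began before the splitting bit
was set has finished by the time the call returns. A sketch of just that
pattern (assumed usage, no new API):

    #include <linux/smp.h>

    static void wait_for_concurrent_fast_gup(void)
    {
            /*
             * fast_gup runs with IRQs off; once every CPU has taken
             * the dummy IPI, no walk that started before our PMD
             * update became visible can still be in flight.
             */
            kick_all_cpus_sync();
    }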
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index c447ec70e868..e7a81cebbb2e 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -27,7 +27,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
pmd = pmd_alloc_one(&init_mm, addr);
if (!pmd) {
- pr_warning("Failed to allocate identity pmd.\n");
+ pr_warn("Failed to allocate identity pmd.\n");
return;
}
/*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 659c75d808dc..92bba32d9230 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -322,7 +322,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
* reserve memory for DMA contiguous allocations,
* must come from DMA area inside low memory
*/
- dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+ dma_contiguous_reserve(arm_dma_limit);
arm_memblock_steal_permitted = false;
memblock_dump_all();
@@ -636,6 +636,11 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
+ if (start == initrd_start)
+ start = round_down(start, PAGE_SIZE);
+ if (end == initrd_end)
+ end = round_up(end, PAGE_SIZE);
+
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
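poison_init_mem() and free_reserved_area() work on whole pages, so an
initrd whose start or end is not page aligned must be widened to page
boundaries first. The kernel's round_down()/round_up() are the usual
power-of-two macros; a userspace sketch with hypothetical addresses:

    #include <stdio.h>

    #define PAGE_SIZE        4096UL
    #define round_down(x, y) ((x) & ~((y) - 1))
    #define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
            unsigned long start = 0x80321200UL; /* hypothetical initrd_start */
            unsigned long end = 0x80ff8e00UL;   /* hypothetical initrd_end */

            printf("start %#lx -> %#lx\n", start, round_down(start, PAGE_SIZE));
            printf("end   %#lx -> %#lx\n", end, round_up(end, PAGE_SIZE));
            return 0;
    }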
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8348ed6b2efe..9f98cec7fe1e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -223,13 +223,13 @@ early_param("ecc", early_ecc);
static int __init early_cachepolicy(char *p)
{
- pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+ pr_warn("cachepolicy kernel parameter not supported without cp15\n");
}
early_param("cachepolicy", early_cachepolicy);
static int __init noalign_setup(char *__unused)
{
- pr_warning("noalign kernel parameter not supported without cp15\n");
+ pr_warn("noalign kernel parameter not supported without cp15\n");
}
__setup("noalign", noalign_setup);
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index ee1d80593958..ba1196c968d8 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -279,7 +279,7 @@ ENTRY(\name\()_processor_functions)
.if \suspend
.word cpu_\name\()_suspend_size
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
.word cpu_\name\()_do_suspend
.word cpu_\name\()_do_resume
#else
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 1a24e9232ec8..d3daed0ae0ad 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext)
mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits
addls \ttbr1, \ttbr1, #TTBR1_OFFSET
- adcls \tmp, \tmp, #0
mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1
mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits
@@ -158,9 +157,9 @@ ENDPROC(cpu_v7_set_pte_ext)
* TFR EV X F IHD LR S
* .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
* rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
- * 11 0 110 1 0011 1100 .111 1101 < we want
+ * 11 0 110 0 0011 1100 .111 1101 < we want
*/
.align 2
.type v7_crval, #object
v7_crval:
- crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
+ crval clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c
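The crval pair is consumed by the v7 setup code roughly as
SCTLR = (SCTLR & ~clear) | mmuset (ucset in the uncached case), so the
hunk above both clears an extra bit and stops setting one. A sketch using
the new values and a hypothetical SCTLR reset value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t clear = 0x0122c302;  /* bits to clear, from this hunk */
            uint32_t mmuset = 0x30c03c7d; /* bits to set, from this hunk */
            uint32_t sctlr = 0x00c50078;  /* hypothetical reset value */

            sctlr = (sctlr & ~clear) | mmuset;
            printf("SCTLR -> %#010x\n", sctlr);
            return 0;
    }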
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b5d67db20897..b3a947863ac7 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -570,7 +570,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
- __v7_proc __v7_b15mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_b15mp_setup
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*