Diffstat (limited to 'arch/arm/include/asm')
 arch/arm/include/asm/cacheflush.h  |  4
 arch/arm/include/asm/cputype.h     |  2
 arch/arm/include/asm/div64.h       |  2
 arch/arm/include/asm/glue-proc.h   |  9
 arch/arm/include/asm/io.h          |  8
 arch/arm/include/asm/mmu_context.h | 23
 arch/arm/include/asm/percpu.h      | 11
 arch/arm/include/asm/smp_plat.h    |  2
 arch/arm/include/asm/spinlock.h    | 25
 arch/arm/include/asm/thread_info.h |  2
 arch/arm/include/asm/tlb.h         | 27
 arch/arm/include/asm/tls.h         | 40
 12 files changed, 91 insertions(+), 64 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bff71388e72a..17d0ae8672fa 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
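The no-op body is replaced by a real out-of-line flush: on aliasing VIPT D-caches, data written through a kernel mapping can sit in cache lines that a user-space alias of the same page never sees. A minimal caller sketch, assuming the usual kmap pattern (the function name is hypothetical; the kmap/flush calls are the generic kernel API):

	static void write_page_sketch(struct page *page, const void *src, size_t len)
	{
		void *dst = kmap_atomic(page);

		memcpy(dst, src, len);
		flush_kernel_dcache_page(page);	/* no longer a no-op on ARM */
		kunmap_atomic(dst);
	}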
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7652712d1d14..dba62cb1ad08 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -32,6 +32,8 @@
#define MPIDR_HWID_BITMASK 0xFFFFFF
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
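MPIDR_INVALID works as a sentinel because every valid hardware ID fits inside the 24-bit MPIDR_HWID_BITMASK, so its complement (0xFF000000) can never collide with a real ID. A sketch of the property this relies on (hypothetical check; read_cpuid_mpidr() is the existing accessor in this header):

	u32 hwid = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

	BUG_ON(hwid == MPIDR_INVALID);	/* cannot happen: 24-bit ID vs 0xFF000000 */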
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index fe92ccf1d0b0..191ada6e4d2d 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -46,7 +46,7 @@
__rem; \
})
-#if __GNUC__ < 4
+#if __GNUC__ < 4 || !defined(CONFIG_AEABI)
/*
* gcc versions earlier than 4.0 are simply too problematic for the
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54724b6..8017e94acc5e 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
# endif
#endif
+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_pj4b
+# endif
+#endif
+
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
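The PJ4B block follows the standard glue pattern used by every entry in this header: the first configured CPU type claims CPU_NAME and keeps direct calls; a second type forces MULTI_CPU, routing calls through the processor function-pointer table instead. A sketch of the single-CPU expansion, assuming only CONFIG_CPU_PJ4B is enabled:

	/* With CPU_NAME == cpu_pj4b and MULTI_CPU undefined, token pasting
	 * in __glue() resolves the generic call at compile time: */
	cpu_proc_init();	/* expands to cpu_pj4b_proc_init() */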
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 652b56086de7..d070741b2b37 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -130,16 +130,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
*/
extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
size_t, unsigned int, void *);
-extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
+extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
extern void __arm_iounmap(volatile void __iomem *addr);
-extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
extern void (*arch_iounmap)(volatile void __iomem *);
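Widening the first parameter from unsigned long to phys_addr_t matters on LPAE kernels, where physical addresses are 64-bit while unsigned long stays 32-bit: the old prototypes silently truncated any address above 4 GiB. A sketch under that assumption (the address and device window are hypothetical):

	/* Hypothetical device window above the 32-bit boundary: */
	phys_addr_t regs_phys = 0x100000000ULL;	/* bit 32 set */
	void __iomem *regs;

	regs = __arm_ioremap(regs_phys, SZ_4K, MT_DEVICE);
	/* with the old 'unsigned long' prototype this would have
	 * mapped physical address 0x0 instead */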
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc1..b5792b7fd8d3 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
+#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>
void __check_vmalloc_seq(struct mm_struct *mm);
@@ -27,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
#else /* !CONFIG_CPU_HAS_ASID */
@@ -98,12 +107,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- /* check for possible thread migration */
- if (!cpumask_empty(mm_cpumask(next)) &&
+ /*
+ * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
+ * so check for possible thread migration and invalidate the I-cache
+ * if we're new to this CPU.
+ */
+ if (cache_ops_need_broadcast() &&
+ !cpumask_empty(mm_cpumask(next)) &&
!cpumask_test_cpu(cpu, mm_cpumask(next)))
__flush_icache_all();
-#endif
+
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_and_switch_context(next, tsk);
if (cache_is_vivt())
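The compile-time CONFIG_SMP guard becomes a runtime cache_ops_need_broadcast() check, so cores whose cache maintenance is broadcast in hardware skip the I-cache invalidation entirely; this is also why the header now pulls in asm/smp_plat.h. Roughly what that helper tests (paraphrased from smp_plat.h, not part of this patch):

	static inline int cache_ops_need_broadcast(void)
	{
		if (!is_smp())
			return 0;
		/* ID_MMFR3[15:12] >= 1 means maintenance operations are
		 * broadcast in hardware, so no software help is needed */
		return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
	}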
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a3..209e6504922e 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- /* Read TPIDRPRW */
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+ register unsigned long *sp asm ("sp");
+
+ /*
+ * Read TPIDRPRW.
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
return off;
}
#define __my_cpu_offset __my_cpu_offset()
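The trick here is swapping a volatile asm for an artificial memory dependency: the "Q" (*sp) input tells GCC the result depends on a memory read, so the TPIDRPRW value may be cached and reused, yet anything with a "memory" clobber (barrier(), lock operations) forces a re-read. A sketch of the observable effect, assuming the definitions above:

	unsigned long a = __my_cpu_offset;	/* one mrc instruction */
	unsigned long b = __my_cpu_offset;	/* may be CSE'd with 'a' */

	barrier();				/* asm volatile("" ::: "memory") */
	unsigned long c = __my_cpu_offset;	/* re-executes the mrc: the fake
						 * *sp operand may have changed */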
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index aaa61b6f50ff..e78983202737 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
/*
* Logical CPU mapping.
*/
-extern int __cpu_logical_map[];
+extern u32 __cpu_logical_map[];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/*
* Retrieve logical cpu index corresponding to a given MPIDR[23:0]
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 6220e9fdf4c7..f8b8965666e9 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned long tmp;
+ unsigned long contended, res;
u32 slock;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" subs %1, %0, %0, ror #16\n"
-" addeq %0, %0, %3\n"
-" strexeq %1, %0, [%2]"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
-
- if (tmp == 0) {
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%3]\n"
+ " mov %2, #0\n"
+ " subs %1, %0, %0, ror #16\n"
+ " addeq %0, %0, %4\n"
+ " strexeq %2, %0, [%3]"
+ : "=&r" (slock), "=&r" (contended), "=r" (res)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
smp_mb();
return 1;
} else {
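The rewrite fixes a subtle ticket-lock problem: strex can fail spuriously (another CPU touching the same exclusive reservation granule, an intervening exception), and the old code then reported the lock as contended even when it was free. The new loop retries until the store succeeds or the lock is genuinely held. In C terms, with ldrex()/strex() as hypothetical intrinsics and the ticket layout this file uses (owner in the low half, next ticket in the high half):

	do {
		u32 slock = ldrex(&lock->slock);

		/* free iff next == owner, i.e. high half == low half */
		contended = (slock >> TICKET_SHIFT) != (slock & 0xffff);
		res = 0;
		if (!contended)		/* claim the next ticket */
			res = strex(&lock->slock, slock + (1 << TICKET_SHIFT));
	} while (res);			/* retry only on spurious strex failure */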
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 1995d1a84060..214d4158089a 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -58,7 +58,7 @@ struct thread_info {
struct cpu_context_save cpu_context; /* cpu context */
__u32 syscall; /* syscall number */
__u8 used_cp[16]; /* thread used copro */
- unsigned long tp_value;
+ unsigned long tp_value[2]; /* TLS registers */
#ifdef CONFIG_CRUNCH
struct crunch_state crunchstate;
#endif
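tp_value grows into a two-element array so the context switch can preserve both user TLS registers rather than clearing the read/write one. Assumed layout, matching the TI_TP_VALUE and TI_TP_VALUE + 4 offsets used by the tls.h macros further down:

	struct thread_info *ti = current_thread_info();
	unsigned long tp     = ti->tp_value[0];	/* TPIDRURO: user read-only TLS */
	unsigned long tpuser = ti->tp_value[1];	/* TPIDRURW: user read/write reg */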
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee26..bdf2b8458ec1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb) 0
-#else
-#define tlb_fast_mode(tlb) 1
-#endif
-
#define MMU_GATHER_BUNDLE 8
/*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush(tlb);
- if (!tlb_fast_mode(tlb)) {
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- if (tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
- }
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
+ tlb->nr = 0;
+ if (tlb->pages == tlb->local)
+ __tlb_alloc_page(tlb);
}
static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (tlb_fast_mode(tlb)) {
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
- }
-
tlb->pages[tlb->nr++] = page;
VM_BUG_ON(tlb->nr > tlb->max);
return tlb->max - tlb->nr;
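tlb_fast_mode is gone because immediate freeing is never safe here: another CPU, or an ARMv7 speculative prefetch, may still hold a TLB entry for the unmapped page. Pages are therefore always batched and freed only after the flush. The ordering the remaining code enforces, sketched from the two functions above:

	/* gather: __tlb_remove_page() queues the page, never frees it */
	tlb->pages[tlb->nr++] = page;
	/* ... later, in tlb_flush_mmu(): */
	tlb_flush(tlb);					/* 1. kill stale TLB entries */
	free_pages_and_swap_cache(tlb->pages, tlb->nr);	/* 2. now safe to free */
	tlb->nr = 0;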
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 73409e6c0251..83259b873333 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -2,27 +2,30 @@
#define __ASMARM_TLS_H
#ifdef __ASSEMBLY__
- .macro set_tls_none, tp, tmp1, tmp2
+#include <asm/asm-offsets.h>
+ .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
.endm
- .macro set_tls_v6k, tp, tmp1, tmp2
+ .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
+ mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
- mov \tmp1, #0
- mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
+ mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register
+ str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
- .macro set_tls_v6, tp, tmp1, tmp2
+ .macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
ldr \tmp1, =elf_hwcap
ldr \tmp1, [\tmp1, #0]
mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
- movne \tmp1, #0
- mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
+ mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
+ mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+ mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
+ strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
- .macro set_tls_software, tp, tmp1, tmp2
+ .macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
mov \tmp1, #0xffff0fff
str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
.endm
@@ -31,19 +34,30 @@
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu 1
#define has_tls_reg 1
-#define set_tls set_tls_none
+#define switch_tls switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
-#define set_tls set_tls_v6
+#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
-#define set_tls set_tls_v6k
+#define switch_tls switch_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
-#define set_tls set_tls_software
+#define switch_tls switch_tls_software
#endif
+#ifndef __ASSEMBLY__
+static inline unsigned long get_tpuser(void)
+{
+ unsigned long reg = 0;
+
+ if (has_tls_reg && !tls_emu)
+ __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));
+
+ return reg;
+}
+#endif
#endif /* __ASMARM_TLS_H */
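get_tpuser() lets C code snapshot the live TPIDRURW value. A plausible use, sketched and simplified rather than quoted from this patch, is at fork time, seeding the child's saved copy so that switch_tls later restores the parent's register:

	/* sketch: inside copy_thread(), after thread_info is set up */
	thread->tp_value[1] = get_tpuser();	/* inherit the user r/w register */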