Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--  arch/xtensa/include/asm/Kbuild            |   2
-rw-r--r--  arch/xtensa/include/asm/atomic.h          | 124
-rw-r--r--  arch/xtensa/include/asm/bitops.h          | 323
-rw-r--r--  arch/xtensa/include/asm/cache.h           |   6
-rw-r--r--  arch/xtensa/include/asm/cmpxchg.h         |  71
-rw-r--r--  arch/xtensa/include/asm/fixmap.h          |   8
-rw-r--r--  arch/xtensa/include/asm/futex.h           |  10
-rw-r--r--  arch/xtensa/include/asm/hw_irq.h          |  14
-rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h  |   3
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h     |  29
-rw-r--r--  arch/xtensa/include/asm/page.h            |  11
-rw-r--r--  arch/xtensa/include/asm/pgtable.h         |   4
-rw-r--r--  arch/xtensa/include/asm/processor.h       |   3
-rw-r--r--  arch/xtensa/include/asm/syscall.h         |   4
-rw-r--r--  arch/xtensa/include/asm/uaccess.h         |  16
-rw-r--r--  arch/xtensa/include/asm/user.h            |  20
-rw-r--r--  arch/xtensa/include/asm/vectors.h         |  44
-rw-r--r--  arch/xtensa/include/uapi/asm/ipcbuf.h     |   2
-rw-r--r--  arch/xtensa/include/uapi/asm/msgbuf.h     |   2
-rw-r--r--  arch/xtensa/include/uapi/asm/sembuf.h     |   1
20 files changed, 277 insertions(+), 420 deletions(-)
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index ffa0cf7f66c3..3acc31e55e02 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += hardirq.h
+generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
@@ -30,6 +31,7 @@ generic-y += qspinlock.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
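
A header listed in generic-y is not kept in the tree at all: Kbuild emits a one-line wrapper into arch/xtensa/include/generated/asm/ that pulls in the asm-generic version, which is why asm/hw_irq.h and asm/user.h can be deleted below. Roughly what the generated wrapper contains (path shown for illustration):

	/* arch/xtensa/include/generated/asm/hw_irq.h, emitted by Kbuild */
	#include <asm-generic/hw_irq.h>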
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7b00d26f472e..3e7c6134ed32 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
} \
@@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- " " #op " %0, %1, %2\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
} \
@@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- " " #op " %0, %0, %2\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
\
@@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
\
@@ -184,14 +187,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
- " rsil a15, "__stringify(TOPLEVEL)"\n"\
- " l32i %0, %2, 0\n" \
- " " #op " %0, %0, %1\n" \
- " s32i %0, %2, 0\n" \
+ " rsil a15, "__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
} \
@@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
- " l32i %0, %2, 0\n" \
- " " #op " %0, %0, %1\n" \
- " s32i %0, %2, 0\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
\
@@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
- " l32i %0, %3, 0\n" \
- " " #op " %1, %0, %2\n" \
- " s32i %1, %3, 0\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[tmp], %[result], %[i]\n" \
+ " s32i %[tmp], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
\
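
Two things change in every hunk above: positional asm operands (%0, %3) become named ones (%[result], %[addr]), and where the instruction takes a base+offset form the pointer moves from an "a" register input to a "+m" memory operand, letting the compiler pick the addressing. For readers unfamiliar with the exclusive variant, here is a C-level model of the l32ex/s32ex/getex retry loop; try_store_exclusive() is a hypothetical stand-in for the s32ex + getex pair, not a real primitive:

	/* C model of the XCHAL_HAVE_EXCLUSIVE loop (illustrative only;
	 * the real sequence is atomic at the hardware level). */
	static inline void atomic_add_model(int i, atomic_t *v)
	{
		int tmp;

		do {
			tmp = v->counter;	/* l32ex: load and arm the monitor */
			/* s32ex stores only if the monitor is still armed;
			 * getex reads back whether the store succeeded
			 * (hypothetical helper standing in for both). */
		} while (!try_store_exclusive(&v->counter, tmp + i));
	}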
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index be8b2be5a98b..3f71d364ba90 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -98,247 +98,112 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " or %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " and %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (~mask), "a" (p)
- : "memory");
-}
-
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " xor %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " or %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return value & mask;
-}
-
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " and %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
-
- return value & mask;
-}
-
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " xor %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return value & mask;
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " "insn" %[tmp], %[tmp], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[value], %[addr]\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+ \
+ return value & mask; \
}
#elif XCHAL_HAVE_S32C1I
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+ \
+ return tmp & mask; \
}
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " xor %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-}
+#else
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#define BIT_OP(op, insn, inv)
+#define TEST_AND_BIT_OP(op, insn, inv)
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#include <asm-generic/bitops/atomic.h>
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " xor %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#endif /* XCHAL_HAVE_S32C1I */
-#else
+#define BIT_OPS(op, insn, inv) \
+ BIT_OP(op, insn, inv) \
+ TEST_AND_BIT_OP(op, insn, inv)
-#include <asm-generic/bitops/atomic.h>
+BIT_OPS(set, "or", )
+BIT_OPS(clear, "and", ~)
+BIT_OPS(change, "xor", )
-#endif /* XCHAL_HAVE_S32C1I */
+#undef BIT_OPS
+#undef BIT_OP
+#undef TEST_AND_BIT_OP
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
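
The rewrite collapses six near-identical functions into two templates plus a BIT_OPS() stamp. As a sketch, BIT_OPS(clear, "and", ~) expands (in the S32C1I flavor) to the same two functions the old code spelled out by hand:

	/* Expansion sketch of BIT_OPS(clear, "and", ~): the body is the
	 * BIT_OP/TEST_AND_BIT_OP template with insn = "and" and the mask
	 * inverted by 'inv'. */
	static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
	{
		unsigned long tmp, value;
		unsigned long mask = 1UL << (bit & 31);

		p += bit >> 5;
		/* ... s32c1i retry loop using "and" and (~mask) ... */
	}

	static inline int
	test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
	{
		/* ... same loop, but returns tmp & mask ... */
	}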
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index b21fd133ff62..54e147ac26bf 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -31,4 +31,10 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+/*
+ * R/O after init is actually writable, it cannot go to .rodata
+ * according to vmlinux linker script.
+ */
+#define __ro_after_init __read_mostly
+
#endif /* _XTENSA_CACHE_H */
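
__ro_after_init normally lands a variable in a section the kernel write-protects once boot completes; since the xtensa vmlinux linker script cannot provide that, the define falls back to plain __read_mostly placement. Call sites are unchanged either way; a hypothetical example:

	/* Hypothetical usage: written once during __init, read-only after
	 * (on xtensa this now only gets read-mostly placement). */
	static unsigned long cpu_features __ro_after_init;

	static int __init detect_features(void)
	{
		cpu_features = 0x1;	/* last write happens in init code */
		return 0;
	}
	early_initcall(detect_features);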
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 7ccc5cbf441b..a175f8aec3fb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new)
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32ex %0, %3\n"
- " bne %0, %4, 2f\n"
- " mov %1, %2\n"
- " s32ex %1, %3\n"
- " getex %1\n"
- " beqz %1, 1b\n"
+ "1: l32ex %[result], %[addr]\n"
+ " bne %[result], %[cmp], 2f\n"
+ " mov %[tmp], %[new]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
"2:\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (new), "a" (p), "a" (old)
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
: "memory"
);
return result;
#elif XCHAL_HAVE_S32C1I
__asm__ __volatile__(
- " wsr %2, scompare1\n"
- " s32c1i %0, %1, 0\n"
- : "+a" (new)
- : "a" (p), "a" (old)
+ " wsr %[cmp], scompare1\n"
+ " s32c1i %[new], %[mem]\n"
+ : [new] "+a" (new), [mem] "+m" (*p)
+ : [cmp] "a" (old)
: "memory"
);
@@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new)
#else
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
- " l32i %0, %1, 0\n"
- " bne %0, %2, 1f\n"
- " s32i %3, %1, 0\n"
+ " l32i %[old], %[mem]\n"
+ " bne %[old], %[cmp], 1f\n"
+ " s32i %[new], %[mem]\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
- : "=&a" (old)
- : "a" (p), "a" (old), "r" (new)
+ : [old] "=&a" (old), [mem] "+m" (*p)
+ : [cmp] "a" (old), [new] "r" (new)
: "a15", "memory");
return old;
#endif
@@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32ex %0, %3\n"
- " mov %1, %2\n"
- " s32ex %1, %3\n"
- " getex %1\n"
- " beqz %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (val), "a" (m)
+ "1: l32ex %[result], %[addr]\n"
+ " mov %[tmp], %[val]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [val] "a" (val), [addr] "a" (m)
: "memory"
);
@@ -143,13 +143,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#elif XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32i %1, %2, 0\n"
- " mov %0, %3\n"
- " wsr %1, scompare1\n"
- " s32c1i %0, %2, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (m), "a" (val)
+ "1: l32i %[tmp], %[mem]\n"
+ " mov %[result], %[val]\n"
+ " wsr %[tmp], scompare1\n"
+ " s32c1i %[result], %[mem]\n"
+ " bne %[result], %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp),
+ [mem] "+m" (*m)
+ : [val] "a" (val)
: "memory"
);
return result;
@@ -157,12 +158,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
- " l32i %0, %1, 0\n"
- " s32i %2, %1, 0\n"
+ " l32i %[tmp], %[mem]\n"
+ " s32i %[val], %[mem]\n"
" wsr a15, ps\n"
" rsync\n"
- : "=&a" (tmp)
- : "a" (m), "a" (val)
+ : [tmp] "=&a" (tmp), [mem] "+m" (*m)
+ : [val] "a" (val)
: "a15", "memory");
return tmp;
#endif
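
All three variants above implement the same contract, which callers rely on for lock-free retry loops: return the previous value of *p, and store new only when the previous value equals old. A plain-C model of that contract and a typical caller (illustrative; the real implementations above are atomic, this model is not):

	/* What __cmpxchg_u32() guarantees, modeled non-atomically. */
	static int cmpxchg_model(volatile int *p, int old, int new)
	{
		int cur = *p;		/* conceptually one atomic step: */
		if (cur == old)
			*p = new;	/* store only on match */
		return cur;		/* always report what was seen */
	}

	/* Typical retry-loop caller: lock-free increment. */
	static void atomic_inc_model(volatile int *p)
	{
		int old;

		do {
			old = *p;
		} while (cmpxchg_model(p, old, old + 1) != old);
	}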
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 7e25c1b50ac0..cfb8696917e9 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -78,8 +78,10 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
- pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
- (vaddr) \
- )
+ pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
+ (vaddr)), \
+ (vaddr)), \
+ (vaddr)), \
+ (vaddr))
#endif
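
The extra p4d_offset() step follows from dropping __ARCH_USE_5LEVEL_HACK in pgtable.h (below): walks must now name all five levels, with the levels xtensa doesn't use folded away at compile time. Written out, the canonical walk is:

	/* Generic five-level walk; p4d/pud/pmd fold to no-ops on xtensa. */
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	pte_t *pte = pte_offset_kernel(pmd, vaddr);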
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 0c4457ca0a85..964611083224 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -43,10 +43,10 @@
#elif XCHAL_HAVE_S32C1I
#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
__asm__ __volatile( \
- "1: l32i %[oldval], %[addr], 0\n" \
+ "1: l32i %[oldval], %[mem]\n" \
insn "\n" \
" wsr %[oldval], scompare1\n" \
- "2: s32c1i %[newval], %[addr], 0\n" \
+ "2: s32c1i %[newval], %[mem]\n" \
" bne %[newval], %[oldval], 1b\n" \
" movi %[newval], 0\n" \
"3:\n" \
@@ -60,9 +60,9 @@
" .section __ex_table,\"a\"\n" \
" .long 1b, 5b, 2b, 5b\n" \
" .previous\n" \
- : [oldval] "=&r" (old), [newval] "=&r" (ret) \
- : [addr] "r" (uaddr), [oparg] "r" (arg), \
- [fault] "I" (-EFAULT) \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret), \
+ [mem] "+m" (*(uaddr)) \
+ : [oparg] "r" (arg), [fault] "I" (-EFAULT) \
: "memory")
#endif
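
The futex macro takes the actual operation as the insn string and splices it between the user-space load and the compare-and-store; the __ex_table entries steer faults at labels 1 and 2 to the fixup at 5. Roughly how callers instantiate it (a sketch of the usual dispatch; the caller is not part of this diff):

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov %[newval], %[oparg]",
				  ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add %[newval], %[oldval], %[oparg]",
				  ret, oldval, uaddr, oparg);
		break;
	/* FUTEX_OP_OR / _ANDN / _XOR follow the same pattern */
	default:
		ret = -ENOSYS;
	}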
diff --git a/arch/xtensa/include/asm/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h
deleted file mode 100644
index 3ddbea759b2b..000000000000
--- a/arch/xtensa/include/asm/hw_irq.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * include/asm-xtensa/hw_irq.h
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_HW_IRQ_H
-#define _XTENSA_HW_IRQ_H
-
-#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 3b054d2bede0..e3e1d9a1ef69 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -23,6 +23,7 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
+#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/vectors.h>
@@ -183,7 +184,7 @@
#endif
#if XCHAL_HAVE_MPU
- .data
+ __REFCONST
.align 4
.Lattribute_table:
.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 9c12babc016c..7cbf68ca7106 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_KMEM_LAYOUT_H
#define _XTENSA_KMEM_LAYOUT_H
+#include <asm/core.h>
#include <asm/types.h>
#ifdef CONFIG_MMU
@@ -65,6 +66,34 @@
#endif
+/* KIO definition */
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+/* KERNEL_STACK definition */
+
#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT 13
#else
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 09c56cba442e..f4771c29c7e9 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -169,7 +169,18 @@ static inline unsigned long ___pa(unsigned long va)
if (off >= XCHAL_KSEG_SIZE)
off -= XCHAL_KSEG_SIZE;
+#ifndef CONFIG_XIP_KERNEL
return off + PHYS_OFFSET;
+#else
+ if (off < XCHAL_KSEG_SIZE)
+ return off + PHYS_OFFSET;
+
+ off -= XCHAL_KSEG_SIZE;
+ if (off >= XCHAL_KIO_SIZE)
+ off -= XCHAL_KIO_SIZE;
+
+ return off + XCHAL_KIO_PADDR;
+#endif
}
#define __pa(x) ___pa((unsigned long)(x))
#else
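
In the XIP case a kernel virtual address can sit past both KSEG views, in the KIO windows, so ___pa() peels off one more KSEG_SIZE and folds the cached/bypass KIO pair before adding the KIO physical base. A worked trace with illustrative constants (real values are core and config specific):

	/*
	 * Assume KSEG_SIZE = 0x08000000, KIO_SIZE = 0x10000000,
	 * XCHAL_KIO_PADDR = 0xf0000000, PHYS_OFFSET = 0.
	 *
	 *   off = 0x10200000            va is 0x10200000 above the KSEG base
	 *   off >= KSEG_SIZE            -> off = 0x08200000  (first fold)
	 *   off >= KSEG_SIZE still      -> not a KSEG address: XIP path
	 *   off -= KSEG_SIZE            -> off = 0x00200000  (offset into KIO)
	 *   off <  KIO_SIZE             -> cached window, no second fold
	 *   pa  = off + XCHAL_KIO_PADDR -> 0xf0200000
	 */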
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 3f7fe5a8c286..27ac17c9da09 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -8,7 +8,6 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
@@ -371,9 +370,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,address) ((pmd_t*)(dir))
-
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 7495520d7a3e..6fa903daf2a2 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -195,6 +195,7 @@ struct thread_struct {
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
do { \
+ unsigned long syscall = (regs)->syscall; \
memset((regs), 0, sizeof(*(regs))); \
(regs)->pc = (new_pc); \
(regs)->ps = USER_PS_VALUE; \
@@ -204,7 +205,7 @@ struct thread_struct {
(regs)->depc = 0; \
(regs)->windowbase = 0; \
(regs)->windowstart = 1; \
- (regs)->syscall = NO_SYSCALL; \
+ (regs)->syscall = syscall; \
} while (0)
/* Forward declaration */
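
start_thread() runs from inside execve(), i.e. while a system call is still in flight, so zeroing the whole pt_regs also wiped the syscall number that tracers query on syscall exit; the hunk saves and restores it across the memset(). For context, a sketch of the accessor that depends on this field (assuming the usual xtensa definition):

	/* Reads the field that start_thread() now preserves. */
	static inline long syscall_get_nr(struct task_struct *task,
					  struct pt_regs *regs)
	{
		return regs->syscall;
	}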
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 359ab40e935a..f9a671cbf933 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- regs->areg[0] = (long) error ? error : val;
+ regs->areg[2] = (long) error ? error : val;
}
#define SYSCALL_MAX_ARGS 6
@@ -79,7 +79,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->areg[reg[i]] = args[i];
}
-asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_rt_sigreturn(void);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
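
The areg[0] -> areg[2] change matters because the xtensa syscall ABI returns values in register a2; writing areg[0] updated the wrong slot, so error/return reporting through tracers was broken. A sketch of the counterpart getter, assuming it follows the same a2 convention:

	static inline long syscall_get_return_value(struct task_struct *task,
						    struct pt_regs *regs)
	{
		return regs->areg[2];
	}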
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 3f80386f1883..47b7702aaa40 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -132,13 +132,13 @@ do { \
#define __check_align_1 ""
#define __check_align_2 \
- " _bbci.l %[addr], 0, 1f \n" \
+ " _bbci.l %[mem] * 0, 1f \n" \
" movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
- " _bbsi.l %[addr], 0, 0f \n" \
- " _bbci.l %[addr], 1, 1f \n" \
+ " _bbsi.l %[mem] * 0, 0f \n" \
+ " _bbci.l %[mem] * 0 + 1, 1f \n" \
"0: movi %[err], %[efault] \n" \
" _j 2f \n"
@@ -154,7 +154,7 @@ do { \
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
- "1: "insn" %[x], %[addr], 0 \n" \
+ "1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@@ -167,8 +167,8 @@ __asm__ __volatile__( \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
- :[err] "+r"(err_), [tmp] "=r"(cb) \
- :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
+ :[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
+ :[x] "r"(x_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
@@ -222,7 +222,7 @@ do { \
u32 __x = 0; \
__asm__ __volatile__( \
__check_align_##align \
- "1: "insn" %[x], %[addr], 0 \n" \
+ "1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@@ -236,7 +236,7 @@ do { \
" .long 1b, 5b \n" \
" .previous" \
:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
- :[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+ :[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
(x_) = (__force __typeof__(*(addr_)))__x; \
} while (0)
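
Same pattern as the atomics: the user pointer becomes a real "m"/"=m" operand, so the compiler materializes the base register and offset and the template drops the hard-coded ", 0". A minimal standalone illustration (not kernel code):

	/* The compiler expands %[mem] to "<areg>, <offset>" for s32i. */
	static inline void store32(unsigned int v, unsigned int *p)
	{
		__asm__ __volatile__("s32i %[x], %[mem]"
				     : [mem] "=m" (*p)
				     : [x] "a" (v));
	}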
diff --git a/arch/xtensa/include/asm/user.h b/arch/xtensa/include/asm/user.h
deleted file mode 100644
index 2c3ed23354a8..000000000000
--- a/arch/xtensa/include/asm/user.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * include/asm-xtensa/user.h
- *
- * Xtensa Processor version.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_USER_H
-#define _XTENSA_USER_H
-
-/* This file usually defines a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on Xtensa.
- */
-
-#endif /* _XTENSA_USER_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 79fe3007919e..fd99b25037a7 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -21,50 +21,18 @@
#include <asm/core.h>
#include <asm/kmem_layout.h>
-#if XCHAL_HAVE_PTP_MMU
-#define XCHAL_KIO_CACHED_VADDR 0xe0000000
-#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
-#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
+#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS
#else
-#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
-#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
-#endif
-#define XCHAL_KIO_SIZE 0x10000000
-
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
-#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
-#ifndef __ASSEMBLY__
-extern unsigned long xtensa_kio_paddr;
-
-static inline unsigned long xtensa_get_kio_paddr(void)
-{
- return xtensa_kio_paddr;
-}
-#endif
-#else
-#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
-#endif
-
-#if defined(CONFIG_MMU)
-
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
-/* Image Virtual Start Address */
-#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
- CONFIG_KERNEL_LOAD_ADDRESS - \
+#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
+ XCHAL_KSEG_CACHED_VADDR - \
XCHAL_KSEG_PADDR)
+#endif
#else
#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif
-#else /* !defined(CONFIG_MMU) */
- /* MMU Not being used - Virtual == Physical */
-
-/* Location of the start of the kernel text, _start */
-#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
-
-
-#endif /* CONFIG_MMU */
-
#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
#ifdef CONFIG_VECTORS_OFFSET
#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
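
The KIO block moves to kmem_layout.h (see above) and KERNELOFFSET reduces to one expression: the physical load address relocated into the cached KSEG view. A worked example with the classic xtensa defaults (illustrative; actual values depend on the configuration):

	/*
	 *   CONFIG_KERNEL_LOAD_ADDRESS = 0x00003000   (physical)
	 *   XCHAL_KSEG_CACHED_VADDR    = 0xd0000000
	 *   XCHAL_KSEG_PADDR           = 0x00000000
	 *
	 *   KERNELOFFSET = 0x00003000 + 0xd0000000 - 0x00000000
	 *                = 0xd0003000                (virtual start)
	 */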
diff --git a/arch/xtensa/include/uapi/asm/ipcbuf.h b/arch/xtensa/include/uapi/asm/ipcbuf.h
index a57afa0b606f..3bd0642f6660 100644
--- a/arch/xtensa/include/uapi/asm/ipcbuf.h
+++ b/arch/xtensa/include/uapi/asm/ipcbuf.h
@@ -12,6 +12,8 @@
#ifndef _XTENSA_IPCBUF_H
#define _XTENSA_IPCBUF_H
+#include <linux/posix_types.h>
+
/*
* Pad space is left for:
* - 32-bit mode_t and seq
diff --git a/arch/xtensa/include/uapi/asm/msgbuf.h b/arch/xtensa/include/uapi/asm/msgbuf.h
index d6915e9f071c..1ed2c85b693a 100644
--- a/arch/xtensa/include/uapi/asm/msgbuf.h
+++ b/arch/xtensa/include/uapi/asm/msgbuf.h
@@ -17,6 +17,8 @@
#ifndef _XTENSA_MSGBUF_H
#define _XTENSA_MSGBUF_H
+#include <asm/ipcbuf.h>
+
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifdef __XTENSA_EB__
diff --git a/arch/xtensa/include/uapi/asm/sembuf.h b/arch/xtensa/include/uapi/asm/sembuf.h
index 09f348d643f1..3b9cdd406dfe 100644
--- a/arch/xtensa/include/uapi/asm/sembuf.h
+++ b/arch/xtensa/include/uapi/asm/sembuf.h
@@ -22,6 +22,7 @@
#define _XTENSA_SEMBUF_H
#include <asm/byteorder.h>
+#include <asm/ipcbuf.h>
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */