Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/Kconfig                   |  18
-rw-r--r--  arch/xtensa/boot/lib/Makefile         |   1
-rw-r--r--  arch/xtensa/include/asm/barrier.h     |  12
-rw-r--r--  arch/xtensa/include/asm/bitops.h      |  10
-rw-r--r--  arch/xtensa/include/asm/coprocessor.h |  11
-rw-r--r--  arch/xtensa/include/asm/processor.h   |   7
-rw-r--r--  arch/xtensa/include/asm/sections.h    |   2
-rw-r--r--  arch/xtensa/include/asm/thread_info.h |  11
-rw-r--r--  arch/xtensa/include/asm/timex.h       |   6
-rw-r--r--  arch/xtensa/include/asm/traps.h       |  40
-rw-r--r--  arch/xtensa/kernel/Makefile           |   1
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c      |  19
-rw-r--r--  arch/xtensa/kernel/coprocessor.S      | 234
-rw-r--r--  arch/xtensa/kernel/entry.S            | 335
-rw-r--r--  arch/xtensa/kernel/hibernate.c        |  25
-rw-r--r--  arch/xtensa/kernel/jump_label.c       |   2
-rw-r--r--  arch/xtensa/kernel/process.c          | 112
-rw-r--r--  arch/xtensa/kernel/ptrace.c           |   3
-rw-r--r--  arch/xtensa/kernel/s32c1i_selftest.c  |   7
-rw-r--r--  arch/xtensa/kernel/signal.c           |   3
-rw-r--r--  arch/xtensa/kernel/smp.c              |   7
-rw-r--r--  arch/xtensa/kernel/traps.c            | 143
-rw-r--r--  arch/xtensa/lib/Makefile              |   2
-rw-r--r--  arch/xtensa/lib/kcsan-stubs.c         |  54
-rw-r--r--  arch/xtensa/lib/memcopy.S             |  20
-rw-r--r--  arch/xtensa/mm/Makefile               |   3
-rw-r--r--  arch/xtensa/mm/fault.c                | 112
-rw-r--r--  arch/xtensa/mm/mmu.c                  |   2
-rw-r--r--  arch/xtensa/platforms/iss/console.c   |   8
-rw-r--r--  arch/xtensa/platforms/iss/network.c   | 150
-rw-r--r--  arch/xtensa/platforms/iss/simdisk.c   |  18
-rw-r--r--  arch/xtensa/platforms/xt2000/setup.c  |   2
32 files changed, 884 insertions, 496 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index bd113bc6e192..0b0f0172cced 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -4,6 +4,7 @@ config XTENSA
select ARCH_32BIT_OFF_T
select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT if MMU
select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
@@ -29,8 +30,10 @@ config XTENSA
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+ select HAVE_ARCH_KCSAN
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_CONTEXT_TRACKING
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
@@ -42,6 +45,7 @@ config XTENSA
select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select PERF_USE_VMALLOC
@@ -79,6 +83,7 @@ config STACKTRACE_SUPPORT
config MMU
def_bool n
+ select PFAULT
config HAVE_XTENSA_GPIO32
def_bool n
@@ -178,6 +183,16 @@ config XTENSA_FAKE_NMI
If unsure, say N.
+config PFAULT
+ bool "Handle protection faults" if EXPERT && !MMU
+ default y
+ help
+ Handle protection faults. MMU configurations must enable it.
+ noMMU configurations may disable it if the memory map in use never
+ generates protection faults, or if such faults are always fatal.
+
+ If unsure, say Y.
+
config XTENSA_UNALIGNED_USER
bool "Unaligned memory access in user space"
help
@@ -773,6 +788,9 @@ endmenu
menu "Power management options"
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y
+
source "kernel/power/Kconfig"
endmenu
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index e3d717c7bfa1..162d10af36f3 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -16,6 +16,7 @@ CFLAGS_REMOVE_inffast.o = -pg
endif
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index d6f8d4ddc2bc..898ea397e9bc 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -11,9 +11,15 @@
#include <asm/core.h>
-#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
-#define rmb() barrier()
-#define wmb() mb()
+#define __mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define __rmb() barrier()
+#define __wmb() __mb()
+
+#ifdef CONFIG_SMP
+#define __smp_mb() __mb()
+#define __smp_rmb() __rmb()
+#define __smp_wmb() __wmb()
+#endif
#if XCHAL_HAVE_S32C1I
#define __smp_mb__before_atomic() barrier()
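
The mb()/rmb()/wmb() definitions gain a leading double underscore so that
<asm-generic/barrier.h> can wrap them with KCSAN instrumentation (this
patch also selects HAVE_ARCH_KCSAN), and explicit __smp_*() variants are
provided for SMP. A simplified sketch of how the generic header is
expected to consume these hooks (not the verbatim upstream header):

    /* include/asm-generic/barrier.h, simplified */
    #ifdef __mb
    #define mb()     do { kcsan_mb(); __mb(); } while (0)
    #endif

    #ifdef __smp_mb
    #define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
    #endif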
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index cd225896c40f..e02ec5833389 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -99,7 +99,7 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
#define BIT_OP(op, insn, inv) \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp; \
unsigned long mask = 1UL << (bit & 31); \
@@ -119,7 +119,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -142,7 +142,7 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
#elif XCHAL_HAVE_S32C1I
#define BIT_OP(op, insn, inv) \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -163,7 +163,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -205,6 +205,8 @@ BIT_OPS(change, "xor", )
#undef BIT_OP
#undef TEST_AND_BIT_OP
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
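
Prefixing the atomic bit operations with arch_ lets the generic
instrumented wrappers supply the unprefixed names with KCSAN/KASAN checks
in front. Roughly what <asm-generic/bitops/instrumented-atomic.h> layers
on top (a sketch, not the verbatim header):

    static __always_inline void set_bit(long nr, volatile unsigned long *addr)
    {
            /* report racy/invalid accesses before doing the atomic op */
            instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
            arch_set_bit(nr, addr);
    }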
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 0fbe2a740b8d..3b1a0d5d2169 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -142,11 +142,12 @@ typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
-extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
-extern void coprocessor_flush(struct thread_info*, int);
-
-extern void coprocessor_release_all(struct thread_info*);
-extern void coprocessor_flush_all(struct thread_info*);
+struct thread_info;
+void coprocessor_flush(struct thread_info *ti, int cp_index);
+void coprocessor_release_all(struct thread_info *ti);
+void coprocessor_flush_all(struct thread_info *ti);
+void coprocessor_flush_release_all(struct thread_info *ti);
+void local_coprocessors_flush_release_all(void);
#endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 4489a27d527a..76bc63127c66 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -246,6 +246,13 @@ extern unsigned long __get_wchan(struct task_struct *p);
v; \
})
+#define xtensa_xsr(x, sr) \
+ ({ \
+ unsigned int __v__ = (unsigned int)(x); \
+ __asm__ __volatile__ ("xsr %0, " __stringify(sr) : "+a"(__v__)); \
+ __v__; \
+ })
+
#if XCHAL_HAVE_EXTERN_REGS
static inline void set_er(unsigned long value, unsigned long addr)
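
xtensa_xsr() wraps the XSR instruction, which atomically exchanges an
address register with a special register, complementing the existing
xtensa_set_sr()/xtensa_get_sr() helpers. A usage sketch matching how this
patch later uses it in local_coprocessor_flush_all():

    /* swap in the thread's CPENABLE mask, remembering the old value */
    unsigned long old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
    /* ... flush the coprocessors owned by this thread ... */
    xtensa_set_sr(old_cpenable, cpenable);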
diff --git a/arch/xtensa/include/asm/sections.h b/arch/xtensa/include/asm/sections.h
index a8c42d08e281..3bc6b9afa993 100644
--- a/arch/xtensa/include/asm/sections.h
+++ b/arch/xtensa/include/asm/sections.h
@@ -29,7 +29,7 @@ extern char _Level5InterruptVector_text_end[];
extern char _Level6InterruptVector_text_start[];
extern char _Level6InterruptVector_text_end[];
#endif
-#ifdef CONFIG_SMP
+#ifdef CONFIG_SECONDARY_RESET_VECTOR
extern char _SecondaryResetVector_text_start[];
extern char _SecondaryResetVector_text_end[];
#endif
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index f6fcbba1d02f..326db1c1d5d8 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -52,12 +52,21 @@ struct thread_info {
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
- unsigned long cpenable;
#if XCHAL_HAVE_EXCLUSIVE
/* result of the most recent exclusive store */
unsigned long atomctl8;
#endif
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ /* Address where PS.WOE was enabled by the ABI probing code */
+ unsigned long ps_woe_fix_addr;
+#endif
+ /*
+ * If i-th bit is set then coprocessor state is loaded into the
+ * coprocessor i on CPU cp_owner_cpu.
+ */
+ unsigned long cpenable;
+ u32 cp_owner_cpu;
/* Allocate storage for extra user states and coprocessor states. */
#if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t xtregs_cp;
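
Together, cpenable and the new cp_owner_cpu field encode where a task's
coprocessor state currently lives. A hedged C sketch of the invariant the
rest of this patch relies on (cp_live_locally() is a hypothetical helper,
not part of the patch):

    static bool cp_live_locally(struct thread_info *ti)
    {
            /* non-zero cpenable => state is live on CPU cp_owner_cpu */
            return ti->cpenable && ti->cp_owner_cpu == smp_processor_id();
    }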
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 233ec75e60c6..3f2462f2d027 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -29,10 +29,6 @@
extern unsigned long ccount_freq;
-typedef unsigned long long cycles_t;
-
-#define get_cycles() (0)
-
void local_timer_setup(unsigned cpu);
/*
@@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare)
xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
}
+#include <asm-generic/timex.h>
+
#endif /* _XTENSA_TIMEX_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 6fa47cd8e02d..6f74ccc0c7ea 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -12,6 +12,8 @@
#include <asm/ptrace.h>
+typedef void xtensa_exception_handler(struct pt_regs *regs);
+
/*
* Per-CPU exception handling data structure.
* EXCSAVE1 points to it.
@@ -25,31 +27,47 @@ struct exc_table {
void *fixup;
/* For passing a parameter to fixup */
void *fixup_param;
+#if XTENSA_HAVE_COPROCESSORS
+ /* Pointers to owner struct thread_info */
+ struct thread_info *coprocessor_owner[XCHAL_CP_MAX];
+#endif
/* Fast user exception handlers */
void *fast_user_handler[EXCCAUSE_N];
/* Fast kernel exception handlers */
void *fast_kernel_handler[EXCCAUSE_N];
/* Default C-Handlers */
- void *default_handler[EXCCAUSE_N];
+ xtensa_exception_handler *default_handler[EXCCAUSE_N];
};
-/*
- * handler must be either of the following:
- * void (*)(struct pt_regs *regs);
- * void (*)(struct pt_regs *regs, unsigned long exccause);
- */
-extern void * __init trap_set_handler(int cause, void *handler);
-extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
-void fast_second_level_miss(void);
+DECLARE_PER_CPU(struct exc_table, exc_table);
+
+xtensa_exception_handler *
+__init trap_set_handler(int cause, xtensa_exception_handler *handler);
+
+asmlinkage void fast_illegal_instruction_user(void);
+asmlinkage void fast_syscall_user(void);
+asmlinkage void fast_alloca(void);
+asmlinkage void fast_unaligned(void);
+asmlinkage void fast_second_level_miss(void);
+asmlinkage void fast_store_prohibited(void);
+asmlinkage void fast_coprocessor(void);
+
+asmlinkage void kernel_exception(void);
+asmlinkage void user_exception(void);
+asmlinkage void system_call(struct pt_regs *regs);
+
+void do_IRQ(int hwirq, struct pt_regs *regs);
+void do_page_fault(struct pt_regs *regs);
+void do_unhandled(struct pt_regs *regs);
/* Initialize minimal exc_table structure sufficient for basic paging */
static inline void __init early_trap_init(void)
{
- static struct exc_table exc_table __initdata = {
+ static struct exc_table init_exc_table __initdata = {
.fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
fast_second_level_miss,
};
- __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+ xtensa_set_sr(&init_exc_table, excsave1);
}
void secondary_trap_init(void);
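
trap_set_handler() is now strongly typed: handlers take only the pt_regs
pointer and read the cause from regs->exccause. A usage sketch modeled on
the s32c1i selftest change later in this patch (the cause value here is
illustrative):

    static void __init probe_handler(struct pt_regs *regs)
    {
            pr_info("probed exception, EXCCAUSE %ld\n", regs->exccause);
    }

    xtensa_exception_handler *old =
            trap_set_handler(EXCCAUSE_ILLEGAL, probe_handler);
    /* ... trigger the probed instruction ... */
    trap_set_handler(EXCCAUSE_ILLEGAL, old);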
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 5fd6cd15e0fb..897c1c741058 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 37278e2785fb..da38de20ae59 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -21,6 +21,7 @@
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
+#include <linux/suspend.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
@@ -87,14 +88,19 @@ int main(void)
OFFSET(TI_STSTUS, thread_info, status);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ OFFSET(TI_PS_WOE_FIX_ADDR, thread_info, ps_woe_fix_addr);
+#endif
/* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
- DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XCHAL_HAVE_EXCLUSIVE
DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
#endif
+ DEFINE(THREAD_CPENABLE, offsetof(struct thread_info, cpenable));
+ DEFINE(THREAD_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(THREAD_CP_OWNER_CPU, offsetof(struct thread_info, cp_owner_cpu));
#if XTENSA_HAVE_COPROCESSORS
DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
@@ -137,11 +143,22 @@ int main(void)
DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+#if XTENSA_HAVE_COPROCESSORS
+ DEFINE(EXC_TABLE_COPROCESSOR_OWNER,
+ offsetof(struct exc_table, coprocessor_owner));
+#endif
DEFINE(EXC_TABLE_FAST_USER,
offsetof(struct exc_table, fast_user_handler));
DEFINE(EXC_TABLE_FAST_KERNEL,
offsetof(struct exc_table, fast_kernel_handler));
DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
+#ifdef CONFIG_HIBERNATION
+ DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+ DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
+ DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+#endif
+
return 0;
}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 45cc0ae0af6f..ef33e76e07d8 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -19,6 +19,26 @@
#include <asm/current.h>
#include <asm/regs.h>
+/*
+ * Rules for coprocessor state manipulation on SMP:
+ *
+ * - a task may have live coprocessors only on one CPU.
+ *
+ * - whether coprocessor context of task T is live on some CPU is
+ * denoted by T's thread_info->cpenable.
+ *
+ * - non-zero thread_info->cpenable means that thread_info->cp_owner_cpu
+ *   is valid in task T's thread_info. Zero thread_info->cpenable means that
+ *   the coprocessor context is valid in task T's thread_info.
+ *
+ * - if a coprocessor context of task T is live on CPU X, only CPU X changes
+ *   T's thread_info->cpenable, cp_owner_cpu and coprocessor save area.
+ *   This is done by making sure that the CPENABLE SR is 0 whenever T, whose
+ *   live coprocessor state is on CPU X, runs on any other CPU Y.
+ *   When the fast_coprocessor exception is taken on CPU Y it goes to the
+ *   C-level do_coprocessor, which uses an IPI to make CPU X flush T's
+ *   coprocessors.
+ */
+
#if XTENSA_HAVE_COPROCESSORS
/*
@@ -29,35 +49,31 @@
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lsave_cp_regs_cp##x: \
- xchal_cp##x##_store a2 a4 a5 a6 a7; \
- jx a0; \
+ xchal_cp##x##_store a2 a3 a4 a5 a6; \
+ ret; \
.endif
-#define SAVE_CP_REGS_TAB(x) \
- .if XTENSA_HAVE_COPROCESSOR(x); \
- .long .Lsave_cp_regs_cp##x; \
- .else; \
- .long 0; \
- .endif; \
- .long THREAD_XTREGS_CP##x
-
-
#define LOAD_CP_REGS(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lload_cp_regs_cp##x: \
- xchal_cp##x##_load a2 a4 a5 a6 a7; \
- jx a0; \
+ xchal_cp##x##_load a2 a3 a4 a5 a6; \
+ ret; \
.endif
-#define LOAD_CP_REGS_TAB(x) \
+#define CP_REGS_TAB(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lsave_cp_regs_cp##x; \
.long .Lload_cp_regs_cp##x; \
.else; \
- .long 0; \
+ .long 0, 0; \
.endif; \
.long THREAD_XTREGS_CP##x
+#define CP_REGS_TAB_SAVE 0
+#define CP_REGS_TAB_LOAD 4
+#define CP_REGS_TAB_OFFSET 8
+
__XTENSA_HANDLER
SAVE_CP_REGS(0)
@@ -79,25 +95,15 @@
LOAD_CP_REGS(7)
.align 4
-.Lsave_cp_regs_jump_table:
- SAVE_CP_REGS_TAB(0)
- SAVE_CP_REGS_TAB(1)
- SAVE_CP_REGS_TAB(2)
- SAVE_CP_REGS_TAB(3)
- SAVE_CP_REGS_TAB(4)
- SAVE_CP_REGS_TAB(5)
- SAVE_CP_REGS_TAB(6)
- SAVE_CP_REGS_TAB(7)
-
-.Lload_cp_regs_jump_table:
- LOAD_CP_REGS_TAB(0)
- LOAD_CP_REGS_TAB(1)
- LOAD_CP_REGS_TAB(2)
- LOAD_CP_REGS_TAB(3)
- LOAD_CP_REGS_TAB(4)
- LOAD_CP_REGS_TAB(5)
- LOAD_CP_REGS_TAB(6)
- LOAD_CP_REGS_TAB(7)
+.Lcp_regs_jump_table:
+ CP_REGS_TAB(0)
+ CP_REGS_TAB(1)
+ CP_REGS_TAB(2)
+ CP_REGS_TAB(3)
+ CP_REGS_TAB(4)
+ CP_REGS_TAB(5)
+ CP_REGS_TAB(6)
+ CP_REGS_TAB(7)
/*
* Entry condition:
@@ -115,9 +121,37 @@
ENTRY(fast_coprocessor)
+ s32i a3, a2, PT_AREG3
+
+#ifdef CONFIG_SMP
+ /*
+ * Check if any coprocessor context is live on another CPU
+ * and if so go through the C-level coprocessor exception handler
+ * to flush it to memory.
+ */
+ GET_THREAD_INFO (a0, a2)
+ l32i a3, a0, THREAD_CPENABLE
+ beqz a3, .Lload_local
+
+ /*
+ * Pairs with smp_wmb in local_coprocessor_release_all
+ * and with both memws below.
+ */
+ memw
+ l32i a3, a0, THREAD_CPU
+ l32i a0, a0, THREAD_CP_OWNER_CPU
+ beq a0, a3, .Lload_local
+
+ rsr a0, ps
+ l32i a3, a2, PT_AREG3
+ bbci.l a0, PS_UM_BIT, 1f
+ call0 user_exception
+1: call0 kernel_exception
+#endif
+
/* Save remaining registers a1-a3 and SAR */
- s32i a3, a2, PT_AREG3
+.Lload_local:
rsr a3, sar
s32i a1, a2, PT_AREG1
s32i a3, a2, PT_SAR
@@ -125,13 +159,15 @@ ENTRY(fast_coprocessor)
rsr a2, depc
s32i a2, a1, PT_AREG2
- /*
- * The hal macros require up to 4 temporary registers. We use a3..a6.
- */
+ /* The hal macros require up to 4 temporary registers. We use a3..a6. */
s32i a4, a1, PT_AREG4
s32i a5, a1, PT_AREG5
s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
@@ -148,58 +184,74 @@ ENTRY(fast_coprocessor)
wsr a0, cpenable
rsync
- /* Retrieve previous owner. (a3 still holds CP number) */
+ /* Get coprocessor save/load table entry (a7). */
- movi a0, coprocessor_owner # list of owners
- addx4 a0, a3, a0 # entry for CP
- l32i a4, a0, 0
+ movi a7, .Lcp_regs_jump_table
+ addx8 a7, a3, a7
+ addx4 a7, a3, a7
- beqz a4, 1f # skip 'save' if no previous owner
+ /* Retrieve previous owner (a8). */
- /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+ rsr a0, excsave1 # exc_table
+ addx4 a0, a3, a0 # entry for CP
+ l32i a8, a0, EXC_TABLE_COPROCESSOR_OWNER
+
+ /* Set new owner (a9). */
- l32i a5, a4, THREAD_CPENABLE
- xor a5, a5, a2 # (1 << cp-id) still in a2
- s32i a5, a4, THREAD_CPENABLE
+ GET_THREAD_INFO (a9, a1)
+ l32i a4, a9, THREAD_CPU
+ s32i a9, a0, EXC_TABLE_COPROCESSOR_OWNER
+ s32i a4, a9, THREAD_CP_OWNER_CPU
/*
- * Get context save area and 'call' save routine.
- * (a4 still holds previous owner (thread_info), a3 CP number)
+ * Enable coprocessor for the new owner. (a2 = 1 << CP number)
+ * This can be done before loading context into the coprocessor.
*/
+ l32i a4, a9, THREAD_CPENABLE
+ or a4, a4, a2
- movi a5, .Lsave_cp_regs_jump_table
- movi a0, 2f # a0: 'return' address
- addx8 a3, a3, a5 # a3: coprocessor number
- l32i a2, a3, 4 # a2: xtregs offset
- l32i a3, a3, 0 # a3: jump address
- add a2, a2, a4
- jx a3
+ /*
+ * Make sure THREAD_CP_OWNER_CPU is in memory before updating
+ * THREAD_CPENABLE
+ */
+ memw # (2)
+ s32i a4, a9, THREAD_CPENABLE
- /* Note that only a0 and a1 were preserved. */
+ beqz a8, 1f # skip 'save' if no previous owner
-2: rsr a3, exccause
- addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
- movi a0, coprocessor_owner
- addx4 a0, a3, a0
+ /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
- /* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+ l32i a10, a8, THREAD_CPENABLE
+ xor a10, a10, a2
-1: GET_THREAD_INFO (a4, a1)
- s32i a4, a0, 0
+ /* Get context save area and call save routine. */
- /* Get context save area and 'call' load routine. */
+ l32i a2, a7, CP_REGS_TAB_OFFSET
+ l32i a3, a7, CP_REGS_TAB_SAVE
+ add a2, a2, a8
+ callx0 a3
- movi a5, .Lload_cp_regs_jump_table
- movi a0, 1f
- addx8 a3, a3, a5
- l32i a2, a3, 4 # a2: xtregs offset
- l32i a3, a3, 0 # a3: jump address
- add a2, a2, a4
- jx a3
+ /*
+ * Make sure coprocessor context and THREAD_CP_OWNER_CPU are in memory
+ * before updating THREAD_CPENABLE
+ */
+ memw # (3)
+ s32i a10, a8, THREAD_CPENABLE
+1:
+ /* Get context save area and call load routine. */
+
+ l32i a2, a7, CP_REGS_TAB_OFFSET
+ l32i a3, a7, CP_REGS_TAB_LOAD
+ add a2, a2, a9
+ callx0 a3
/* Restore all registers and return from exception handler. */
-1: l32i a6, a1, PT_AREG6
+ l32i a10, a1, PT_AREG10
+ l32i a9, a1, PT_AREG9
+ l32i a8, a1, PT_AREG8
+ l32i a7, a1, PT_AREG7
+ l32i a6, a1, PT_AREG6
l32i a5, a1, PT_AREG5
l32i a4, a1, PT_AREG4
@@ -230,29 +282,21 @@ ENDPROC(fast_coprocessor)
ENTRY(coprocessor_flush)
- /* reserve 4 bytes on stack to save a0 */
- abi_entry(4)
-
- s32i a0, a1, 0
- movi a0, .Lsave_cp_regs_jump_table
- addx8 a3, a3, a0
- l32i a4, a3, 4
- l32i a3, a3, 0
- add a2, a2, a4
- beqz a3, 1f
- callx0 a3
-1: l32i a0, a1, 0
-
- abi_ret(4)
+ abi_entry_default
+
+ movi a4, .Lcp_regs_jump_table
+ addx8 a4, a3, a4
+ addx4 a3, a3, a4
+ l32i a4, a3, CP_REGS_TAB_SAVE
+ beqz a4, 1f
+ l32i a3, a3, CP_REGS_TAB_OFFSET
+ add a2, a2, a3
+ mov a7, a0
+ callx0 a4
+ mov a0, a7
+1:
+ abi_ret_default
ENDPROC(coprocessor_flush)
- .data
-
-ENTRY(coprocessor_owner)
-
- .fill XCHAL_CP_MAX, 4, 0
-
-END(coprocessor_owner)
-
#endif /* XTENSA_HAVE_COPROCESSORS */
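
A C-level paraphrase of the SMP ownership protocol described in the
comment at the top of this file; this is a sketch only, the real fast
path is the fast_coprocessor assembly above:

    /* coprocessor-disabled exception for task T on this CPU: */
    if (T->cpenable && T->cp_owner_cpu != smp_processor_id()) {
            /*
             * T's live state sits on another CPU, so take the slow
             * path into the C-level do_coprocessor(), which IPIs the
             * owner CPU to flush and release T's contexts.
             */
            coprocessor_flush_release_all(T);
    }
    /* now the context can be loaded locally and ownership taken */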
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 6b6eff658795..e3eae648ba2e 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -28,15 +28,6 @@
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>
-/* Unimplemented features. */
-
-#undef KERNEL_STACK_OVERFLOW_CHECK
-
-/* Not well tested.
- *
- * - fast_coprocessor
- */
-
/*
* Macro to find first bit set in WINDOWBASE from the left + 1
*
@@ -178,28 +169,26 @@ _user_exception:
/* Save only live registers. */
-UABI_W _bbsi.l a2, 1, 1f
+UABI_W _bbsi.l a2, 1, .Lsave_window_registers
s32i a4, a1, PT_AREG4
s32i a5, a1, PT_AREG5
s32i a6, a1, PT_AREG6
s32i a7, a1, PT_AREG7
-UABI_W _bbsi.l a2, 2, 1f
+UABI_W _bbsi.l a2, 2, .Lsave_window_registers
s32i a8, a1, PT_AREG8
s32i a9, a1, PT_AREG9
s32i a10, a1, PT_AREG10
s32i a11, a1, PT_AREG11
-UABI_W _bbsi.l a2, 3, 1f
+UABI_W _bbsi.l a2, 3, .Lsave_window_registers
s32i a12, a1, PT_AREG12
s32i a13, a1, PT_AREG13
s32i a14, a1, PT_AREG14
s32i a15, a1, PT_AREG15
#if defined(USER_SUPPORT_WINDOWED)
- _bnei a2, 1, 1f # only one valid frame?
+ /* If only one valid frame, skip saving regs. */
- /* Only one valid frame, skip saving regs. */
-
- j 2f
+ beqi a2, 1, common_exception
/* Save the remaining registers.
* We have to save all registers up to the first '1' from
@@ -208,8 +197,8 @@ UABI_W _bbsi.l a2, 3, 1f
* All register frames starting from the top field to the marked '1'
* must be saved.
*/
-
-1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
+.Lsave_window_registers:
+ addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
and a3, a3, a2 # max. only one bit is set
@@ -250,7 +239,7 @@ UABI_W _bbsi.l a2, 3, 1f
/* We are back to the original stack pointer (a1) */
#endif
-2: /* Now, jump to the common exception handler. */
+ /* Now, jump to the common exception handler. */
j common_exception
@@ -350,15 +339,6 @@ KABI_W _bbsi.l a2, 3, 1f
l32i a0, a1, PT_AREG0 # restore saved a0
wsr a0, depc
-#ifdef KERNEL_STACK_OVERFLOW_CHECK
-
- /* Stack overflow check, for debugging */
- extui a2, a1, TASK_SIZE_BITS,XX
- movi a3, SIZE??
- _bge a2, a3, out_of_stack_panic
-
-#endif
-
/*
* This is the common exception handler.
* We get here from the user exception handler or simply by falling through
@@ -442,7 +422,6 @@ KABI_W or a3, a3, a0
moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
KABI_W movi a2, PS_WOE_MASK
KABI_W or a3, a3, a2
- rsr a2, exccause
#endif
/* restore return address (or 0 if return to userspace) */
@@ -469,42 +448,56 @@ KABI_W or a3, a3, a2
save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
+#ifdef CONFIG_TRACE_IRQFLAGS
+ rsr abi_tmp0, ps
+ extui abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ beqz abi_tmp0, 1f
+ abi_call trace_hardirqs_off
+1:
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING
+ l32i abi_tmp0, a1, PT_PS
+ bbci.l abi_tmp0, PS_UM_BIT, 1f
+ abi_call context_tracking_user_exit
+1:
+#endif
+
/* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler.
*/
- rsr a4, excsave1
- addx4 a4, a2, a4
- l32i a4, a4, EXC_TABLE_DEFAULT # load handler
- mov abi_arg1, a2 # pass EXCCAUSE
- mov abi_arg0, a1 # pass stack frame
+ l32i abi_arg1, a1, PT_EXCCAUSE # pass EXCCAUSE
+ rsr abi_tmp0, excsave1
+ addx4 abi_tmp0, abi_arg1, abi_tmp0
+ l32i abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT # load handler
+ mov abi_arg0, a1 # pass stack frame
/* Call the second-level handler */
- abi_callx a4
+ abi_callx abi_tmp0
/* Jump here for exception exit */
.global common_exception_return
common_exception_return:
#if XTENSA_FAKE_NMI
- l32i abi_tmp0, a1, PT_EXCCAUSE
- movi abi_tmp1, EXCCAUSE_MAPPED_NMI
- l32i abi_saved1, a1, PT_PS
- beq abi_tmp0, abi_tmp1, .Lrestore_state
+ l32i abi_tmp0, a1, PT_EXCCAUSE
+ movi abi_tmp1, EXCCAUSE_MAPPED_NMI
+ l32i abi_saved1, a1, PT_PS
+ beq abi_tmp0, abi_tmp1, .Lrestore_state
#endif
.Ltif_loop:
- irq_save a2, a3
+ irq_save abi_tmp0, abi_tmp1
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_off
#endif
/* Jump if we are returning from kernel exceptions. */
- l32i abi_saved1, a1, PT_PS
- GET_THREAD_INFO(a2, a1)
- l32i a4, a2, TI_FLAGS
- _bbci.l abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
+ l32i abi_saved1, a1, PT_PS
+ GET_THREAD_INFO(abi_tmp0, a1)
+ l32i abi_saved0, abi_tmp0, TI_FLAGS
+ _bbci.l abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
/* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling,
@@ -513,75 +506,80 @@ common_exception_return:
* Note that we don't disable interrupts here.
*/
- _bbsi.l a4, TIF_NEED_RESCHED, .Lresched
- movi a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
- bnone a4, a2, .Lexit_tif_loop_user
+ _bbsi.l abi_saved0, TIF_NEED_RESCHED, .Lresched
+ movi abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
+ bnone abi_saved0, abi_tmp0, .Lexit_tif_loop_user
- l32i a4, a1, PT_DEPC
- bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
+ l32i abi_tmp0, a1, PT_DEPC
+ bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
/* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_on
#endif
- rsil a2, 0
- mov abi_arg0, a1
+ rsil abi_tmp0, 0
+ mov abi_arg0, a1
abi_call do_notify_resume # int do_notify_resume(struct pt_regs*)
- j .Ltif_loop
+ j .Ltif_loop
.Lresched:
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_on
#endif
- rsil a2, 0
+ rsil abi_tmp0, 0
abi_call schedule # void schedule (void)
- j .Ltif_loop
+ j .Ltif_loop
.Lexit_tif_loop_kernel:
#ifdef CONFIG_PREEMPTION
- _bbci.l a4, TIF_NEED_RESCHED, .Lrestore_state
+ _bbci.l abi_saved0, TIF_NEED_RESCHED, .Lrestore_state
/* Check current_thread_info->preempt_count */
- l32i a4, a2, TI_PRE_COUNT
- bnez a4, .Lrestore_state
+ l32i abi_tmp1, abi_tmp0, TI_PRE_COUNT
+ bnez abi_tmp1, .Lrestore_state
abi_call preempt_schedule_irq
#endif
- j .Lrestore_state
+ j .Lrestore_state
.Lexit_tif_loop_user:
+#ifdef CONFIG_CONTEXT_TRACKING
+ abi_call context_tracking_user_enter
+#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- _bbci.l a4, TIF_DB_DISABLED, 1f
+ _bbci.l abi_saved0, TIF_DB_DISABLED, 1f
abi_call restore_dbreak
1:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
- l32i a4, a1, PT_DEPC
- bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
+ l32i abi_tmp0, a1, PT_DEPC
+ bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
abi_call check_tlb_sanity
#endif
.Lrestore_state:
#ifdef CONFIG_TRACE_IRQFLAGS
- extui a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- bgei a4, LOCKLEVEL, 1f
+ extui abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ bgei abi_tmp0, LOCKLEVEL, 1f
abi_call trace_hardirqs_on
1:
#endif
- /* Restore optional registers. */
+ /*
+ * Restore optional registers.
+ * abi_arg* are used as temporary registers here.
+ */
- load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+ load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT
/* Restore SCOMPARE1 */
#if XCHAL_HAVE_S32C1I
- l32i a2, a1, PT_SCOMPARE1
- wsr a2, scompare1
+ l32i abi_tmp0, a1, PT_SCOMPARE1
+ wsr abi_tmp0, scompare1
#endif
- wsr abi_saved1, ps /* disable interrupts */
-
- _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit
+ wsr abi_saved1, ps /* disable interrupts */
+ _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit
user_exception_exit:
@@ -795,7 +793,7 @@ ENDPROC(kernel_exception)
ENTRY(debug_exception)
rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
- bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
+ bbsi.l a0, PS_EXCM_BIT, .Ldebug_exception_in_exception # exception mode
/* Set EPC1 and EXCCAUSE */
@@ -814,10 +812,10 @@ ENTRY(debug_exception)
/* Switch to kernel/user stack, restore jump vector, and save a0 */
- bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
-
+ bbsi.l a2, PS_UM_BIT, .Ldebug_exception_user # jump if user mode
addi a2, a1, -16 - PT_KERNEL_SIZE # assume kernel stack
-3:
+
+.Ldebug_exception_continue:
l32i a0, a3, DT_DEBUG_SAVE
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG0
@@ -845,10 +843,12 @@ ENTRY(debug_exception)
bbsi.l a2, PS_UM_BIT, _user_exception
j _kernel_exception
-2: rsr a2, excsave1
+.Ldebug_exception_user:
+ rsr a2, excsave1
l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
- j 3b
+ j .Ldebug_exception_continue
+.Ldebug_exception_in_exception:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Debug exception while in exception mode. This may happen when
* window overflow/underflow handler or fast exception handler hits
@@ -856,8 +856,8 @@ ENTRY(debug_exception)
* breakpoints, single-step faulting instruction and restore data
* breakpoints.
*/
-1:
- bbci.l a0, PS_UM_BIT, 1b # jump if kernel mode
+
+ bbci.l a0, PS_UM_BIT, .Ldebug_exception_in_exception # jump if kernel mode
rsr a0, debugcause
bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
@@ -901,7 +901,7 @@ ENTRY(debug_exception)
rfi XCHAL_DEBUGLEVEL
#else
/* Debug exception while in exception mode. Should not happen. */
-1: j 1b // FIXME!!
+ j .Ldebug_exception_in_exception // FIXME!!
#endif
ENDPROC(debug_exception)
@@ -1056,6 +1056,11 @@ ENTRY(fast_illegal_instruction_user)
movi a3, PS_WOE_MASK
or a0, a0, a3
wsr a0, ps
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ GET_THREAD_INFO(a3, a2)
+ rsr a0, epc1
+ s32i a0, a3, TI_PS_WOE_FIX_ADDR
+#endif
l32i a3, a2, PT_AREG3
l32i a0, a2, PT_AREG0
rsr a2, depc
@@ -1630,12 +1635,13 @@ ENTRY(fast_second_level_miss)
GET_CURRENT(a1,a2)
l32i a0, a1, TASK_MM # tsk->mm
- beqz a0, 9f
+ beqz a0, .Lfast_second_level_miss_no_mm
-8: rsr a3, excvaddr # fault address
+.Lfast_second_level_miss_continue:
+ rsr a3, excvaddr # fault address
_PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval
- beqz a0, 2f
+ beqz a0, .Lfast_second_level_miss_no_pmd
/* Read ptevaddr and convert to top of page-table page.
*
@@ -1678,12 +1684,13 @@ ENTRY(fast_second_level_miss)
addi a3, a3, DTLB_WAY_PGD
add a1, a1, a3 # ... + way_number
-3: wdtlb a0, a1
+.Lfast_second_level_miss_wdtlb:
+ wdtlb a0, a1
dsync
/* Exit critical section. */
-
-4: rsr a3, excsave1
+.Lfast_second_level_miss_skip_wdtlb:
+ rsr a3, excsave1
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
@@ -1707,19 +1714,21 @@ ENTRY(fast_second_level_miss)
esync
rfde
-9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
- bnez a0, 8b
+.Lfast_second_level_miss_no_mm:
+ l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ bnez a0, .Lfast_second_level_miss_continue
/* Even more unlikely case active_mm == 0.
* We can get here with NMI in the middle of context_switch that
* touches vmalloc area.
*/
movi a0, init_mm
- j 8b
+ j .Lfast_second_level_miss_continue
+.Lfast_second_level_miss_no_pmd:
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-2: /* Special case for cache aliasing.
+ /* Special case for cache aliasing.
* We (should) only get here if a clear_user_page, copy_user_page
* or the aliased cache flush functions got preemptively interrupted
* by another task. Re-establish temporary mapping to the
@@ -1729,24 +1738,24 @@ ENTRY(fast_second_level_miss)
/* We shouldn't be in a double exception */
l32i a0, a2, PT_DEPC
- bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow
/* Make sure the exception originated in the special functions */
movi a0, __tlbtemp_mapping_start
rsr a3, epc1
- bltu a3, a0, 2f
+ bltu a3, a0, .Lfast_second_level_miss_slow
movi a0, __tlbtemp_mapping_end
- bgeu a3, a0, 2f
+ bgeu a3, a0, .Lfast_second_level_miss_slow
/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
movi a3, TLBTEMP_BASE_1
rsr a0, excvaddr
- bltu a0, a3, 2f
+ bltu a0, a3, .Lfast_second_level_miss_slow
addi a1, a0, -TLBTEMP_SIZE
- bgeu a1, a3, 2f
+ bgeu a1, a3, .Lfast_second_level_miss_slow
/* Check if we have to restore an ITLB mapping. */
@@ -1772,19 +1781,19 @@ ENTRY(fast_second_level_miss)
mov a0, a6
movnez a0, a7, a3
- j 3b
+ j .Lfast_second_level_miss_wdtlb
/* ITLB entry. We only use dst in a6. */
1: witlb a6, a1
isync
- j 4b
+ j .Lfast_second_level_miss_skip_wdtlb
#endif // DCACHE_WAY_SIZE > PAGE_SIZE
-
-2: /* Invalid PGD, default exception handling */
+ /* Invalid PGD, default exception handling */
+.Lfast_second_level_miss_slow:
rsr a1, depc
s32i a1, a2, PT_AREG2
@@ -1824,12 +1833,13 @@ ENTRY(fast_store_prohibited)
GET_CURRENT(a1,a2)
l32i a0, a1, TASK_MM # tsk->mm
- beqz a0, 9f
+ beqz a0, .Lfast_store_no_mm
-8: rsr a1, excvaddr # fault address
+.Lfast_store_continue:
+ rsr a1, excvaddr # fault address
_PGD_OFFSET(a0, a1, a3)
l32i a0, a0, 0
- beqz a0, 2f
+ beqz a0, .Lfast_store_slow
/*
* Note that we test _PAGE_WRITABLE_BIT only if PTE is present
@@ -1839,8 +1849,8 @@ ENTRY(fast_store_prohibited)
_PTE_OFFSET(a0, a1, a3)
l32i a3, a0, 0 # read pteval
movi a1, _PAGE_CA_INVALID
- ball a3, a1, 2f
- bbci.l a3, _PAGE_WRITABLE_BIT, 2f
+ ball a3, a1, .Lfast_store_slow
+ bbci.l a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
or a3, a3, a1
@@ -1868,7 +1878,6 @@ ENTRY(fast_store_prohibited)
l32i a2, a2, PT_DEPC
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-
rsr a2, depc
rfe
@@ -1878,11 +1887,17 @@ ENTRY(fast_store_prohibited)
esync
rfde
-9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
- j 8b
-
-2: /* If there was a problem, handle fault in C */
+.Lfast_store_no_mm:
+ l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ j .Lfast_store_continue
+ /* If there was a problem, handle fault in C */
+.Lfast_store_slow:
+ rsr a1, excvaddr
+ pdtlb a0, a1
+ bbci.l a0, DTLB_HIT_BIT, 1f
+ idtlb a0
+1:
rsr a3, depc # still holds a2
s32i a3, a2, PT_AREG2
mov a1, a2
@@ -2071,8 +2086,16 @@ ENTRY(_switch_to)
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
l32i a3, a5, THREAD_CPENABLE
- xsr a3, cpenable
- s32i a3, a4, THREAD_CPENABLE
+#ifdef CONFIG_SMP
+ beqz a3, 1f
+ memw # pairs with memw (2) in fast_coprocessor
+ l32i a6, a5, THREAD_CP_OWNER_CPU
+ l32i a7, a5, THREAD_CPU
+ beq a6, a7, 1f # load 0 into CPENABLE if current CPU is not the owner
+ movi a3, 0
+1:
+#endif
+ wsr a3, cpenable
#endif
#if XCHAL_HAVE_EXCLUSIVE
@@ -2147,3 +2170,95 @@ ENTRY(ret_from_kernel_thread)
j common_exception_return
ENDPROC(ret_from_kernel_thread)
+
+#ifdef CONFIG_HIBERNATION
+
+ .bss
+ .align 4
+.Lsaved_regs:
+#if defined(__XTENSA_WINDOWED_ABI__)
+ .fill 2, 4
+#elif defined(__XTENSA_CALL0_ABI__)
+ .fill 6, 4
+#else
+#error Unsupported Xtensa ABI
+#endif
+ .align XCHAL_NCP_SA_ALIGN
+.Lsaved_user_regs:
+ .fill XTREGS_USER_SIZE, 1
+
+ .previous
+
+ENTRY(swsusp_arch_suspend)
+
+ abi_entry_default
+
+ movi a2, .Lsaved_regs
+ movi a3, .Lsaved_user_regs
+ s32i a0, a2, 0
+ s32i a1, a2, 4
+ save_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_WINDOWED_ABI__)
+ spill_registers_kernel
+#elif defined(__XTENSA_CALL0_ABI__)
+ s32i a12, a2, 8
+ s32i a13, a2, 12
+ s32i a14, a2, 16
+ s32i a15, a2, 20
+#else
+#error Unsupported Xtensa ABI
+#endif
+ abi_call swsusp_save
+ mov a2, abi_rv
+ abi_ret_default
+
+ENDPROC(swsusp_arch_suspend)
+
+ENTRY(swsusp_arch_resume)
+
+ abi_entry_default
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ spill_registers_kernel
+#endif
+
+ movi a2, restore_pblist
+ l32i a2, a2, 0
+
+.Lcopy_pbe:
+ l32i a3, a2, PBE_ADDRESS
+ l32i a4, a2, PBE_ORIG_ADDRESS
+
+ __loopi a3, a9, PAGE_SIZE, 16
+ l32i a5, a3, 0
+ l32i a6, a3, 4
+ l32i a7, a3, 8
+ l32i a8, a3, 12
+ addi a3, a3, 16
+ s32i a5, a4, 0
+ s32i a6, a4, 4
+ s32i a7, a4, 8
+ s32i a8, a4, 12
+ addi a4, a4, 16
+ __endl a3, a9
+
+ l32i a2, a2, PBE_NEXT
+ bnez a2, .Lcopy_pbe
+
+ movi a2, .Lsaved_regs
+ movi a3, .Lsaved_user_regs
+ l32i a0, a2, 0
+ l32i a1, a2, 4
+ load_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_CALL0_ABI__)
+ l32i a12, a2, 8
+ l32i a13, a2, 12
+ l32i a14, a2, 16
+ l32i a15, a2, 20
+#endif
+ movi a2, 0
+ abi_ret_default
+
+ENDPROC(swsusp_arch_resume)
+
+#endif
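
The restore loop in swsusp_arch_resume above walks the page backup
entries prepared by the hibernation core; its C equivalent is roughly the
following (struct pbe and restore_pblist come from <linux/suspend.h>):

    struct pbe *p;

    /* copy each preserved page back to its original location */
    for (p = restore_pblist; p; p = p->next)
            memcpy(p->orig_address, p->address, PAGE_SIZE);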
diff --git a/arch/xtensa/kernel/hibernate.c b/arch/xtensa/kernel/hibernate.c
new file mode 100644
index 000000000000..06984327d6e2
--- /dev/null
+++ b/arch/xtensa/kernel/hibernate.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <asm/coprocessor.h>
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+#if XTENSA_HAVE_COPROCESSORS
+ local_coprocessors_flush_release_all();
+#endif
+}
+
+void notrace restore_processor_state(void)
+{
+}
diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
index 0dde21e0d3de..ad1841cecdfb 100644
--- a/arch/xtensa/kernel/jump_label.c
+++ b/arch/xtensa/kernel/jump_label.c
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
{
struct patch *patch = data;
- if (atomic_inc_return(&patch->cpu_count) == 1) {
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
local_patch_text(patch->addr, patch->data, patch->sz);
atomic_inc(&patch->cpu_count);
} else {
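
This jump label fix makes the last CPU to enter the rendezvous perform
the patching, guaranteeing every other CPU is already spinning in the
loop (and thus not executing the code being rewritten). For context, a
hedged sketch of how the rendezvous is driven, assuming the usual
stop_machine API:

    /* every online CPU executes patch_text_stop_machine() */
    stop_machine_cpuslocked(patch_text_stop_machine, &patch,
                            cpu_online_mask);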
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index c3751cc88e5d..68e0e2f06d66 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -47,6 +47,7 @@
#include <asm/asm-offsets.h>
#include <asm/regs.h>
#include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
@@ -63,52 +64,114 @@ EXPORT_SYMBOL(__stack_chk_guard);
#if XTENSA_HAVE_COPROCESSORS
-void coprocessor_release_all(struct thread_info *ti)
+void local_coprocessors_flush_release_all(void)
{
- unsigned long cpenable;
- int i;
+ struct thread_info **coprocessor_owner;
+ struct thread_info *unique_owner[XCHAL_CP_MAX];
+ int n = 0;
+ int i, j;
- /* Make sure we don't switch tasks during this operation. */
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+ xtensa_set_sr(XCHAL_CP_MASK, cpenable);
- preempt_disable();
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ struct thread_info *ti = coprocessor_owner[i];
- /* Walk through all cp owners and release it for the requested one. */
+ if (ti) {
+ coprocessor_flush(ti, i);
- cpenable = ti->cpenable;
+ for (j = 0; j < n; j++)
+ if (unique_owner[j] == ti)
+ break;
+ if (j == n)
+ unique_owner[n++] = ti;
- for (i = 0; i < XCHAL_CP_MAX; i++) {
- if (coprocessor_owner[i] == ti) {
- coprocessor_owner[i] = 0;
- cpenable &= ~(1 << i);
+ coprocessor_owner[i] = NULL;
}
}
+ for (i = 0; i < n; i++) {
+ /* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+ smp_wmb();
+ unique_owner[i]->cpenable = 0;
+ }
+ xtensa_set_sr(0, cpenable);
+}
- ti->cpenable = cpenable;
+static void local_coprocessor_release_all(void *info)
+{
+ struct thread_info *ti = info;
+ struct thread_info **coprocessor_owner;
+ int i;
+
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+
+ /* Walk through all cp owners and release it for the requested one. */
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (coprocessor_owner[i] == ti)
+ coprocessor_owner[i] = NULL;
+ }
+ /* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+ smp_wmb();
+ ti->cpenable = 0;
if (ti == current_thread_info())
xtensa_set_sr(0, cpenable);
+}
- preempt_enable();
+void coprocessor_release_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_release_all,
+ ti, true);
+ }
}
-void coprocessor_flush_all(struct thread_info *ti)
+static void local_coprocessor_flush_all(void *info)
{
- unsigned long cpenable, old_cpenable;
+ struct thread_info *ti = info;
+ struct thread_info **coprocessor_owner;
+ unsigned long old_cpenable;
int i;
- preempt_disable();
-
- old_cpenable = xtensa_get_sr(cpenable);
- cpenable = ti->cpenable;
- xtensa_set_sr(cpenable, cpenable);
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+ old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
for (i = 0; i < XCHAL_CP_MAX; i++) {
- if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+ if (coprocessor_owner[i] == ti)
coprocessor_flush(ti, i);
- cpenable >>= 1;
}
xtensa_set_sr(old_cpenable, cpenable);
+}
- preempt_enable();
+void coprocessor_flush_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_flush_all,
+ ti, true);
+ }
+}
+
+static void local_coprocessor_flush_release_all(void *info)
+{
+ local_coprocessor_flush_all(info);
+ local_coprocessor_release_all(info);
+}
+
+void coprocessor_flush_release_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_flush_release_all,
+ ti, true);
+ }
}
#endif
@@ -140,8 +203,7 @@ void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
struct thread_info *ti = current_thread_info();
- coprocessor_flush_all(ti);
- coprocessor_release_all(ti);
+ coprocessor_flush_release_all(ti);
#endif
flush_ptrace_hw_breakpoint(current);
}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 323c678a691f..22cdaa6729d3 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -171,8 +171,7 @@ static int tie_set(struct task_struct *target,
#if XTENSA_HAVE_COPROCESSORS
/* Flush all coprocessors before we overwrite them. */
- coprocessor_flush_all(ti);
- coprocessor_release_all(ti);
+ coprocessor_flush_release_all(ti);
ti->xtregs_cp.cp0 = newregs->cp0;
ti->xtregs_cp.cp1 = newregs->cp1;
ti->xtregs_cp.cp2 = newregs->cp2;
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
index 07e56e3a9a8b..8362388c8719 100644
--- a/arch/xtensa/kernel/s32c1i_selftest.c
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -40,14 +40,13 @@ static inline int probed_compare_swap(int *v, int cmp, int set)
/* Handle probed exception */
-static void __init do_probed_exception(struct pt_regs *regs,
- unsigned long exccause)
+static void __init do_probed_exception(struct pt_regs *regs)
{
if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
regs->pc += 3; /* skip the s32c1i instruction */
- rcw_exc = exccause;
+ rcw_exc = regs->exccause;
} else {
- do_unhandled(regs, exccause);
+ do_unhandled(regs);
}
}
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 6f68649e86ba..c9ffd42db873 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -162,8 +162,7 @@ setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
return err;
#if XTENSA_HAVE_COPROCESSORS
- coprocessor_flush_all(ti);
- coprocessor_release_all(ti);
+ coprocessor_flush_release_all(ti);
err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
sizeof (frame->xtregs.cp));
#endif
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 1254da07ead1..4dc109dd6214 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/thread_info.h>
#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
@@ -272,6 +273,12 @@ int __cpu_disable(void)
*/
set_cpu_online(cpu, false);
+#if XTENSA_HAVE_COPROCESSORS
+ /*
+ * Flush coprocessor contexts that are active on the current CPU.
+ */
+ local_coprocessors_flush_release_all();
+#endif
/*
* OK - migrate IRQs away from this CPU
*/
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 9345007d474d..0c25e035ff10 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -48,25 +48,20 @@
* Machine specific interrupt handlers
*/
-extern void kernel_exception(void);
-extern void user_exception(void);
-
-extern void fast_illegal_instruction_user(void);
-extern void fast_syscall_user(void);
-extern void fast_alloca(void);
-extern void fast_unaligned(void);
-extern void fast_second_level_miss(void);
-extern void fast_store_prohibited(void);
-extern void fast_coprocessor(void);
-
-extern void do_illegal_instruction (struct pt_regs*);
-extern void do_interrupt (struct pt_regs*);
-extern void do_nmi(struct pt_regs *);
-extern void do_unaligned_user (struct pt_regs*);
-extern void do_multihit (struct pt_regs*, unsigned long);
-extern void do_page_fault (struct pt_regs*, unsigned long);
-extern void do_debug (struct pt_regs*);
-extern void system_call (struct pt_regs*);
+static void do_illegal_instruction(struct pt_regs *regs);
+static void do_div0(struct pt_regs *regs);
+static void do_interrupt(struct pt_regs *regs);
+#if XTENSA_FAKE_NMI
+static void do_nmi(struct pt_regs *regs);
+#endif
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+static void do_unaligned_user(struct pt_regs *regs);
+#endif
+static void do_multihit(struct pt_regs *regs);
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs);
+#endif
+static void do_debug(struct pt_regs *regs);
/*
* The vector table must be preceded by a save area (which
@@ -78,7 +73,8 @@ extern void system_call (struct pt_regs*);
#define USER 0x02
#define COPROCESSOR(x) \
-{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }
typedef struct {
int cause;
@@ -100,7 +96,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
#endif
-/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0, do_div0 },
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER
@@ -110,21 +106,21 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
#ifdef CONFIG_MMU
-{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
-{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_PFAULT
{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
-{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
-/* EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
-{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
-{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
-{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
-/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
-{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
-#endif /* CONFIG_MMU */
+#endif
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
@@ -179,7 +175,7 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
* Unhandled Exceptions. Kill user task or panic if in kernel space.
*/
-void do_unhandled(struct pt_regs *regs, unsigned long exccause)
+void do_unhandled(struct pt_regs *regs)
{
__die_if_kernel("Caught unhandled exception - should not happen",
regs, SIGKILL);
@@ -189,7 +185,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
"(pid = %d, pc = %#010lx) - should not happen\n"
"\tEXCCAUSE is %ld\n",
current->comm, task_pid_nr(current), regs->pc,
- exccause);
+ regs->exccause);
force_sig(SIGILL);
}
@@ -197,7 +193,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
* Multi-hit exception. This is fatal!
*/
-void do_multihit(struct pt_regs *regs, unsigned long exccause)
+static void do_multihit(struct pt_regs *regs)
{
die("Caught multihit exception", regs, SIGKILL);
}
@@ -206,8 +202,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
* IRQ handler.
*/
-extern void do_IRQ(int, struct pt_regs *);
-
#if XTENSA_FAKE_NMI
#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
@@ -240,14 +234,10 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
DEFINE_PER_CPU(unsigned long, nmi_count);
-void do_nmi(struct pt_regs *regs)
+static void do_nmi(struct pt_regs *regs)
{
- struct pt_regs *old_regs;
-
- if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
- trace_hardirqs_off();
+ struct pt_regs *old_regs = set_irq_regs(regs);
- old_regs = set_irq_regs(regs);
nmi_enter();
++*this_cpu_ptr(&nmi_count);
check_valid_nmi();
@@ -257,7 +247,7 @@ void do_nmi(struct pt_regs *regs)
}
#endif
-void do_interrupt(struct pt_regs *regs)
+static void do_interrupt(struct pt_regs *regs)
{
static const unsigned int_level_mask[] = {
0,
@@ -269,12 +259,9 @@ void do_interrupt(struct pt_regs *regs)
XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK,
};
- struct pt_regs *old_regs;
+ struct pt_regs *old_regs = set_irq_regs(regs);
unsigned unhandled = ~0u;
- trace_hardirqs_off();
-
- old_regs = set_irq_regs(regs);
irq_enter();
for (;;) {
@@ -306,13 +293,47 @@ void do_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
+static bool check_div0(struct pt_regs *regs)
+{
+ static const u8 pattern[] = {'D', 'I', 'V', '0'};
+ const u8 *p;
+ u8 buf[5];
+
+ if (user_mode(regs)) {
+ if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
+ return false;
+ p = buf;
+ } else {
+ p = (const u8 *)regs->pc + 2;
+ }
+
+ return memcmp(p, pattern, sizeof(pattern)) == 0 ||
+ memcmp(p + 1, pattern, sizeof(pattern)) == 0;
+}
+
/*
* Illegal instruction. Fatal if in kernel space.
*/
-void
-do_illegal_instruction(struct pt_regs *regs)
+static void do_illegal_instruction(struct pt_regs *regs)
{
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ /*
+ * When a call0 application encounters an illegal instruction, the fast
+ * exception handler attempts to set PS.WOE and retry the failing
+ * instruction.
+ * If we get here we know that the instruction is also illegal with
+ * PS.WOE set, so it is not related to the windowed option and PS.WOE
+ * may be cleared.
+ */
+ if (regs->pc == current_thread_info()->ps_woe_fix_addr)
+ regs->ps &= ~PS_WOE_MASK;
+#endif
+ if (check_div0(regs)) {
+ do_div0(regs);
+ return;
+ }
+
__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
/* If in user mode, send SIGILL signal to current process. */
@@ -322,6 +343,11 @@ do_illegal_instruction(struct pt_regs *regs)
force_sig(SIGILL);
}
+static void do_div0(struct pt_regs *regs)
+{
+ __die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
+ force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
+}
/*
* Handle unaligned memory accesses from user space. Kill task.
@@ -331,8 +357,7 @@ do_illegal_instruction(struct pt_regs *regs)
*/
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-void
-do_unaligned_user (struct pt_regs *regs)
+static void do_unaligned_user(struct pt_regs *regs)
{
__die_if_kernel("Unhandled unaligned exception in kernel",
regs, SIGKILL);
@@ -347,14 +372,20 @@ do_unaligned_user (struct pt_regs *regs)
}
#endif
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs)
+{
+ coprocessor_flush_release_all(current_thread_info());
+}
+#endif
+
/* Handle debug events.
* When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
* preemption disabled to avoid rescheduling and keep mapping of hardware
* breakpoint structures to debug registers intact, so that
* DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
*/
-void
-do_debug(struct pt_regs *regs)
+static void do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret = check_hw_breakpoint(regs);
@@ -381,7 +412,8 @@ do_debug(struct pt_regs *regs)
/* Set exception C handler - for temporary use when probing exceptions */
-void * __init trap_set_handler(int cause, void *handler)
+xtensa_exception_handler *
+__init trap_set_handler(int cause, xtensa_exception_handler *handler)
{
void *previous = per_cpu(exc_table, 0).default_handler[cause];
@@ -392,8 +424,7 @@ void * __init trap_set_handler(int cause, void *handler)
static void trap_init_excsave(void)
{
- unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
- __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
+ xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
}
static void trap_init_debug(void)
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
index 5848c133f7ea..d4e9c397e3fd 100644
--- a/arch/xtensa/lib/Makefile
+++ b/arch/xtensa/lib/Makefile
@@ -8,3 +8,5 @@ lib-y += memcopy.o memset.o checksum.o \
divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
usercopy.o strncpy_user.o strnlen_user.o
lib-$(CONFIG_PCI) += pci-auto.o
+lib-$(CONFIG_KCSAN) += kcsan-stubs.o
+KCSAN_SANITIZE_kcsan-stubs.o := n
diff --git a/arch/xtensa/lib/kcsan-stubs.c b/arch/xtensa/lib/kcsan-stubs.c
new file mode 100644
index 000000000000..2b08faa62b86
--- /dev/null
+++ b/arch/xtensa/lib/kcsan-stubs.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+void __atomic_store_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_load_8(const volatile void *p, int i)
+{
+ BUG();
+}
+
+u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index 582d817979ed..b20d206bcb71 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -402,13 +402,13 @@ WEAK(memmove)
*/
# copy 16 bytes per iteration for word-aligned dst and word-aligned src
#if XCHAL_HAVE_LOOPS
- loopnez a7, .backLoop1done
+ loopnez a7, .LbackLoop1done
#else /* !XCHAL_HAVE_LOOPS */
- beqz a7, .backLoop1done
+ beqz a7, .LbackLoop1done
slli a8, a7, 4
sub a8, a3, a8 # a8 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop1:
+.LbackLoop1:
addi a3, a3, -16
l32i a7, a3, 12
l32i a6, a3, 8
@@ -420,9 +420,9 @@ WEAK(memmove)
s32i a7, a5, 4
s32i a6, a5, 0
#if !XCHAL_HAVE_LOOPS
- bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start
+ bne a3, a8, .LbackLoop1 # continue loop if a3:src != a8:src_start
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop1done:
+.LbackLoop1done:
bbci.l a4, 3, .Lback2
# copy 8 bytes
addi a3, a3, -8
@@ -479,13 +479,13 @@ WEAK(memmove)
#endif
l32i a6, a3, 0 # load first word
#if XCHAL_HAVE_LOOPS
- loopnez a7, .backLoop2done
+ loopnez a7, .LbackLoop2done
#else /* !XCHAL_HAVE_LOOPS */
- beqz a7, .backLoop2done
+ beqz a7, .LbackLoop2done
slli a10, a7, 4
sub a10, a3, a10 # a10 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop2:
+.LbackLoop2:
addi a3, a3, -16
l32i a7, a3, 12
l32i a8, a3, 8
@@ -501,9 +501,9 @@ WEAK(memmove)
__src_b a9, a6, a9
s32i a9, a5, 0
#if !XCHAL_HAVE_LOOPS
- bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
+ bne a3, a10, .LbackLoop2 # continue loop if a3:src != a10:src_start
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop2done:
+.LbackLoop2done:
bbci.l a4, 3, .Lback12
# copy 8 bytes
addi a3, a3, -8
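
The label rename in memcopy.S does not change the generated instructions: labels that start with .L are local to the assembler, so they are omitted from the object file's symbol table and stop showing up in backtraces and disassembly as if they were entry points. The convention in miniature, with an illustrative loop:

    .Lcopy_loop:                        # .L names never become symbols
        addi    a3, a3, -1
        bnez    a3, .Lcopy_loop         # a plain label here would
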
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index f7fb08ae768f..44153a335951 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -4,7 +4,8 @@
#
obj-y := init.o misc.o
-obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
+obj-$(CONFIG_PFAULT) += fault.o
+obj-$(CONFIG_MMU) += cache.o ioremap.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 06d0973a0d74..16f0a5ff5799 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -21,9 +21,61 @@
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
-DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
+static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
+{
+#ifdef CONFIG_MMU
+ /* Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ struct mm_struct *act_mm = current->active_mm;
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ if (act_mm == NULL)
+ goto bad_page_fault;
+
+ pgd = act_mm->pgd + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto bad_page_fault;
+
+ pgd_val(*pgd) = pgd_val(*pgd_k);
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+ goto bad_page_fault;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (!pud_present(*pud) || !pud_present(*pud_k))
+ goto bad_page_fault;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
+ goto bad_page_fault;
+
+ pmd_val(*pmd) = pmd_val(*pmd_k);
+ pte_k = pte_offset_kernel(pmd_k, address);
+
+ if (!pte_present(*pte_k))
+ goto bad_page_fault;
+ return;
+
+bad_page_fault:
+ bad_page_fault(regs, address, SIGKILL);
+#else
+ WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
+#endif
+}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -49,8 +101,10 @@ void do_page_fault(struct pt_regs *regs)
/* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*/
- if (address >= TASK_SIZE && !user_mode(regs))
- goto vmalloc_fault;
+ if (address >= TASK_SIZE && !user_mode(regs)) {
+ vmalloc_fault(regs, address);
+ return;
+ }
/* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -114,7 +168,7 @@ good_area:
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
- goto bad_page_fault;
+ bad_page_fault(regs, address, SIGKILL);
return;
}
@@ -181,56 +235,6 @@ do_sigbus:
if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS);
return;
-
-vmalloc_fault:
- {
- /* Synchronize this task's top level page-table
- * with the 'reference' page table.
- */
- struct mm_struct *act_mm = current->active_mm;
- int index = pgd_index(address);
- pgd_t *pgd, *pgd_k;
- p4d_t *p4d, *p4d_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- if (act_mm == NULL)
- goto bad_page_fault;
-
- pgd = act_mm->pgd + index;
- pgd_k = init_mm.pgd + index;
-
- if (!pgd_present(*pgd_k))
- goto bad_page_fault;
-
- pgd_val(*pgd) = pgd_val(*pgd_k);
-
- p4d = p4d_offset(pgd, address);
- p4d_k = p4d_offset(pgd_k, address);
- if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
- goto bad_page_fault;
-
- pud = pud_offset(p4d, address);
- pud_k = pud_offset(p4d_k, address);
- if (!pud_present(*pud) || !pud_present(*pud_k))
- goto bad_page_fault;
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
- goto bad_page_fault;
-
- pmd_val(*pmd) = pmd_val(*pmd_k);
- pte_k = pte_offset_kernel(pmd_k, address);
-
- if (!pte_present(*pte_k))
- goto bad_page_fault;
- return;
- }
-bad_page_fault:
- bad_page_fault(regs, address, SIGKILL);
- return;
}
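
The fault.c rework is mechanical (the vmalloc goto block becomes the static vmalloc_fault() helper at the top of the hunk), but it is what makes the Makefile split possible: built under CONFIG_PFAULT, the file now also serves noMMU configurations, where vmalloc_fault() degrades to a WARN_ONCE(). The synchronization itself copies the kernel's top-level page-table entry into the faulting task's table; since xtensa folds the intermediate p4d/pud levels, the top-level copy is what does the real work. In miniature:

    /* the synchronization idea, assuming folded intermediate levels:
     * make the task's top-level entry match the kernel reference one */
    pgd_t *pgd   = current->active_mm->pgd + pgd_index(address);
    pgd_t *pgd_k = init_mm.pgd + pgd_index(address);

    if (pgd_present(*pgd_k))
        pgd_val(*pgd) = pgd_val(*pgd_k);  /* both now map the vmalloc area */
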
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 38acda4f04e8..92e158c69c10 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -18,6 +18,8 @@
#include <asm/initialize_mmu.h>
#include <asm/io.h>
+DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
+
#if defined(CONFIG_HIGHMEM)
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
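
Moving the asid_cache definition here is a consequence of the same split: fault.c is now built for noMMU kernels as well, but ASIDs only exist with an MMU, so the per-CPU variable belongs in mmu.c. It remains ordinary per-CPU data; a purely hypothetical access for illustration:

    /* hypothetical sketch only: asid_cache is plain per-CPU state */
    unsigned long asid = this_cpu_read(asid_cache);
    this_cpu_write(asid_cache, asid + 1);  /* illustrative update */
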
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 81d7c7e8f7e9..10b79d3c74e0 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -36,24 +36,19 @@ static void rs_poll(struct timer_list *);
static struct tty_driver *serial_driver;
static struct tty_port serial_port;
static DEFINE_TIMER(serial_timer, rs_poll);
-static DEFINE_SPINLOCK(timer_lock);
static int rs_open(struct tty_struct *tty, struct file * filp)
{
- spin_lock_bh(&timer_lock);
if (tty->count == 1)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- spin_unlock_bh(&timer_lock);
return 0;
}
static void rs_close(struct tty_struct *tty, struct file * filp)
{
- spin_lock_bh(&timer_lock);
if (tty->count == 1)
del_timer_sync(&serial_timer);
- spin_unlock_bh(&timer_lock);
}
@@ -73,8 +68,6 @@ static void rs_poll(struct timer_list *unused)
int rd = 1;
unsigned char c;
- spin_lock(&timer_lock);
-
while (simc_poll(0)) {
rd = simc_read(0, &c, 1);
if (rd <= 0)
@@ -87,7 +80,6 @@ static void rs_poll(struct timer_list *unused)
tty_flip_buffer_push(port);
if (rd)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- spin_unlock(&timer_lock);
}
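
Dropping timer_lock from the ISS console is safe because nothing it guarded needed extra serialization: the tty layer already serializes rs_open() and rs_close() for a given tty, mod_timer() is safe against a concurrently running callback, and del_timer_sync() does not return while rs_poll() is still executing. What remains is the bare self-rearming poll pattern, sketched here with illustrative names:

    /* self-rearming poll timer, in miniature */
    static void poll_fn(struct timer_list *unused)
    {
        /* ... drain pending input ... */
        mod_timer(&poll_timer, jiffies + POLL_INTERVAL);  /* re-arm */
    }
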
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index be3aaaad8bee..fd84d4891758 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -38,9 +38,6 @@
#define ISS_NET_TIMER_VALUE (HZ / 10)
-static DEFINE_SPINLOCK(opened_lock);
-static LIST_HEAD(opened);
-
static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);
@@ -59,17 +56,27 @@ struct tuntap_info {
/* ------------------------------------------------------------------------- */
+struct iss_net_private;
+
+struct iss_net_ops {
+ int (*open)(struct iss_net_private *lp);
+ void (*close)(struct iss_net_private *lp);
+ int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
+ int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
+ unsigned short (*protocol)(struct sk_buff *skb);
+ int (*poll)(struct iss_net_private *lp);
+};
+
/* This structure contains our private information for the driver. */
struct iss_net_private {
struct list_head device_list;
- struct list_head opened_list;
spinlock_t lock;
struct net_device *dev;
struct platform_device pdev;
struct timer_list tl;
- struct net_device_stats stats;
+ struct rtnl_link_stats64 stats;
struct timer_list timer;
unsigned int timer_val;
@@ -82,12 +89,7 @@ struct iss_net_private {
struct tuntap_info tuntap;
} info;
- int (*open)(struct iss_net_private *lp);
- void (*close)(struct iss_net_private *lp);
- int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
- int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
- unsigned short (*protocol)(struct sk_buff *skb);
- int (*poll)(struct iss_net_private *lp);
+ const struct iss_net_ops *net_ops;
} tp;
};
@@ -215,6 +217,15 @@ static int tuntap_poll(struct iss_net_private *lp)
return simc_poll(lp->tp.info.tuntap.fd);
}
+static const struct iss_net_ops tuntap_ops = {
+ .open = tuntap_open,
+ .close = tuntap_close,
+ .read = tuntap_read,
+ .write = tuntap_write,
+ .protocol = tuntap_protocol,
+ .poll = tuntap_poll,
+};
+
/*
* ethX=tuntap,[mac address],device name
*/
@@ -257,13 +268,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
lp->mtu = TRANSPORT_TUNTAP_MTU;
lp->tp.info.tuntap.fd = -1;
-
- lp->tp.open = tuntap_open;
- lp->tp.close = tuntap_close;
- lp->tp.read = tuntap_read;
- lp->tp.write = tuntap_write;
- lp->tp.protocol = tuntap_protocol;
- lp->tp.poll = tuntap_poll;
+ lp->tp.net_ops = &tuntap_ops;
return 1;
}
@@ -278,14 +283,16 @@ static int iss_net_rx(struct net_device *dev)
/* Check if there is any new data. */
- if (lp->tp.poll(lp) == 0)
+ if (lp->tp.net_ops->poll(lp) == 0)
return 0;
/* Try to allocate memory, if it fails, try again next round. */
skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER);
if (skb == NULL) {
+ spin_lock_bh(&lp->lock);
lp->stats.rx_dropped++;
+ spin_unlock_bh(&lp->lock);
return 0;
}
@@ -295,15 +302,17 @@ static int iss_net_rx(struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- pkt_len = lp->tp.read(lp, &skb);
+ pkt_len = lp->tp.net_ops->read(lp, &skb);
skb_put(skb, pkt_len);
if (pkt_len > 0) {
skb_trim(skb, pkt_len);
- skb->protocol = lp->tp.protocol(skb);
+ skb->protocol = lp->tp.net_ops->protocol(skb);
+ spin_lock_bh(&lp->lock);
lp->stats.rx_bytes += skb->len;
lp->stats.rx_packets++;
+ spin_unlock_bh(&lp->lock);
netif_rx(skb);
return pkt_len;
}
@@ -311,38 +320,24 @@ static int iss_net_rx(struct net_device *dev)
return pkt_len;
}
-static int iss_net_poll(void)
+static int iss_net_poll(struct iss_net_private *lp)
{
- struct list_head *ele;
int err, ret = 0;
- spin_lock(&opened_lock);
-
- list_for_each(ele, &opened) {
- struct iss_net_private *lp;
-
- lp = list_entry(ele, struct iss_net_private, opened_list);
-
- if (!netif_running(lp->dev))
- break;
-
- spin_lock(&lp->lock);
-
- while ((err = iss_net_rx(lp->dev)) > 0)
- ret++;
+ if (!netif_running(lp->dev))
+ return 0;
- spin_unlock(&lp->lock);
+ while ((err = iss_net_rx(lp->dev)) > 0)
+ ret++;
- if (err < 0) {
- pr_err("Device '%s' read returned %d, shutting it down\n",
- lp->dev->name, err);
- dev_close(lp->dev);
- } else {
- /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
- }
+ if (err < 0) {
+ pr_err("Device '%s' read returned %d, shutting it down\n",
+ lp->dev->name, err);
+ dev_close(lp->dev);
+ } else {
+ /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
}
- spin_unlock(&opened_lock);
return ret;
}
@@ -351,10 +346,8 @@ static void iss_net_timer(struct timer_list *t)
{
struct iss_net_private *lp = from_timer(lp, t, timer);
- iss_net_poll();
- spin_lock(&lp->lock);
+ iss_net_poll(lp);
mod_timer(&lp->timer, jiffies + lp->timer_val);
- spin_unlock(&lp->lock);
}
@@ -363,11 +356,9 @@ static int iss_net_open(struct net_device *dev)
struct iss_net_private *lp = netdev_priv(dev);
int err;
- spin_lock_bh(&lp->lock);
-
- err = lp->tp.open(lp);
+ err = lp->tp.net_ops->open(lp);
if (err < 0)
- goto out;
+ return err;
netif_start_queue(dev);
@@ -378,36 +369,21 @@ static int iss_net_open(struct net_device *dev)
while ((err = iss_net_rx(dev)) > 0)
;
- spin_unlock_bh(&lp->lock);
- spin_lock_bh(&opened_lock);
- list_add(&lp->opened_list, &opened);
- spin_unlock_bh(&opened_lock);
- spin_lock_bh(&lp->lock);
-
timer_setup(&lp->timer, iss_net_timer, 0);
lp->timer_val = ISS_NET_TIMER_VALUE;
mod_timer(&lp->timer, jiffies + lp->timer_val);
-out:
- spin_unlock_bh(&lp->lock);
return err;
}
static int iss_net_close(struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
- netif_stop_queue(dev);
- spin_lock_bh(&lp->lock);
-
- spin_lock(&opened_lock);
- list_del(&opened);
- spin_unlock(&opened_lock);
+ netif_stop_queue(dev);
del_timer_sync(&lp->timer);
+ lp->tp.net_ops->close(lp);
- lp->tp.close(lp);
-
- spin_unlock_bh(&lp->lock);
return 0;
}
@@ -417,13 +393,14 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len;
netif_stop_queue(dev);
- spin_lock_bh(&lp->lock);
- len = lp->tp.write(lp, &skb);
+ len = lp->tp.net_ops->write(lp, &skb);
if (len == skb->len) {
+ spin_lock_bh(&lp->lock);
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
+ spin_unlock_bh(&lp->lock);
netif_trans_update(dev);
netif_start_queue(dev);
@@ -432,24 +409,29 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else if (len == 0) {
netif_start_queue(dev);
+ spin_lock_bh(&lp->lock);
lp->stats.tx_dropped++;
+ spin_unlock_bh(&lp->lock);
} else {
netif_start_queue(dev);
pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
}
- spin_unlock_bh(&lp->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
-static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
+static void iss_net_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct iss_net_private *lp = netdev_priv(dev);
- return &lp->stats;
+
+ spin_lock_bh(&lp->lock);
+ *stats = lp->stats;
+ spin_unlock_bh(&lp->lock);
}
static void iss_net_set_multicast_list(struct net_device *dev)
@@ -460,19 +442,6 @@ static void iss_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
}
-static int iss_net_set_mac(struct net_device *dev, void *addr)
-{
- struct iss_net_private *lp = netdev_priv(dev);
- struct sockaddr *hwaddr = addr;
-
- if (!is_valid_ether_addr(hwaddr->sa_data))
- return -EADDRNOTAVAIL;
- spin_lock_bh(&lp->lock);
- eth_hw_addr_set(dev, hwaddr->sa_data);
- spin_unlock_bh(&lp->lock);
- return 0;
-}
-
static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
{
return -EINVAL;
@@ -494,11 +463,11 @@ static int driver_registered;
static const struct net_device_ops iss_netdev_ops = {
.ndo_open = iss_net_open,
.ndo_stop = iss_net_close,
- .ndo_get_stats = iss_net_get_stats,
+ .ndo_get_stats64 = iss_net_get_stats64,
.ndo_start_xmit = iss_net_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = iss_net_change_mtu,
- .ndo_set_mac_address = iss_net_set_mac,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = iss_net_tx_timeout,
.ndo_set_rx_mode = iss_net_set_multicast_list,
};
@@ -520,7 +489,6 @@ static int iss_net_configure(int index, char *init)
lp = netdev_priv(dev);
*lp = (struct iss_net_private) {
.device_list = LIST_HEAD_INIT(lp->device_list),
- .opened_list = LIST_HEAD_INIT(lp->opened_list),
.dev = dev,
.index = index,
};
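
Three strands run through the network.c changes: the six per-instance callback pointers collapse into one shared const iss_net_ops table (less per-device state, and the pointers live in rodata); the global opened list and opened_lock disappear, since each device's timer now polls only its own device; and statistics move to rtnl_link_stats64, updated and snapshotted under lp->lock. The deleted iss_net_set_mac() was an open-coded copy of the generic helper that replaces it, which behaves approximately as follows (sketched from memory of net/ethernet/eth.c, not a verbatim copy):

    /* approximate behaviour of the generic eth_mac_addr() helper */
    int eth_mac_addr(struct net_device *dev, void *p)
    {
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
            return -EADDRNOTAVAIL;
        eth_hw_addr_set(dev, addr->sa_data);
        return 0;
    }
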
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 0f0e0724397f..4255b92fa3eb 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -211,12 +211,18 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
struct simdisk *dev = pde_data(file_inode(file));
const char *s = dev->filename;
if (s) {
- ssize_t n = simple_read_from_buffer(buf, size, ppos,
- s, strlen(s));
- if (n < 0)
- return n;
- buf += n;
- size -= n;
+ ssize_t len = strlen(s);
+ char *temp = kmalloc(len + 2, GFP_KERNEL);
+
+ if (!temp)
+ return -ENOMEM;
+
+ len = scnprintf(temp, len + 2, "%s\n", s);
+ len = simple_read_from_buffer(buf, size, ppos,
+ temp, len);
+
+ kfree(temp);
+ return len;
}
return simple_read_from_buffer(buf, size, ppos, "\n", 1);
}
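
The proc_read_simdisk() rework fixes offset handling as well as adding a trailing newline. The old code stitched the reply together from two buffers (the filename, then a literal "\n"), but simple_read_from_buffer() interprets *ppos relative to the buffer it is handed, so once the first call had advanced *ppos past strlen(s), the second call saw an offset beyond its 1-byte buffer and returned nothing; in effect the newline was never delivered for a non-empty filename. Building the whole "name\n" string in one bounded buffer makes every offset consistent:

    /* the fix in miniature: one buffer, one consistent *ppos */
    len = scnprintf(temp, len + 2, "%s\n", s);  /* bounded write */
    len = simple_read_from_buffer(buf, size, ppos, temp, len);
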
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
index 145d129be76f..0dc22c371614 100644
--- a/arch/xtensa/platforms/xt2000/setup.c
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -78,7 +78,7 @@ void __init platform_init(bp_tag_t *first)
void platform_heartbeat(void)
{
- static int i=0, t = 0;
+ static int i, t;
if (--t < 0)
{