Diffstat (limited to 'arch/xtensa/kernel')
-rw-r--r--   arch/xtensa/kernel/Makefile                |   8
-rw-r--r--   arch/xtensa/kernel/asm-offsets.c           |  24
-rw-r--r--   arch/xtensa/kernel/coprocessor.S           | 234
-rw-r--r--   arch/xtensa/kernel/entry.S                 | 376
-rw-r--r--   arch/xtensa/kernel/hibernate.c             |  25
-rw-r--r--   arch/xtensa/kernel/irq.c                   |  10
-rw-r--r--   arch/xtensa/kernel/jump_label.c            |   4
-rw-r--r--   arch/xtensa/kernel/mxhead.S                |   2
-rw-r--r--   arch/xtensa/kernel/process.c               | 139
-rw-r--r--   arch/xtensa/kernel/ptrace.c                |  12
-rw-r--r--   arch/xtensa/kernel/s32c1i_selftest.c       |   7
-rw-r--r--   arch/xtensa/kernel/setup.c                 |  10
-rw-r--r--   arch/xtensa/kernel/signal.c                |  11
-rw-r--r--   arch/xtensa/kernel/smp.c                   |   7
-rw-r--r--   arch/xtensa/kernel/syscall.c               |  18
-rw-r--r--   arch/xtensa/kernel/syscalls/Makefile       |   3
-rw-r--r--   arch/xtensa/kernel/syscalls/syscall.tbl    |   1
-rw-r--r--   arch/xtensa/kernel/time.c                  |   1
-rw-r--r--   arch/xtensa/kernel/traps.c                 | 145
-rw-r--r--   arch/xtensa/kernel/vectors.S               |   4
-rw-r--r--   arch/xtensa/kernel/vmlinux.lds.S           |   4
-rw-r--r--   arch/xtensa/kernel/xtensa_ksyms.c          |  14
22 files changed, 668 insertions, 391 deletions
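
Note: the bulk of this diff makes lazy coprocessor context switching SMP-safe. A task's
live coprocessor state may exist on at most one CPU; a non-zero thread_info->cpenable
marks the state as live and thread_info->cp_owner_cpu names the owning CPU, which is the
only CPU allowed to write the task's save area. The C sketch below condenses that
cross-CPU flush protocol from the coprocessor.S comment and the process.c hunks further
down; cpenable, cp_owner_cpu and local_coprocessor_flush_all() are the patch's own names,
while flush_for() is a hypothetical wrapper used only for this illustration.

    /* Condensed sketch of the cross-CPU flush protocol in this series. */
    static void flush_for(struct thread_info *ti)
    {
            /*
             * Non-zero ti->cpenable means some CPU holds live coprocessor
             * state for this task and ti->cp_owner_cpu says which one.
             * Only the owner may touch the task's save area, so the flush
             * is pushed to it and waited on.
             */
            if (ti->cpenable) {
                    smp_rmb();      /* pairs with memw (2) in fast_coprocessor */
                    smp_call_function_single(ti->cp_owner_cpu,
                                             local_coprocessor_flush_all,
                                             ti, true);
            }
    }
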
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index d4082c6a121b..f28b8e3d717e 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -3,9 +3,9 @@
 # Makefile for the Linux/Xtensa kernel.
 #
 
-extra-y := head.o vmlinux.lds
+extra-y := vmlinux.lds
 
-obj-y := align.o coprocessor.o entry.o irq.o platform.o process.o \
+obj-y := head.o align.o coprocessor.o entry.o irq.o platform.o process.o \
	 ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
	 vectors.o
 
@@ -13,11 +13,13 @@ obj-$(CONFIG_MMU) += pci-dma.o
 obj-$(CONFIG_PCI) += pci.o
 obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
-obj-$(CONFIG_SMP) += smp.o mxhead.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SECONDARY_RESET_VECTOR) += mxhead.o
 obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o
 
 # In the Xtensa architecture, assembly generates literals which must always
 # precede the L32R instruction with a relative offset less than 256 kB.
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index dc5c83cad9be..da38de20ae59 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -21,6 +21,7 @@
 #include <linux/ptrace.h>
 #include <linux/mm.h>
 #include <linux/kbuild.h>
+#include <linux/suspend.h>
 
 #include <asm/ptrace.h>
 #include <asm/traps.h>
@@ -63,7 +64,7 @@ int main(void)
	DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
	DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
	DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
-	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	DEFINE(PT_KERNEL_SIZE, offsetof(struct pt_regs, areg[16]));
	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
	DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
@@ -87,15 +88,19 @@ int main(void)
	OFFSET(TI_STSTUS, thread_info, status);
	OFFSET(TI_CPU, thread_info, cpu);
	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
-	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+	OFFSET(TI_PS_WOE_FIX_ADDR, thread_info, ps_woe_fix_addr);
+#endif
 
	/* struct thread_info (offset from start_struct) */
	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
-	DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
 #if XCHAL_HAVE_EXCLUSIVE
	DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
 #endif
+	DEFINE(THREAD_CPENABLE, offsetof(struct thread_info, cpenable));
+	DEFINE(THREAD_CPU, offsetof(struct thread_info, cpu));
+	DEFINE(THREAD_CP_OWNER_CPU, offsetof(struct thread_info, cp_owner_cpu));
 #if XTENSA_HAVE_COPROCESSORS
	DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
	DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
@@ -108,8 +113,6 @@ int main(void)
 #endif
	DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
	DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
-	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
-	       thread.current_ds));
 
	/* struct mm_struct */
	DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
@@ -140,11 +143,22 @@ int main(void)
	DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
	DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
	DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+#if XTENSA_HAVE_COPROCESSORS
+	DEFINE(EXC_TABLE_COPROCESSOR_OWNER,
+	       offsetof(struct exc_table, coprocessor_owner));
+#endif
	DEFINE(EXC_TABLE_FAST_USER,
	       offsetof(struct exc_table, fast_user_handler));
	DEFINE(EXC_TABLE_FAST_KERNEL,
	       offsetof(struct exc_table, fast_kernel_handler));
	DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
 
+#ifdef CONFIG_HIBERNATION
+	DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+	DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
+	DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+	DEFINE(PBE_SIZE, sizeof(struct pbe));
+#endif
+
	return 0;
 }
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 45cc0ae0af6f..ef33e76e07d8 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -19,6 +19,26 @@
 #include <asm/current.h>
 #include <asm/regs.h>
 
+/*
+ * Rules for coprocessor state manipulation on SMP:
+ *
+ * - a task may have live coprocessors only on one CPU.
+ *
+ * - whether coprocessor context of task T is live on some CPU is
+ *   denoted by T's thread_info->cpenable.
+ *
+ * - non-zero thread_info->cpenable means that thread_info->cp_owner_cpu
+ *   is valid in the T's thread_info. Zero thread_info->cpenable means that
+ *   coprocessor context is valid in the T's thread_info.
+ *
+ * - if a coprocessor context of task T is live on CPU X, only CPU X changes
+ *   T's thread_info->cpenable, cp_owner_cpu and coprocessor save area.
+ *   This is done by making sure that for the task T with live coprocessor
+ *   on CPU X cpenable SR is 0 when T runs on any other CPU Y.
+ *   When fast_coprocessor exception is taken on CPU Y it goes to the
+ *   C-level do_coprocessor that uses IPI to make CPU X flush T's coprocessors.
+ */
+
 #if XTENSA_HAVE_COPROCESSORS
 
 /*
@@ -29,35 +49,31 @@
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.align 4;						\
	.Lsave_cp_regs_cp##x:						\
-		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
-		jx	a0;						\
+		xchal_cp##x##_store a2 a3 a4 a5 a6;			\
+		ret;							\
	.endif
 
-#define SAVE_CP_REGS_TAB(x)						\
-	.if XTENSA_HAVE_COPROCESSOR(x);					\
-		.long .Lsave_cp_regs_cp##x;				\
-	.else;								\
-		.long 0;						\
-	.endif;								\
-	.long THREAD_XTREGS_CP##x
-
-
 #define LOAD_CP_REGS(x)							\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.align 4;						\
	.Lload_cp_regs_cp##x:						\
-		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
-		jx	a0;						\
+		xchal_cp##x##_load a2 a3 a4 a5 a6;			\
+		ret;							\
	.endif
 
-#define LOAD_CP_REGS_TAB(x)						\
+#define CP_REGS_TAB(x)							\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		.long .Lsave_cp_regs_cp##x;				\
		.long .Lload_cp_regs_cp##x;				\
	.else;								\
-		.long 0;						\
+		.long 0, 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x
 
+#define CP_REGS_TAB_SAVE 0
+#define CP_REGS_TAB_LOAD 4
+#define CP_REGS_TAB_OFFSET 8
+
	__XTENSA_HANDLER
 
	SAVE_CP_REGS(0)
@@ -79,25 +95,15 @@
	LOAD_CP_REGS(7)
 
	.align 4
-.Lsave_cp_regs_jump_table:
-	SAVE_CP_REGS_TAB(0)
-	SAVE_CP_REGS_TAB(1)
-	SAVE_CP_REGS_TAB(2)
-	SAVE_CP_REGS_TAB(3)
-	SAVE_CP_REGS_TAB(4)
-	SAVE_CP_REGS_TAB(5)
-	SAVE_CP_REGS_TAB(6)
-	SAVE_CP_REGS_TAB(7)
-
-.Lload_cp_regs_jump_table:
-	LOAD_CP_REGS_TAB(0)
-	LOAD_CP_REGS_TAB(1)
-	LOAD_CP_REGS_TAB(2)
-	LOAD_CP_REGS_TAB(3)
-	LOAD_CP_REGS_TAB(4)
-	LOAD_CP_REGS_TAB(5)
-	LOAD_CP_REGS_TAB(6)
-	LOAD_CP_REGS_TAB(7)
+.Lcp_regs_jump_table:
+	CP_REGS_TAB(0)
+	CP_REGS_TAB(1)
+	CP_REGS_TAB(2)
+	CP_REGS_TAB(3)
+	CP_REGS_TAB(4)
+	CP_REGS_TAB(5)
+	CP_REGS_TAB(6)
+	CP_REGS_TAB(7)
 
 /*
  * Entry condition:
@@ -115,9 +121,37 @@
 
 ENTRY(fast_coprocessor)
 
+	s32i	a3, a2, PT_AREG3
+
+#ifdef CONFIG_SMP
+	/*
+	 * Check if any coprocessor context is live on another CPU
+	 * and if so go through the C-level coprocessor exception handler
+	 * to flush it to memory.
+	 */
+	GET_THREAD_INFO (a0, a2)
+	l32i	a3, a0, THREAD_CPENABLE
+	beqz	a3, .Lload_local
+
+	/*
+	 * Pairs with smp_wmb in local_coprocessor_release_all
+	 * and with both memws below.
+	 */
+	memw
+	l32i	a3, a0, THREAD_CPU
+	l32i	a0, a0, THREAD_CP_OWNER_CPU
+	beq	a0, a3, .Lload_local
+
+	rsr	a0, ps
+	l32i	a3, a2, PT_AREG3
+	bbci.l	a0, PS_UM_BIT, 1f
+	call0	user_exception
+1:	call0	kernel_exception
+#endif
+
	/* Save remaining registers a1-a3 and SAR */
 
-	s32i	a3, a2, PT_AREG3
+.Lload_local:
	rsr	a3, sar
	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_SAR
@@ -125,13 +159,15 @@ ENTRY(fast_coprocessor)
	rsr	a2, depc
	s32i	a2, a1, PT_AREG2
 
-	/*
-	 * The hal macros require up to 4 temporary registers. We use a3..a6.
-	 */
+	/* The hal macros require up to 4 temporary registers. We use a3..a6. */
 
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
+	s32i	a7, a1, PT_AREG7
+	s32i	a8, a1, PT_AREG8
+	s32i	a9, a1, PT_AREG9
+	s32i	a10, a1, PT_AREG10
 
	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
 
@@ -148,58 +184,74 @@ ENTRY(fast_coprocessor)
	wsr	a0, cpenable
	rsync
 
-	/* Retrieve previous owner. (a3 still holds CP number) */
+	/* Get coprocessor save/load table entry (a7). */
 
-	movi	a0, coprocessor_owner	# list of owners
-	addx4	a0, a3, a0		# entry for CP
-	l32i	a4, a0, 0
+	movi	a7, .Lcp_regs_jump_table
+	addx8	a7, a3, a7
+	addx4	a7, a3, a7
 
-	beqz	a4, 1f			# skip 'save' if no previous owner
+	/* Retrieve previous owner (a8). */
 
-	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+	rsr	a0, excsave1		# exc_table
+	addx4	a0, a3, a0		# entry for CP
+	l32i	a8, a0, EXC_TABLE_COPROCESSOR_OWNER
+
+	/* Set new owner (a9). */
 
-	l32i	a5, a4, THREAD_CPENABLE
-	xor	a5, a5, a2		# (1 << cp-id) still in a2
-	s32i	a5, a4, THREAD_CPENABLE
+	GET_THREAD_INFO (a9, a1)
+	l32i	a4, a9, THREAD_CPU
+	s32i	a9, a0, EXC_TABLE_COPROCESSOR_OWNER
+	s32i	a4, a9, THREAD_CP_OWNER_CPU
 
	/*
-	 * Get context save area and 'call' save routine.
-	 * (a4 still holds previous owner (thread_info), a3 CP number)
+	 * Enable coprocessor for the new owner. (a2 = 1 << CP number)
+	 * This can be done before loading context into the coprocessor.
	 */
+	l32i	a4, a9, THREAD_CPENABLE
+	or	a4, a4, a2
 
-	movi	a5, .Lsave_cp_regs_jump_table
-	movi	a0, 2f			# a0: 'return' address
-	addx8	a3, a3, a5		# a3: coprocessor number
-	l32i	a2, a3, 4		# a2: xtregs offset
-	l32i	a3, a3, 0		# a3: jump address
-	add	a2, a2, a4
-	jx	a3
+	/*
+	 * Make sure THREAD_CP_OWNER_CPU is in memory before updating
+	 * THREAD_CPENABLE
+	 */
+	memw				# (2)
+	s32i	a4, a9, THREAD_CPENABLE
 
-	/* Note that only a0 and a1 were preserved. */
+	beqz	a8, 1f			# skip 'save' if no previous owner
 
-2:	rsr	a3, exccause
-	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
-	movi	a0, coprocessor_owner
-	addx4	a0, a3, a0
+	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
 
-	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+	l32i	a10, a8, THREAD_CPENABLE
+	xor	a10, a10, a2
 
-1:	GET_THREAD_INFO (a4, a1)
-	s32i	a4, a0, 0
+	/* Get context save area and call save routine. */
 
-	/* Get context save area and 'call' load routine. */
+	l32i	a2, a7, CP_REGS_TAB_OFFSET
+	l32i	a3, a7, CP_REGS_TAB_SAVE
+	add	a2, a2, a8
+	callx0	a3
 
-	movi	a5, .Lload_cp_regs_jump_table
-	movi	a0, 1f
-	addx8	a3, a3, a5
-	l32i	a2, a3, 4		# a2: xtregs offset
-	l32i	a3, a3, 0		# a3: jump address
-	add	a2, a2, a4
-	jx	a3
+	/*
+	 * Make sure coprocessor context and THREAD_CP_OWNER_CPU are in memory
+	 * before updating THREAD_CPENABLE
	 */
+	memw				# (3)
+	s32i	a10, a8, THREAD_CPENABLE
+1:
+	/* Get context save area and call load routine. */
+
+	l32i	a2, a7, CP_REGS_TAB_OFFSET
+	l32i	a3, a7, CP_REGS_TAB_LOAD
+	add	a2, a2, a9
+	callx0	a3
 
	/* Restore all registers and return from exception handler. */
 
-1:	l32i	a6, a1, PT_AREG6
+	l32i	a10, a1, PT_AREG10
+	l32i	a9, a1, PT_AREG9
+	l32i	a8, a1, PT_AREG8
+	l32i	a7, a1, PT_AREG7
+	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4
 
@@ -230,29 +282,21 @@ ENDPROC(fast_coprocessor)
 
 ENTRY(coprocessor_flush)
 
-	/* reserve 4 bytes on stack to save a0 */
-	abi_entry(4)
-
-	s32i	a0, a1, 0
-	movi	a0, .Lsave_cp_regs_jump_table
-	addx8	a3, a3, a0
-	l32i	a4, a3, 4
-	l32i	a3, a3, 0
-	add	a2, a2, a4
-	beqz	a3, 1f
-	callx0	a3
-1:	l32i	a0, a1, 0
-
-	abi_ret(4)
+	abi_entry_default
+
+	movi	a4, .Lcp_regs_jump_table
+	addx8	a4, a3, a4
+	addx4	a3, a3, a4
+	l32i	a4, a3, CP_REGS_TAB_SAVE
+	beqz	a4, 1f
+	l32i	a3, a3, CP_REGS_TAB_OFFSET
+	add	a2, a2, a3
+	mov	a7, a0
+	callx0	a4
+	mov	a0, a7
+1:
+	abi_ret_default
 
 ENDPROC(coprocessor_flush)
 
-	.data
-
-ENTRY(coprocessor_owner)
-
-	.fill XCHAL_CP_MAX, 4, 0
-
-END(coprocessor_owner)
-
 #endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 99ab3c1a3387..272fff587907 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -28,15 +28,6 @@
 #include <asm/tlbflush.h>
 #include <variant/tie-asm.h>
 
-/* Unimplemented features. */
-
-#undef KERNEL_STACK_OVERFLOW_CHECK
-
-/* Not well tested.
- *
- * - fast_coprocessor
- */
-
 /*
  * Macro to find first bit set in WINDOWBASE from the left + 1
  *
@@ -178,28 +169,26 @@ _user_exception:
 
	/* Save only live registers. */
 
-UABI_W	_bbsi.l	a2, 1, 1f
+UABI_W	_bbsi.l	a2, 1, .Lsave_window_registers
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
-UABI_W	_bbsi.l	a2, 2, 1f
+UABI_W	_bbsi.l	a2, 2, .Lsave_window_registers
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
-UABI_W	_bbsi.l	a2, 3, 1f
+UABI_W	_bbsi.l	a2, 3, .Lsave_window_registers
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
 
 #if defined(USER_SUPPORT_WINDOWED)
-	_bnei	a2, 1, 1f		# only one valid frame?
-
-	/* Only one valid frame, skip saving regs. */
+	/* If only one valid frame skip saving regs. */
 
-	j	2f
+	beqi	a2, 1, common_exception
 
	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
@@ -208,8 +197,8 @@ UABI_W	_bbsi.l	a2, 3, 1f
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */
-
-1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
+.Lsave_window_registers:
+	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set
 
@@ -250,7 +239,7 @@ UABI_W	_bbsi.l	a2, 3, 1f
 
	/* We are back to the original stack pointer (a1) */
 #endif
-2:	/* Now, jump to the common exception handler. */
+	/* Now, jump to the common exception handler. */
 
	j	common_exception
 
@@ -341,8 +330,8 @@ KABI_W	_bbsi.l	a2, 3, 1f
	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
-	l32i	a3, a1, PT_SIZE
-	l32i	a0, a1, PT_SIZE + 4
+	l32i	a3, a1, PT_KERNEL_SIZE
+	l32i	a0, a1, PT_KERNEL_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
 #endif
@@ -350,15 +339,6 @@ KABI_W	_bbsi.l	a2, 3, 1f
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc
 
-#ifdef KERNEL_STACK_OVERFLOW_CHECK
-
-	/*  Stack overflow check, for debugging  */
-	extui	a2, a1, TASK_SIZE_BITS,XX
-	movi	a3, SIZE??
-	_bge	a2, a3, out_of_stack_panic
-
-#endif
-
 /*
  * This is the common exception handler.
  * We get here from the user exception handler or simply by falling through
@@ -442,7 +422,6 @@ KABI_W	or	a3, a3, a0
	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
 KABI_W	movi	a2, PS_WOE_MASK
 KABI_W	or	a3, a3, a2
-	rsr	a2, exccause
 #endif
 
	/* restore return address (or 0 if return to userspace) */
@@ -469,41 +448,56 @@ KABI_W	or	a3, a3, a2
 
	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	rsr	abi_tmp0, ps
+	extui	abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	beqz	abi_tmp0, 1f
+	abi_call	trace_hardirqs_off
+1:
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+	l32i	abi_tmp0, a1, PT_PS
+	bbci.l	abi_tmp0, PS_UM_BIT, 1f
+	abi_call	user_exit_callable
+1:
+#endif
+
	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */
 
-	rsr	a4, excsave1
-	addx4	a4, a2, a4
-	l32i	a4, a4, EXC_TABLE_DEFAULT	# load handler
-	mov	abi_arg1, a2			# pass EXCCAUSE
-	mov	abi_arg0, a1			# pass stack frame
+	l32i	abi_arg1, a1, PT_EXCCAUSE	# pass EXCCAUSE
+	rsr	abi_tmp0, excsave1
+	addx4	abi_tmp0, abi_arg1, abi_tmp0
+	l32i	abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT	# load handler
+	mov	abi_arg0, a1			# pass stack frame
 
	/* Call the second-level handler */
 
-	abi_callx	a4
+	abi_callx	abi_tmp0
 
	/* Jump here for exception exit */
	.global common_exception_return
 common_exception_return:
 
 #if XTENSA_FAKE_NMI
-	l32i	a2, a1, PT_EXCCAUSE
-	movi	a3, EXCCAUSE_MAPPED_NMI
-	beq	a2, a3, .LNMIexit
+	l32i	abi_tmp0, a1, PT_EXCCAUSE
+	movi	abi_tmp1, EXCCAUSE_MAPPED_NMI
+	l32i	abi_saved1, a1, PT_PS
+	beq	abi_tmp0, abi_tmp1, .Lrestore_state
 #endif
-1:
-	irq_save a2, a3
+.Ltif_loop:
+	irq_save abi_tmp0, abi_tmp1
 #ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_off
 #endif
 
	/* Jump if we are returning from kernel exceptions. */
 
-	l32i	abi_saved1, a1, PT_PS
-	GET_THREAD_INFO(a2, a1)
-	l32i	a4, a2, TI_FLAGS
-	_bbci.l	abi_saved1, PS_UM_BIT, 6f
+	l32i	abi_saved1, a1, PT_PS
+	GET_THREAD_INFO(abi_tmp0, a1)
+	l32i	abi_saved0, abi_tmp0, TI_FLAGS
+	_bbci.l	abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
 
	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
@@ -512,82 +506,80 @@ common_exception_return:
	 * Note that we don't disable interrupts here.
	 */
 
-	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
-	movi	a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
-	bnone	a4, a2, 5f
+	_bbsi.l	abi_saved0, TIF_NEED_RESCHED, .Lresched
+	movi	abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
+	bnone	abi_saved0, abi_tmp0, .Lexit_tif_loop_user
 
-2:	l32i	a4, a1, PT_DEPC
-	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+	l32i	abi_tmp0, a1, PT_DEPC
+	bgeui	abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
 
	/* Call do_signal() */
 
 #ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
 #endif
-	rsil	a2, 0
-	mov	abi_arg0, a1
+	rsil	abi_tmp0, 0
+	mov	abi_arg0, a1
	abi_call	do_notify_resume	# int do_notify_resume(struct pt_regs*)
-	j	1b
-
-3:	/* Reschedule */
+	j	.Ltif_loop
 
+.Lresched:
 #ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
 #endif
-	rsil	a2, 0
+	rsil	abi_tmp0, 0
	abi_call	schedule	# void schedule (void)
-	j	1b
+	j	.Ltif_loop
 
+.Lexit_tif_loop_kernel:
 #ifdef CONFIG_PREEMPTION
-6:
-	_bbci.l	a4, TIF_NEED_RESCHED, 4f
+	_bbci.l	abi_saved0, TIF_NEED_RESCHED, .Lrestore_state
 
	/* Check current_thread_info->preempt_count */
 
-	l32i	a4, a2, TI_PRE_COUNT
-	bnez	a4, 4f
+	l32i	abi_tmp1, abi_tmp0, TI_PRE_COUNT
+	bnez	abi_tmp1, .Lrestore_state
	abi_call	preempt_schedule_irq
-	j	4f
 #endif
+	j	.Lrestore_state
 
-#if XTENSA_FAKE_NMI
-.LNMIexit:
-	l32i	abi_saved1, a1, PT_PS
-	_bbci.l	abi_saved1, PS_UM_BIT, 4f
+.Lexit_tif_loop_user:
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+	abi_call	user_enter_callable
 #endif
-
-5:
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-	_bbci.l	a4, TIF_DB_DISABLED, 7f
+	_bbci.l	abi_saved0, TIF_DB_DISABLED, 1f
	abi_call	restore_dbreak
-7:
+1:
 #endif
 #ifdef CONFIG_DEBUG_TLB_SANITY
-	l32i	a4, a1, PT_DEPC
-	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+	l32i	abi_tmp0, a1, PT_DEPC
+	bgeui	abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
	abi_call	check_tlb_sanity
 #endif
-6:
-4:
+
+.Lrestore_state:
 #ifdef CONFIG_TRACE_IRQFLAGS
-	extui	a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
-	bgei	a4, LOCKLEVEL, 1f
+	extui	abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	bgei	abi_tmp0, LOCKLEVEL, 1f
	abi_call	trace_hardirqs_on
1:
 #endif
 
-	/* Restore optional registers. */
+	/*
+	 * Restore optional registers.
+	 * abi_arg* are used as temporary registers here.
+	 */
 
-	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+	load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT
 
	/* Restore SCOMPARE1 */
 
 #if XCHAL_HAVE_S32C1I
-	l32i	a2, a1, PT_SCOMPARE1
-	wsr	a2, scompare1
+	l32i	abi_tmp0, a1, PT_SCOMPARE1
+	wsr	abi_tmp0, scompare1
 #endif
-	wsr	abi_saved1, ps		/* disable interrupts */
-
-	_bbci.l	abi_saved1, PS_UM_BIT, kernel_exception_exit
+	wsr	abi_saved1, ps		/* disable interrupts */
+	_bbci.l	abi_saved1, PS_UM_BIT, kernel_exception_exit
 
 user_exception_exit:
 
@@ -606,7 +598,7 @@ user_exception_exit:
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
-	_bltui	a6, 16, 1f		# only have to restore current window?
+	_bltui	a6, 16, .Lclear_regs	# only have to restore current window?
 
	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
@@ -618,18 +610,19 @@ user_exception_exit:
	mov	a2, a6
	mov	a3, a5
 
-2:	rotw	-1			# a0..a3 become a4..a7
+1:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
-	_bgeui	a2, 16, 2b
+	_bgeui	a2, 16, 1b
 
	/* Clear unrestored registers (don't leak anything to user-land */
 
-1:	rsr	a0, windowbase
+.Lclear_regs:
+	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
@@ -706,12 +699,12 @@ kernel_exception_exit:
	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
-	s32i	a3, a1, PT_SIZE+0
-	s32i	a4, a1, PT_SIZE+4
+	s32i	a3, a1, PT_KERNEL_SIZE + 0
+	s32i	a4, a1, PT_KERNEL_SIZE + 4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
-	s32i	a3, a1, PT_SIZE+8
-	s32i	a4, a1, PT_SIZE+12
+	s32i	a3, a1, PT_KERNEL_SIZE + 8
+	s32i	a4, a1, PT_KERNEL_SIZE + 12
 
	/* Common exception exit.
	 * We restore the special register and the current window frame, and
@@ -800,7 +793,7 @@ ENDPROC(kernel_exception)
 ENTRY(debug_exception)
 
	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
-	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
+	bbsi.l	a0, PS_EXCM_BIT, .Ldebug_exception_in_exception	# exception mode
 
	/* Set EPC1 and EXCCAUSE */
 
@@ -819,10 +812,10 @@ ENTRY(debug_exception)
 
	/* Switch to kernel/user stack, restore jump vector, and save a0 */
 
-	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode
+	bbsi.l	a2, PS_UM_BIT, .Ldebug_exception_user	# jump if user mode
+	addi	a2, a1, -16 - PT_KERNEL_SIZE	# assume kernel stack
 
-	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
-3:
+.Ldebug_exception_continue:
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
@@ -850,10 +843,12 @@ ENTRY(debug_exception)
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception
 
-2:	rsr	a2, excsave1
+.Ldebug_exception_user:
+	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
-	j	3b
+	j	.Ldebug_exception_continue
 
+.Ldebug_exception_in_exception:
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when
	 * window overflow/underflow handler or fast exception handler hits
@@ -861,8 +856,8 @@ ENTRY(debug_exception)
	 * breakpoints, single-step faulting instruction and restore data
	 * breakpoints.
	 */
-1:
-	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode
+
+	bbci.l	a0, PS_UM_BIT, .Ldebug_exception_in_exception	# jump if kernel mode
 
	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
@@ -906,7 +901,7 @@ ENTRY(debug_exception)
	rfi	XCHAL_DEBUGLEVEL
 #else
	/* Debug exception while in exception mode. Should not happen. */
-1:	j	1b	// FIXME!!
+	j	.Ldebug_exception_in_exception	// FIXME!!
 #endif
 
 ENDPROC(debug_exception)
@@ -1061,6 +1056,11 @@ ENTRY(fast_illegal_instruction_user)
	movi	a3, PS_WOE_MASK
	or	a0, a0, a3
	wsr	a0, ps
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+	GET_THREAD_INFO(a3, a2)
+	rsr	a0, epc1
+	s32i	a0, a3, TI_PS_WOE_FIX_ADDR
+#endif
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	rsr	a2, depc
@@ -1433,7 +1433,7 @@ ENTRY(fast_syscall_spill_registers)
	rsync
 
	movi	abi_arg0, SIGSEGV
-	abi_call	do_exit
+	abi_call	make_task_dead
 
	/* shouldn't return, so panic */
 
@@ -1635,12 +1635,13 @@ ENTRY(fast_second_level_miss)
 
	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
-	beqz	a0, 9f
+	beqz	a0, .Lfast_second_level_miss_no_mm
 
-8:	rsr	a3, excvaddr		# fault address
+.Lfast_second_level_miss_continue:
+	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
-	beqz	a0, 2f
+	beqz	a0, .Lfast_second_level_miss_no_pmd
 
	/* Read ptevaddr and convert to top of page-table page.
	 *
@@ -1683,12 +1684,13 @@ ENTRY(fast_second_level_miss)
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number
 
-3:	wdtlb	a0, a1
+.Lfast_second_level_miss_wdtlb:
+	wdtlb	a0, a1
	dsync
 
	/* Exit critical section. */
-
-4:	rsr	a3, excsave1
+.Lfast_second_level_miss_skip_wdtlb:
+	rsr	a3, excsave1
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP
 
@@ -1712,19 +1714,21 @@ ENTRY(fast_second_level_miss)
	esync
	rfde
 
-9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
-	bnez	a0, 8b
+.Lfast_second_level_miss_no_mm:
+	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	bnez	a0, .Lfast_second_level_miss_continue
 
	/* Even more unlikely case active_mm == 0.
	 * We can get here with NMI in the middle of context_switch that
	 * touches vmalloc area.
	 */
	movi	a0, init_mm
-	j	8b
+	j	.Lfast_second_level_miss_continue
 
+.Lfast_second_level_miss_no_pmd:
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-2:	/* Special case for cache aliasing.
+	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preemptively interrupted
	 * by another task.  Re-establish temporary mapping to the
@@ -1734,24 +1738,24 @@ ENTRY(fast_second_level_miss)
	/* We shouldn't be in a double exception */
 
	l32i	a0, a2, PT_DEPC
-	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow
 
	/* Make sure the exception originated in the special functions */
 
	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
-	bltu	a3, a0, 2f
+	bltu	a3, a0, .Lfast_second_level_miss_slow
	movi	a0, __tlbtemp_mapping_end
-	bgeu	a3, a0, 2f
+	bgeu	a3, a0, .Lfast_second_level_miss_slow
 
	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
 
	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
-	bltu	a0, a3, 2f
+	bltu	a0, a3, .Lfast_second_level_miss_slow
 
	addi	a1, a0, -TLBTEMP_SIZE
-	bgeu	a1, a3, 2f
+	bgeu	a1, a3, .Lfast_second_level_miss_slow
 
	/* Check if we have to restore an ITLB mapping. */
 
@@ -1777,19 +1781,19 @@ ENTRY(fast_second_level_miss)
 
	mov	a0, a6
	movnez	a0, a7, a3
-	j	3b
+	j	.Lfast_second_level_miss_wdtlb
 
	/* ITLB entry. We only use dst in a6. */
 
1:	witlb	a6, a1
	isync
-	j	4b
+	j	.Lfast_second_level_miss_skip_wdtlb
 
 #endif	// DCACHE_WAY_SIZE > PAGE_SIZE
 
-
-2:	/* Invalid PGD, default exception handling */
+	/* Invalid PGD, default exception handling */
+.Lfast_second_level_miss_slow:
 
	rsr	a1, depc
	s32i	a1, a2, PT_AREG2
@@ -1829,12 +1833,13 @@ ENTRY(fast_store_prohibited)
 
	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
-	beqz	a0, 9f
+	beqz	a0, .Lfast_store_no_mm
 
-8:	rsr	a1, excvaddr		# fault address
+.Lfast_store_continue:
+	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a3)
	l32i	a0, a0, 0
-	beqz	a0, 2f
+	beqz	a0, .Lfast_store_slow
 
	/*
	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
@@ -1844,8 +1849,8 @@ ENTRY(fast_store_prohibited)
	_PTE_OFFSET(a0, a1, a3)
	l32i	a3, a0, 0		# read pteval
	movi	a1, _PAGE_CA_INVALID
-	ball	a3, a1, 2f
-	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f
+	ball	a3, a1, .Lfast_store_slow
+	bbci.l	a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow
 
	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a3, a3, a1
@@ -1873,7 +1878,6 @@ ENTRY(fast_store_prohibited)
 
	l32i	a2, a2, PT_DEPC
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-
	rsr	a2, depc
	rfe
 
@@ -1883,11 +1887,17 @@ ENTRY(fast_store_prohibited)
	esync
	rfde
 
-9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
-	j	8b
-
-2:	/* If there was a problem, handle fault in C */
+.Lfast_store_no_mm:
+	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	j	.Lfast_store_continue
 
+	/* If there was a problem, handle fault in C */
+.Lfast_store_slow:
+	rsr	a1, excvaddr
+	pdtlb	a0, a1
+	bbci.l	a0, DTLB_HIT_BIT, 1f
+	idtlb	a0
+1:
	rsr	a3, depc	# still holds a2
	s32i	a3, a2, PT_AREG2
	mov	a1, a2
@@ -2076,8 +2086,16 @@ ENTRY(_switch_to)
 
 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
-	xsr	a3, cpenable
-	s32i	a3, a4, THREAD_CPENABLE
+#ifdef CONFIG_SMP
+	beqz	a3, 1f
+	memw	# pairs with memw (2) in fast_coprocessor
+	l32i	a6, a5, THREAD_CP_OWNER_CPU
+	l32i	a7, a5, THREAD_CPU
+	beq	a6, a7, 1f	# load 0 into CPENABLE if current CPU is not the owner
+	movi	a3, 0
+1:
+#endif
+	wsr	a3, cpenable
 #endif
 
 #if XCHAL_HAVE_EXCLUSIVE
@@ -2152,3 +2170,95 @@ ENTRY(ret_from_kernel_thread)
	j	common_exception_return
 
 ENDPROC(ret_from_kernel_thread)
+
+#ifdef CONFIG_HIBERNATION
+
+	.section	.bss, "aw"
+	.align	4
+.Lsaved_regs:
+#if defined(__XTENSA_WINDOWED_ABI__)
+	.fill	2, 4
+#elif defined(__XTENSA_CALL0_ABI__)
+	.fill	6, 4
+#else
+#error Unsupported Xtensa ABI
+#endif
+	.align	XCHAL_NCP_SA_ALIGN
+.Lsaved_user_regs:
+	.fill	XTREGS_USER_SIZE, 1
+
+	.previous
+
+ENTRY(swsusp_arch_suspend)
+
+	abi_entry_default
+
+	movi	a2, .Lsaved_regs
+	movi	a3, .Lsaved_user_regs
+	s32i	a0, a2, 0
+	s32i	a1, a2, 4
+	save_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_WINDOWED_ABI__)
+	spill_registers_kernel
+#elif defined(__XTENSA_CALL0_ABI__)
+	s32i	a12, a2, 8
+	s32i	a13, a2, 12
+	s32i	a14, a2, 16
+	s32i	a15, a2, 20
+#else
+#error Unsupported Xtensa ABI
+#endif
+	abi_call	swsusp_save
+	mov	a2, abi_rv
+	abi_ret_default
+
+ENDPROC(swsusp_arch_suspend)
+
+ENTRY(swsusp_arch_resume)
+
+	abi_entry_default
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+	spill_registers_kernel
+#endif
+
+	movi	a2, restore_pblist
+	l32i	a2, a2, 0
+
+.Lcopy_pbe:
+	l32i	a3, a2, PBE_ADDRESS
+	l32i	a4, a2, PBE_ORIG_ADDRESS
+
+	__loopi	a3, a9, PAGE_SIZE, 16
+	l32i	a5, a3, 0
+	l32i	a6, a3, 4
+	l32i	a7, a3, 8
+	l32i	a8, a3, 12
+	addi	a3, a3, 16
+	s32i	a5, a4, 0
+	s32i	a6, a4, 4
+	s32i	a7, a4, 8
+	s32i	a8, a4, 12
+	addi	a4, a4, 16
+	__endl	a3, a9
+
+	l32i	a2, a2, PBE_NEXT
+	bnez	a2, .Lcopy_pbe
+
+	movi	a2, .Lsaved_regs
+	movi	a3, .Lsaved_user_regs
+	l32i	a0, a2, 0
+	l32i	a1, a2, 4
+	load_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_CALL0_ABI__)
+	l32i	a12, a2, 8
+	l32i	a13, a2, 12
+	l32i	a14, a2, 16
+	l32i	a15, a2, 20
+#endif
+	movi	a2, 0
+	abi_ret_default
+
+ENDPROC(swsusp_arch_resume)
+
+#endif
diff --git a/arch/xtensa/kernel/hibernate.c b/arch/xtensa/kernel/hibernate.c
new file mode 100644
index 000000000000..06984327d6e2
--- /dev/null
+++ b/arch/xtensa/kernel/hibernate.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <asm/coprocessor.h>
+
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+	unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
+
+	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+#if XTENSA_HAVE_COPROCESSORS
+	local_coprocessors_flush_release_all();
+#endif
+}
+
+void notrace restore_processor_state(void)
+{
+}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 15051a8a1539..42f106004400 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -36,9 +36,8 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
-		unsigned long sp;
+		unsigned long sp = current_stack_pointer;
 
-		__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
		sp &= THREAD_SIZE - 1;
 
		if (unlikely(sp < (sizeof(thread_info) + 1024)))
@@ -170,7 +169,7 @@ void migrate_irqs(void)
 
	for_each_active_irq(i) {
		struct irq_data *data = irq_get_irq_data(i);
-		struct cpumask *mask;
+		const struct cpumask *mask;
		unsigned int newcpu;
 
		if (irqd_is_per_cpu(data))
@@ -186,9 +185,10 @@ void migrate_irqs(void)
			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    i, cpu);
 
-			cpumask_setall(mask);
+			irq_set_affinity(i, cpu_all_mask);
+		} else {
+			irq_set_affinity(i, mask);
		}
-		irq_set_affinity(i, mask);
	}
 }
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
index 61cf6497a646..ad1841cecdfb 100644
--- a/arch/xtensa/kernel/jump_label.c
+++ b/arch/xtensa/kernel/jump_label.c
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
 {
	struct patch *patch = data;
 
-	if (atomic_inc_return(&patch->cpu_count) == 1) {
+	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		local_patch_text(patch->addr, patch->data, patch->sz);
		atomic_inc(&patch->cpu_count);
	} else {
@@ -61,7 +61,7 @@ static void patch_text(unsigned long addr, const void *data, size_t sz)
			.data = data,
		};
		stop_machine_cpuslocked(patch_text_stop_machine,
-					&patch, NULL);
+					&patch, cpu_online_mask);
	} else {
		unsigned long flags;
 
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
index 9f3843742726..b702c0908b1f 100644
--- a/arch/xtensa/kernel/mxhead.S
+++ b/arch/xtensa/kernel/mxhead.S
@@ -37,11 +37,13 @@ _SetupOCD:
	 * xt-gdb to single step via DEBUG exceptions received directly
	 * by ocd.
	 */
+#if XCHAL_HAVE_WINDOWED
	movi	a1, 1
	movi	a0, 0
	wsr	a1, windowstart
	wsr	a0, windowbase
	rsync
+#endif
 
	movi	a1, LOCKLEVEL
	wsr	a1, ps
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index bd80df890b1e..68e0e2f06d66 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -47,6 +47,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/regs.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
 
 extern void ret_from_fork(void);
 extern void ret_from_kernel_thread(void);
@@ -63,52 +64,114 @@ EXPORT_SYMBOL(__stack_chk_guard);
 
 #if XTENSA_HAVE_COPROCESSORS
 
-void coprocessor_release_all(struct thread_info *ti)
+void local_coprocessors_flush_release_all(void)
 {
-	unsigned long cpenable;
-	int i;
+	struct thread_info **coprocessor_owner;
+	struct thread_info *unique_owner[XCHAL_CP_MAX];
+	int n = 0;
+	int i, j;
 
-	/* Make sure we don't switch tasks during this operation. */
+	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+	xtensa_set_sr(XCHAL_CP_MASK, cpenable);
 
-	preempt_disable();
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		struct thread_info *ti = coprocessor_owner[i];
 
-	/* Walk through all cp owners and release it for the requested one. */
+		if (ti) {
+			coprocessor_flush(ti, i);
 
-	cpenable = ti->cpenable;
+			for (j = 0; j < n; j++)
+				if (unique_owner[j] == ti)
+					break;
+			if (j == n)
+				unique_owner[n++] = ti;
 
-	for (i = 0; i < XCHAL_CP_MAX; i++) {
-		if (coprocessor_owner[i] == ti) {
-			coprocessor_owner[i] = 0;
-			cpenable &= ~(1 << i);
+			coprocessor_owner[i] = NULL;
		}
	}
+	for (i = 0; i < n; i++) {
+		/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+		smp_wmb();
+		unique_owner[i]->cpenable = 0;
+	}
+	xtensa_set_sr(0, cpenable);
+}
 
-	ti->cpenable = cpenable;
+static void local_coprocessor_release_all(void *info)
+{
+	struct thread_info *ti = info;
+	struct thread_info **coprocessor_owner;
+	int i;
+
+	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+
+	/* Walk through all cp owners and release it for the requested one. */
+
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		if (coprocessor_owner[i] == ti)
+			coprocessor_owner[i] = NULL;
+	}
+	/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+	smp_wmb();
+	ti->cpenable = 0;
	if (ti == current_thread_info())
		xtensa_set_sr(0, cpenable);
+}
 
-	preempt_enable();
+void coprocessor_release_all(struct thread_info *ti)
+{
+	if (ti->cpenable) {
+		/* pairs with memw (2) in fast_coprocessor */
+		smp_rmb();
+		smp_call_function_single(ti->cp_owner_cpu,
+					 local_coprocessor_release_all,
+					 ti, true);
+	}
 }
 
-void coprocessor_flush_all(struct thread_info *ti)
+static void local_coprocessor_flush_all(void *info)
 {
-	unsigned long cpenable, old_cpenable;
+	struct thread_info *ti = info;
+	struct thread_info **coprocessor_owner;
+	unsigned long old_cpenable;
	int i;
 
-	preempt_disable();
-
-	old_cpenable = xtensa_get_sr(cpenable);
-	cpenable = ti->cpenable;
-	xtensa_set_sr(cpenable, cpenable);
+	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+	old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
 
	for (i = 0; i < XCHAL_CP_MAX; i++) {
-		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+		if (coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
-		cpenable >>= 1;
	}
	xtensa_set_sr(old_cpenable, cpenable);
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+	if (ti->cpenable) {
+		/* pairs with memw (2) in fast_coprocessor */
+		smp_rmb();
+		smp_call_function_single(ti->cp_owner_cpu,
+					 local_coprocessor_flush_all,
+					 ti, true);
+	}
+}
 
-	preempt_enable();
+static void local_coprocessor_flush_release_all(void *info)
+{
+	local_coprocessor_flush_all(info);
+	local_coprocessor_release_all(info);
+}
+
+void coprocessor_flush_release_all(struct thread_info *ti)
+{
+	if (ti->cpenable) {
+		/* pairs with memw (2) in fast_coprocessor */
+		smp_rmb();
+		smp_call_function_single(ti->cp_owner_cpu,
+					 local_coprocessor_flush_release_all,
+					 ti, true);
+	}
 }
 
 #endif
@@ -140,8 +203,7 @@ void flush_thread(void)
 {
 #if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = current_thread_info();
-	coprocessor_flush_all(ti);
-	coprocessor_release_all(ti);
+	coprocessor_flush_release_all(ti);
 #endif
	flush_ptrace_hw_breakpoint(current);
 }
@@ -201,10 +263,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  *       involved.  Much simpler to just not copy those live frames across.
  */
 
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
-		unsigned long thread_fn_arg, struct task_struct *p,
-		unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+	unsigned long clone_flags = args->flags;
+	unsigned long usp_thread_fn = args->stack;
+	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);
 
 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
@@ -224,7 +287,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 #error Unsupported Xtensa ABI
 #endif
 
-	if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (!args->fn) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long usp = usp_thread_fn ?
			usp_thread_fn : regs->areg[1];
@@ -232,10 +295,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_fork, 0x1);
 
-		/* This does not copy all the regs.
-		 * In a bout of brilliance or madness,
-		 * ARs beyond a0-a15 exist past the end of the struct.
-		 */
		*childregs = *regs;
		childregs->areg[1] = usp;
		childregs->areg[2] = 0;
@@ -265,14 +324,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
			childregs->wmask = 1;
			childregs->windowstart = 1;
			childregs->windowbase = 0;
-		} else {
-			int len = childregs->wmask & ~0xf;
-			memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
-			       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
		}
 
-		childregs->syscall = regs->syscall;
-
		if (clone_flags & CLONE_SETTLS)
			childregs->threadptr = tls;
	} else {
@@ -286,15 +339,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
		 * Window underflow will load registers from the
		 * spill slots on the stack on return from _switch_to.
		 */
-		SPILL_SLOT(childregs, 2) = usp_thread_fn;
-		SPILL_SLOT(childregs, 3) = thread_fn_arg;
+		SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
+		SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
 #elif defined(__XTENSA_CALL0_ABI__)
		/*
		 * a12 = thread_fn, a13 = thread_fn arg.
		 * _switch_to epilogue will load registers from the stack.
		 */
-		((unsigned long *)p->thread.sp)[0] = usp_thread_fn;
-		((unsigned long *)p->thread.sp)[1] = thread_fn_arg;
+		((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
+		((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
 #else
 #error Unsupported Xtensa ABI
 #endif
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index bb3f4797d212..f29477162ede 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -26,7 +26,6 @@
 #include <linux/security.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
-#include <linux/tracehook.h>
 #include <linux/uaccess.h>
 
 #define CREATE_TRACE_POINTS
@@ -172,8 +171,7 @@ static int tie_set(struct task_struct *target,
 
 #if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessors before we overwrite them. */
-	coprocessor_flush_all(ti);
-	coprocessor_release_all(ti);
+	coprocessor_flush_release_all(ti);
	ti->xtregs_cp.cp0 = newregs->cp0;
	ti->xtregs_cp.cp1 = newregs->cp1;
	ti->xtregs_cp.cp2 = newregs->cp2;
@@ -226,12 +224,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 
 void user_enable_single_step(struct task_struct *child)
 {
-	child->ptrace |= PT_SINGLESTEP;
+	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
-	child->ptrace &= ~PT_SINGLESTEP;
+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
 /*
@@ -550,7 +548,7 @@ int do_syscall_trace_enter(struct pt_regs *regs)
		regs->areg[2] = -ENOSYS;
 
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs)) {
+	    ptrace_report_syscall_entry(regs)) {
		regs->areg[2] = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
@@ -583,5 +581,5 @@ void do_syscall_trace_leave(struct pt_regs *regs)
	step = test_thread_flag(TIF_SINGLESTEP);
 
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
-		tracehook_report_syscall_exit(regs, step);
+		ptrace_report_syscall_exit(regs, step);
 }
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
index 07e56e3a9a8b..8362388c8719 100644
--- a/arch/xtensa/kernel/s32c1i_selftest.c
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -40,14 +40,13 @@ static inline int probed_compare_swap(int *v, int cmp, int set)
 
 /* Handle probed exception */
 
-static void __init do_probed_exception(struct pt_regs *regs,
-				       unsigned long exccause)
+static void __init do_probed_exception(struct pt_regs *regs)
 {
	if (regs->pc == rcw_probe_pc) {	/* exception on s32c1i ? */
		regs->pc += 3;		/* skip the s32c1i instruction */
-		rcw_exc = exccause;
+		rcw_exc = regs->exccause;
	} else {
-		do_unhandled(regs, exccause);
+		do_unhandled(regs);
	}
 }
 
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 8db20cfb44ab..9191738f9941 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -140,7 +140,7 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
 
 static int __init parse_tag_cmdline(const bp_tag_t* tag)
 {
-	strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+	strscpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
	return 0;
 }
 
@@ -230,7 +230,7 @@ void __init early_init_devtree(void *params)
	of_scan_flat_dt(xtensa_dt_io_area, NULL);
 
	if (!command_line[0])
-		strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+		strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 }
 
 #endif /* CONFIG_USE_OF */
@@ -260,7 +260,7 @@ void __init init_arch(bp_tag_t *bp_start)
 
 #ifdef CONFIG_CMDLINE_BOOL
	if (!command_line[0])
-		strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+		strscpy(command_line, default_command_line, COMMAND_LINE_SIZE);
 #endif
 
	/* Early hook for platforms */
@@ -289,7 +289,7 @@ void __init setup_arch(char **cmdline_p)
	*cmdline_p = command_line;
	platform_setup(cmdline_p);
 
-	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+	strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 
	/* Reserve some memory regions */
 
@@ -349,7 +349,7 @@ void __init setup_arch(char **cmdline_p)
 
 #endif /* CONFIG_VECTORS_ADDR */
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_SECONDARY_RESET_VECTOR
	mem_reserve(__pa(_SecondaryResetVector_text_start),
		    __pa(_SecondaryResetVector_text_end));
 #endif
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index f6c949895b3e..876d5df157ed 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -19,7 +19,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
-#include <linux/tracehook.h>
+#include <linux/resume_user_mode.h>
 #include <linux/sched/task_stack.h>
 
 #include <asm/ucontext.h>
@@ -162,8 +162,7 @@ setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
		return err;
 
 #if XTENSA_HAVE_COPROCESSORS
-	coprocessor_flush_all(ti);
-	coprocessor_release_all(ti);
+	coprocessor_flush_release_all(ti);
	err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
			      sizeof (frame->xtregs.cp));
 #endif
@@ -473,7 +472,7 @@ static void do_signal(struct pt_regs *regs)
		/* Set up the stack frame */
		ret = setup_frame(&ksig, sigmask_to_save(), regs);
		signal_setup_done(ret, &ksig, 0);
-		if (current->ptrace & PT_SINGLESTEP)
+		if (test_thread_flag(TIF_SINGLESTEP))
			task_pt_regs(current)->icountlevel = 1;
 
		return;
@@ -499,7 +498,7 @@ static void do_signal(struct pt_regs *regs)
	/* If there's no signal to deliver, we just restore the saved mask. */
	restore_saved_sigmask();
-	if (current->ptrace & PT_SINGLESTEP)
+	if (test_thread_flag(TIF_SINGLESTEP))
		task_pt_regs(current)->icountlevel = 1;
	return;
 }
@@ -511,5 +510,5 @@ void do_notify_resume(struct pt_regs *regs)
		do_signal(regs);
 
	if (test_thread_flag(TIF_NOTIFY_RESUME))
-		tracehook_notify_resume(regs);
+		resume_user_mode_work(regs);
 }
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 1254da07ead1..4dc109dd6214 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -30,6 +30,7 @@
 #include <linux/thread_info.h>
 
 #include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
 #include <asm/kdebug.h>
 #include <asm/mmu_context.h>
 #include <asm/mxregs.h>
@@ -272,6 +273,12 @@ int __cpu_disable(void)
	 */
	set_cpu_online(cpu, false);
 
+#if XTENSA_HAVE_COPROCESSORS
+	/*
+	 * Flush coprocessor contexts that are active on the current CPU.
+	 */
+	local_coprocessors_flush_release_all();
+#endif
	/*
	 * OK - migrate IRQs away from this CPU
	 */
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 201356faa7e6..b3c2450d6f23 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -58,6 +58,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
	struct vm_area_struct *vmm;
+	struct vma_iterator vmi;
 
	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
@@ -79,15 +80,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	else
		addr = PAGE_ALIGN(addr);
 
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point: (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vm_start_gap(vmm))
-			return addr;
+	vma_iter_init(&vmi, current->mm, addr);
+	for_each_vma(vmi, vmm) {
+		/* At this point: (addr < vmm->vm_end). */
+		if (addr + len <= vm_start_gap(vmm))
+			break;
+
+		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
+
+	if (TASK_SIZE - len < addr)
+		return -ENOMEM;
+
+	return addr;
 }
 #endif
diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile
index 6713c65a25e1..b265e4bc16c2 100644
--- a/arch/xtensa/kernel/syscalls/Makefile
+++ b/arch/xtensa/kernel/syscalls/Makefile
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
-	  $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 3e3e1a506bed..52c94ab5c205 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -420,3 +420,4 @@
 # 447 reserved for memfd_secret
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
+450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index e8ceb1528608..16b8a6273772 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -154,6 +154,7 @@ static void __init calibrate_ccount(void)
	cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
	if (cpu) {
		clk = of_clk_get(cpu, 0);
+		of_node_put(cpu);
		if (!IS_ERR(clk)) {
			ccount_freq = clk_get_rate(clk);
			return;
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 4b4dbeb2d612..0c25e035ff10 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -48,25 +48,20 @@
  * Machine specific interrupt handlers
  */
 
-extern void kernel_exception(void);
-extern void user_exception(void);
-
-extern void fast_illegal_instruction_user(void);
-extern void fast_syscall_user(void);
-extern void fast_alloca(void);
-extern void fast_unaligned(void);
-extern void fast_second_level_miss(void);
-extern void fast_store_prohibited(void);
-extern void fast_coprocessor(void);
-
-extern void do_illegal_instruction (struct pt_regs*);
-extern void do_interrupt (struct pt_regs*);
-extern void do_nmi(struct pt_regs *);
-extern void do_unaligned_user (struct pt_regs*);
-extern void do_multihit (struct pt_regs*, unsigned long);
-extern void do_page_fault (struct pt_regs*, unsigned long);
-extern void do_debug (struct pt_regs*);
-extern void system_call (struct pt_regs*);
+static void do_illegal_instruction(struct pt_regs *regs);
+static void do_div0(struct pt_regs *regs);
+static void do_interrupt(struct pt_regs *regs);
+#if XTENSA_FAKE_NMI
+static void do_nmi(struct pt_regs *regs);
+#endif
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+static void do_unaligned_user(struct pt_regs *regs);
+#endif
+static void do_multihit(struct pt_regs *regs);
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs);
+#endif
+static void do_debug(struct pt_regs *regs);
 
 /*
  * The vector table must be preceded by a save area (which
@@ -78,7 +73,8 @@ extern void system_call (struct pt_regs*);
 #define USER		0x02
 
 #define COPROCESSOR(x)							\
-{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }
 
 typedef struct {
	int cause;
@@ -100,7 +96,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
 #ifdef SUPPORT_WINDOWED
 { EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
 #endif
-/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0,	   do_div0 },
 /* EXCCAUSE_PRIVILEGED unhandled */
 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
 #ifdef CONFIG_XTENSA_UNALIGNED_USER
@@ -110,21 +106,21 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
 { EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
 #endif
 #ifdef CONFIG_MMU
-{ EXCCAUSE_ITLB_MISS,			0,	   do_page_fault },
-{ EXCCAUSE_ITLB_MISS,			USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_ITLB_MISS,			0,	   do_page_fault },
+{ EXCCAUSE_ITLB_MISS,			USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS,			USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS,			0,	   do_page_fault },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_PFAULT
 { EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
-{ EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
-/* EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_ITLB_PRIVILEGE,		0,	   do_page_fault },
 { EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
-{ EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
-{ EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
 { EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
-{ EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
-/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
-{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
+{ EXCCAUSE_DTLB_PRIVILEGE,		0,	   do_page_fault },
 { EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
 { EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
-#endif /* CONFIG_MMU */
+#endif
 /* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
 #if XTENSA_HAVE_COPROCESSOR(0)
 COPROCESSOR(0),
@@ -179,7 +175,7 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
  * Unhandled Exceptions. Kill user task or panic if in kernel space.
  */
 
-void do_unhandled(struct pt_regs *regs, unsigned long exccause)
+void do_unhandled(struct pt_regs *regs)
 {
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);
@@ -189,7 +185,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
			    "(pid = %d, pc = %#010lx) - should not happen\n"
			    "\tEXCCAUSE is %ld\n",
			    current->comm, task_pid_nr(current), regs->pc,
-			    exccause);
+			    regs->exccause);
	force_sig(SIGILL);
 }
 
@@ -197,7 +193,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
  * Multi-hit exception. This if fatal!
  */
 
-void do_multihit(struct pt_regs *regs, unsigned long exccause)
+static void do_multihit(struct pt_regs *regs)
 {
	die("Caught multihit exception", regs, SIGKILL);
 }
@@ -206,8 +202,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
  * IRQ handler.
  */
 
-extern void do_IRQ(int, struct pt_regs *);
-
 #if XTENSA_FAKE_NMI
 
 #define IS_POW2(v) (((v) & ((v) - 1)) == 0)
@@ -240,14 +234,10 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
 
 DEFINE_PER_CPU(unsigned long, nmi_count);
 
-void do_nmi(struct pt_regs *regs)
+static void do_nmi(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs;
-
-	if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
-		trace_hardirqs_off();
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	old_regs = set_irq_regs(regs);
	nmi_enter();
	++*this_cpu_ptr(&nmi_count);
	check_valid_nmi();
@@ -257,7 +247,7 @@ void do_nmi(struct pt_regs *regs)
 }
 #endif
 
-void do_interrupt(struct pt_regs *regs)
+static void do_interrupt(struct pt_regs *regs)
 {
	static const unsigned int_level_mask[] = {
		0,
@@ -269,12 +259,9 @@ void do_interrupt(struct pt_regs *regs)
		XCHAL_INTLEVEL6_MASK,
		XCHAL_INTLEVEL7_MASK,
	};
-	struct pt_regs *old_regs;
+	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned unhandled = ~0u;
 
-	trace_hardirqs_off();
-
-	old_regs = set_irq_regs(regs);
	irq_enter();
 
	for (;;) {
@@ -306,13 +293,47 @@ void do_interrupt(struct pt_regs *regs)
	set_irq_regs(old_regs);
 }
 
+static bool check_div0(struct pt_regs *regs)
+{
+	static const u8 pattern[] = {'D', 'I', 'V', '0'};
+	const u8 *p;
+	u8 buf[5];
+
+	if (user_mode(regs)) {
+		if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
+			return false;
+		p = buf;
+	} else {
+		p = (const u8 *)regs->pc + 2;
+	}
+
+	return memcmp(p, pattern, sizeof(pattern)) == 0 ||
+		memcmp(p + 1, pattern, sizeof(pattern)) == 0;
+}
+
 /*
  * Illegal instruction. Fatal if in kernel space.
  */
 
-void
-do_illegal_instruction(struct pt_regs *regs)
+static void do_illegal_instruction(struct pt_regs *regs)
 {
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+	/*
+	 * When call0 application encounters an illegal instruction fast
+	 * exception handler will attempt to set PS.WOE and retry failing
+	 * instruction.
+	 * If we get here we know that that instruction is also illegal
+	 * with PS.WOE set, so it's not related to the windowed option
+	 * hence PS.WOE may be cleared.
+	 */
+	if (regs->pc == current_thread_info()->ps_woe_fix_addr)
+		regs->ps &= ~PS_WOE_MASK;
+#endif
+	if (check_div0(regs)) {
+		do_div0(regs);
+		return;
+	}
+
	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
 
	/* If in user mode, send SIGILL signal to current process. */
@@ -322,6 +343,11 @@ do_illegal_instruction(struct pt_regs *regs)
	force_sig(SIGILL);
 }
 
+static void do_div0(struct pt_regs *regs)
+{
+	__die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
+	force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
+}
 
 /*
  * Handle unaligned memory accesses from user space. Kill task.
@@ -331,8 +357,7 @@ do_illegal_instruction(struct pt_regs *regs)
  */
 
 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-void
-do_unaligned_user (struct pt_regs *regs)
+static void do_unaligned_user(struct pt_regs *regs)
 {
	__die_if_kernel("Unhandled unaligned exception in kernel",
			regs, SIGKILL);
@@ -347,14 +372,20 @@ do_unaligned_user (struct pt_regs *regs)
 }
 #endif
 
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs)
+{
+	coprocessor_flush_release_all(current_thread_info());
+}
+#endif
+
 /* Handle debug events.
  * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
  * preemption disabled to avoid rescheduling and keep mapping of hardware
  * breakpoint structures to debug registers intact, so that
  * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
  */
-void
-do_debug(struct pt_regs *regs)
+static void do_debug(struct pt_regs *regs)
 {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = check_hw_breakpoint(regs);
@@ -381,7 +412,8 @@ do_debug(struct pt_regs *regs)
 
 /* Set exception C handler - for temporary use when probing exceptions */
 
-void * __init trap_set_handler(int cause, void *handler)
+xtensa_exception_handler *
+__init trap_set_handler(int cause, xtensa_exception_handler *handler)
 {
	void *previous = per_cpu(exc_table, 0).default_handler[cause];
 
@@ -392,8 +424,7 @@ void * __init trap_set_handler(int cause, void *handler)
 
 static void trap_init_excsave(void)
 {
-	unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
-	__asm__ __volatile__("wsr  %0, excsave1\n" : : "a" (excsave1));
+	xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
 }
 
 static void trap_init_debug(void)
@@ -552,5 +583,5 @@ void __noreturn die(const char * str, struct pt_regs * regs, long err)
	if (panic_on_oops)
		panic("Fatal exception");
 
-	do_exit(err);
+	make_task_dead(err);
 }
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 407ece204e7c..1073fe4a584d 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -88,7 +88,7 @@ ENDPROC(_UserExceptionVector)
  * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
  *
  * We get this exception when we were already in kernel space.
- * We decrement the current stack pointer (kernel) by PT_SIZE and
+ * We decrement the current stack pointer (kernel) by PT_KERNEL_SIZE and
  * jump to the first-level handler associated with the exception cause.
  *
  * Note: we need to preserve space for the spill region.
@@ -100,7 +100,7 @@ ENTRY(_KernelExceptionVector)
	xsr	a3, excsave1		# save a3, and get dispatch table
	wsr	a2, depc		# save a2
 
-	addi	a2, a1, -16-PT_SIZE	# adjust stack pointer
+	addi	a2, a1, -16 - PT_KERNEL_SIZE	# adjust stack pointer
	s32i	a0, a2, PT_AREG0	# save a0 to ESF
	rsr	a0, exccause		# retrieve exception cause
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index eee270a039a4..965a3952c47b 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -207,7 +207,7 @@ SECTIONS
		RELOCATE_ENTRY(_xip_data, .data);
		RELOCATE_ENTRY(_xip_init_data, .init.data);
 #endif
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
		RELOCATE_ENTRY(_SecondaryResetVector_text,
			       .SecondaryResetVector.text);
 #endif
@@ -303,7 +303,7 @@ SECTIONS
 #define LAST	.DoubleExceptionVector.text
 #endif
 
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
 
	SECTION_VECTOR4 (_SecondaryResetVector_text,
			 .SecondaryResetVector.text,
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index d79edbb98d2a..b0bc8897c924 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -59,32 +59,18 @@ extern long long __ashldi3(long long, int);
 extern long long __lshrdi3(long long, int);
 extern int __divsi3(int, int);
 extern int __modsi3(int, int);
-extern long long __muldi3(long long, long long);
 extern int __mulsi3(int, int);
 extern unsigned int __udivsi3(unsigned int, unsigned int);
 extern unsigned int __umodsi3(unsigned int, unsigned int);
-extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
-extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
-extern int __ucmpdi2(int, int);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__divsi3);
 EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
 EXPORT_SYMBOL(__mulsi3);
 EXPORT_SYMBOL(__udivsi3);
 EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__ucmpdi2);
-
-void __xtensa_libgcc_window_spill(void)
-{
-	BUG();
-}
-EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
 
 unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
 {
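
Note: a side effect of the traps.c rework is that C-level exception handlers are now
typed (xtensa_exception_handler) and read the cause from regs->exccause instead of a
second argument. Below is a minimal sketch of installing a temporary probe handler in
the style of s32c1i_selftest.c; probe_pc, seen_exccause and run_probe() are hypothetical
names used only for this illustration, and the probed instruction is assumed to be
3 bytes long as in the selftest.

    /* Sketch: temporarily route an exception cause to a C probe handler. */
    static unsigned long probe_pc;
    static unsigned long seen_exccause;

    static void __init probe_handler(struct pt_regs *regs)
    {
            if (regs->pc == probe_pc) {
                    regs->pc += 3;                  /* skip the probed insn */
                    seen_exccause = regs->exccause; /* cause now comes from pt_regs */
            } else {
                    do_unhandled(regs);
            }
    }

    static void __init run_probe(void)
    {
            xtensa_exception_handler *old;

            old = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, probe_handler);
            /* ... execute the instruction under test at probe_pc ... */
            trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, old);
    }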