Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/Kconfig | 75
-rw-r--r--  arch/xtensa/Makefile | 14
-rw-r--r--  arch/xtensa/boot/dts/Makefile | 5
-rw-r--r--  arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi | 8
-rw-r--r--  arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi | 8
-rw-r--r--  arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi | 4
-rw-r--r--  arch/xtensa/boot/lib/Makefile | 3
-rw-r--r--  arch/xtensa/configs/audio_kc705_defconfig | 2
-rw-r--r--  arch/xtensa/configs/cadence_csp_defconfig | 2
-rw-r--r--  arch/xtensa/configs/generic_kc705_defconfig | 2
-rw-r--r--  arch/xtensa/configs/nommu_kc705_defconfig | 2
-rw-r--r--  arch/xtensa/configs/smp_lx200_defconfig | 2
-rw-r--r--  arch/xtensa/configs/virt_defconfig | 2
-rw-r--r--  arch/xtensa/configs/xip_kc705_defconfig | 2
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 1
-rw-r--r--  arch/xtensa/include/asm/asm-uaccess.h | 71
-rw-r--r--  arch/xtensa/include/asm/asmmacro.h | 34
-rw-r--r--  arch/xtensa/include/asm/barrier.h | 12
-rw-r--r--  arch/xtensa/include/asm/bitops.h | 11
-rw-r--r--  arch/xtensa/include/asm/bootparam.h | 2
-rw-r--r--  arch/xtensa/include/asm/coprocessor.h | 11
-rw-r--r--  arch/xtensa/include/asm/core.h | 7
-rw-r--r--  arch/xtensa/include/asm/current.h | 2
-rw-r--r--  arch/xtensa/include/asm/dma.h | 7
-rw-r--r--  arch/xtensa/include/asm/elf.h | 24
-rw-r--r--  arch/xtensa/include/asm/io.h | 3
-rw-r--r--  arch/xtensa/include/asm/pci-bridge.h | 9
-rw-r--r--  arch/xtensa/include/asm/pci.h | 3
-rw-r--r--  arch/xtensa/include/asm/pgalloc.h | 2
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 24
-rw-r--r--  arch/xtensa/include/asm/processor.h | 30
-rw-r--r--  arch/xtensa/include/asm/ptrace.h | 7
-rw-r--r--  arch/xtensa/include/asm/sections.h | 2
-rw-r--r--  arch/xtensa/include/asm/stacktrace.h | 8
-rw-r--r--  arch/xtensa/include/asm/thread_info.h | 14
-rw-r--r--  arch/xtensa/include/asm/timex.h | 6
-rw-r--r--  arch/xtensa/include/asm/traps.h | 40
-rw-r--r--  arch/xtensa/include/asm/uaccess.h | 26
-rw-r--r--  arch/xtensa/include/uapi/asm/mman.h | 4
-rw-r--r--  arch/xtensa/include/uapi/asm/ptrace.h | 4
-rw-r--r--  arch/xtensa/include/uapi/asm/shmbuf.h | 5
-rw-r--r--  arch/xtensa/include/uapi/asm/signal.h | 2
-rw-r--r--  arch/xtensa/include/uapi/asm/termbits.h | 221
-rw-r--r--  arch/xtensa/kernel/Makefile | 8
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c | 24
-rw-r--r--  arch/xtensa/kernel/coprocessor.S | 234
-rw-r--r--  arch/xtensa/kernel/entry.S | 376
-rw-r--r--  arch/xtensa/kernel/hibernate.c | 25
-rw-r--r--  arch/xtensa/kernel/irq.c | 10
-rw-r--r--  arch/xtensa/kernel/jump_label.c | 4
-rw-r--r--  arch/xtensa/kernel/mxhead.S | 2
-rw-r--r--  arch/xtensa/kernel/process.c | 139
-rw-r--r--  arch/xtensa/kernel/ptrace.c | 12
-rw-r--r--  arch/xtensa/kernel/s32c1i_selftest.c | 7
-rw-r--r--  arch/xtensa/kernel/setup.c | 10
-rw-r--r--  arch/xtensa/kernel/signal.c | 11
-rw-r--r--  arch/xtensa/kernel/smp.c | 7
-rw-r--r--  arch/xtensa/kernel/syscall.c | 18
-rw-r--r--  arch/xtensa/kernel/syscalls/Makefile | 3
-rw-r--r--  arch/xtensa/kernel/syscalls/syscall.tbl | 1
-rw-r--r--  arch/xtensa/kernel/time.c | 1
-rw-r--r--  arch/xtensa/kernel/traps.c | 145
-rw-r--r--  arch/xtensa/kernel/vectors.S | 4
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c | 14
-rw-r--r--  arch/xtensa/lib/Makefile | 4
-rw-r--r--  arch/xtensa/lib/ashldi3.S | 28
-rw-r--r--  arch/xtensa/lib/ashrdi3.S | 28
-rw-r--r--  arch/xtensa/lib/divsi3.S | 74
-rw-r--r--  arch/xtensa/lib/kcsan-stubs.c | 54
-rw-r--r--  arch/xtensa/lib/lshrdi3.S | 28
-rw-r--r--  arch/xtensa/lib/memcopy.S | 20
-rw-r--r--  arch/xtensa/lib/modsi3.S | 87
-rw-r--r--  arch/xtensa/lib/mulsi3.S | 133
-rw-r--r--  arch/xtensa/lib/udivsi3.S | 68
-rw-r--r--  arch/xtensa/lib/umodsi3.S | 57
-rw-r--r--  arch/xtensa/mm/Makefile | 3
-rw-r--r--  arch/xtensa/mm/fault.c | 133
-rw-r--r--  arch/xtensa/mm/init.c | 22
-rw-r--r--  arch/xtensa/mm/mmu.c | 2
-rw-r--r--  arch/xtensa/mm/tlb.c | 6
-rw-r--r--  arch/xtensa/platforms/iss/console.c | 8
-rw-r--r--  arch/xtensa/platforms/iss/network.c | 219
-rw-r--r--  arch/xtensa/platforms/iss/simdisk.c | 30
-rw-r--r--  arch/xtensa/platforms/xt2000/setup.c | 2
-rw-r--r--  arch/xtensa/platforms/xtfpga/setup.c | 1
86 files changed, 1716 insertions, 1073 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 8ac599aa6d99..bcb0c5d2abc2 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -3,7 +3,11 @@ config XTENSA
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_BINFMT_FLAT if !MMU
+ select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT if MMU
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_KCOV
select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
select ARCH_HAS_DMA_SET_UNCACHED if MMU
@@ -17,32 +21,37 @@ config XTENSA
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_REMAP if MMU
+ select DMA_NONCOHERENT_MMAP if MMU
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
+ select GENERIC_LIB_CMPDI2
+ select GENERIC_LIB_MULDI3
+ select GENERIC_LIB_UCMPDI2
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+ select HAVE_ARCH_KCSAN
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_CONTEXT_TRACKING_USER
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
+ select HAVE_GCC_PLUGINS if GCC_VERSION >= 120000
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select PERF_USE_VMALLOC
- select SET_FS
select TRACE_IRQFLAGS_SUPPORT
- select VIRT_TO_BUS
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
@@ -75,6 +84,7 @@ config STACKTRACE_SUPPORT
config MMU
def_bool n
+ select PFAULT
config HAVE_XTENSA_GPIO32
def_bool n
@@ -89,6 +99,9 @@ config CPU_BIG_ENDIAN
config CPU_LITTLE_ENDIAN
def_bool !CPU_BIG_ENDIAN
+config CC_HAVE_CALL0_ABI
+ def_bool $(success,test "$(shell,echo __XTENSA_CALL0_ABI__ | $(CC) -mabi=call0 -E -P - 2>/dev/null)" = 1)
+
menu "Processor type and features"
choice
@@ -171,6 +184,16 @@ config XTENSA_FAKE_NMI
If unsure, say N.
+config PFAULT
+ bool "Handle protection faults" if EXPERT && !MMU
+ default y
+ help
+ Handle protection faults. MMU configurations must enable it.
+ noMMU configurations may disable it if used memory map never
+ generates protection faults or faults are always fatal.
+
+ If unsure, say Y.
+
config XTENSA_UNALIGNED_USER
bool "Unaligned memory access in user space"
help
@@ -221,6 +244,15 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
+config SECONDARY_RESET_VECTOR
+ bool "Secondary cores use alternative reset vector"
+ default y
+ depends on HAVE_SMP
+ help
+ Secondary cores may be configured to use alternative reset vector,
+ or all cores may use primary reset vector.
+ Say Y here to supply handler for the alternative reset location.
+
config FAST_SYSCALL_XTENSA
bool "Enable fast atomic syscalls"
default n
@@ -247,6 +279,38 @@ config FAST_SYSCALL_SPILL_REGISTERS
If unsure, say N.
+choice
+ prompt "Kernel ABI"
+ default KERNEL_ABI_DEFAULT
+ help
+ Select ABI for the kernel code. This ABI is independent of the
+ supported userspace ABI and any combination of the
+ kernel/userspace ABI is possible and should work.
+
+ In case both kernel and userspace support only call0 ABI
+ all register windows support code will be omitted from the
+ build.
+
+ If unsure, choose the default ABI.
+
+config KERNEL_ABI_DEFAULT
+ bool "Default ABI"
+ help
+ Select this option to compile kernel code with the default ABI
+ selected for the toolchain.
+ Normally cores with windowed registers option use windowed ABI and
+ cores without it use call0 ABI.
+
+config KERNEL_ABI_CALL0
+ bool "Call0 ABI" if CC_HAVE_CALL0_ABI
+ help
+ Select this option to compile kernel code with call0 ABI even with
+ toolchain that defaults to windowed ABI.
+ When this option is not selected the default toolchain ABI will
+ be used for the kernel code.
+
+endchoice
+
config USER_ABI_CALL0
bool
@@ -707,7 +771,7 @@ config HIGHMEM
If unsure, say Y.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
default "11"
help
@@ -725,6 +789,9 @@ endmenu
menu "Power management options"
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y
+
source "kernel/power/Kconfig"
endmenu
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 9778216d6e09..bfd8e433ed62 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -12,7 +12,7 @@
# Core configuration.
# (Use VAR=<xtensa_config> to use another default compiler.)
-variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME))
+variant-y := $(CONFIG_XTENSA_VARIANT_NAME)
VARIANT = $(variant-y)
@@ -35,6 +35,10 @@ KBUILD_CFLAGS += -ffreestanding -D__linux__
KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
+ifneq ($(CONFIG_KERNEL_ABI_CALL0),)
+KBUILD_CFLAGS += -mabi=call0
+KBUILD_AFLAGS += -mabi=call0
+endif
KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
@@ -51,13 +55,7 @@ KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
KBUILD_DEFCONFIG := iss_defconfig
-# Find libgcc.a
-
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
-head-y := arch/xtensa/kernel/head.o
-
-libs-y += arch/xtensa/lib/ $(LIBGCC)
+libs-y += arch/xtensa/lib/
boot := arch/xtensa/boot
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index 0b8d00cdae7c..720628c0d8b9 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -7,10 +7,7 @@
#
#
-BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
-ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
-obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
-endif
+obj-$(CONFIG_OF) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_SOURCE))
# for CONFIG_OF_ALL_DTBS test
dtstree := $(srctree)/$(src)
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
index 9bf8bad1dd18..c33932568aa7 100644
--- a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
@@ -8,19 +8,19 @@
reg = <0x00000000 0x08000000>;
bank-width = <2>;
device-width = <2>;
- partition@0x0 {
+ partition@0 {
label = "data";
reg = <0x00000000 0x06000000>;
};
- partition@0x6000000 {
+ partition@6000000 {
label = "boot loader area";
reg = <0x06000000 0x00800000>;
};
- partition@0x6800000 {
+ partition@6800000 {
label = "kernel image";
reg = <0x06800000 0x017e0000>;
};
- partition@0x7fe0000 {
+ partition@7fe0000 {
label = "boot environment";
reg = <0x07fe0000 0x00020000>;
};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
index 40c2f81f7cb6..7bde2ab2d6fb 100644
--- a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
@@ -8,19 +8,19 @@
reg = <0x08000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
- partition@0x0 {
+ partition@0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
};
- partition@0x400000 {
+ partition@400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
- partition@0xa00000 {
+ partition@a00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
- partition@0xfe0000 {
+ partition@fe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
index fb8d3a9f33c2..0655b868749a 100644
--- a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
@@ -8,11 +8,11 @@
reg = <0x08000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
- partition@0x0 {
+ partition@0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
};
- partition@0x3f0000 {
+ partition@3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index e3d717c7bfa1..0378a22a08e3 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -16,6 +16,9 @@ CFLAGS_REMOVE_inffast.o = -pg
endif
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 3be62da8089b..ef0ebcfbccf9 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -120,7 +120,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index fc240737b14d..2665962d247a 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -100,7 +100,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index e9d6b6f6eca1..236c7f23cc10 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -107,7 +107,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index fcb620ef3799..8263da9e078d 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -105,7 +105,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
# CONFIG_FRAME_POINTER is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_VM=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index a47c85638ec1..7bdffa3a69c6 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -111,7 +111,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_VM=y
CONFIG_LOCKUP_DETECTOR=y
diff --git a/arch/xtensa/configs/virt_defconfig b/arch/xtensa/configs/virt_defconfig
index 6d1387dfa96f..98acb7191cb7 100644
--- a/arch/xtensa/configs/virt_defconfig
+++ b/arch/xtensa/configs/virt_defconfig
@@ -97,7 +97,7 @@ CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
index 062148e17135..1c3cebaaa71b 100644
--- a/arch/xtensa/configs/xip_kc705_defconfig
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -102,7 +102,7 @@ CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 854c5e07e867..fa07c686cbcc 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += param.h
+generic-y += parport.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += user.h
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
index 7f6cf4151843..7cec869136e3 100644
--- a/arch/xtensa/include/asm/asm-uaccess.h
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -24,76 +24,6 @@
#include <asm/processor.h>
/*
- * These assembly macros mirror the C macros in asm/uaccess.h. They
- * should always have identical functionality. See
- * arch/xtensa/kernel/sys.S for usage.
- */
-
-#define KERNEL_DS 0
-#define USER_DS 1
-
-/*
- * get_fs reads current->thread.current_ds into a register.
- * On Entry:
- * <ad> anything
- * <sp> stack
- * On Exit:
- * <ad> contains current->thread.current_ds
- */
- .macro get_fs ad, sp
- GET_CURRENT(\ad,\sp)
-#if THREAD_CURRENT_DS > 1020
- addi \ad, \ad, TASK_THREAD
- l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
-#else
- l32i \ad, \ad, THREAD_CURRENT_DS
-#endif
- .endm
-
-/*
- * set_fs sets current->thread.current_ds to some value.
- * On Entry:
- * <at> anything (temp register)
- * <av> value to write
- * <sp> stack
- * On Exit:
- * <at> destroyed (actually, current)
- * <av> preserved, value to write
- */
- .macro set_fs at, av, sp
- GET_CURRENT(\at,\sp)
- s32i \av, \at, THREAD_CURRENT_DS
- .endm
-
-/*
- * kernel_ok determines whether we should bypass addr/size checking.
- * See the equivalent C-macro version below for clarity.
- * On success, kernel_ok branches to a label indicated by parameter
- * <success>. This implies that the macro falls through to the next
- * insruction on an error.
- *
- * Note that while this macro can be used independently, we designed
- * in for optimal use in the access_ok macro below (i.e., we fall
- * through on error).
- *
- * On Entry:
- * <at> anything (temp register)
- * <success> label to branch to on success; implies
- * fall-through macro on error
- * <sp> stack pointer
- * On Exit:
- * <at> destroyed (actually, current->thread.current_ds)
- */
-
-#if ((KERNEL_DS != 0) || (USER_DS == 0))
-# error Assembly macro kernel_ok fails
-#endif
- .macro kernel_ok at, sp, success
- get_fs \at, \sp
- beqz \at, \success
- .endm
-
-/*
* user_ok determines whether the access to user-space memory is allowed.
* See the equivalent C-macro version below for clarity.
*
@@ -147,7 +77,6 @@
* <at> destroyed
*/
.macro access_ok aa, as, at, sp, error
- kernel_ok \at, \sp, .Laccess_ok_\@
user_ok \aa, \as, \at, \error
.Laccess_ok_\@:
.endm
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 809c507d1825..e3474ca411ff 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -191,7 +191,39 @@
#endif
.endm
-#define XTENSA_STACK_ALIGNMENT 16
+ .macro do_nsau cnt, val, tmp, a
+#if XCHAL_HAVE_NSA
+ nsau \cnt, \val
+#else
+ mov \a, \val
+ movi \cnt, 0
+ extui \tmp, \a, 16, 16
+ bnez \tmp, 0f
+ movi \cnt, 16
+ slli \a, \a, 16
+0:
+ extui \tmp, \a, 24, 8
+ bnez \tmp, 1f
+ addi \cnt, \cnt, 8
+ slli \a, \a, 8
+1:
+ movi \tmp, __nsau_data
+ extui \a, \a, 24, 8
+ add \tmp, \tmp, \a
+ l8ui \tmp, \tmp, 0
+ add \cnt, \cnt, \tmp
+#endif /* !XCHAL_HAVE_NSA */
+ .endm
+
+ .macro do_abs dst, src, tmp
+#if XCHAL_HAVE_ABS
+ abs \dst, \src
+#else
+ neg \tmp, \src
+ movgez \tmp, \src, \src
+ mov \dst, \tmp
+#endif
+ .endm
#if defined(__XTENSA_WINDOWED_ABI__)
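The do_nsau macro added above gives cores without the nsau instruction a software "normalized shift amount" (count of leading zeros, 32 for a zero input): it narrows the search by 16 bits, then by 8, and finishes with a byte lookup through __nsau_data. A rough C equivalent of that fallback, assuming a table where entry v holds the number of leading zeros of byte v (the real __nsau_data lives in the xtensa lib sources):

static unsigned char nsau_table[256];	/* assumed layout: leading zeros per byte value */

static void nsau_table_init(void)
{
	int i;

	nsau_table[0] = 8;
	for (i = 1; i < 256; i++) {
		int v = i, n = 0;

		while (!(v & 0x80)) {	/* shift until the top bit of the byte is set */
			v <<= 1;
			n++;
		}
		nsau_table[i] = n;
	}
}

static unsigned int nsau(unsigned int val)
{
	unsigned int cnt = 0;

	if (!(val >> 16)) {		/* upper half clear: skip 16 bits */
		cnt = 16;
		val <<= 16;
	}
	if (!(val >> 24)) {		/* next byte clear: skip 8 more */
		cnt += 8;
		val <<= 8;
	}
	return cnt + nsau_table[val >> 24];
}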
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index d6f8d4ddc2bc..898ea397e9bc 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -11,9 +11,15 @@
#include <asm/core.h>
-#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
-#define rmb() barrier()
-#define wmb() mb()
+#define __mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define __rmb() barrier()
+#define __wmb() __mb()
+
+#ifdef CONFIG_SMP
+#define __smp_mb() __mb()
+#define __smp_rmb() __rmb()
+#define __smp_wmb() __wmb()
+#endif
#if XCHAL_HAVE_S32C1I
#define __smp_mb__before_atomic() barrier()
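With the primitives renamed to __mb()/__rmb()/__wmb() and the __smp_* variants defined only under CONFIG_SMP, asm-generic/barrier.h builds the public mb()/smp_mb() family on top of them and lets the SMP forms collapse to compiler barriers on UP. A hedged kernel-style sketch of the usual pairing those public macros serve; the data/ready names are made up:

#include <linux/compiler.h>
#include <asm/barrier.h>

static int data;
static int ready;

static void producer(void)
{
	data = 42;
	smp_wmb();			/* publish data before setting the flag */
	WRITE_ONCE(ready, 1);
}

static int consumer(void)
{
	if (!READ_ONCE(ready))
		return -1;
	smp_rmb();			/* order the flag read before reading data */
	return data;
}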
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 3f71d364ba90..e02ec5833389 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -99,7 +99,7 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
#define BIT_OP(op, insn, inv) \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp; \
unsigned long mask = 1UL << (bit & 31); \
@@ -119,7 +119,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -142,7 +142,7 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
#elif XCHAL_HAVE_S32C1I
#define BIT_OP(op, insn, inv) \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -163,7 +163,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -205,7 +205,8 @@ BIT_OPS(change, "xor", )
#undef BIT_OP
#undef TEST_AND_BIT_OP
-#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
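Renaming the primitives to arch_set_bit()/arch_test_and_set_bit() and pulling in asm-generic/bitops/instrumented-atomic.h means the public set_bit() family is now a generated wrapper that adds KASAN/KCSAN instrumentation before delegating to the arch_ version. Roughly (simplified; the real wrappers are in include/asm-generic/bitops/instrumented-atomic.h):

static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
	/* report the atomic RMW access to KASAN/KCSAN, then do the real work */
	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
	arch_set_bit(nr, addr);
}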
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
index 892aab399ac8..6333bd1eb9d2 100644
--- a/arch/xtensa/include/asm/bootparam.h
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -34,7 +34,7 @@
typedef struct bp_tag {
unsigned short id; /* tag id */
unsigned short size; /* size of this record excluding the structure*/
- unsigned long data[0]; /* data */
+ unsigned long data[]; /* data */
} bp_tag_t;
struct bp_meminfo {
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 0fbe2a740b8d..3b1a0d5d2169 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -142,11 +142,12 @@ typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
-extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
-extern void coprocessor_flush(struct thread_info*, int);
-
-extern void coprocessor_release_all(struct thread_info*);
-extern void coprocessor_flush_all(struct thread_info*);
+struct thread_info;
+void coprocessor_flush(struct thread_info *ti, int cp_index);
+void coprocessor_release_all(struct thread_info *ti);
+void coprocessor_flush_all(struct thread_info *ti);
+void coprocessor_flush_release_all(struct thread_info *ti);
+void local_coprocessors_flush_release_all(void);
#endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
index 9138077e567d..f856d2bcb9f3 100644
--- a/arch/xtensa/include/asm/core.h
+++ b/arch/xtensa/include/asm/core.h
@@ -37,4 +37,11 @@
#endif
#endif
+/* Xtensa ABI requires stack alignment to be at least 16 */
+#if XCHAL_DATA_WIDTH > 16
+#define XTENSA_STACK_ALIGNMENT XCHAL_DATA_WIDTH
+#else
+#define XTENSA_STACK_ALIGNMENT 16
+#endif
+
#endif
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 5d98a7ad4251..08010dbf5e09 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -26,6 +26,8 @@ static inline struct task_struct *get_current(void)
#define current get_current()
+register unsigned long current_stack_pointer __asm__("a1");
+
#else
#define GET_CURRENT(reg,sp) \
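The current_stack_pointer register variable bound to a1 is what backs the ARCH_HAS_CURRENT_STACK_POINTER selection added to Kconfig above: generic C code can read the live stack pointer without inline assembly. A hedged usage sketch; on_task_stack() is a made-up helper name for illustration:

#include <linux/sched.h>
#include <linux/sched/task_stack.h>

static bool on_task_stack(void)		/* hypothetical helper, not from this patch */
{
	unsigned long low = (unsigned long)task_stack_page(current);
	unsigned long high = low + THREAD_SIZE;

	return current_stack_pointer >= low && current_stack_pointer < high;
}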
diff --git a/arch/xtensa/include/asm/dma.h b/arch/xtensa/include/asm/dma.h
index bb099a373b5a..172644539032 100644
--- a/arch/xtensa/include/asm/dma.h
+++ b/arch/xtensa/include/asm/dma.h
@@ -52,11 +52,4 @@
extern int request_dma(unsigned int dmanr, const char * device_id);
extern void free_dma(unsigned int dmanr);
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
-
#endif
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index 909a6ab4f22b..ffcf1ada19c6 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -93,6 +93,10 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
( (x)->e_machine == EM_XTENSA_OLD ) )
+#define ELFOSABI_XTENSA_FDPIC 65
+#define elf_check_fdpic(x) ((x)->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC)
+#define ELF_FDPIC_CORE_EFLAGS 0
+
/*
* These are used to set parameters in the core dumps.
*/
@@ -153,10 +157,22 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
*/
#define ELF_PLAT_INIT(_r, load_addr) \
- do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
- _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
- _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
- _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
+ do { \
+ (_r)->areg[0] = 0; /*(_r)->areg[1] = 0;*/ \
+ (_r)->areg[2] = 0; (_r)->areg[3] = 0; \
+ (_r)->areg[4] = 0; (_r)->areg[5] = 0; \
+ (_r)->areg[6] = 0; (_r)->areg[7] = 0; \
+ (_r)->areg[8] = 0; (_r)->areg[9] = 0; \
+ (_r)->areg[10] = 0; (_r)->areg[11] = 0; \
+ (_r)->areg[12] = 0; (_r)->areg[13] = 0; \
+ (_r)->areg[14] = 0; (_r)->areg[15] = 0; \
+ } while (0)
+
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->areg[4] = _exec_map_addr; \
+ (_r)->areg[5] = _interp_map_addr; \
+ (_r)->areg[6] = dynamic_addr; \
} while (0)
typedef struct {
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 54188e69b988..a5b707e1c0f4 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -63,9 +63,6 @@ static inline void iounmap(volatile void __iomem *addr)
xtensa_iounmap(addr);
}
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
#endif /* CONFIG_MMU */
#include <asm-generic/io.h>
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
index 405526912d9a..e320aa5bbedb 100644
--- a/arch/xtensa/include/asm/pci-bridge.h
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -73,13 +73,4 @@ static inline void pcibios_init_resource(struct resource *res,
res->child = NULL;
}
-
-/* These are used for config access before all the PCI probing has been done. */
-int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
-int early_read_config_word(struct pci_controller*, int, int, int, u16*);
-int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
-int early_write_config_byte(struct pci_controller*, int, int, int, u8);
-int early_write_config_word(struct pci_controller*, int, int, int, u16);
-int early_write_config_dword(struct pci_controller*, int, int, int, u32);
-
#endif /* _XTENSA_PCI_BRIDGE_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 8e2b48a268db..b56de9635b6c 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -43,7 +43,4 @@
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
#define arch_can_pci_mmap_io() 1
-/* Generic PCI */
-#include <asm-generic/pci.h>
-
#endif /* _XTENSA_PCI_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index eeb2de3a89e5..7fc0f9126dd3 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -29,7 +29,7 @@
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
- return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
+ return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline void ptes_clear(pte_t *ptep)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index bd5aeb795567..54f577c13afa 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -57,7 +57,6 @@
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
-#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
@@ -200,24 +199,6 @@
* What follows is the closest we can get by reasonable means..
* See linux/mm/mmap.c for protection_map[] array that uses these definitions.
*/
-#define __P000 PAGE_NONE /* private --- */
-#define __P001 PAGE_READONLY /* private --r */
-#define __P010 PAGE_COPY /* private -w- */
-#define __P011 PAGE_COPY /* private -wr */
-#define __P100 PAGE_READONLY_EXEC /* private x-- */
-#define __P101 PAGE_READONLY_EXEC /* private x-r */
-#define __P110 PAGE_COPY_EXEC /* private xw- */
-#define __P111 PAGE_COPY_EXEC /* private xwr */
-
-#define __S000 PAGE_NONE /* shared --- */
-#define __S001 PAGE_READONLY /* shared --r */
-#define __S010 PAGE_SHARED /* shared -w- */
-#define __S011 PAGE_SHARED /* shared -wr */
-#define __S100 PAGE_READONLY_EXEC /* shared x-- */
-#define __S101 PAGE_READONLY_EXEC /* shared x-r */
-#define __S110 PAGE_SHARED_EXEC /* shared xw- */
-#define __S111 PAGE_SHARED_EXEC /* shared xwr */
-
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
@@ -241,6 +222,7 @@ static inline void paging_init(void) { }
* The pmd contains the kernel virtual address of the pte page.
*/
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
/*
@@ -411,6 +393,10 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
typedef pte_t *pte_addr_t;
+void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+
#endif /* !defined (__ASSEMBLY__) */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 37d3e9887fe7..228e4dff5fb2 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -18,11 +18,7 @@
#include <asm/types.h>
#include <asm/regs.h>
-/* Xtensa ABI requires stack alignment to be at least 16 */
-
-#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
-
-#define ARCH_SLAB_MINALIGN STACK_ALIGN
+#define ARCH_SLAB_MINALIGN XTENSA_STACK_ALIGNMENT
/*
* User space process size: 1 GB.
@@ -152,18 +148,12 @@
*/
#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
struct thread_struct {
/* kernel's return address and stack pointer for context switching */
unsigned long ra; /* kernel's a0: return address and window call size */
unsigned long sp; /* kernel's a1: stack pointer */
- mm_segment_t current_ds; /* see uaccess.h for example uses */
-
/* struct xtensa_cpuinfo info; */
unsigned long bad_vaddr; /* last user fault */
@@ -186,7 +176,6 @@ struct thread_struct {
{ \
ra: 0, \
sp: sizeof(init_stack) + (long) &init_stack, \
- current_ds: {0}, \
/*info: {0}, */ \
bad_vaddr: 0, \
bad_uaddr: 0, \
@@ -216,9 +205,12 @@ struct thread_struct {
#define start_thread(regs, new_pc, new_sp) \
do { \
unsigned long syscall = (regs)->syscall; \
+ unsigned long current_aregs[16]; \
+ memcpy(current_aregs, (regs)->areg, sizeof(current_aregs)); \
memset((regs), 0, sizeof(*(regs))); \
(regs)->pc = (new_pc); \
(regs)->ps = USER_PS_VALUE; \
+ memcpy((regs)->areg, current_aregs, sizeof(current_aregs)); \
(regs)->areg[1] = (new_sp); \
(regs)->areg[0] = 0; \
(regs)->wmask = 1; \
@@ -232,9 +224,6 @@ struct thread_struct {
struct task_struct;
struct mm_struct;
-/* Free all resources held by a thread. */
-#define release_thread(thread) do { } while(0)
-
extern unsigned long __get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
@@ -246,8 +235,8 @@ extern unsigned long __get_wchan(struct task_struct *p);
#define xtensa_set_sr(x, sr) \
({ \
- unsigned int v = (unsigned int)(x); \
- __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \
+ __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \
+ "a"((unsigned int)(x))); \
})
#define xtensa_get_sr(sr) \
@@ -257,6 +246,13 @@ extern unsigned long __get_wchan(struct task_struct *p);
v; \
})
+#define xtensa_xsr(x, sr) \
+ ({ \
+ unsigned int __v__ = (unsigned int)(x); \
+ __asm__ __volatile__ ("xsr %0, " __stringify(sr) : "+a"(__v__)); \
+ __v__; \
+ })
+
#if XCHAL_HAVE_EXTERN_REGS
static inline void set_er(unsigned long value, unsigned long addr)
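The new xtensa_xsr() wraps the xsr instruction: it writes a value into a special register and returns the old contents in a single exchange. A hedged usage sketch, temporarily swapping a scratch value into excsave1 and restoring it afterwards (the scenario is illustrative only):

static unsigned int swap_excsave1_sketch(unsigned int scratch)
{
	unsigned int old = xtensa_xsr(scratch, excsave1);

	/* ... run with excsave1 == scratch ... */

	xtensa_xsr(old, excsave1);	/* put the original value back */
	return old;
}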
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index b109416dc07e..308f209a4740 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -44,6 +44,7 @@
#ifndef __ASSEMBLY__
#include <asm/coprocessor.h>
+#include <asm/core.h>
/*
* This struct defines the way the registers are stored on the
@@ -77,14 +78,12 @@ struct pt_regs {
/* current register frame.
* Note: The ESF for kernel exceptions ends after 16 registers!
*/
- unsigned long areg[16];
+ unsigned long areg[XCHAL_NUM_AREGS];
};
-#include <asm/core.h>
-
# define arch_has_single_step() (1)
# define task_pt_regs(tsk) ((struct pt_regs*) \
- (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
diff --git a/arch/xtensa/include/asm/sections.h b/arch/xtensa/include/asm/sections.h
index a8c42d08e281..3bc6b9afa993 100644
--- a/arch/xtensa/include/asm/sections.h
+++ b/arch/xtensa/include/asm/sections.h
@@ -29,7 +29,7 @@ extern char _Level5InterruptVector_text_end[];
extern char _Level6InterruptVector_text_start[];
extern char _Level6InterruptVector_text_end[];
#endif
-#ifdef CONFIG_SMP
+#ifdef CONFIG_SECONDARY_RESET_VECTOR
extern char _SecondaryResetVector_text_start[];
extern char _SecondaryResetVector_text_end[];
#endif
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
index fe06e8ed162b..a85e785a6288 100644
--- a/arch/xtensa/include/asm/stacktrace.h
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -19,14 +19,14 @@ struct stackframe {
static __always_inline unsigned long *stack_pointer(struct task_struct *task)
{
- unsigned long *sp;
+ unsigned long sp;
if (!task || task == current)
- __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+ sp = current_stack_pointer;
else
- sp = (unsigned long *)task->thread.sp;
+ sp = task->thread.sp;
- return sp;
+ return (unsigned long *)sp;
}
void walk_stackframe(unsigned long *sp,
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index a312333a9add..326db1c1d5d8 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -52,14 +52,21 @@ struct thread_info {
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
- mm_segment_t addr_limit; /* thread address space */
-
- unsigned long cpenable;
#if XCHAL_HAVE_EXCLUSIVE
/* result of the most recent exclusive store */
unsigned long atomctl8;
#endif
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ /* Address where PS.WOE was enabled by the ABI probing code */
+ unsigned long ps_woe_fix_addr;
+#endif
+ /*
+ * If i-th bit is set then coprocessor state is loaded into the
+ * coprocessor i on CPU cp_owner_cpu.
+ */
+ unsigned long cpenable;
+ u32 cp_owner_cpu;
/* Allocate storage for extra user states and coprocessor states. */
#if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t xtregs_cp;
@@ -81,7 +88,6 @@ struct thread_info {
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
}
/* how to get the thread information struct from C */
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 233ec75e60c6..3f2462f2d027 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -29,10 +29,6 @@
extern unsigned long ccount_freq;
-typedef unsigned long long cycles_t;
-
-#define get_cycles() (0)
-
void local_timer_setup(unsigned cpu);
/*
@@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare)
xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
}
+#include <asm-generic/timex.h>
+
#endif /* _XTENSA_TIMEX_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 6fa47cd8e02d..6f74ccc0c7ea 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -12,6 +12,8 @@
#include <asm/ptrace.h>
+typedef void xtensa_exception_handler(struct pt_regs *regs);
+
/*
* Per-CPU exception handling data structure.
* EXCSAVE1 points to it.
@@ -25,31 +27,47 @@ struct exc_table {
void *fixup;
/* For passing a parameter to fixup */
void *fixup_param;
+#if XTENSA_HAVE_COPROCESSORS
+ /* Pointers to owner struct thread_info */
+ struct thread_info *coprocessor_owner[XCHAL_CP_MAX];
+#endif
/* Fast user exception handlers */
void *fast_user_handler[EXCCAUSE_N];
/* Fast kernel exception handlers */
void *fast_kernel_handler[EXCCAUSE_N];
/* Default C-Handlers */
- void *default_handler[EXCCAUSE_N];
+ xtensa_exception_handler *default_handler[EXCCAUSE_N];
};
-/*
- * handler must be either of the following:
- * void (*)(struct pt_regs *regs);
- * void (*)(struct pt_regs *regs, unsigned long exccause);
- */
-extern void * __init trap_set_handler(int cause, void *handler);
-extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
-void fast_second_level_miss(void);
+DECLARE_PER_CPU(struct exc_table, exc_table);
+
+xtensa_exception_handler *
+__init trap_set_handler(int cause, xtensa_exception_handler *handler);
+
+asmlinkage void fast_illegal_instruction_user(void);
+asmlinkage void fast_syscall_user(void);
+asmlinkage void fast_alloca(void);
+asmlinkage void fast_unaligned(void);
+asmlinkage void fast_second_level_miss(void);
+asmlinkage void fast_store_prohibited(void);
+asmlinkage void fast_coprocessor(void);
+
+asmlinkage void kernel_exception(void);
+asmlinkage void user_exception(void);
+asmlinkage void system_call(struct pt_regs *regs);
+
+void do_IRQ(int hwirq, struct pt_regs *regs);
+void do_page_fault(struct pt_regs *regs);
+void do_unhandled(struct pt_regs *regs);
/* Initialize minimal exc_table structure sufficient for basic paging */
static inline void __init early_trap_init(void)
{
- static struct exc_table exc_table __initdata = {
+ static struct exc_table init_exc_table __initdata = {
.fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
fast_second_level_miss,
};
- __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+ xtensa_set_sr(&init_exc_table, excsave1);
}
void secondary_trap_init(void);
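With default_handler now typed as xtensa_exception_handler *, trap_set_handler() takes and returns properly typed function pointers instead of void *. A hedged usage sketch; the handler body and the cause code used here are illustrative:

#include <linux/printk.h>
#include <asm/traps.h>

static void demo_illegal_insn_handler(struct pt_regs *regs)	/* made-up handler */
{
	pr_err("illegal instruction at %pS\n", (void *)regs->pc);
}

static void __init install_handler_sketch(void)
{
	xtensa_exception_handler *old;

	old = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION,
			       demo_illegal_insn_handler);
	(void)old;	/* previous C-level handler, if any */
}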
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 75bd8fbf52ba..56aec6d504fe 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -19,31 +19,7 @@
#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>
-
-/*
- * The fs value determines whether argument validity checking should
- * be performed or not. If get_fs() == USER_DS, checking is
- * performed, with get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons (Data Segment Register?), these macros are
- * grossly misnamed.
- */
-
-#define KERNEL_DS ((mm_segment_t) { 0 })
-#define USER_DS ((mm_segment_t) { 1 })
-
-#define get_fs() (current->thread.current_ds)
-#define set_fs(val) (current->thread.current_ds = (val))
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#define __kernel_ok (uaccess_kernel())
-#define __user_ok(addr, size) \
- (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
-#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
-#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))
-
-#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
+#include <asm-generic/access_ok.h>
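With KERNEL_DS/USER_DS and set_fs() gone, access_ok() now comes from asm-generic/access_ok.h and only has to check that the requested range fits below the user/kernel split; kernel-internal accesses no longer bypass the check. Roughly what the generic header supplies (simplified; see include/asm-generic/access_ok.h for the real definition):

static inline int access_ok_sketch(const void __user *ptr, unsigned long size)
{
	unsigned long limit = TASK_SIZE;	/* the generic code uses TASK_SIZE_MAX */
	unsigned long addr = (unsigned long)ptr;

	return (size <= limit) && (addr <= (limit - size));
}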
/*
* These are the main single-value transfer routines. They
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index b3a22095371b..1ff0c858544f 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -109,6 +109,10 @@
#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h
index 50db3e0a6341..9115e86ebc75 100644
--- a/arch/xtensa/include/uapi/asm/ptrace.h
+++ b/arch/xtensa/include/uapi/asm/ptrace.h
@@ -37,6 +37,10 @@
#define PTRACE_SETXTREGS 19
#define PTRACE_GETHBPREGS 20
#define PTRACE_SETHBPREGS 21
+#define PTRACE_GETFDPIC 22
+
+#define PTRACE_GETFDPIC_EXEC 0
+#define PTRACE_GETFDPIC_INTERP 1
#ifndef __ASSEMBLY__
diff --git a/arch/xtensa/include/uapi/asm/shmbuf.h b/arch/xtensa/include/uapi/asm/shmbuf.h
index 554a57a6a90f..bb8bdddae9b5 100644
--- a/arch/xtensa/include/uapi/asm/shmbuf.h
+++ b/arch/xtensa/include/uapi/asm/shmbuf.h
@@ -20,9 +20,12 @@
#ifndef _XTENSA_SHMBUF_H
#define _XTENSA_SHMBUF_H
+#include <asm/ipcbuf.h>
+#include <asm/posix_types.h>
+
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_size_t shm_segsz; /* size of segment (bytes) */
unsigned long shm_atime; /* last attach time */
unsigned long shm_atime_high;
unsigned long shm_dtime; /* last detach time */
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
index 79ddabaa4e5d..b8c824dd4b74 100644
--- a/arch/xtensa/include/uapi/asm/signal.h
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -103,7 +103,7 @@ struct sigaction {
typedef struct sigaltstack {
void *ss_sp;
int ss_flags;
- size_t ss_size;
+ __kernel_size_t ss_size;
} stack_t;
#endif /* __ASSEMBLY__ */
diff --git a/arch/xtensa/include/uapi/asm/termbits.h b/arch/xtensa/include/uapi/asm/termbits.h
deleted file mode 100644
index d4206a7c5138..000000000000
--- a/arch/xtensa/include/uapi/asm/termbits.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * include/asm-xtensa/termbits.h
- *
- * Copied from SH.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_TERMBITS_H
-#define _XTENSA_TERMBITS_H
-
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-/* tcflow() and TCXONC use these */
-
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* _XTENSA_TERMBITS_H */
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index d4082c6a121b..f28b8e3d717e 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -3,9 +3,9 @@
# Makefile for the Linux/Xtensa kernel.
#
-extra-y := head.o vmlinux.lds
+extra-y := vmlinux.lds
-obj-y := align.o coprocessor.o entry.o irq.o platform.o process.o \
+obj-y := head.o align.o coprocessor.o entry.o irq.o platform.o process.o \
ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
vectors.o
@@ -13,11 +13,13 @@ obj-$(CONFIG_MMU) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
-obj-$(CONFIG_SMP) += smp.o mxhead.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SECONDARY_RESET_VECTOR) += mxhead.o
obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index dc5c83cad9be..da38de20ae59 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -21,6 +21,7 @@
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
+#include <linux/suspend.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
@@ -63,7 +64,7 @@ int main(void)
DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
- DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_KERNEL_SIZE, offsetof(struct pt_regs, areg[16]));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
@@ -87,15 +88,19 @@ int main(void)
OFFSET(TI_STSTUS, thread_info, status);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
- OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ OFFSET(TI_PS_WOE_FIX_ADDR, thread_info, ps_woe_fix_addr);
+#endif
/* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
- DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XCHAL_HAVE_EXCLUSIVE
DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
#endif
+ DEFINE(THREAD_CPENABLE, offsetof(struct thread_info, cpenable));
+ DEFINE(THREAD_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(THREAD_CP_OWNER_CPU, offsetof(struct thread_info, cp_owner_cpu));
#if XTENSA_HAVE_COPROCESSORS
DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
@@ -108,8 +113,6 @@ int main(void)
#endif
DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
- DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
- thread.current_ds));
/* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
@@ -140,11 +143,22 @@ int main(void)
DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+#if XTENSA_HAVE_COPROCESSORS
+ DEFINE(EXC_TABLE_COPROCESSOR_OWNER,
+ offsetof(struct exc_table, coprocessor_owner));
+#endif
DEFINE(EXC_TABLE_FAST_USER,
offsetof(struct exc_table, fast_user_handler));
DEFINE(EXC_TABLE_FAST_KERNEL,
offsetof(struct exc_table, fast_kernel_handler));
DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
+#ifdef CONFIG_HIBERNATION
+ DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+ DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
+ DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+#endif
+
return 0;
}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 45cc0ae0af6f..ef33e76e07d8 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -19,6 +19,26 @@
#include <asm/current.h>
#include <asm/regs.h>
+/*
+ * Rules for coprocessor state manipulation on SMP:
+ *
+ * - a task may have live coprocessors only on one CPU.
+ *
+ * - whether coprocessor context of task T is live on some CPU is
+ * denoted by T's thread_info->cpenable.
+ *
+ * - non-zero thread_info->cpenable means that thread_info->cp_owner_cpu
+ * is valid in the T's thread_info. Zero thread_info->cpenable means that
+ * coprocessor context is valid in the T's thread_info.
+ *
+ * - if a coprocessor context of task T is live on CPU X, only CPU X changes
+ * T's thread_info->cpenable, cp_owner_cpu and coprocessor save area.
+ * This is done by making sure that for the task T with live coprocessor
+ * on CPU X cpenable SR is 0 when T runs on any other CPU Y.
+ * When fast_coprocessor exception is taken on CPU Y it goes to the
+ * C-level do_coprocessor that uses IPI to make CPU X flush T's coprocessors.
+ */
+
#if XTENSA_HAVE_COPROCESSORS
/*
@@ -29,35 +49,31 @@
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lsave_cp_regs_cp##x: \
- xchal_cp##x##_store a2 a4 a5 a6 a7; \
- jx a0; \
+ xchal_cp##x##_store a2 a3 a4 a5 a6; \
+ ret; \
.endif
-#define SAVE_CP_REGS_TAB(x) \
- .if XTENSA_HAVE_COPROCESSOR(x); \
- .long .Lsave_cp_regs_cp##x; \
- .else; \
- .long 0; \
- .endif; \
- .long THREAD_XTREGS_CP##x
-
-
#define LOAD_CP_REGS(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lload_cp_regs_cp##x: \
- xchal_cp##x##_load a2 a4 a5 a6 a7; \
- jx a0; \
+ xchal_cp##x##_load a2 a3 a4 a5 a6; \
+ ret; \
.endif
-#define LOAD_CP_REGS_TAB(x) \
+#define CP_REGS_TAB(x) \
.if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lsave_cp_regs_cp##x; \
.long .Lload_cp_regs_cp##x; \
.else; \
- .long 0; \
+ .long 0, 0; \
.endif; \
.long THREAD_XTREGS_CP##x
+#define CP_REGS_TAB_SAVE 0
+#define CP_REGS_TAB_LOAD 4
+#define CP_REGS_TAB_OFFSET 8
+
__XTENSA_HANDLER
SAVE_CP_REGS(0)
@@ -79,25 +95,15 @@
LOAD_CP_REGS(7)
.align 4
-.Lsave_cp_regs_jump_table:
- SAVE_CP_REGS_TAB(0)
- SAVE_CP_REGS_TAB(1)
- SAVE_CP_REGS_TAB(2)
- SAVE_CP_REGS_TAB(3)
- SAVE_CP_REGS_TAB(4)
- SAVE_CP_REGS_TAB(5)
- SAVE_CP_REGS_TAB(6)
- SAVE_CP_REGS_TAB(7)
-
-.Lload_cp_regs_jump_table:
- LOAD_CP_REGS_TAB(0)
- LOAD_CP_REGS_TAB(1)
- LOAD_CP_REGS_TAB(2)
- LOAD_CP_REGS_TAB(3)
- LOAD_CP_REGS_TAB(4)
- LOAD_CP_REGS_TAB(5)
- LOAD_CP_REGS_TAB(6)
- LOAD_CP_REGS_TAB(7)
+.Lcp_regs_jump_table:
+ CP_REGS_TAB(0)
+ CP_REGS_TAB(1)
+ CP_REGS_TAB(2)
+ CP_REGS_TAB(3)
+ CP_REGS_TAB(4)
+ CP_REGS_TAB(5)
+ CP_REGS_TAB(6)
+ CP_REGS_TAB(7)
/*
* Entry condition:
@@ -115,9 +121,37 @@
ENTRY(fast_coprocessor)
+ s32i a3, a2, PT_AREG3
+
+#ifdef CONFIG_SMP
+ /*
+ * Check if any coprocessor context is live on another CPU
+ * and if so go through the C-level coprocessor exception handler
+ * to flush it to memory.
+ */
+ GET_THREAD_INFO (a0, a2)
+ l32i a3, a0, THREAD_CPENABLE
+ beqz a3, .Lload_local
+
+ /*
+ * Pairs with smp_wmb in local_coprocessor_release_all
+ * and with both memws below.
+ */
+ memw
+ l32i a3, a0, THREAD_CPU
+ l32i a0, a0, THREAD_CP_OWNER_CPU
+ beq a0, a3, .Lload_local
+
+ rsr a0, ps
+ l32i a3, a2, PT_AREG3
+ bbci.l a0, PS_UM_BIT, 1f
+ call0 user_exception
+1: call0 kernel_exception
+#endif
+
/* Save remaining registers a1-a3 and SAR */
- s32i a3, a2, PT_AREG3
+.Lload_local:
rsr a3, sar
s32i a1, a2, PT_AREG1
s32i a3, a2, PT_SAR
@@ -125,13 +159,15 @@ ENTRY(fast_coprocessor)
rsr a2, depc
s32i a2, a1, PT_AREG2
- /*
- * The hal macros require up to 4 temporary registers. We use a3..a6.
- */
+ /* The hal macros require up to 4 temporary registers. We use a3..a6. */
s32i a4, a1, PT_AREG4
s32i a5, a1, PT_AREG5
s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
@@ -148,58 +184,74 @@ ENTRY(fast_coprocessor)
wsr a0, cpenable
rsync
- /* Retrieve previous owner. (a3 still holds CP number) */
+ /* Get coprocessor save/load table entry (a7). */
- movi a0, coprocessor_owner # list of owners
- addx4 a0, a3, a0 # entry for CP
- l32i a4, a0, 0
+ movi a7, .Lcp_regs_jump_table
+ addx8 a7, a3, a7
+ addx4 a7, a3, a7
- beqz a4, 1f # skip 'save' if no previous owner
+ /* Retrieve previous owner (a8). */
- /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+ rsr a0, excsave1 # exc_table
+ addx4 a0, a3, a0 # entry for CP
+ l32i a8, a0, EXC_TABLE_COPROCESSOR_OWNER
+
+ /* Set new owner (a9). */
- l32i a5, a4, THREAD_CPENABLE
- xor a5, a5, a2 # (1 << cp-id) still in a2
- s32i a5, a4, THREAD_CPENABLE
+ GET_THREAD_INFO (a9, a1)
+ l32i a4, a9, THREAD_CPU
+ s32i a9, a0, EXC_TABLE_COPROCESSOR_OWNER
+ s32i a4, a9, THREAD_CP_OWNER_CPU
/*
- * Get context save area and 'call' save routine.
- * (a4 still holds previous owner (thread_info), a3 CP number)
+ * Enable coprocessor for the new owner. (a2 = 1 << CP number)
+ * This can be done before loading context into the coprocessor.
*/
+ l32i a4, a9, THREAD_CPENABLE
+ or a4, a4, a2
- movi a5, .Lsave_cp_regs_jump_table
- movi a0, 2f # a0: 'return' address
- addx8 a3, a3, a5 # a3: coprocessor number
- l32i a2, a3, 4 # a2: xtregs offset
- l32i a3, a3, 0 # a3: jump address
- add a2, a2, a4
- jx a3
+ /*
+ * Make sure THREAD_CP_OWNER_CPU is in memory before updating
+ * THREAD_CPENABLE
+ */
+ memw # (2)
+ s32i a4, a9, THREAD_CPENABLE
- /* Note that only a0 and a1 were preserved. */
+ beqz a8, 1f # skip 'save' if no previous owner
-2: rsr a3, exccause
- addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
- movi a0, coprocessor_owner
- addx4 a0, a3, a0
+ /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
- /* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+ l32i a10, a8, THREAD_CPENABLE
+ xor a10, a10, a2
-1: GET_THREAD_INFO (a4, a1)
- s32i a4, a0, 0
+ /* Get context save area and call save routine. */
- /* Get context save area and 'call' load routine. */
+ l32i a2, a7, CP_REGS_TAB_OFFSET
+ l32i a3, a7, CP_REGS_TAB_SAVE
+ add a2, a2, a8
+ callx0 a3
- movi a5, .Lload_cp_regs_jump_table
- movi a0, 1f
- addx8 a3, a3, a5
- l32i a2, a3, 4 # a2: xtregs offset
- l32i a3, a3, 0 # a3: jump address
- add a2, a2, a4
- jx a3
+ /*
+ * Make sure coprocessor context and THREAD_CP_OWNER_CPU are in memory
+ * before updating THREAD_CPENABLE
+ */
+ memw # (3)
+ s32i a10, a8, THREAD_CPENABLE
+1:
+ /* Get context save area and call load routine. */
+
+ l32i a2, a7, CP_REGS_TAB_OFFSET
+ l32i a3, a7, CP_REGS_TAB_LOAD
+ add a2, a2, a9
+ callx0 a3
/* Restore all registers and return from exception handler. */
-1: l32i a6, a1, PT_AREG6
+ l32i a10, a1, PT_AREG10
+ l32i a9, a1, PT_AREG9
+ l32i a8, a1, PT_AREG8
+ l32i a7, a1, PT_AREG7
+ l32i a6, a1, PT_AREG6
l32i a5, a1, PT_AREG5
l32i a4, a1, PT_AREG4
@@ -230,29 +282,21 @@ ENDPROC(fast_coprocessor)
ENTRY(coprocessor_flush)
- /* reserve 4 bytes on stack to save a0 */
- abi_entry(4)
-
- s32i a0, a1, 0
- movi a0, .Lsave_cp_regs_jump_table
- addx8 a3, a3, a0
- l32i a4, a3, 4
- l32i a3, a3, 0
- add a2, a2, a4
- beqz a3, 1f
- callx0 a3
-1: l32i a0, a1, 0
-
- abi_ret(4)
+ abi_entry_default
+
+ movi a4, .Lcp_regs_jump_table
+ addx8 a4, a3, a4
+ addx4 a3, a3, a4
+ l32i a4, a3, CP_REGS_TAB_SAVE
+ beqz a4, 1f
+ l32i a3, a3, CP_REGS_TAB_OFFSET
+ add a2, a2, a3
+ mov a7, a0
+ callx0 a4
+ mov a0, a7
+1:
+ abi_ret_default
ENDPROC(coprocessor_flush)
- .data
-
-ENTRY(coprocessor_owner)
-
- .fill XCHAL_CP_MAX, 4, 0
-
-END(coprocessor_owner)
-
#endif /* XTENSA_HAVE_COPROCESSORS */
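
The reworked coprocessor save/restore above folds the two separate jump tables into a single .Lcp_regs_jump_table that packs three words per coprocessor, which is why the assembly indexes it with addx8 plus addx4 (a stride of 12) and why CP_REGS_TAB_SAVE/LOAD/OFFSET are 0, 4 and 8. A minimal C sketch of that layout, with hypothetical names and assuming 32-bit pointers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical C view of one .Lcp_regs_jump_table entry: a save routine,
 * a load routine (or 0, 0 for coprocessors that are not configured) and
 * the offset of that coprocessor's area inside the thread's xtregs. */
struct cp_regs_tab_entry {
	void (*save)(void *save_area);   /* CP_REGS_TAB_SAVE   (0) */
	void (*load)(void *save_area);   /* CP_REGS_TAB_LOAD   (4) */
	uint32_t thread_xtregs_off;      /* CP_REGS_TAB_OFFSET (8) */
};

int main(void)
{
	/* One 12-byte entry per coprocessor, hence "addx8 a7, a3, a7"
	 * followed by "addx4 a7, a3, a7" to compute cp * 12 + table base. */
	printf("entry size: %zu, offsets: %zu/%zu/%zu\n",
	       sizeof(struct cp_regs_tab_entry),
	       offsetof(struct cp_regs_tab_entry, save),
	       offsetof(struct cp_regs_tab_entry, load),
	       offsetof(struct cp_regs_tab_entry, thread_xtregs_off));
	return 0;
}
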
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 99ab3c1a3387..272fff587907 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -28,15 +28,6 @@
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>
-/* Unimplemented features. */
-
-#undef KERNEL_STACK_OVERFLOW_CHECK
-
-/* Not well tested.
- *
- * - fast_coprocessor
- */
-
/*
* Macro to find first bit set in WINDOWBASE from the left + 1
*
@@ -178,28 +169,26 @@ _user_exception:
/* Save only live registers. */
-UABI_W _bbsi.l a2, 1, 1f
+UABI_W _bbsi.l a2, 1, .Lsave_window_registers
s32i a4, a1, PT_AREG4
s32i a5, a1, PT_AREG5
s32i a6, a1, PT_AREG6
s32i a7, a1, PT_AREG7
-UABI_W _bbsi.l a2, 2, 1f
+UABI_W _bbsi.l a2, 2, .Lsave_window_registers
s32i a8, a1, PT_AREG8
s32i a9, a1, PT_AREG9
s32i a10, a1, PT_AREG10
s32i a11, a1, PT_AREG11
-UABI_W _bbsi.l a2, 3, 1f
+UABI_W _bbsi.l a2, 3, .Lsave_window_registers
s32i a12, a1, PT_AREG12
s32i a13, a1, PT_AREG13
s32i a14, a1, PT_AREG14
s32i a15, a1, PT_AREG15
#if defined(USER_SUPPORT_WINDOWED)
- _bnei a2, 1, 1f # only one valid frame?
-
- /* Only one valid frame, skip saving regs. */
+ /* If only one valid frame skip saving regs. */
- j 2f
+ beqi a2, 1, common_exception
/* Save the remaining registers.
* We have to save all registers up to the first '1' from
@@ -208,8 +197,8 @@ UABI_W _bbsi.l a2, 3, 1f
* All register frames starting from the top field to the marked '1'
* must be saved.
*/
-
-1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
+.Lsave_window_registers:
+ addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
and a3, a3, a2 # max. only one bit is set
@@ -250,7 +239,7 @@ UABI_W _bbsi.l a2, 3, 1f
/* We are back to the original stack pointer (a1) */
#endif
-2: /* Now, jump to the common exception handler. */
+ /* Now, jump to the common exception handler. */
j common_exception
@@ -341,8 +330,8 @@ KABI_W _bbsi.l a2, 3, 1f
/* Copy spill slots of a0 and a1 to imitate movsp
* in order to keep exception stack continuous
*/
- l32i a3, a1, PT_SIZE
- l32i a0, a1, PT_SIZE + 4
+ l32i a3, a1, PT_KERNEL_SIZE
+ l32i a0, a1, PT_KERNEL_SIZE + 4
s32e a3, a1, -16
s32e a0, a1, -12
#endif
@@ -350,15 +339,6 @@ KABI_W _bbsi.l a2, 3, 1f
l32i a0, a1, PT_AREG0 # restore saved a0
wsr a0, depc
-#ifdef KERNEL_STACK_OVERFLOW_CHECK
-
- /* Stack overflow check, for debugging */
- extui a2, a1, TASK_SIZE_BITS,XX
- movi a3, SIZE??
- _bge a2, a3, out_of_stack_panic
-
-#endif
-
/*
* This is the common exception handler.
* We get here from the user exception handler or simply by falling through
@@ -442,7 +422,6 @@ KABI_W or a3, a3, a0
moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
KABI_W movi a2, PS_WOE_MASK
KABI_W or a3, a3, a2
- rsr a2, exccause
#endif
/* restore return address (or 0 if return to userspace) */
@@ -469,41 +448,56 @@ KABI_W or a3, a3, a2
save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
+#ifdef CONFIG_TRACE_IRQFLAGS
+ rsr abi_tmp0, ps
+ extui abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ beqz abi_tmp0, 1f
+ abi_call trace_hardirqs_off
+1:
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ l32i abi_tmp0, a1, PT_PS
+ bbci.l abi_tmp0, PS_UM_BIT, 1f
+ abi_call user_exit_callable
+1:
+#endif
+
/* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler.
*/
- rsr a4, excsave1
- addx4 a4, a2, a4
- l32i a4, a4, EXC_TABLE_DEFAULT # load handler
- mov abi_arg1, a2 # pass EXCCAUSE
- mov abi_arg0, a1 # pass stack frame
+ l32i abi_arg1, a1, PT_EXCCAUSE # pass EXCCAUSE
+ rsr abi_tmp0, excsave1
+ addx4 abi_tmp0, abi_arg1, abi_tmp0
+ l32i abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT # load handler
+ mov abi_arg0, a1 # pass stack frame
/* Call the second-level handler */
- abi_callx a4
+ abi_callx abi_tmp0
/* Jump here for exception exit */
.global common_exception_return
common_exception_return:
#if XTENSA_FAKE_NMI
- l32i a2, a1, PT_EXCCAUSE
- movi a3, EXCCAUSE_MAPPED_NMI
- beq a2, a3, .LNMIexit
+ l32i abi_tmp0, a1, PT_EXCCAUSE
+ movi abi_tmp1, EXCCAUSE_MAPPED_NMI
+ l32i abi_saved1, a1, PT_PS
+ beq abi_tmp0, abi_tmp1, .Lrestore_state
#endif
-1:
- irq_save a2, a3
+.Ltif_loop:
+ irq_save abi_tmp0, abi_tmp1
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_off
#endif
/* Jump if we are returning from kernel exceptions. */
- l32i abi_saved1, a1, PT_PS
- GET_THREAD_INFO(a2, a1)
- l32i a4, a2, TI_FLAGS
- _bbci.l abi_saved1, PS_UM_BIT, 6f
+ l32i abi_saved1, a1, PT_PS
+ GET_THREAD_INFO(abi_tmp0, a1)
+ l32i abi_saved0, abi_tmp0, TI_FLAGS
+ _bbci.l abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
/* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling,
@@ -512,82 +506,80 @@ common_exception_return:
* Note that we don't disable interrupts here.
*/
- _bbsi.l a4, TIF_NEED_RESCHED, 3f
- movi a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
- bnone a4, a2, 5f
+ _bbsi.l abi_saved0, TIF_NEED_RESCHED, .Lresched
+ movi abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
+ bnone abi_saved0, abi_tmp0, .Lexit_tif_loop_user
-2: l32i a4, a1, PT_DEPC
- bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+ l32i abi_tmp0, a1, PT_DEPC
+ bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
/* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_on
#endif
- rsil a2, 0
- mov abi_arg0, a1
+ rsil abi_tmp0, 0
+ mov abi_arg0, a1
abi_call do_notify_resume # int do_notify_resume(struct pt_regs*)
- j 1b
-
-3: /* Reschedule */
+ j .Ltif_loop
+.Lresched:
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_on
#endif
- rsil a2, 0
+ rsil abi_tmp0, 0
abi_call schedule # void schedule (void)
- j 1b
+ j .Ltif_loop
+.Lexit_tif_loop_kernel:
#ifdef CONFIG_PREEMPTION
-6:
- _bbci.l a4, TIF_NEED_RESCHED, 4f
+ _bbci.l abi_saved0, TIF_NEED_RESCHED, .Lrestore_state
/* Check current_thread_info->preempt_count */
- l32i a4, a2, TI_PRE_COUNT
- bnez a4, 4f
+ l32i abi_tmp1, abi_tmp0, TI_PRE_COUNT
+ bnez abi_tmp1, .Lrestore_state
abi_call preempt_schedule_irq
- j 4f
#endif
+ j .Lrestore_state
-#if XTENSA_FAKE_NMI
-.LNMIexit:
- l32i abi_saved1, a1, PT_PS
- _bbci.l abi_saved1, PS_UM_BIT, 4f
+.Lexit_tif_loop_user:
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ abi_call user_enter_callable
#endif
-
-5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- _bbci.l a4, TIF_DB_DISABLED, 7f
+ _bbci.l abi_saved0, TIF_DB_DISABLED, 1f
abi_call restore_dbreak
-7:
+1:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
- l32i a4, a1, PT_DEPC
- bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+ l32i abi_tmp0, a1, PT_DEPC
+ bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
abi_call check_tlb_sanity
#endif
-6:
-4:
+
+.Lrestore_state:
#ifdef CONFIG_TRACE_IRQFLAGS
- extui a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- bgei a4, LOCKLEVEL, 1f
+ extui abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ bgei abi_tmp0, LOCKLEVEL, 1f
abi_call trace_hardirqs_on
1:
#endif
- /* Restore optional registers. */
+ /*
+ * Restore optional registers.
+ * abi_arg* are used as temporary registers here.
+ */
- load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+ load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT
/* Restore SCOMPARE1 */
#if XCHAL_HAVE_S32C1I
- l32i a2, a1, PT_SCOMPARE1
- wsr a2, scompare1
+ l32i abi_tmp0, a1, PT_SCOMPARE1
+ wsr abi_tmp0, scompare1
#endif
- wsr abi_saved1, ps /* disable interrupts */
-
- _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit
+ wsr abi_saved1, ps /* disable interrupts */
+ _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit
user_exception_exit:
@@ -606,7 +598,7 @@ user_exception_exit:
rsr a1, depc # restore stack pointer
l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
rotw -1 # we restore a4..a7
- _bltui a6, 16, 1f # only have to restore current window?
+ _bltui a6, 16, .Lclear_regs # only have to restore current window?
/* The working registers are a0 and a3. We are restoring to
* a4..a7. Be careful not to destroy what we have just restored.
@@ -618,18 +610,19 @@ user_exception_exit:
mov a2, a6
mov a3, a5
-2: rotw -1 # a0..a3 become a4..a7
+1: rotw -1 # a0..a3 become a4..a7
addi a3, a7, -4*4 # next iteration
addi a2, a6, -16 # decrementing Y in WMASK
l32i a4, a3, PT_AREG_END + 0
l32i a5, a3, PT_AREG_END + 4
l32i a6, a3, PT_AREG_END + 8
l32i a7, a3, PT_AREG_END + 12
- _bgeui a2, 16, 2b
+ _bgeui a2, 16, 1b
	/* Clear unrestored registers (don't leak anything to user-land) */
-1: rsr a0, windowbase
+.Lclear_regs:
+ rsr a0, windowbase
rsr a3, sar
sub a3, a0, a3
beqz a3, 2f
@@ -706,12 +699,12 @@ kernel_exception_exit:
addi a0, a1, -16
l32i a3, a0, 0
l32i a4, a0, 4
- s32i a3, a1, PT_SIZE+0
- s32i a4, a1, PT_SIZE+4
+ s32i a3, a1, PT_KERNEL_SIZE + 0
+ s32i a4, a1, PT_KERNEL_SIZE + 4
l32i a3, a0, 8
l32i a4, a0, 12
- s32i a3, a1, PT_SIZE+8
- s32i a4, a1, PT_SIZE+12
+ s32i a3, a1, PT_KERNEL_SIZE + 8
+ s32i a4, a1, PT_KERNEL_SIZE + 12
/* Common exception exit.
* We restore the special register and the current window frame, and
@@ -800,7 +793,7 @@ ENDPROC(kernel_exception)
ENTRY(debug_exception)
rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
- bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
+ bbsi.l a0, PS_EXCM_BIT, .Ldebug_exception_in_exception # exception mode
/* Set EPC1 and EXCCAUSE */
@@ -819,10 +812,10 @@ ENTRY(debug_exception)
/* Switch to kernel/user stack, restore jump vector, and save a0 */
- bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
+ bbsi.l a2, PS_UM_BIT, .Ldebug_exception_user # jump if user mode
+ addi a2, a1, -16 - PT_KERNEL_SIZE # assume kernel stack
- addi a2, a1, -16-PT_SIZE # assume kernel stack
-3:
+.Ldebug_exception_continue:
l32i a0, a3, DT_DEBUG_SAVE
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG0
@@ -850,10 +843,12 @@ ENTRY(debug_exception)
bbsi.l a2, PS_UM_BIT, _user_exception
j _kernel_exception
-2: rsr a2, excsave1
+.Ldebug_exception_user:
+ rsr a2, excsave1
l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
- j 3b
+ j .Ldebug_exception_continue
+.Ldebug_exception_in_exception:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Debug exception while in exception mode. This may happen when
* window overflow/underflow handler or fast exception handler hits
@@ -861,8 +856,8 @@ ENTRY(debug_exception)
* breakpoints, single-step faulting instruction and restore data
* breakpoints.
*/
-1:
- bbci.l a0, PS_UM_BIT, 1b # jump if kernel mode
+
+ bbci.l a0, PS_UM_BIT, .Ldebug_exception_in_exception # jump if kernel mode
rsr a0, debugcause
bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
@@ -906,7 +901,7 @@ ENTRY(debug_exception)
rfi XCHAL_DEBUGLEVEL
#else
/* Debug exception while in exception mode. Should not happen. */
-1: j 1b // FIXME!!
+ j .Ldebug_exception_in_exception // FIXME!!
#endif
ENDPROC(debug_exception)
@@ -1061,6 +1056,11 @@ ENTRY(fast_illegal_instruction_user)
movi a3, PS_WOE_MASK
or a0, a0, a3
wsr a0, ps
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ GET_THREAD_INFO(a3, a2)
+ rsr a0, epc1
+ s32i a0, a3, TI_PS_WOE_FIX_ADDR
+#endif
l32i a3, a2, PT_AREG3
l32i a0, a2, PT_AREG0
rsr a2, depc
@@ -1433,7 +1433,7 @@ ENTRY(fast_syscall_spill_registers)
rsync
movi abi_arg0, SIGSEGV
- abi_call do_exit
+ abi_call make_task_dead
/* shouldn't return, so panic */
@@ -1635,12 +1635,13 @@ ENTRY(fast_second_level_miss)
GET_CURRENT(a1,a2)
l32i a0, a1, TASK_MM # tsk->mm
- beqz a0, 9f
+ beqz a0, .Lfast_second_level_miss_no_mm
-8: rsr a3, excvaddr # fault address
+.Lfast_second_level_miss_continue:
+ rsr a3, excvaddr # fault address
_PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval
- beqz a0, 2f
+ beqz a0, .Lfast_second_level_miss_no_pmd
/* Read ptevaddr and convert to top of page-table page.
*
@@ -1683,12 +1684,13 @@ ENTRY(fast_second_level_miss)
addi a3, a3, DTLB_WAY_PGD
add a1, a1, a3 # ... + way_number
-3: wdtlb a0, a1
+.Lfast_second_level_miss_wdtlb:
+ wdtlb a0, a1
dsync
/* Exit critical section. */
-
-4: rsr a3, excsave1
+.Lfast_second_level_miss_skip_wdtlb:
+ rsr a3, excsave1
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
@@ -1712,19 +1714,21 @@ ENTRY(fast_second_level_miss)
esync
rfde
-9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
- bnez a0, 8b
+.Lfast_second_level_miss_no_mm:
+ l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ bnez a0, .Lfast_second_level_miss_continue
/* Even more unlikely case active_mm == 0.
* We can get here with NMI in the middle of context_switch that
* touches vmalloc area.
*/
movi a0, init_mm
- j 8b
+ j .Lfast_second_level_miss_continue
+.Lfast_second_level_miss_no_pmd:
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-2: /* Special case for cache aliasing.
+ /* Special case for cache aliasing.
* We (should) only get here if a clear_user_page, copy_user_page
* or the aliased cache flush functions got preemptively interrupted
* by another task. Re-establish temporary mapping to the
@@ -1734,24 +1738,24 @@ ENTRY(fast_second_level_miss)
/* We shouldn't be in a double exception */
l32i a0, a2, PT_DEPC
- bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow
/* Make sure the exception originated in the special functions */
movi a0, __tlbtemp_mapping_start
rsr a3, epc1
- bltu a3, a0, 2f
+ bltu a3, a0, .Lfast_second_level_miss_slow
movi a0, __tlbtemp_mapping_end
- bgeu a3, a0, 2f
+ bgeu a3, a0, .Lfast_second_level_miss_slow
/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
movi a3, TLBTEMP_BASE_1
rsr a0, excvaddr
- bltu a0, a3, 2f
+ bltu a0, a3, .Lfast_second_level_miss_slow
addi a1, a0, -TLBTEMP_SIZE
- bgeu a1, a3, 2f
+ bgeu a1, a3, .Lfast_second_level_miss_slow
/* Check if we have to restore an ITLB mapping. */
@@ -1777,19 +1781,19 @@ ENTRY(fast_second_level_miss)
mov a0, a6
movnez a0, a7, a3
- j 3b
+ j .Lfast_second_level_miss_wdtlb
/* ITLB entry. We only use dst in a6. */
1: witlb a6, a1
isync
- j 4b
+ j .Lfast_second_level_miss_skip_wdtlb
#endif // DCACHE_WAY_SIZE > PAGE_SIZE
-
-2: /* Invalid PGD, default exception handling */
+ /* Invalid PGD, default exception handling */
+.Lfast_second_level_miss_slow:
rsr a1, depc
s32i a1, a2, PT_AREG2
@@ -1829,12 +1833,13 @@ ENTRY(fast_store_prohibited)
GET_CURRENT(a1,a2)
l32i a0, a1, TASK_MM # tsk->mm
- beqz a0, 9f
+ beqz a0, .Lfast_store_no_mm
-8: rsr a1, excvaddr # fault address
+.Lfast_store_continue:
+ rsr a1, excvaddr # fault address
_PGD_OFFSET(a0, a1, a3)
l32i a0, a0, 0
- beqz a0, 2f
+ beqz a0, .Lfast_store_slow
/*
* Note that we test _PAGE_WRITABLE_BIT only if PTE is present
@@ -1844,8 +1849,8 @@ ENTRY(fast_store_prohibited)
_PTE_OFFSET(a0, a1, a3)
l32i a3, a0, 0 # read pteval
movi a1, _PAGE_CA_INVALID
- ball a3, a1, 2f
- bbci.l a3, _PAGE_WRITABLE_BIT, 2f
+ ball a3, a1, .Lfast_store_slow
+ bbci.l a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
or a3, a3, a1
@@ -1873,7 +1878,6 @@ ENTRY(fast_store_prohibited)
l32i a2, a2, PT_DEPC
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-
rsr a2, depc
rfe
@@ -1883,11 +1887,17 @@ ENTRY(fast_store_prohibited)
esync
rfde
-9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
- j 8b
-
-2: /* If there was a problem, handle fault in C */
+.Lfast_store_no_mm:
+ l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ j .Lfast_store_continue
+ /* If there was a problem, handle fault in C */
+.Lfast_store_slow:
+ rsr a1, excvaddr
+ pdtlb a0, a1
+ bbci.l a0, DTLB_HIT_BIT, 1f
+ idtlb a0
+1:
rsr a3, depc # still holds a2
s32i a3, a2, PT_AREG2
mov a1, a2
@@ -2076,8 +2086,16 @@ ENTRY(_switch_to)
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
l32i a3, a5, THREAD_CPENABLE
- xsr a3, cpenable
- s32i a3, a4, THREAD_CPENABLE
+#ifdef CONFIG_SMP
+ beqz a3, 1f
+ memw # pairs with memw (2) in fast_coprocessor
+ l32i a6, a5, THREAD_CP_OWNER_CPU
+ l32i a7, a5, THREAD_CPU
+ beq a6, a7, 1f # load 0 into CPENABLE if current CPU is not the owner
+ movi a3, 0
+1:
+#endif
+ wsr a3, cpenable
#endif
#if XCHAL_HAVE_EXCLUSIVE
@@ -2152,3 +2170,95 @@ ENTRY(ret_from_kernel_thread)
j common_exception_return
ENDPROC(ret_from_kernel_thread)
+
+#ifdef CONFIG_HIBERNATION
+
+ .section .bss, "aw"
+ .align 4
+.Lsaved_regs:
+#if defined(__XTENSA_WINDOWED_ABI__)
+ .fill 2, 4
+#elif defined(__XTENSA_CALL0_ABI__)
+ .fill 6, 4
+#else
+#error Unsupported Xtensa ABI
+#endif
+ .align XCHAL_NCP_SA_ALIGN
+.Lsaved_user_regs:
+ .fill XTREGS_USER_SIZE, 1
+
+ .previous
+
+ENTRY(swsusp_arch_suspend)
+
+ abi_entry_default
+
+ movi a2, .Lsaved_regs
+ movi a3, .Lsaved_user_regs
+ s32i a0, a2, 0
+ s32i a1, a2, 4
+ save_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_WINDOWED_ABI__)
+ spill_registers_kernel
+#elif defined(__XTENSA_CALL0_ABI__)
+ s32i a12, a2, 8
+ s32i a13, a2, 12
+ s32i a14, a2, 16
+ s32i a15, a2, 20
+#else
+#error Unsupported Xtensa ABI
+#endif
+ abi_call swsusp_save
+ mov a2, abi_rv
+ abi_ret_default
+
+ENDPROC(swsusp_arch_suspend)
+
+ENTRY(swsusp_arch_resume)
+
+ abi_entry_default
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ spill_registers_kernel
+#endif
+
+ movi a2, restore_pblist
+ l32i a2, a2, 0
+
+.Lcopy_pbe:
+ l32i a3, a2, PBE_ADDRESS
+ l32i a4, a2, PBE_ORIG_ADDRESS
+
+ __loopi a3, a9, PAGE_SIZE, 16
+ l32i a5, a3, 0
+ l32i a6, a3, 4
+ l32i a7, a3, 8
+ l32i a8, a3, 12
+ addi a3, a3, 16
+ s32i a5, a4, 0
+ s32i a6, a4, 4
+ s32i a7, a4, 8
+ s32i a8, a4, 12
+ addi a4, a4, 16
+ __endl a3, a9
+
+ l32i a2, a2, PBE_NEXT
+ bnez a2, .Lcopy_pbe
+
+ movi a2, .Lsaved_regs
+ movi a3, .Lsaved_user_regs
+ l32i a0, a2, 0
+ l32i a1, a2, 4
+ load_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_CALL0_ABI__)
+ l32i a12, a2, 8
+ l32i a13, a2, 12
+ l32i a14, a2, 16
+ l32i a15, a2, 20
+#endif
+ movi a2, 0
+ abi_ret_default
+
+ENDPROC(swsusp_arch_resume)
+
+#endif
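
swsusp_arch_resume above walks restore_pblist and copies every saved page back over its original location, 16 bytes per loop iteration, before reloading the registers stashed by swsusp_arch_suspend. A plain-C model of that walk, using a hypothetical pbe layout rather than the kernel's struct pbe:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096			/* illustrative; matches the usual Xtensa page size */

/* Hypothetical stand-in for the fields named by PBE_ADDRESS,
 * PBE_ORIG_ADDRESS and PBE_NEXT in the assembly above. */
struct pbe {
	void *address;			/* snapshot copy of the page */
	void *orig_address;		/* where the page lived before suspend */
	struct pbe *next;
};

/* Equivalent of the .Lcopy_pbe loop: restore each page in the list. */
static void restore_pages(const struct pbe *restore_pblist)
{
	for (const struct pbe *p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}
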
diff --git a/arch/xtensa/kernel/hibernate.c b/arch/xtensa/kernel/hibernate.c
new file mode 100644
index 000000000000..06984327d6e2
--- /dev/null
+++ b/arch/xtensa/kernel/hibernate.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <asm/coprocessor.h>
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+#if XTENSA_HAVE_COPROCESSORS
+ local_coprocessors_flush_release_all();
+#endif
+}
+
+void notrace restore_processor_state(void)
+{
+}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 15051a8a1539..42f106004400 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -36,9 +36,8 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
- unsigned long sp;
+ unsigned long sp = current_stack_pointer;
- __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
sp &= THREAD_SIZE - 1;
if (unlikely(sp < (sizeof(thread_info) + 1024)))
@@ -170,7 +169,7 @@ void migrate_irqs(void)
for_each_active_irq(i) {
struct irq_data *data = irq_get_irq_data(i);
- struct cpumask *mask;
+ const struct cpumask *mask;
unsigned int newcpu;
if (irqd_is_per_cpu(data))
@@ -186,9 +185,10 @@ void migrate_irqs(void)
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, cpu);
- cpumask_setall(mask);
+ irq_set_affinity(i, cpu_all_mask);
+ } else {
+ irq_set_affinity(i, mask);
}
- irq_set_affinity(i, mask);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
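
The do_IRQ change above only swaps the inline asm for current_stack_pointer; the overflow check itself masks the stack pointer with THREAD_SIZE - 1 to get the offset inside the THREAD_SIZE-aligned thread stack, i.e. the headroom left above thread_info. A small sketch of that arithmetic with illustrative sizes (the real THREAD_SIZE and thread_info size are config-dependent):

#include <stdio.h>

#define THREAD_SIZE		8192	/* illustrative */
#define THREAD_INFO_SIZE	 128	/* stand-in for sizeof(struct thread_info) */

/* The stack grows down towards thread_info at the base of the region, so
 * sp & (THREAD_SIZE - 1) is how many bytes remain before the two collide. */
static int stack_nearly_full(unsigned long sp)
{
	unsigned long headroom = sp & (THREAD_SIZE - 1);

	return headroom < THREAD_INFO_SIZE + 1024;
}

int main(void)
{
	printf("%d\n", stack_nearly_full(0x8000000 + THREAD_INFO_SIZE + 512));	/* 1: under 1 KiB left */
	printf("%d\n", stack_nearly_full(0x8000000 + THREAD_SIZE - 64));	/* 0: plenty of room  */
	return 0;
}
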
diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
index 61cf6497a646..ad1841cecdfb 100644
--- a/arch/xtensa/kernel/jump_label.c
+++ b/arch/xtensa/kernel/jump_label.c
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
{
struct patch *patch = data;
- if (atomic_inc_return(&patch->cpu_count) == 1) {
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
local_patch_text(patch->addr, patch->data, patch->sz);
atomic_inc(&patch->cpu_count);
} else {
@@ -61,7 +61,7 @@ static void patch_text(unsigned long addr, const void *data, size_t sz)
.data = data,
};
stop_machine_cpuslocked(patch_text_stop_machine,
- &patch, NULL);
+ &patch, cpu_online_mask);
} else {
unsigned long flags;
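
The jump_label changes above make text patching wait until every online CPU has entered the stop_machine() callback (the counter must reach num_online_cpus(), and the callback now runs on all CPUs via cpu_online_mask), so no CPU can be executing the bytes being rewritten. A user-space model of that rendezvous with C11 atomics and pthreads, hypothetical names throughout:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int cpu_count;

/* Model of patch_text_stop_machine(): the last thread to arrive does the
 * "patching"; everyone else spins until it bumps the counter once more. */
static void *stop_machine_cb(void *arg)
{
	int me = (int)(long)arg;

	if (atomic_fetch_add(&cpu_count, 1) + 1 == NCPUS) {
		printf("cpu %d: all %d cpus parked, patching text\n", me, NCPUS);
		atomic_fetch_add(&cpu_count, 1);	/* release the others */
	} else {
		while (atomic_load(&cpu_count) <= NCPUS)
			;				/* spin until patching is done */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, stop_machine_cb, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}
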
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
index 9f3843742726..b702c0908b1f 100644
--- a/arch/xtensa/kernel/mxhead.S
+++ b/arch/xtensa/kernel/mxhead.S
@@ -37,11 +37,13 @@ _SetupOCD:
* xt-gdb to single step via DEBUG exceptions received directly
* by ocd.
*/
+#if XCHAL_HAVE_WINDOWED
movi a1, 1
movi a0, 0
wsr a1, windowstart
wsr a0, windowbase
rsync
+#endif
movi a1, LOCKLEVEL
wsr a1, ps
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index bd80df890b1e..68e0e2f06d66 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -47,6 +47,7 @@
#include <asm/asm-offsets.h>
#include <asm/regs.h>
#include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
@@ -63,52 +64,114 @@ EXPORT_SYMBOL(__stack_chk_guard);
#if XTENSA_HAVE_COPROCESSORS
-void coprocessor_release_all(struct thread_info *ti)
+void local_coprocessors_flush_release_all(void)
{
- unsigned long cpenable;
- int i;
+ struct thread_info **coprocessor_owner;
+ struct thread_info *unique_owner[XCHAL_CP_MAX];
+ int n = 0;
+ int i, j;
- /* Make sure we don't switch tasks during this operation. */
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+ xtensa_set_sr(XCHAL_CP_MASK, cpenable);
- preempt_disable();
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ struct thread_info *ti = coprocessor_owner[i];
- /* Walk through all cp owners and release it for the requested one. */
+ if (ti) {
+ coprocessor_flush(ti, i);
- cpenable = ti->cpenable;
+ for (j = 0; j < n; j++)
+ if (unique_owner[j] == ti)
+ break;
+ if (j == n)
+ unique_owner[n++] = ti;
- for (i = 0; i < XCHAL_CP_MAX; i++) {
- if (coprocessor_owner[i] == ti) {
- coprocessor_owner[i] = 0;
- cpenable &= ~(1 << i);
+ coprocessor_owner[i] = NULL;
}
}
+ for (i = 0; i < n; i++) {
+ /* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+ smp_wmb();
+ unique_owner[i]->cpenable = 0;
+ }
+ xtensa_set_sr(0, cpenable);
+}
- ti->cpenable = cpenable;
+static void local_coprocessor_release_all(void *info)
+{
+ struct thread_info *ti = info;
+ struct thread_info **coprocessor_owner;
+ int i;
+
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+
+ /* Walk through all cp owners and release it for the requested one. */
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (coprocessor_owner[i] == ti)
+ coprocessor_owner[i] = NULL;
+ }
+ /* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+ smp_wmb();
+ ti->cpenable = 0;
if (ti == current_thread_info())
xtensa_set_sr(0, cpenable);
+}
- preempt_enable();
+void coprocessor_release_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_release_all,
+ ti, true);
+ }
}
-void coprocessor_flush_all(struct thread_info *ti)
+static void local_coprocessor_flush_all(void *info)
{
- unsigned long cpenable, old_cpenable;
+ struct thread_info *ti = info;
+ struct thread_info **coprocessor_owner;
+ unsigned long old_cpenable;
int i;
- preempt_disable();
-
- old_cpenable = xtensa_get_sr(cpenable);
- cpenable = ti->cpenable;
- xtensa_set_sr(cpenable, cpenable);
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+ old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
for (i = 0; i < XCHAL_CP_MAX; i++) {
- if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+ if (coprocessor_owner[i] == ti)
coprocessor_flush(ti, i);
- cpenable >>= 1;
}
xtensa_set_sr(old_cpenable, cpenable);
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_flush_all,
+ ti, true);
+ }
+}
- preempt_enable();
+static void local_coprocessor_flush_release_all(void *info)
+{
+ local_coprocessor_flush_all(info);
+ local_coprocessor_release_all(info);
+}
+
+void coprocessor_flush_release_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_flush_release_all,
+ ti, true);
+ }
}
#endif
@@ -140,8 +203,7 @@ void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
struct thread_info *ti = current_thread_info();
- coprocessor_flush_all(ti);
- coprocessor_release_all(ti);
+ coprocessor_flush_release_all(ti);
#endif
flush_ptrace_hw_breakpoint(current);
}
@@ -201,10 +263,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
* involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
- unsigned long thread_fn_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp_thread_fn = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
@@ -224,7 +287,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
#error Unsupported Xtensa ABI
#endif
- if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (!args->fn) {
struct pt_regs *regs = current_pt_regs();
unsigned long usp = usp_thread_fn ?
usp_thread_fn : regs->areg[1];
@@ -232,10 +295,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
p->thread.ra = MAKE_RA_FOR_CALL(
(unsigned long)ret_from_fork, 0x1);
- /* This does not copy all the regs.
- * In a bout of brilliance or madness,
- * ARs beyond a0-a15 exist past the end of the struct.
- */
*childregs = *regs;
childregs->areg[1] = usp;
childregs->areg[2] = 0;
@@ -265,14 +324,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
childregs->wmask = 1;
childregs->windowstart = 1;
childregs->windowbase = 0;
- } else {
- int len = childregs->wmask & ~0xf;
- memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
- &regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
- childregs->syscall = regs->syscall;
-
if (clone_flags & CLONE_SETTLS)
childregs->threadptr = tls;
} else {
@@ -286,15 +339,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
* Window underflow will load registers from the
* spill slots on the stack on return from _switch_to.
*/
- SPILL_SLOT(childregs, 2) = usp_thread_fn;
- SPILL_SLOT(childregs, 3) = thread_fn_arg;
+ SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
+ SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
#elif defined(__XTENSA_CALL0_ABI__)
/*
* a12 = thread_fn, a13 = thread_fn arg.
* _switch_to epilogue will load registers from the stack.
*/
- ((unsigned long *)p->thread.sp)[0] = usp_thread_fn;
- ((unsigned long *)p->thread.sp)[1] = thread_fn_arg;
+ ((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
+ ((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
#else
#error Unsupported Xtensa ABI
#endif
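
The cross-CPU coprocessor tracking above is a publish/consume pattern: the CPU that grabs a coprocessor stores the owner CPU before it stores a non-zero cpenable (the memw (2) barrier in fast_coprocessor), and any reader that observes cpenable != 0 issues smp_rmb() before trusting cp_owner_cpu as the IPI target. A minimal C11 sketch of that ordering contract, with illustrative field names rather than the kernel's:

#include <stdatomic.h>

/* Hypothetical, simplified view of the contract between fast_coprocessor
 * (the writer) and coprocessor_flush_release_all() (the reader). */
struct cp_owner_state {
	_Atomic int cp_owner_cpu;	/* which CPU holds the live CP context */
	_Atomic unsigned cpenable;	/* non-zero once that context is live  */
};

static void writer_take_ownership(struct cp_owner_state *s, int cpu, unsigned mask)
{
	atomic_store_explicit(&s->cp_owner_cpu, cpu, memory_order_relaxed);
	/* "memw (2)": the owner CPU must be visible before cpenable is. */
	atomic_store_explicit(&s->cpenable, mask, memory_order_release);
}

static int reader_find_owner(struct cp_owner_state *s)
{
	if (!atomic_load_explicit(&s->cpenable, memory_order_acquire))
		return -1;		/* nothing to flush */
	/* "smp_rmb()": cpenable was seen, so cp_owner_cpu is valid too. */
	return atomic_load_explicit(&s->cp_owner_cpu, memory_order_relaxed);
}
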
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index bb3f4797d212..f29477162ede 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -26,7 +26,6 @@
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/smp.h>
-#include <linux/tracehook.h>
#include <linux/uaccess.h>
#define CREATE_TRACE_POINTS
@@ -172,8 +171,7 @@ static int tie_set(struct task_struct *target,
#if XTENSA_HAVE_COPROCESSORS
/* Flush all coprocessors before we overwrite them. */
- coprocessor_flush_all(ti);
- coprocessor_release_all(ti);
+ coprocessor_flush_release_all(ti);
ti->xtregs_cp.cp0 = newregs->cp0;
ti->xtregs_cp.cp1 = newregs->cp1;
ti->xtregs_cp.cp2 = newregs->cp2;
@@ -226,12 +224,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
void user_enable_single_step(struct task_struct *child)
{
- child->ptrace |= PT_SINGLESTEP;
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
- child->ptrace &= ~PT_SINGLESTEP;
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
@@ -550,7 +548,7 @@ int do_syscall_trace_enter(struct pt_regs *regs)
regs->areg[2] = -ENOSYS;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
+ ptrace_report_syscall_entry(regs)) {
regs->areg[2] = -ENOSYS;
regs->syscall = NO_SYSCALL;
return 0;
@@ -583,5 +581,5 @@ void do_syscall_trace_leave(struct pt_regs *regs)
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, step);
+ ptrace_report_syscall_exit(regs, step);
}
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
index 07e56e3a9a8b..8362388c8719 100644
--- a/arch/xtensa/kernel/s32c1i_selftest.c
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -40,14 +40,13 @@ static inline int probed_compare_swap(int *v, int cmp, int set)
/* Handle probed exception */
-static void __init do_probed_exception(struct pt_regs *regs,
- unsigned long exccause)
+static void __init do_probed_exception(struct pt_regs *regs)
{
if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
regs->pc += 3; /* skip the s32c1i instruction */
- rcw_exc = exccause;
+ rcw_exc = regs->exccause;
} else {
- do_unhandled(regs, exccause);
+ do_unhandled(regs);
}
}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 8db20cfb44ab..9191738f9941 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -140,7 +140,7 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
- strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+ strscpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
return 0;
}
@@ -230,7 +230,7 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(xtensa_dt_io_area, NULL);
if (!command_line[0])
- strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
#endif /* CONFIG_USE_OF */
@@ -260,7 +260,7 @@ void __init init_arch(bp_tag_t *bp_start)
#ifdef CONFIG_CMDLINE_BOOL
if (!command_line[0])
- strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+ strscpy(command_line, default_command_line, COMMAND_LINE_SIZE);
#endif
/* Early hook for platforms */
@@ -289,7 +289,7 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = command_line;
platform_setup(cmdline_p);
- strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
/* Reserve some memory regions */
@@ -349,7 +349,7 @@ void __init setup_arch(char **cmdline_p)
#endif /* CONFIG_VECTORS_ADDR */
-#ifdef CONFIG_SMP
+#ifdef CONFIG_SECONDARY_RESET_VECTOR
mem_reserve(__pa(_SecondaryResetVector_text_start),
__pa(_SecondaryResetVector_text_end));
#endif
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index f6c949895b3e..876d5df157ed 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -19,7 +19,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
-#include <linux/tracehook.h>
+#include <linux/resume_user_mode.h>
#include <linux/sched/task_stack.h>
#include <asm/ucontext.h>
@@ -162,8 +162,7 @@ setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
return err;
#if XTENSA_HAVE_COPROCESSORS
- coprocessor_flush_all(ti);
- coprocessor_release_all(ti);
+ coprocessor_flush_release_all(ti);
err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
sizeof (frame->xtregs.cp));
#endif
@@ -473,7 +472,7 @@ static void do_signal(struct pt_regs *regs)
/* Set up the stack frame */
ret = setup_frame(&ksig, sigmask_to_save(), regs);
signal_setup_done(ret, &ksig, 0);
- if (current->ptrace & PT_SINGLESTEP)
+ if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;
return;
@@ -499,7 +498,7 @@ static void do_signal(struct pt_regs *regs)
/* If there's no signal to deliver, we just restore the saved mask. */
restore_saved_sigmask();
- if (current->ptrace & PT_SINGLESTEP)
+ if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;
return;
}
@@ -511,5 +510,5 @@ void do_notify_resume(struct pt_regs *regs)
do_signal(regs);
if (test_thread_flag(TIF_NOTIFY_RESUME))
- tracehook_notify_resume(regs);
+ resume_user_mode_work(regs);
}
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 1254da07ead1..4dc109dd6214 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/thread_info.h>
#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
@@ -272,6 +273,12 @@ int __cpu_disable(void)
*/
set_cpu_online(cpu, false);
+#if XTENSA_HAVE_COPROCESSORS
+ /*
+ * Flush coprocessor contexts that are active on the current CPU.
+ */
+ local_coprocessors_flush_release_all();
+#endif
/*
* OK - migrate IRQs away from this CPU
*/
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 201356faa7e6..b3c2450d6f23 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -58,6 +58,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_area_struct *vmm;
+ struct vma_iterator vmi;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
@@ -79,15 +80,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
else
addr = PAGE_ALIGN(addr);
- for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vmm || addr + len <= vm_start_gap(vmm))
- return addr;
+ vma_iter_init(&vmi, current->mm, addr);
+ for_each_vma(vmi, vmm) {
+ /* At this point: (addr < vmm->vm_end). */
+ if (addr + len <= vm_start_gap(vmm))
+ break;
+
addr = vmm->vm_end;
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr, pgoff);
}
+
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+
+ return addr;
}
#endif
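
The rewritten arch_get_unmapped_area above starts the VMA iterator at the colour-aligned hint, bumps the candidate address past every mapping it collides with, and only checks against TASK_SIZE once a gap is found or the list is exhausted. A simplified user-space model of that first-fit walk over a sorted list of ranges (it ignores the colour re-alignment and vm_start_gap()'s guard-gap handling):

#include <stddef.h>

struct range { unsigned long start, end; };	/* sorted, non-overlapping mappings */

/* First-fit search mirroring the for_each_vma() loop above: returns the
 * chosen address, or 0 as a stand-in for the kernel's -ENOMEM. */
static unsigned long find_gap(const struct range *vma, size_t n,
			      unsigned long addr, unsigned long len,
			      unsigned long task_size)
{
	for (size_t i = 0; i < n; i++) {
		if (vma[i].end <= addr)
			continue;			/* mapping lies below the hint */
		if (addr + len <= vma[i].start)
			break;				/* the request fits in front of it */
		addr = vma[i].end;			/* collision: retry just past it */
	}
	return (task_size - len < addr) ? 0 : addr;
}
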
diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile
index 6713c65a25e1..b265e4bc16c2 100644
--- a/arch/xtensa/kernel/syscalls/Makefile
+++ b/arch/xtensa/kernel/syscalls/Makefile
@@ -2,8 +2,7 @@
kapi := arch/$(SRCARCH)/include/generated/asm
uapi := arch/$(SRCARCH)/include/generated/uapi/asm
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
- $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
syscall := $(src)/syscall.tbl
syshdr := $(srctree)/scripts/syscallhdr.sh
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 3e3e1a506bed..52c94ab5c205 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -420,3 +420,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index e8ceb1528608..16b8a6273772 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -154,6 +154,7 @@ static void __init calibrate_ccount(void)
cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
if (cpu) {
clk = of_clk_get(cpu, 0);
+ of_node_put(cpu);
if (!IS_ERR(clk)) {
ccount_freq = clk_get_rate(clk);
return;
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 4b4dbeb2d612..0c25e035ff10 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -48,25 +48,20 @@
* Machine specific interrupt handlers
*/
-extern void kernel_exception(void);
-extern void user_exception(void);
-
-extern void fast_illegal_instruction_user(void);
-extern void fast_syscall_user(void);
-extern void fast_alloca(void);
-extern void fast_unaligned(void);
-extern void fast_second_level_miss(void);
-extern void fast_store_prohibited(void);
-extern void fast_coprocessor(void);
-
-extern void do_illegal_instruction (struct pt_regs*);
-extern void do_interrupt (struct pt_regs*);
-extern void do_nmi(struct pt_regs *);
-extern void do_unaligned_user (struct pt_regs*);
-extern void do_multihit (struct pt_regs*, unsigned long);
-extern void do_page_fault (struct pt_regs*, unsigned long);
-extern void do_debug (struct pt_regs*);
-extern void system_call (struct pt_regs*);
+static void do_illegal_instruction(struct pt_regs *regs);
+static void do_div0(struct pt_regs *regs);
+static void do_interrupt(struct pt_regs *regs);
+#if XTENSA_FAKE_NMI
+static void do_nmi(struct pt_regs *regs);
+#endif
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+static void do_unaligned_user(struct pt_regs *regs);
+#endif
+static void do_multihit(struct pt_regs *regs);
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs);
+#endif
+static void do_debug(struct pt_regs *regs);
/*
* The vector table must be preceded by a save area (which
@@ -78,7 +73,8 @@ extern void system_call (struct pt_regs*);
#define USER 0x02
#define COPROCESSOR(x) \
-{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }
typedef struct {
int cause;
@@ -100,7 +96,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
#endif
-/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0, do_div0 },
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER
@@ -110,21 +106,21 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
#ifdef CONFIG_MMU
-{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
-{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_PFAULT
{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
-{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
-/* EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
-{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
-{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
-{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
-/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
-{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
-#endif /* CONFIG_MMU */
+#endif
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
@@ -179,7 +175,7 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
* Unhandled Exceptions. Kill user task or panic if in kernel space.
*/
-void do_unhandled(struct pt_regs *regs, unsigned long exccause)
+void do_unhandled(struct pt_regs *regs)
{
__die_if_kernel("Caught unhandled exception - should not happen",
regs, SIGKILL);
@@ -189,7 +185,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
"(pid = %d, pc = %#010lx) - should not happen\n"
"\tEXCCAUSE is %ld\n",
current->comm, task_pid_nr(current), regs->pc,
- exccause);
+ regs->exccause);
force_sig(SIGILL);
}
@@ -197,7 +193,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
 * Multi-hit exception. This is fatal!
*/
-void do_multihit(struct pt_regs *regs, unsigned long exccause)
+static void do_multihit(struct pt_regs *regs)
{
die("Caught multihit exception", regs, SIGKILL);
}
@@ -206,8 +202,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
* IRQ handler.
*/
-extern void do_IRQ(int, struct pt_regs *);
-
#if XTENSA_FAKE_NMI
#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
@@ -240,14 +234,10 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
DEFINE_PER_CPU(unsigned long, nmi_count);
-void do_nmi(struct pt_regs *regs)
+static void do_nmi(struct pt_regs *regs)
{
- struct pt_regs *old_regs;
-
- if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
- trace_hardirqs_off();
+ struct pt_regs *old_regs = set_irq_regs(regs);
- old_regs = set_irq_regs(regs);
nmi_enter();
++*this_cpu_ptr(&nmi_count);
check_valid_nmi();
@@ -257,7 +247,7 @@ void do_nmi(struct pt_regs *regs)
}
#endif
-void do_interrupt(struct pt_regs *regs)
+static void do_interrupt(struct pt_regs *regs)
{
static const unsigned int_level_mask[] = {
0,
@@ -269,12 +259,9 @@ void do_interrupt(struct pt_regs *regs)
XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK,
};
- struct pt_regs *old_regs;
+ struct pt_regs *old_regs = set_irq_regs(regs);
unsigned unhandled = ~0u;
- trace_hardirqs_off();
-
- old_regs = set_irq_regs(regs);
irq_enter();
for (;;) {
@@ -306,13 +293,47 @@ void do_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
+static bool check_div0(struct pt_regs *regs)
+{
+ static const u8 pattern[] = {'D', 'I', 'V', '0'};
+ const u8 *p;
+ u8 buf[5];
+
+ if (user_mode(regs)) {
+ if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
+ return false;
+ p = buf;
+ } else {
+ p = (const u8 *)regs->pc + 2;
+ }
+
+ return memcmp(p, pattern, sizeof(pattern)) == 0 ||
+ memcmp(p + 1, pattern, sizeof(pattern)) == 0;
+}
+
/*
* Illegal instruction. Fatal if in kernel space.
*/
-void
-do_illegal_instruction(struct pt_regs *regs)
+static void do_illegal_instruction(struct pt_regs *regs)
{
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ /*
+ * When call0 application encounters an illegal instruction fast
+ * exception handler will attempt to set PS.WOE and retry failing
+ * instruction.
+ * If we get here we know that that instruction is also illegal
+ * with PS.WOE set, so it's not related to the windowed option
+ * hence PS.WOE may be cleared.
+ */
+ if (regs->pc == current_thread_info()->ps_woe_fix_addr)
+ regs->ps &= ~PS_WOE_MASK;
+#endif
+ if (check_div0(regs)) {
+ do_div0(regs);
+ return;
+ }
+
__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
/* If in user mode, send SIGILL signal to current process. */
@@ -322,6 +343,11 @@ do_illegal_instruction(struct pt_regs *regs)
force_sig(SIGILL);
}
+static void do_div0(struct pt_regs *regs)
+{
+ __die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
+ force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
+}
/*
* Handle unaligned memory accesses from user space. Kill task.
@@ -331,8 +357,7 @@ do_illegal_instruction(struct pt_regs *regs)
*/
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-void
-do_unaligned_user (struct pt_regs *regs)
+static void do_unaligned_user(struct pt_regs *regs)
{
__die_if_kernel("Unhandled unaligned exception in kernel",
regs, SIGKILL);
@@ -347,14 +372,20 @@ do_unaligned_user (struct pt_regs *regs)
}
#endif
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs)
+{
+ coprocessor_flush_release_all(current_thread_info());
+}
+#endif
+
/* Handle debug events.
* When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
* preemption disabled to avoid rescheduling and keep mapping of hardware
* breakpoint structures to debug registers intact, so that
* DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
*/
-void
-do_debug(struct pt_regs *regs)
+static void do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret = check_hw_breakpoint(regs);
@@ -381,7 +412,8 @@ do_debug(struct pt_regs *regs)
/* Set exception C handler - for temporary use when probing exceptions */
-void * __init trap_set_handler(int cause, void *handler)
+xtensa_exception_handler *
+__init trap_set_handler(int cause, xtensa_exception_handler *handler)
{
void *previous = per_cpu(exc_table, 0).default_handler[cause];
@@ -392,8 +424,7 @@ void * __init trap_set_handler(int cause, void *handler)
static void trap_init_excsave(void)
{
- unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
- __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
+ xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
}
static void trap_init_debug(void)
@@ -552,5 +583,5 @@ void __noreturn die(const char * str, struct pt_regs * regs, long err)
if (panic_on_oops)
panic("Fatal exception");
- do_exit(err);
+ make_task_dead(err);
}
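
With do_div0() wired into the exception table above, an integer division by zero in user space is reported as SIGFPE with si_code FPE_INTDIV (whether the configuration has the hardware divider or hits the "DIV0"-tagged ill in the software helpers) instead of a bare SIGILL. A small user-space program to observe that behaviour, illustrative rather than part of the patch:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_fpe(int sig, siginfo_t *si, void *ctx)
{
	/* FPE_INTDIV is what do_div0() passes to force_sig_fault(). */
	if (si->si_code == FPE_INTDIV)
		write(STDOUT_FILENO, "SIGFPE/FPE_INTDIV\n", 18);
	_exit(0);
}

int main(int argc, char **argv)
{
	struct sigaction sa = { .sa_sigaction = on_fpe, .sa_flags = SA_SIGINFO };
	volatile int zero = argc - 1;		/* 0 when run without arguments */

	sigaction(SIGFPE, &sa, NULL);
	printf("%d\n", 100 / zero);		/* traps here */
	return 1;
}
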
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 407ece204e7c..1073fe4a584d 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -88,7 +88,7 @@ ENDPROC(_UserExceptionVector)
* Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
*
* We get this exception when we were already in kernel space.
- * We decrement the current stack pointer (kernel) by PT_SIZE and
+ * We decrement the current stack pointer (kernel) by PT_KERNEL_SIZE and
* jump to the first-level handler associated with the exception cause.
*
* Note: we need to preserve space for the spill region.
@@ -100,7 +100,7 @@ ENTRY(_KernelExceptionVector)
xsr a3, excsave1 # save a3, and get dispatch table
wsr a2, depc # save a2
- addi a2, a1, -16-PT_SIZE # adjust stack pointer
+ addi a2, a1, -16 - PT_KERNEL_SIZE # adjust stack pointer
s32i a0, a2, PT_AREG0 # save a0 to ESF
rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index eee270a039a4..965a3952c47b 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -207,7 +207,7 @@ SECTIONS
RELOCATE_ENTRY(_xip_data, .data);
RELOCATE_ENTRY(_xip_init_data, .init.data);
#endif
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
RELOCATE_ENTRY(_SecondaryResetVector_text,
.SecondaryResetVector.text);
#endif
@@ -303,7 +303,7 @@ SECTIONS
#define LAST .DoubleExceptionVector.text
#endif
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
SECTION_VECTOR4 (_SecondaryResetVector_text,
.SecondaryResetVector.text,
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index d79edbb98d2a..b0bc8897c924 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -59,32 +59,18 @@ extern long long __ashldi3(long long, int);
extern long long __lshrdi3(long long, int);
extern int __divsi3(int, int);
extern int __modsi3(int, int);
-extern long long __muldi3(long long, long long);
extern int __mulsi3(int, int);
extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
-extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
-extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
-extern int __ucmpdi2(int, int);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__ucmpdi2);
-
-void __xtensa_libgcc_window_spill(void)
-{
- BUG();
-}
-EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
{
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
index 9437ca51f18a..d4e9c397e3fd 100644
--- a/arch/xtensa/lib/Makefile
+++ b/arch/xtensa/lib/Makefile
@@ -4,5 +4,9 @@
#
lib-y += memcopy.o memset.o checksum.o \
+ ashldi3.o ashrdi3.o lshrdi3.o \
+ divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
usercopy.o strncpy_user.o strnlen_user.o
lib-$(CONFIG_PCI) += pci-auto.o
+lib-$(CONFIG_KCSAN) += kcsan-stubs.o
+KCSAN_SANITIZE_kcsan-stubs.o := n
diff --git a/arch/xtensa/lib/ashldi3.S b/arch/xtensa/lib/ashldi3.S
new file mode 100644
index 000000000000..67fb0da9e432
--- /dev/null
+++ b/arch/xtensa/lib/ashldi3.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+#ifdef __XTENSA_EB__
+#define uh a2
+#define ul a3
+#else
+#define uh a3
+#define ul a2
+#endif /* __XTENSA_EB__ */
+
+ENTRY(__ashldi3)
+
+ abi_entry_default
+ ssl a4
+ bgei a4, 32, .Llow_only
+ src uh, uh, ul
+ sll ul, ul
+ abi_ret_default
+
+.Llow_only:
+ sll uh, ul
+ movi ul, 0
+ abi_ret_default
+
+ENDPROC(__ashldi3)
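
__ashldi3 above splits the 64-bit shift into two 32-bit halves: for counts below 32 the high word is a funnel shift of both halves (src) while the low word is shifted on its own, and for counts of 32 and above the low word simply becomes the shifted high word. __lshrdi3 and __ashrdi3 later in the series mirror this for right shifts. A portable C model of the same split:

#include <stdint.h>

/* Model of __ashldi3 on a 32-bit machine: shift a 64-bit value left by
 * 0..63 using only 32-bit operations.  uh/ul are the high/low halves. */
static uint64_t ashl64(uint32_t uh, uint32_t ul, unsigned n)
{
	if (n == 0)				/* avoid the undefined 32-bit shift */
		return ((uint64_t)uh << 32) | ul;
	if (n < 32) {
		uh = (uh << n) | (ul >> (32 - n));	/* "src uh, uh, ul" */
		ul <<= n;				/* "sll ul, ul"     */
	} else {
		uh = ul << (n - 32);			/* ".Llow_only" path */
		ul = 0;
	}
	return ((uint64_t)uh << 32) | ul;
}
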
diff --git a/arch/xtensa/lib/ashrdi3.S b/arch/xtensa/lib/ashrdi3.S
new file mode 100644
index 000000000000..cbf052c512cc
--- /dev/null
+++ b/arch/xtensa/lib/ashrdi3.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+#ifdef __XTENSA_EB__
+#define uh a2
+#define ul a3
+#else
+#define uh a3
+#define ul a2
+#endif /* __XTENSA_EB__ */
+
+ENTRY(__ashrdi3)
+
+ abi_entry_default
+ ssr a4
+ bgei a4, 32, .Lhigh_only
+ src ul, uh, ul
+ sra uh, uh
+ abi_ret_default
+
+.Lhigh_only:
+ sra ul, uh
+ srai uh, uh, 31
+ abi_ret_default
+
+ENDPROC(__ashrdi3)
diff --git a/arch/xtensa/lib/divsi3.S b/arch/xtensa/lib/divsi3.S
new file mode 100644
index 000000000000..b044b4744a8b
--- /dev/null
+++ b/arch/xtensa/lib/divsi3.S
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__divsi3)
+
+ abi_entry_default
+#if XCHAL_HAVE_DIV32
+ quos a2, a2, a3
+#else
+ xor a7, a2, a3 /* sign = dividend ^ divisor */
+ do_abs a6, a2, a4 /* udividend = abs (dividend) */
+ do_abs a3, a3, a4 /* udivisor = abs (divisor) */
+ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
+ do_nsau a5, a6, a2, a8 /* udividend_shift = nsau (udividend) */
+ do_nsau a4, a3, a2, a8 /* udivisor_shift = nsau (udivisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
+ ssl a4
+ sll a3, a3 /* udivisor <<= count */
+ movi a2, 0 /* quotient = 0 */
+
+ /* test-subtract-and-shift loop; one quotient bit on each iteration */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a6, a3, .Lzerobit
+ sub a6, a6, a3
+ addi a2, a2, 1
+.Lzerobit:
+ slli a2, a2, 1
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
+ bltu a6, a3, .Lreturn
+ addi a2, a2, 1 /* increment if udividend >= udivisor */
+.Lreturn:
+ neg a5, a2
+ movltz a2, a5, a7 /* return (sign < 0) ? -quotient : quotient */
+ abi_ret_default
+
+.Lle_one:
+ beqz a3, .Lerror
+ neg a2, a6 /* if udivisor == 1, then return... */
+ movgez a2, a6, a7 /* (sign < 0) ? -udividend : udividend */
+ abi_ret_default
+
+.Lspecial:
+ bltu a6, a3, .Lreturn0 /* if dividend < divisor, return 0 */
+ movi a2, 1
+ movi a4, -1
+ movltz a2, a4, a7 /* else return (sign < 0) ? -1 : 1 */
+ abi_ret_default
+
+.Lerror:
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ abi_ret_default
+
+ENDPROC(__divsi3)
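
Without XCHAL_HAVE_DIV32, __divsi3 above takes absolute values, aligns the divisor with the dividend's leading bit (the nsau counts), and then produces one quotient bit per iteration of a test-subtract-and-shift loop; on division by zero it executes ill followed by the ASCII marker "DIV0", which check_div0() in traps.c uses to turn the resulting illegal-instruction trap into SIGFPE/FPE_INTDIV. A plain C model of the unsigned core of that loop:

#include <stdint.h>

/* Model of the shift-and-subtract core shared by __divsi3/__udivsi3.
 * Division by zero is reported as 0 here; the real helper traps with
 * "ill" + "DIV0" instead of returning. */
static uint32_t udiv32(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient = 0;
	int shift;

	if (divisor == 0)
		return 0;
	/* Align the divisor's leading bit with the dividend's
	 * (nsau is a count of leading zeros). */
	shift = __builtin_clz(divisor) - __builtin_clz(dividend | 1);
	if (shift < 0)
		return 0;			/* dividend < divisor */

	divisor <<= shift;
	for (int i = 0; i <= shift; i++) {	/* one quotient bit per pass */
		quotient <<= 1;
		if (dividend >= divisor) {	/* test-subtract */
			dividend -= divisor;
			quotient |= 1;
		}
		divisor >>= 1;
	}
	return quotient;
}
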
diff --git a/arch/xtensa/lib/kcsan-stubs.c b/arch/xtensa/lib/kcsan-stubs.c
new file mode 100644
index 000000000000..2b08faa62b86
--- /dev/null
+++ b/arch/xtensa/lib/kcsan-stubs.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+void __atomic_store_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_load_8(const volatile void *p, int i)
+{
+ BUG();
+}
+
+u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
+
+u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
+{
+ BUG();
+}
diff --git a/arch/xtensa/lib/lshrdi3.S b/arch/xtensa/lib/lshrdi3.S
new file mode 100644
index 000000000000..129ef8d1725b
--- /dev/null
+++ b/arch/xtensa/lib/lshrdi3.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+#ifdef __XTENSA_EB__
+#define uh a2
+#define ul a3
+#else
+#define uh a3
+#define ul a2
+#endif /* __XTENSA_EB__ */
+
+ENTRY(__lshrdi3)
+
+ abi_entry_default
+ ssr a4
+ bgei a4, 32, .Lhigh_only
+ src ul, uh, ul
+ srl uh, uh
+ abi_ret_default
+
+.Lhigh_only:
+ srl ul, uh
+ movi uh, 0
+ abi_ret_default
+
+ENDPROC(__lshrdi3)
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index 582d817979ed..b20d206bcb71 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -402,13 +402,13 @@ WEAK(memmove)
*/
# copy 16 bytes per iteration for word-aligned dst and word-aligned src
#if XCHAL_HAVE_LOOPS
- loopnez a7, .backLoop1done
+ loopnez a7, .LbackLoop1done
#else /* !XCHAL_HAVE_LOOPS */
- beqz a7, .backLoop1done
+ beqz a7, .LbackLoop1done
slli a8, a7, 4
sub a8, a3, a8 # a8 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop1:
+.LbackLoop1:
addi a3, a3, -16
l32i a7, a3, 12
l32i a6, a3, 8
@@ -420,9 +420,9 @@ WEAK(memmove)
s32i a7, a5, 4
s32i a6, a5, 0
#if !XCHAL_HAVE_LOOPS
- bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start
+ bne a3, a8, .LbackLoop1 # continue loop if a3:src != a8:src_start
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop1done:
+.LbackLoop1done:
bbci.l a4, 3, .Lback2
# copy 8 bytes
addi a3, a3, -8
@@ -479,13 +479,13 @@ WEAK(memmove)
#endif
l32i a6, a3, 0 # load first word
#if XCHAL_HAVE_LOOPS
- loopnez a7, .backLoop2done
+ loopnez a7, .LbackLoop2done
#else /* !XCHAL_HAVE_LOOPS */
- beqz a7, .backLoop2done
+ beqz a7, .LbackLoop2done
slli a10, a7, 4
sub a10, a3, a10 # a10 = start of first 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop2:
+.LbackLoop2:
addi a3, a3, -16
l32i a7, a3, 12
l32i a8, a3, 8
@@ -501,9 +501,9 @@ WEAK(memmove)
__src_b a9, a6, a9
s32i a9, a5, 0
#if !XCHAL_HAVE_LOOPS
- bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
+ bne a3, a10, .LbackLoop2 # continue loop if a3:src != a10:src_start
#endif /* !XCHAL_HAVE_LOOPS */
-.backLoop2done:
+.LbackLoop2done:
bbci.l a4, 3, .Lback12
# copy 8 bytes
addi a3, a3, -8
diff --git a/arch/xtensa/lib/modsi3.S b/arch/xtensa/lib/modsi3.S
new file mode 100644
index 000000000000..d00e77181e20
--- /dev/null
+++ b/arch/xtensa/lib/modsi3.S
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__modsi3)
+
+ abi_entry_default
+#if XCHAL_HAVE_DIV32
+ rems a2, a2, a3
+#else
+ mov a7, a2 /* save original (signed) dividend */
+ do_abs a2, a2, a4 /* udividend = abs (dividend) */
+ do_abs a3, a3, a4 /* udivisor = abs (divisor) */
+ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
+ do_nsau a5, a2, a6, a8 /* udividend_shift = nsau (udividend) */
+ do_nsau a4, a3, a6, a8 /* udivisor_shift = nsau (udivisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
+ ssl a4
+ sll a3, a3 /* udivisor <<= count */
+
+ /* test-subtract-and-shift loop */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a2, a3, .Lzerobit
+ sub a2, a2, a3
+.Lzerobit:
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
+.Lspecial:
+ bltu a2, a3, .Lreturn
+ sub a2, a2, a3 /* subtract again if udividend >= udivisor */
+.Lreturn:
+ bgez a7, .Lpositive
+ neg a2, a2 /* if (dividend < 0), return -udividend */
+.Lpositive:
+ abi_ret_default
+
+.Lle_one:
+ bnez a3, .Lreturn0
+
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ abi_ret_default
+
+ENDPROC(__modsi3)
+
+#if !XCHAL_HAVE_NSA
+ .section .rodata
+ .align 4
+ .global __nsau_data
+ .type __nsau_data, @object
+__nsau_data:
+ .byte 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4
+ .byte 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+ .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .size __nsau_data, . - __nsau_data
+#endif /* !XCHAL_HAVE_NSA */
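
The __nsau_data table backs the do_nsau macro (asm/asmmacro.h) on cores without the NSA/NSAU instructions: each entry is the number of leading zero bits in an 8-bit value, and the macro narrows the argument by 16 bits, then 8 bits, before the final byte lookup. Roughly, in C (illustrative names):

#include <stdint.h>

extern const uint8_t nsau_data[256];	/* 8, 7, 6, 6, 5, 5, 5, 5, 4, ... as above */

static unsigned int nsau_sketch(uint32_t val)	/* leading zeros; nsau(0) == 32 */
{
	unsigned int cnt = 0;

	if ((val >> 16) == 0) {		/* upper half empty: 16 leading zeros */
		cnt = 16;
		val <<= 16;
	}
	if ((val >> 24) == 0) {		/* next byte empty: 8 more */
		cnt += 8;
		val <<= 8;
	}
	return cnt + nsau_data[val >> 24];	/* finish with the byte table */
}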
diff --git a/arch/xtensa/lib/mulsi3.S b/arch/xtensa/lib/mulsi3.S
new file mode 100644
index 000000000000..91a9d7c62f96
--- /dev/null
+++ b/arch/xtensa/lib/mulsi3.S
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ .macro do_addx2 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+ addx2 \dst, \as, \at
+#else
+ slli \tmp, \as, 1
+ add \dst, \tmp, \at
+#endif
+ .endm
+
+ .macro do_addx4 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+ addx4 \dst, \as, \at
+#else
+ slli \tmp, \as, 2
+ add \dst, \tmp, \at
+#endif
+ .endm
+
+ .macro do_addx8 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+ addx8 \dst, \as, \at
+#else
+ slli \tmp, \as, 3
+ add \dst, \tmp, \at
+#endif
+ .endm
+
+ENTRY(__mulsi3)
+
+ abi_entry_default
+
+#if XCHAL_HAVE_MUL32
+ mull a2, a2, a3
+
+#elif XCHAL_HAVE_MUL16
+ or a4, a2, a3
+ srai a4, a4, 16
+ bnez a4, .LMUL16
+ mul16u a2, a2, a3
+ abi_ret_default
+.LMUL16:
+ srai a4, a2, 16
+ srai a5, a3, 16
+ mul16u a7, a4, a3
+ mul16u a6, a5, a2
+ mul16u a4, a2, a3
+ add a7, a7, a6
+ slli a7, a7, 16
+ add a2, a7, a4
+
+#elif XCHAL_HAVE_MAC16
+ mul.aa.hl a2, a3
+ mula.aa.lh a2, a3
+ rsr a5, ACCLO
+ umul.aa.ll a2, a3
+ rsr a4, ACCLO
+ slli a5, a5, 16
+ add a2, a4, a5
+
+#else /* !MUL32 && !MUL16 && !MAC16 */
+
+ /* Multiply one bit at a time, but unroll the loop 4x to better
+ exploit the addx instructions and avoid overhead.
+ Peel the first iteration to save a cycle on init. */
+
+ /* Avoid negative numbers. */
+ xor a5, a2, a3 /* Top bit is 1 if one input is negative. */
+ do_abs a3, a3, a6
+ do_abs a2, a2, a6
+
+ /* Swap so the second argument is smaller. */
+ sub a7, a2, a3
+ mov a4, a3
+ movgez a4, a2, a7 /* a4 = max (a2, a3) */
+ movltz a3, a2, a7 /* a3 = min (a2, a3) */
+
+ movi a2, 0
+ extui a6, a3, 0, 1
+ movnez a2, a4, a6
+
+ do_addx2 a7, a4, a2, a7
+ extui a6, a3, 1, 1
+ movnez a2, a7, a6
+
+ do_addx4 a7, a4, a2, a7
+ extui a6, a3, 2, 1
+ movnez a2, a7, a6
+
+ do_addx8 a7, a4, a2, a7
+ extui a6, a3, 3, 1
+ movnez a2, a7, a6
+
+ bgeui a3, 16, .Lmult_main_loop
+ neg a3, a2
+ movltz a2, a3, a5
+ abi_ret_default
+
+ .align 4
+.Lmult_main_loop:
+ srli a3, a3, 4
+ slli a4, a4, 4
+
+ add a7, a4, a2
+ extui a6, a3, 0, 1
+ movnez a2, a7, a6
+
+ do_addx2 a7, a4, a2, a7
+ extui a6, a3, 1, 1
+ movnez a2, a7, a6
+
+ do_addx4 a7, a4, a2, a7
+ extui a6, a3, 2, 1
+ movnez a2, a7, a6
+
+ do_addx8 a7, a4, a2, a7
+ extui a6, a3, 3, 1
+ movnez a2, a7, a6
+
+ bgeui a3, 16, .Lmult_main_loop
+
+ neg a3, a2
+ movltz a2, a3, a5
+
+#endif /* !MUL32 && !MUL16 && !MAC16 */
+
+ abi_ret_default
+
+ENDPROC(__mulsi3)
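
With none of MUL32/MUL16/MAC16 available, __mulsi3 falls back to a shift-and-add multiplier that consumes four multiplier bits per iteration, using addx2/addx4/addx8 (or their emulation macros above) to fold in the 2x/4x/8x terms. A rough C equivalent (hypothetical names):

#include <stdint.h>

static int32_t mulsi3_sketch(int32_t x, int32_t y)
{
	int negative = (x ^ y) < 0;		/* sign of the result */
	uint32_t a = x < 0 ? 0u - (uint32_t)x : (uint32_t)x;
	uint32_t b = y < 0 ? 0u - (uint32_t)y : (uint32_t)y;
	uint32_t big   = a > b ? a : b;		/* larger operand: the addend */
	uint32_t small = a > b ? b : a;		/* smaller operand: the multiplier */
	uint32_t acc = 0;

	do {
		if (small & 1)
			acc += big;		/* plain add */
		if (small & 2)
			acc += big << 1;	/* addx2 */
		if (small & 4)
			acc += big << 2;	/* addx4 */
		if (small & 8)
			acc += big << 3;	/* addx8 */
		small >>= 4;
		big <<= 4;
	} while (small);

	return (int32_t)(negative ? 0u - acc : acc);
}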
diff --git a/arch/xtensa/lib/udivsi3.S b/arch/xtensa/lib/udivsi3.S
new file mode 100644
index 000000000000..d2477e0786cf
--- /dev/null
+++ b/arch/xtensa/lib/udivsi3.S
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__udivsi3)
+
+ abi_entry_default
+#if XCHAL_HAVE_DIV32
+ quou a2, a2, a3
+#else
+ bltui a3, 2, .Lle_one /* check if the divisor <= 1 */
+
+ mov a6, a2 /* keep dividend in a6 */
+ do_nsau a5, a6, a2, a7 /* dividend_shift = nsau (dividend) */
+ do_nsau a4, a3, a2, a7 /* divisor_shift = nsau (divisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
+ ssl a4
+ sll a3, a3 /* divisor <<= count */
+ movi a2, 0 /* quotient = 0 */
+
+ /* test-subtract-and-shift loop; one quotient bit on each iteration */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a6, a3, .Lzerobit
+ sub a6, a6, a3
+ addi a2, a2, 1
+.Lzerobit:
+ slli a2, a2, 1
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
+ bltu a6, a3, .Lreturn
+ addi a2, a2, 1 /* increment quotient if dividend >= divisor */
+.Lreturn:
+ abi_ret_default
+
+.Lle_one:
+ beqz a3, .Lerror /* if divisor == 1, return the dividend */
+ abi_ret_default
+
+.Lspecial:
+ /* return dividend >= divisor */
+ bltu a6, a3, .Lreturn0
+ movi a2, 1
+ abi_ret_default
+
+.Lerror:
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ abi_ret_default
+
+ENDPROC(__udivsi3)
diff --git a/arch/xtensa/lib/umodsi3.S b/arch/xtensa/lib/umodsi3.S
new file mode 100644
index 000000000000..5f031bfa0354
--- /dev/null
+++ b/arch/xtensa/lib/umodsi3.S
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__umodsi3)
+
+ abi_entry_default
+#if XCHAL_HAVE_DIV32
+ remu a2, a2, a3
+#else
+ bltui a3, 2, .Lle_one /* check if the divisor is <= 1 */
+
+ do_nsau a5, a2, a6, a7 /* dividend_shift = nsau (dividend) */
+ do_nsau a4, a3, a6, a7 /* divisor_shift = nsau (divisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
+ ssl a4
+ sll a3, a3 /* divisor <<= count */
+
+ /* test-subtract-and-shift loop */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a2, a3, .Lzerobit
+ sub a2, a2, a3
+.Lzerobit:
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
+.Lspecial:
+ bltu a2, a3, .Lreturn
+ sub a2, a2, a3 /* subtract once more if dividend >= divisor */
+.Lreturn:
+ abi_ret_default
+
+.Lle_one:
+ bnez a3, .Lreturn0
+
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ abi_ret_default
+
+ENDPROC(__umodsi3)
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index f7fb08ae768f..44153a335951 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -4,7 +4,8 @@
#
obj-y := init.o misc.o
-obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
+obj-$(CONFIG_PFAULT) += fault.o
+obj-$(CONFIG_MMU) += cache.o ioremap.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index fd6a70635962..8c781b05c0bd 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -21,9 +21,61 @@
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
-DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
+static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
+{
+#ifdef CONFIG_MMU
+ /* Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ struct mm_struct *act_mm = current->active_mm;
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ if (act_mm == NULL)
+ goto bad_page_fault;
+
+ pgd = act_mm->pgd + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto bad_page_fault;
+
+ pgd_val(*pgd) = pgd_val(*pgd_k);
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+ goto bad_page_fault;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (!pud_present(*pud) || !pud_present(*pud_k))
+ goto bad_page_fault;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
+ goto bad_page_fault;
+
+ pmd_val(*pmd) = pmd_val(*pmd_k);
+ pte_k = pte_offset_kernel(pmd_k, address);
+
+ if (!pte_present(*pte_k))
+ goto bad_page_fault;
+ return;
+
+bad_page_fault:
+ bad_page_fault(regs, address, SIGKILL);
+#else
+ WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
+#endif
+}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -49,8 +101,10 @@ void do_page_fault(struct pt_regs *regs)
/* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*/
- if (address >= TASK_SIZE && !user_mode(regs))
- goto vmalloc_fault;
+ if (address >= TASK_SIZE && !user_mode(regs)) {
+ vmalloc_fault(regs, address);
+ return;
+ }
/* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -114,10 +168,14 @@ good_area:
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
- goto bad_page_fault;
+ bad_page_fault(regs, address, SIGKILL);
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -127,17 +185,16 @@ good_area:
goto do_sigbus;
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
@@ -182,56 +239,6 @@ do_sigbus:
if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS);
return;
-
-vmalloc_fault:
- {
- /* Synchronize this task's top level page-table
- * with the 'reference' page table.
- */
- struct mm_struct *act_mm = current->active_mm;
- int index = pgd_index(address);
- pgd_t *pgd, *pgd_k;
- p4d_t *p4d, *p4d_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- if (act_mm == NULL)
- goto bad_page_fault;
-
- pgd = act_mm->pgd + index;
- pgd_k = init_mm.pgd + index;
-
- if (!pgd_present(*pgd_k))
- goto bad_page_fault;
-
- pgd_val(*pgd) = pgd_val(*pgd_k);
-
- p4d = p4d_offset(pgd, address);
- p4d_k = p4d_offset(pgd_k, address);
- if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
- goto bad_page_fault;
-
- pud = pud_offset(p4d, address);
- pud_k = pud_offset(p4d_k, address);
- if (!pud_present(*pud) || !pud_present(*pud_k))
- goto bad_page_fault;
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
- goto bad_page_fault;
-
- pmd_val(*pmd) = pmd_val(*pmd_k);
- pte_k = pte_offset_kernel(pmd_k, address);
-
- if (!pte_present(*pte_k))
- goto bad_page_fault;
- return;
- }
-bad_page_fault:
- bad_page_fault(regs, address, SIGKILL);
- return;
}
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6a32b2cf2718..b2587a1a7c46 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -216,3 +216,25 @@ static int __init parse_memmap_opt(char *str)
return 0;
}
early_param("memmap", parse_memmap_opt);
+
+#ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+#endif
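
DECLARE_VM_GET_PAGE_PROT provides the generic vm_get_page_prot() implementation, which simply indexes the per-architecture protection_map[] defined above with the access-related vm_flags bits; roughly:

/* Approximate expansion of DECLARE_VM_GET_PAGE_PROT (include/linux/pgtable.h). */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);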
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 38acda4f04e8..92e158c69c10 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -18,6 +18,8 @@
#include <asm/initialize_mmu.h>
#include <asm/io.h>
+DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
+
#if defined(CONFIG_HIGHMEM)
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index f436cf2efd8b..27a477dae232 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -162,6 +162,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
}
}
+void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ local_flush_tlb_page(vma, address);
+}
+
#ifdef CONFIG_DEBUG_TLB_SANITY
static unsigned get_pte_for_vaddr(unsigned vaddr)
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 81d7c7e8f7e9..10b79d3c74e0 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -36,24 +36,19 @@ static void rs_poll(struct timer_list *);
static struct tty_driver *serial_driver;
static struct tty_port serial_port;
static DEFINE_TIMER(serial_timer, rs_poll);
-static DEFINE_SPINLOCK(timer_lock);
static int rs_open(struct tty_struct *tty, struct file * filp)
{
- spin_lock_bh(&timer_lock);
if (tty->count == 1)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- spin_unlock_bh(&timer_lock);
return 0;
}
static void rs_close(struct tty_struct *tty, struct file * filp)
{
- spin_lock_bh(&timer_lock);
if (tty->count == 1)
del_timer_sync(&serial_timer);
- spin_unlock_bh(&timer_lock);
}
@@ -73,8 +68,6 @@ static void rs_poll(struct timer_list *unused)
int rd = 1;
unsigned char c;
- spin_lock(&timer_lock);
-
while (simc_poll(0)) {
rd = simc_read(0, &c, 1);
if (rd <= 0)
@@ -87,7 +80,6 @@ static void rs_poll(struct timer_list *unused)
tty_flip_buffer_push(port);
if (rd)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- spin_unlock(&timer_lock);
}
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 962e5e145209..9ac46ab3a296 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -37,13 +37,6 @@
#define ETH_HEADER_OTHER 14
#define ISS_NET_TIMER_VALUE (HZ / 10)
-
-static DEFINE_SPINLOCK(opened_lock);
-static LIST_HEAD(opened);
-
-static DEFINE_SPINLOCK(devices_lock);
-static LIST_HEAD(devices);
-
/* ------------------------------------------------------------------------- */
/* We currently only support the TUNTAP transport protocol. */
@@ -59,17 +52,25 @@ struct tuntap_info {
/* ------------------------------------------------------------------------- */
+struct iss_net_private;
+
+struct iss_net_ops {
+ int (*open)(struct iss_net_private *lp);
+ void (*close)(struct iss_net_private *lp);
+ int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
+ int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
+ unsigned short (*protocol)(struct sk_buff *skb);
+ int (*poll)(struct iss_net_private *lp);
+};
+
/* This structure contains our private information for the driver. */
struct iss_net_private {
- struct list_head device_list;
- struct list_head opened_list;
-
spinlock_t lock;
struct net_device *dev;
struct platform_device pdev;
struct timer_list tl;
- struct net_device_stats stats;
+ struct rtnl_link_stats64 stats;
struct timer_list timer;
unsigned int timer_val;
@@ -82,12 +83,7 @@ struct iss_net_private {
struct tuntap_info tuntap;
} info;
- int (*open)(struct iss_net_private *lp);
- void (*close)(struct iss_net_private *lp);
- int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
- int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
- unsigned short (*protocol)(struct sk_buff *skb);
- int (*poll)(struct iss_net_private *lp);
+ const struct iss_net_ops *net_ops;
} tp;
};
@@ -174,7 +170,7 @@ static int tuntap_open(struct iss_net_private *lp)
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
- strlcpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name));
+ strscpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name));
err = simc_ioctl(fd, TUNSETIFF, &ifr);
if (err < 0) {
@@ -215,6 +211,15 @@ static int tuntap_poll(struct iss_net_private *lp)
return simc_poll(lp->tp.info.tuntap.fd);
}
+static const struct iss_net_ops tuntap_ops = {
+ .open = tuntap_open,
+ .close = tuntap_close,
+ .read = tuntap_read,
+ .write = tuntap_write,
+ .protocol = tuntap_protocol,
+ .poll = tuntap_poll,
+};
+
/*
* ethX=tuntap,[mac address],device name
*/
@@ -249,7 +254,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
return 0;
}
- strlcpy(lp->tp.info.tuntap.dev_name, dev_name,
+ strscpy(lp->tp.info.tuntap.dev_name, dev_name,
sizeof(lp->tp.info.tuntap.dev_name));
setup_etheraddr(dev, mac_str);
@@ -257,13 +262,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
lp->mtu = TRANSPORT_TUNTAP_MTU;
lp->tp.info.tuntap.fd = -1;
-
- lp->tp.open = tuntap_open;
- lp->tp.close = tuntap_close;
- lp->tp.read = tuntap_read;
- lp->tp.write = tuntap_write;
- lp->tp.protocol = tuntap_protocol;
- lp->tp.poll = tuntap_poll;
+ lp->tp.net_ops = &tuntap_ops;
return 1;
}
@@ -278,14 +277,16 @@ static int iss_net_rx(struct net_device *dev)
/* Check if there is any new data. */
- if (lp->tp.poll(lp) == 0)
+ if (lp->tp.net_ops->poll(lp) == 0)
return 0;
/* Try to allocate memory, if it fails, try again next round. */
skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER);
if (skb == NULL) {
+ spin_lock_bh(&lp->lock);
lp->stats.rx_dropped++;
+ spin_unlock_bh(&lp->lock);
return 0;
}
@@ -295,54 +296,42 @@ static int iss_net_rx(struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- pkt_len = lp->tp.read(lp, &skb);
+ pkt_len = lp->tp.net_ops->read(lp, &skb);
skb_put(skb, pkt_len);
if (pkt_len > 0) {
skb_trim(skb, pkt_len);
- skb->protocol = lp->tp.protocol(skb);
+ skb->protocol = lp->tp.net_ops->protocol(skb);
+ spin_lock_bh(&lp->lock);
lp->stats.rx_bytes += skb->len;
lp->stats.rx_packets++;
- netif_rx_ni(skb);
+ spin_unlock_bh(&lp->lock);
+ netif_rx(skb);
return pkt_len;
}
kfree_skb(skb);
return pkt_len;
}
-static int iss_net_poll(void)
+static int iss_net_poll(struct iss_net_private *lp)
{
- struct list_head *ele;
int err, ret = 0;
- spin_lock(&opened_lock);
-
- list_for_each(ele, &opened) {
- struct iss_net_private *lp;
-
- lp = list_entry(ele, struct iss_net_private, opened_list);
-
- if (!netif_running(lp->dev))
- break;
-
- spin_lock(&lp->lock);
-
- while ((err = iss_net_rx(lp->dev)) > 0)
- ret++;
+ if (!netif_running(lp->dev))
+ return 0;
- spin_unlock(&lp->lock);
+ while ((err = iss_net_rx(lp->dev)) > 0)
+ ret++;
- if (err < 0) {
- pr_err("Device '%s' read returned %d, shutting it down\n",
- lp->dev->name, err);
- dev_close(lp->dev);
- } else {
- /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
- }
+ if (err < 0) {
+ pr_err("Device '%s' read returned %d, shutting it down\n",
+ lp->dev->name, err);
+ dev_close(lp->dev);
+ } else {
+ /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
}
- spin_unlock(&opened_lock);
return ret;
}
@@ -351,10 +340,8 @@ static void iss_net_timer(struct timer_list *t)
{
struct iss_net_private *lp = from_timer(lp, t, timer);
- iss_net_poll();
- spin_lock(&lp->lock);
+ iss_net_poll(lp);
mod_timer(&lp->timer, jiffies + lp->timer_val);
- spin_unlock(&lp->lock);
}
@@ -363,11 +350,9 @@ static int iss_net_open(struct net_device *dev)
struct iss_net_private *lp = netdev_priv(dev);
int err;
- spin_lock_bh(&lp->lock);
-
- err = lp->tp.open(lp);
+ err = lp->tp.net_ops->open(lp);
if (err < 0)
- goto out;
+ return err;
netif_start_queue(dev);
@@ -378,36 +363,21 @@ static int iss_net_open(struct net_device *dev)
while ((err = iss_net_rx(dev)) > 0)
;
- spin_unlock_bh(&lp->lock);
- spin_lock_bh(&opened_lock);
- list_add(&lp->opened_list, &opened);
- spin_unlock_bh(&opened_lock);
- spin_lock_bh(&lp->lock);
-
timer_setup(&lp->timer, iss_net_timer, 0);
lp->timer_val = ISS_NET_TIMER_VALUE;
mod_timer(&lp->timer, jiffies + lp->timer_val);
-out:
- spin_unlock_bh(&lp->lock);
return err;
}
static int iss_net_close(struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
- netif_stop_queue(dev);
- spin_lock_bh(&lp->lock);
-
- spin_lock(&opened_lock);
- list_del(&opened);
- spin_unlock(&opened_lock);
+ netif_stop_queue(dev);
del_timer_sync(&lp->timer);
+ lp->tp.net_ops->close(lp);
- lp->tp.close(lp);
-
- spin_unlock_bh(&lp->lock);
return 0;
}
@@ -417,13 +387,14 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len;
netif_stop_queue(dev);
- spin_lock_bh(&lp->lock);
- len = lp->tp.write(lp, &skb);
+ len = lp->tp.net_ops->write(lp, &skb);
if (len == skb->len) {
+ spin_lock_bh(&lp->lock);
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
+ spin_unlock_bh(&lp->lock);
netif_trans_update(dev);
netif_start_queue(dev);
@@ -432,24 +403,29 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else if (len == 0) {
netif_start_queue(dev);
+ spin_lock_bh(&lp->lock);
lp->stats.tx_dropped++;
+ spin_unlock_bh(&lp->lock);
} else {
netif_start_queue(dev);
pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
}
- spin_unlock_bh(&lp->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
-static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
+static void iss_net_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct iss_net_private *lp = netdev_priv(dev);
- return &lp->stats;
+
+ spin_lock_bh(&lp->lock);
+ *stats = lp->stats;
+ spin_unlock_bh(&lp->lock);
}
static void iss_net_set_multicast_list(struct net_device *dev)
@@ -460,19 +436,6 @@ static void iss_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
}
-static int iss_net_set_mac(struct net_device *dev, void *addr)
-{
- struct iss_net_private *lp = netdev_priv(dev);
- struct sockaddr *hwaddr = addr;
-
- if (!is_valid_ether_addr(hwaddr->sa_data))
- return -EADDRNOTAVAIL;
- spin_lock_bh(&lp->lock);
- eth_hw_addr_set(dev, hwaddr->sa_data);
- spin_unlock_bh(&lp->lock);
- return 0;
-}
-
static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
{
return -EINVAL;
@@ -494,33 +457,39 @@ static int driver_registered;
static const struct net_device_ops iss_netdev_ops = {
.ndo_open = iss_net_open,
.ndo_stop = iss_net_close,
- .ndo_get_stats = iss_net_get_stats,
+ .ndo_get_stats64 = iss_net_get_stats64,
.ndo_start_xmit = iss_net_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = iss_net_change_mtu,
- .ndo_set_mac_address = iss_net_set_mac,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = iss_net_tx_timeout,
.ndo_set_rx_mode = iss_net_set_multicast_list,
};
-static int iss_net_configure(int index, char *init)
+static void iss_net_pdev_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iss_net_private *lp =
+ container_of(pdev, struct iss_net_private, pdev);
+
+ free_netdev(lp->dev);
+}
+
+static void iss_net_configure(int index, char *init)
{
struct net_device *dev;
struct iss_net_private *lp;
- int err;
dev = alloc_etherdev(sizeof(*lp));
if (dev == NULL) {
pr_err("eth_configure: failed to allocate device\n");
- return 1;
+ return;
}
/* Initialize private element. */
lp = netdev_priv(dev);
*lp = (struct iss_net_private) {
- .device_list = LIST_HEAD_INIT(lp->device_list),
- .opened_list = LIST_HEAD_INIT(lp->opened_list),
.dev = dev,
.index = index,
};
@@ -541,7 +510,7 @@ static int iss_net_configure(int index, char *init)
if (!tuntap_probe(lp, index, init)) {
pr_err("%s: invalid arguments. Skipping device!\n",
dev->name);
- goto errout;
+ goto err_free_netdev;
}
pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr);
@@ -549,17 +518,16 @@ static int iss_net_configure(int index, char *init)
/* sysfs register */
if (!driver_registered) {
- platform_driver_register(&iss_net_driver);
+ if (platform_driver_register(&iss_net_driver))
+ goto err_free_netdev;
driver_registered = 1;
}
- spin_lock(&devices_lock);
- list_add(&lp->device_list, &devices);
- spin_unlock(&devices_lock);
-
lp->pdev.id = index;
lp->pdev.name = DRIVER_NAME;
- platform_device_register(&lp->pdev);
+ lp->pdev.dev.release = iss_net_pdev_release;
+ if (platform_device_register(&lp->pdev))
+ goto err_free_netdev;
SET_NETDEV_DEV(dev, &lp->pdev.dev);
dev->netdev_ops = &iss_netdev_ops;
@@ -568,23 +536,20 @@ static int iss_net_configure(int index, char *init)
dev->irq = -1;
rtnl_lock();
- err = register_netdevice(dev);
- rtnl_unlock();
-
- if (err) {
+ if (register_netdevice(dev)) {
+ rtnl_unlock();
pr_err("%s: error registering net device!\n", dev->name);
- /* XXX: should we call ->remove() here? */
- free_netdev(dev);
- return 1;
+ platform_device_unregister(&lp->pdev);
+ return;
}
+ rtnl_unlock();
timer_setup(&lp->tl, iss_net_user_timer_expire, 0);
- return 0;
+ return;
-errout:
- /* FIXME: unregister; free, etc.. */
- return -EIO;
+err_free_netdev:
+ free_netdev(dev);
}
/* ------------------------------------------------------------------------- */
@@ -606,7 +571,7 @@ struct iss_net_init {
static int __init iss_net_setup(char *str)
{
- struct iss_net_private *device = NULL;
+ struct iss_net_init *device = NULL;
struct iss_net_init *new;
struct list_head *ele;
char *end;
@@ -627,16 +592,12 @@ static int __init iss_net_setup(char *str)
}
str = end;
- spin_lock(&devices_lock);
-
- list_for_each(ele, &devices) {
- device = list_entry(ele, struct iss_net_private, device_list);
+ list_for_each(ele, &eth_cmd_line) {
+ device = list_entry(ele, struct iss_net_init, list);
if (device->index == n)
break;
}
- spin_unlock(&devices_lock);
-
if (device && device->index == n) {
pr_err("Device %u already configured\n", n);
return 1;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 07b642c1916a..f50caaa1c249 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -108,13 +108,13 @@ static void simdisk_submit_bio(struct bio *bio)
sector_t sector = bio->bi_iter.bi_sector;
bio_for_each_segment(bvec, bio, iter) {
- char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
+ char *buffer = bvec_kmap_local(&bvec);
unsigned len = bvec.bv_len >> SECTOR_SHIFT;
simdisk_transfer(dev, sector, len, buffer,
bio_data_dir(bio) == WRITE);
sector += len;
- kunmap_atomic(buffer);
+ kunmap_local(buffer);
}
bio_endio(bio);
@@ -208,15 +208,21 @@ static int simdisk_detach(struct simdisk *dev)
static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
- struct simdisk *dev = PDE_DATA(file_inode(file));
+ struct simdisk *dev = pde_data(file_inode(file));
const char *s = dev->filename;
if (s) {
- ssize_t n = simple_read_from_buffer(buf, size, ppos,
- s, strlen(s));
- if (n < 0)
- return n;
- buf += n;
- size -= n;
+ ssize_t len = strlen(s);
+ char *temp = kmalloc(len + 2, GFP_KERNEL);
+
+ if (!temp)
+ return -ENOMEM;
+
+ len = scnprintf(temp, len + 2, "%s\n", s);
+ len = simple_read_from_buffer(buf, size, ppos,
+ temp, len);
+
+ kfree(temp);
+ return len;
}
return simple_read_from_buffer(buf, size, ppos, "\n", 1);
}
@@ -225,7 +231,7 @@ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
char *tmp = memdup_user_nul(buf, count);
- struct simdisk *dev = PDE_DATA(file_inode(file));
+ struct simdisk *dev = pde_data(file_inode(file));
int err;
if (IS_ERR(tmp))
@@ -284,7 +290,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
return 0;
out_cleanup_disk:
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
out:
return err;
}
@@ -338,7 +344,7 @@ static void simdisk_teardown(struct simdisk *dev, int which,
simdisk_detach(dev);
if (dev->gd) {
del_gendisk(dev->gd);
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
}
remove_proc_entry(tmp, procdir);
}
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
index 145d129be76f..0dc22c371614 100644
--- a/arch/xtensa/platforms/xt2000/setup.c
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -78,7 +78,7 @@ void __init platform_init(bp_tag_t *first)
void platform_heartbeat(void)
{
- static int i=0, t = 0;
+ static int i, t;
if (--t < 0)
{
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 538e6748e85a..c79c1d09ea86 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -133,6 +133,7 @@ static int __init machine_setup(void)
if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
update_local_mac(eth);
+ of_node_put(eth);
return 0;
}
arch_initcall(machine_setup);
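
The added of_node_put() balances the reference that of_find_compatible_node() takes on the node it returns (of_node_put(NULL) is a no-op, so the unconditional call is safe). The general pattern, as a minimal sketch:

#include <linux/of.h>

static void ethoc_lookup_sketch(void)		/* illustrative, not from the patch */
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "opencores,ethoc");
	if (np) {
		/* ... use np, e.g. read or patch its properties ... */
		of_node_put(np);		/* drop the lookup's reference */
	}
}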