Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 46
-rw-r--r--  arch/arm64/Kconfig.debug | 11
-rw-r--r--  arch/arm64/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/Makefile | 1
-rw-r--r--  arch/arm64/boot/dts/apm-mustang.dts | 20
-rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi | 265
-rw-r--r--  arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi | 35
-rw-r--r--  arch/arm64/boot/dts/thunder-88xx.dts | 67
-rw-r--r--  arch/arm64/boot/dts/thunder-88xx.dtsi | 401
-rw-r--r--  arch/arm64/configs/defconfig | 28
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 1
-rw-r--r--  arch/arm64/include/asm/Kbuild | 4
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 31
-rw-r--r--  arch/arm64/include/asm/atomic.h | 201
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 4
-rw-r--r--  arch/arm64/include/asm/cachetype.h | 20
-rw-r--r--  arch/arm64/include/asm/compat.h | 4
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 7
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 13
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 49
-rw-r--r--  arch/arm64/include/asm/dma-contiguous.h | 28
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/arm64/include/asm/elf.h | 4
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h | 1
-rw-r--r--  arch/arm64/include/asm/insn.h | 249
-rw-r--r--  arch/arm64/include/asm/io.h | 5
-rw-r--r--  arch/arm64/include/asm/irq_work.h | 22
-rw-r--r--  arch/arm64/include/asm/kgdb.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 18
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 53
-rw-r--r--  arch/arm64/include/asm/kvm_coproc.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 27
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 72
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 150
-rw-r--r--  arch/arm64/include/asm/memory.h | 2
-rw-r--r--  arch/arm64/include/asm/page.h | 3
-rw-r--r--  arch/arm64/include/asm/pci.h | 37
-rw-r--r--  arch/arm64/include/asm/percpu.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 58
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 2
-rw-r--r--  arch/arm64/include/asm/processor.h | 2
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 2
-rw-r--r--  arch/arm64/include/asm/signal32.h | 11
-rw-r--r--  arch/arm64/include/asm/smp.h | 2
-rw-r--r--  arch/arm64/include/asm/sparsemem.h | 2
-rw-r--r--  arch/arm64/include/asm/suspend.h | 1
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 9
-rw-r--r--  arch/arm64/include/asm/tlb.h | 20
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 7
-rw-r--r--  arch/arm64/include/asm/virt.h | 4
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 2
-rw-r--r--  arch/arm64/kernel/Makefile | 2
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 26
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 31
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 30
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 9
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 27
-rw-r--r--  arch/arm64/kernel/efi-stub.c | 16
-rw-r--r--  arch/arm64/kernel/efi.c | 48
-rw-r--r--  arch/arm64/kernel/entry.S | 1
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 1
-rw-r--r--  arch/arm64/kernel/ftrace.c | 10
-rw-r--r--  arch/arm64/kernel/head.S | 18
-rw-r--r--  arch/arm64/kernel/insn.c | 676
-rw-r--r--  arch/arm64/kernel/irq.c | 39
-rw-r--r--  arch/arm64/kernel/kgdb.c | 4
-rw-r--r--  arch/arm64/kernel/pci.c | 70
-rw-r--r--  arch/arm64/kernel/perf_event.c | 2
-rw-r--r--  arch/arm64/kernel/perf_regs.c | 6
-rw-r--r--  arch/arm64/kernel/process.c | 56
-rw-r--r--  arch/arm64/kernel/psci.c | 104
-rw-r--r--  arch/arm64/kernel/ptrace.c | 21
-rw-r--r--  arch/arm64/kernel/return_address.c | 3
-rw-r--r--  arch/arm64/kernel/setup.c | 51
-rw-r--r--  arch/arm64/kernel/signal.c | 58
-rw-r--r--  arch/arm64/kernel/signal32.c | 24
-rw-r--r--  arch/arm64/kernel/sleep.S | 47
-rw-r--r--  arch/arm64/kernel/smp.c | 65
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 22
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 3
-rw-r--r--  arch/arm64/kernel/suspend.c | 48
-rw-r--r--  arch/arm64/kernel/sys_compat.c | 6
-rw-r--r--  arch/arm64/kernel/traps.c | 3
-rw-r--r--  arch/arm64/kernel/vdso.c | 19
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm64/kvm/Makefile | 4
-rw-r--r--  arch/arm64/kvm/guest.c | 68
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 6
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 4
-rw-r--r--  arch/arm64/kvm/hyp.S | 600
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 548
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S | 137
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S | 267
-rw-r--r--  arch/arm64/lib/clear_user.S | 2
-rw-r--r--  arch/arm64/mm/Makefile | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 195
-rw-r--r--  arch/arm64/mm/flush.c | 16
-rw-r--r--  arch/arm64/mm/init.c | 11
-rw-r--r--  arch/arm64/mm/ioremap.c | 4
-rw-r--r--  arch/arm64/mm/mmap.c | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 16
-rw-r--r--  arch/arm64/mm/pageattr.c | 97
-rw-r--r--  arch/arm64/mm/pgd.c | 18
-rw-r--r--  arch/arm64/mm/proc.S | 15
-rw-r--r--  arch/arm64/net/Makefile | 4
-rw-r--r--  arch/arm64/net/bpf_jit.h | 173
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 743
109 files changed, 5625 insertions(+), 881 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b0f9c9db9590..9532f8d5857e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,8 @@
config ARM64
def_bool y
+ select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_SUPPORTS_ATOMIC_RMW
@@ -17,6 +19,7 @@ config ARM64
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
+ select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CPU_AUTOPROBE
@@ -29,11 +32,13 @@ config ARM64
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
+ select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_BPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_BUGVERBOSE
@@ -53,6 +58,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
@@ -80,7 +86,7 @@ config MMU
def_bool y
config NO_IOPORT_MAP
- def_bool y
+ def_bool y if !PCI
config STACKTRACE_SUPPORT
def_bool y
@@ -106,6 +112,9 @@ config GENERIC_CALIBRATE_DELAY
config ZONE_DMA
def_bool y
+config HAVE_GENERIC_RCU_GUP
+ def_bool y
+
config ARCH_DMA_ADDR_T_64BIT
def_bool y
@@ -133,6 +142,11 @@ source "kernel/Kconfig.freezer"
menu "Platform selection"
+config ARCH_THUNDER
+ bool "Cavium Inc. Thunder SoC Family"
+ help
+ This enables support for Cavium's Thunder Family of SoCs.
+
config ARCH_VEXPRESS
bool "ARMv8 software model (Versatile Express)"
select ARCH_REQUIRE_GPIOLIB
@@ -155,6 +169,26 @@ menu "Bus support"
config ARM_AMBA
bool
+config PCI
+ bool "PCI support"
+ help
+ This feature enables support for PCI bus system. If you say Y
+ here, the kernel will include drivers and infrastructure code
+ to support PCI bus devices.
+
+config PCI_DOMAINS
+ def_bool PCI
+
+config PCI_DOMAINS_GENERIC
+ def_bool PCI
+
+config PCI_SYSCALL
+ def_bool PCI
+
+source "drivers/pci/Kconfig"
+source "drivers/pci/pcie/Kconfig"
+source "drivers/pci/hotplug/Kconfig"
+
endmenu
menu "Kernel Features"
@@ -199,7 +233,7 @@ config ARM64_VA_BITS_42
config ARM64_VA_BITS_48
bool "48-bit"
- depends on BROKEN
+ depends on !ARM_SMMU
endchoice
@@ -251,11 +285,11 @@ config SCHED_SMT
places. If unsure say N here.
config NR_CPUS
- int "Maximum number of CPUs (2-32)"
- range 2 32
+ int "Maximum number of CPUs (2-64)"
+ range 2 64
depends on SMP
# These have to remain sorted largest to smallest
- default "8"
+ default "64"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
@@ -316,7 +350,7 @@ config XEN_DOM0
depends on XEN
config XEN
- bool "Xen guest support on ARM64 (EXPERIMENTAL)"
+ bool "Xen guest support on ARM64"
depends on ARM64 && OF
select SWIOTLB_XEN
help
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 4ee8e90b7a45..0a12933e50ed 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
of TEXT_OFFSET and platforms must not require a specific
value.
+config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+ depends on MODULES
+ help
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+ of module data. Such protection may interfere with run-time code
+ patching and dynamic kernel tracing - and they might also protect
+ against certain classes of kernel exploits.
+ If in doubt, say "N".
+
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 57833546bf00..20901ffed182 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -30,8 +30,6 @@ AS += -EL
LD += -EL
endif
-comma = ,
-
CHECKFLAGS += -D__aarch64__
# Default value
@@ -39,7 +37,7 @@ head-y := arch/arm64/kernel/head.o
# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}')
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
else
TEXT_OFFSET := 0x00080000
endif
@@ -47,6 +45,7 @@ endif
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_NET) += arch/arm64/net/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index c52bdb051f66..f8001a62029c 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,3 +1,4 @@
+dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
index 6541962f5d70..2e25de0800b9 100644
--- a/arch/arm64/boot/dts/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -25,6 +25,26 @@
};
};
+&pcie0clk {
+ status = "ok";
+};
+
+&pcie0 {
+ status = "ok";
+};
+
&serial0 {
status = "ok";
};
+
+&menet {
+ status = "ok";
+};
+
+&sgenet0 {
+ status = "ok";
+};
+
+&xgenet {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 40aa96ce13c4..f1ad9c2ab2e9 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -167,14 +167,33 @@
clock-output-names = "ethclk";
};
- eth8clk: eth8clk {
+ menetclk: menetclk {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&ethclk 0>;
- clock-names = "eth8clk";
reg = <0x0 0x1702C000 0x0 0x1000>;
reg-names = "csr-reg";
- clock-output-names = "eth8clk";
+ clock-output-names = "menetclk";
+ };
+
+ sge0clk: sge0clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ csr-mask = <0x3>;
+ clock-output-names = "sge0clk";
+ };
+
+ xge0clk: xge0clk@1f61c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f61c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ csr-mask = <0x3>;
+ clock-output-names = "xge0clk";
};
sataphy1clk: sataphy1clk@1f21c000 {
@@ -270,6 +289,184 @@
enable-mask = <0x2>;
clock-output-names = "rtcclk";
};
+
+ rngpkaclk: rngpkaclk@17000000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x17000000 0x0 0x2000>;
+ reg-names = "csr-reg";
+ csr-offset = <0xc>;
+ csr-mask = <0x10>;
+ enable-offset = <0x10>;
+ enable-mask = <0x10>;
+ clock-output-names = "rngpkaclk";
+ };
+
+ pcie0clk: pcie0clk@1f2bc000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f2bc000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie0clk";
+ };
+
+ pcie1clk: pcie1clk@1f2cc000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f2cc000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie1clk";
+ };
+
+ pcie2clk: pcie2clk@1f2dc000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f2dc000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie2clk";
+ };
+
+ pcie3clk: pcie3clk@1f50c000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f50c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie3clk";
+ };
+
+ pcie4clk: pcie4clk@1f51c000 {
+ status = "disabled";
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f51c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "pcie4clk";
+ };
+ };
+
+ pcie0: pcie@1f2b0000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */
+ 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
+ dma-coherent;
+ clocks = <&pcie0clk 0>;
+ };
+
+ pcie1: pcie@1f2c0000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f2c0000 0x0 0x00010000 /* Controller registers */
+ 0xd0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0xd1 0x80000000 0x00 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
+ dma-coherent;
+ clocks = <&pcie1clk 0>;
+ };
+
+ pcie2: pcie@1f2d0000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f2d0000 0x0 0x00010000 /* Controller registers */
+ 0x90 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0x90 0x10000000 0x0 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0x91 0x80000000 0x0 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
+ dma-coherent;
+ clocks = <&pcie2clk 0>;
+ };
+
+ pcie3: pcie@1f500000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f500000 0x0 0x00010000 /* Controller registers */
+ 0xa0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0xa0 0x10000000 0x0 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0xa1 0x80000000 0x0 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
+ dma-coherent;
+ clocks = <&pcie3clk 0>;
+ };
+
+ pcie4: pcie@1f510000 {
+ status = "disabled";
+ device_type = "pci";
+ compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = < 0x00 0x1f510000 0x0 0x00010000 /* Controller registers */
+ 0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */
+ reg-names = "csr", "cfg";
+ ranges = <0x01000000 0x0 0x00000000 0xc0 0x10000000 0x0 0x00010000 /* io */
+ 0x02000000 0x0 0x80000000 0xc1 0x80000000 0x0 0x80000000>; /* mem */
+ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+ 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x1
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x1
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x1
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
+ dma-coherent;
+ clocks = <&pcie4clk 0>;
};
serial0: serial@1c020000 {
@@ -397,5 +594,67 @@
#clock-cells = <1>;
clocks = <&rtcclk 0>;
};
+
+ menet: ethernet@17020000 {
+ compatible = "apm,xgene-enet";
+ status = "disabled";
+ reg = <0x0 0x17020000 0x0 0xd100>,
+ <0x0 0X17030000 0x0 0Xc300>,
+ <0x0 0X10000000 0x0 0X200>;
+ reg-names = "enet_csr", "ring_csr", "ring_cmd";
+ interrupts = <0x0 0x3c 0x4>;
+ dma-coherent;
+ clocks = <&menetclk 0>;
+ /* mac address will be overwritten by the bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ phy-connection-type = "rgmii";
+ phy-handle = <&menetphy>;
+ mdio {
+ compatible = "apm,xgene-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ menetphy: menetphy@3 {
+ compatible = "ethernet-phy-id001c.c915";
+ reg = <0x3>;
+ };
+
+ };
+ };
+
+ sgenet0: ethernet@1f210000 {
+ compatible = "apm,xgene-enet";
+ status = "disabled";
+ reg = <0x0 0x1f210000 0x0 0xd100>,
+ <0x0 0x1f200000 0x0 0Xc300>,
+ <0x0 0x1B000000 0x0 0X200>;
+ reg-names = "enet_csr", "ring_csr", "ring_cmd";
+ interrupts = <0x0 0xA0 0x4>;
+ dma-coherent;
+ clocks = <&sge0clk 0>;
+ local-mac-address = [00 00 00 00 00 00];
+ phy-connection-type = "sgmii";
+ };
+
+ xgenet: ethernet@1f610000 {
+ compatible = "apm,xgene-enet";
+ status = "disabled";
+ reg = <0x0 0x1f610000 0x0 0xd100>,
+ <0x0 0x1f600000 0x0 0Xc300>,
+ <0x0 0x18000000 0x0 0X200>;
+ reg-names = "enet_csr", "ring_csr", "ring_cmd";
+ interrupts = <0x0 0x60 0x4>;
+ dma-coherent;
+ clocks = <&xge0clk 0>;
+ /* mac address will be overwritten by the bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ phy-connection-type = "xgmii";
+ };
+
+ rng: rng@10520000 {
+ compatible = "apm,xgene-rng";
+ reg = <0x0 0x10520000 0x0 0x100>;
+ interrupts = <0x0 0x41 0x4>;
+ clocks = <&rngpkaclk 0>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
index ac2cb2418025..c46cbb29f3c6 100644
--- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -22,7 +22,7 @@
bank-width = <4>;
};
- vram@2,00000000 {
+ v2m_video_ram: vram@2,00000000 {
compatible = "arm,vexpress-vram";
reg = <2 0x00000000 0x00800000>;
};
@@ -179,9 +179,42 @@
clcd@1f0000 {
compatible = "arm,pl111", "arm,primecell";
reg = <0x1f0000 0x1000>;
+ interrupt-names = "combined";
interrupts = <14>;
clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
clock-names = "clcdclk", "apb_pclk";
+ arm,pl11x,framebuffer = <0x18000000 0x00180000>;
+ memory-region = <&v2m_video_ram>;
+ max-memory-bandwidth = <130000000>; /* 16bpp @ 63.5MHz */
+
+ port {
+ v2m_clcd_pads: endpoint {
+ remote-endpoint = <&v2m_clcd_panel>;
+ arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+ };
+ };
+
+ panel {
+ compatible = "panel-dpi";
+
+ port {
+ v2m_clcd_panel: endpoint {
+ remote-endpoint = <&v2m_clcd_pads>;
+ };
+ };
+
+ panel-timing {
+ clock-frequency = <63500127>;
+ hactive = <1024>;
+ hback-porch = <152>;
+ hfront-porch = <48>;
+ hsync-len = <104>;
+ vactive = <768>;
+ vback-porch = <23>;
+ vfront-porch = <3>;
+ vsync-len = <4>;
+ };
+ };
};
virtio_block@0130000 {
diff --git a/arch/arm64/boot/dts/thunder-88xx.dts b/arch/arm64/boot/dts/thunder-88xx.dts
new file mode 100644
index 000000000000..800ba65991f7
--- /dev/null
+++ b/arch/arm64/boot/dts/thunder-88xx.dts
@@ -0,0 +1,67 @@
+/*
+ * Cavium Thunder DTS file - Thunder board description
+ *
+ * Copyright (C) 2014, Cavium Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+/include/ "thunder-88xx.dtsi"
+
+/ {
+ model = "Cavium ThunderX CN88XX board";
+ compatible = "cavium,thunder-88xx";
+
+ aliases {
+ serial0 = &uaa0;
+ serial1 = &uaa1;
+ };
+
+ memory@00000000 {
+ device_type = "memory";
+ reg = <0x0 0x00000000 0x0 0x80000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/thunder-88xx.dtsi b/arch/arm64/boot/dts/thunder-88xx.dtsi
new file mode 100644
index 000000000000..d8c0bdc51882
--- /dev/null
+++ b/arch/arm64/boot/dts/thunder-88xx.dtsi
@@ -0,0 +1,401 @@
+/*
+ * Cavium Thunder DTS file - Thunder SoC description
+ *
+ * Copyright (C) 2014, Cavium Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+ compatible = "cavium,thunder-88xx";
+ interrupt-parent = <&gic0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@000 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x000>;
+ enable-method = "psci";
+ };
+ cpu@001 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x001>;
+ enable-method = "psci";
+ };
+ cpu@002 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x002>;
+ enable-method = "psci";
+ };
+ cpu@003 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x003>;
+ enable-method = "psci";
+ };
+ cpu@004 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x004>;
+ enable-method = "psci";
+ };
+ cpu@005 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x005>;
+ enable-method = "psci";
+ };
+ cpu@006 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x006>;
+ enable-method = "psci";
+ };
+ cpu@007 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x007>;
+ enable-method = "psci";
+ };
+ cpu@008 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x008>;
+ enable-method = "psci";
+ };
+ cpu@009 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x009>;
+ enable-method = "psci";
+ };
+ cpu@00a {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x00a>;
+ enable-method = "psci";
+ };
+ cpu@00b {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x00b>;
+ enable-method = "psci";
+ };
+ cpu@00c {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x00c>;
+ enable-method = "psci";
+ };
+ cpu@00d {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x00d>;
+ enable-method = "psci";
+ };
+ cpu@00e {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x00e>;
+ enable-method = "psci";
+ };
+ cpu@00f {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x00f>;
+ enable-method = "psci";
+ };
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ };
+ cpu@102 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x102>;
+ enable-method = "psci";
+ };
+ cpu@103 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x103>;
+ enable-method = "psci";
+ };
+ cpu@104 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x104>;
+ enable-method = "psci";
+ };
+ cpu@105 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x105>;
+ enable-method = "psci";
+ };
+ cpu@106 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x106>;
+ enable-method = "psci";
+ };
+ cpu@107 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x107>;
+ enable-method = "psci";
+ };
+ cpu@108 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x108>;
+ enable-method = "psci";
+ };
+ cpu@109 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x109>;
+ enable-method = "psci";
+ };
+ cpu@10a {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x10a>;
+ enable-method = "psci";
+ };
+ cpu@10b {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x10b>;
+ enable-method = "psci";
+ };
+ cpu@10c {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x10c>;
+ enable-method = "psci";
+ };
+ cpu@10d {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x10d>;
+ enable-method = "psci";
+ };
+ cpu@10e {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x10e>;
+ enable-method = "psci";
+ };
+ cpu@10f {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x10f>;
+ enable-method = "psci";
+ };
+ cpu@200 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ };
+ cpu@201 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x201>;
+ enable-method = "psci";
+ };
+ cpu@202 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x202>;
+ enable-method = "psci";
+ };
+ cpu@203 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x203>;
+ enable-method = "psci";
+ };
+ cpu@204 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x204>;
+ enable-method = "psci";
+ };
+ cpu@205 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x205>;
+ enable-method = "psci";
+ };
+ cpu@206 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x206>;
+ enable-method = "psci";
+ };
+ cpu@207 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x207>;
+ enable-method = "psci";
+ };
+ cpu@208 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x208>;
+ enable-method = "psci";
+ };
+ cpu@209 {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x209>;
+ enable-method = "psci";
+ };
+ cpu@20a {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x20a>;
+ enable-method = "psci";
+ };
+ cpu@20b {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x20b>;
+ enable-method = "psci";
+ };
+ cpu@20c {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x20c>;
+ enable-method = "psci";
+ };
+ cpu@20d {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x20d>;
+ enable-method = "psci";
+ };
+ cpu@20e {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x20e>;
+ enable-method = "psci";
+ };
+ cpu@20f {
+ device_type = "cpu";
+ compatible = "cavium,thunder", "arm,armv8";
+ reg = <0x0 0x20f>;
+ enable-method = "psci";
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 0xff01>,
+ <1 14 0xff01>,
+ <1 11 0xff01>,
+ <1 10 0xff01>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ refclk50mhz: refclk50mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "refclk50mhz";
+ };
+
+ gic0: interrupt-controller@8010,00000000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */
+ <0x8010 0x80000000 0x0 0x600000>; /* GICR */
+ interrupts = <1 9 0xf04>;
+ };
+
+ uaa0: serial@87e0,24000000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x87e0 0x24000000 0x0 0x1000>;
+ interrupts = <1 21 4>;
+ clocks = <&refclk50mhz>;
+ clock-names = "apb_pclk";
+ };
+
+ uaa1: serial@87e0,25000000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x87e0 0x25000000 0x0 0x1000>;
+ interrupts = <1 22 4>;
+ clocks = <&refclk50mhz>;
+ clock-names = "apb_pclk";
+ };
+ };
+};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 1e52b741d806..dd301be89ecc 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -32,8 +32,12 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_XGENE=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
CONFIG_KSM=y
@@ -51,6 +55,7 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+CONFIG_BPF_JIT=y
# CONFIG_WIRELESS is not set
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
@@ -64,16 +69,21 @@ CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_XGENE=y
CONFIG_PATA_PLATFORM=y
CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
+CONFIG_NET_XGENE=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -82,22 +92,40 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
+# CONFIG_HMC_DRV is not set
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
# CONFIG_HWMON is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_ULPI=y
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SPI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_XGENE=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PHY_XGENE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index c294e67d3925..ae67e88c28b9 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -150,7 +150,6 @@ static void sha2_finup(struct shash_desc *desc, const u8 *data,
kernel_neon_begin_partial(28);
sha2_ce_transform(blocks, data, sctx->state, NULL, len);
kernel_neon_end();
- data += blocks * SHA256_BLOCK_SIZE;
}
static int sha224_finup(struct shash_desc *desc, const u8 *data,
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 0b3fcf86e6ba..dc770bd4f5a5 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -9,8 +9,9 @@ generic-y += current.h
generic-y += delay.h
generic-y += div64.h
generic-y += dma.h
-generic-y += emergency-restart.h
+generic-y += dma-contiguous.h
generic-y += early_ioremap.h
+generic-y += emergency-restart.h
generic-y += errno.h
generic-y += ftrace.h
generic-y += hash.h
@@ -29,6 +30,7 @@ generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
+generic-y += pci-bridge.h
generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 9400596a0f39..f19097134b02 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -104,37 +104,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
-static inline void arch_counter_set_user_access(void)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
-
- /* Disable user access to the timers and the physical counter */
- /* Also disable virtual event stream */
- cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
- | ARCH_TIMER_USR_VT_ACCESS_EN
- | ARCH_TIMER_VIRT_EVT_EN
- | ARCH_TIMER_USR_PCT_ACCESS_EN);
-
- /* Enable user access to the virtual counter */
- cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
-
- arch_timer_set_cntkctl(cntkctl);
-}
-
-static inline void arch_timer_evtstrm_enable(int divider)
-{
- u32 cntkctl = arch_timer_get_cntkctl();
- cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
- /* Set the divider and enable virtual event stream */
- cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
- | ARCH_TIMER_VIRT_EVT_EN;
- arch_timer_set_cntkctl(cntkctl);
- elf_hwcap |= HWCAP_EVTSTRM;
-#ifdef CONFIG_COMPAT
- compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
-#endif
-}
-
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 65f1569ac96e..7047051ded40 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -35,7 +35,7 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/*
@@ -43,69 +43,51 @@
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- asm volatile("// atomic_add\n"
-"1: ldxr %w0, %2\n"
-" add %w0, %w0, %w3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- asm volatile("// atomic_add_return\n"
-"1: ldxr %w0, %2\n"
-" add %w0, %w0, %w3\n"
-" stlxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
-
- smp_mb();
- return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
- asm volatile("// atomic_sub\n"
-"1: ldxr %w0, %2\n"
-" sub %w0, %w0, %w3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
+#define ATOMIC_OP(op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "_return\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stlxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
}
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op)
- asm volatile("// atomic_sub_return\n"
-"1: ldxr %w0, %2\n"
-" sub %w0, %w0, %w3\n"
-" stlxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
- smp_mb();
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
@@ -157,72 +139,53 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_add\n"
-"1: ldxr %0, %2\n"
-" add %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
+#define ATOMIC64_OP(op, asm_op) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, asm_op) \
+static inline long atomic64_##op##_return(long i, atomic64_t *v) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "_return\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stlxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
}
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_OP_RETURN(op, asm_op)
- asm volatile("// atomic64_add_return\n"
-"1: ldxr %0, %2\n"
-" add %0, %0, %3\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
- smp_mb();
- return result;
-}
-
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_sub\n"
-"1: ldxr %0, %2\n"
-" sub %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
-}
-
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_sub_return\n"
-"1: ldxr %0, %2\n"
-" sub %0, %0, %3\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
-
- smp_mb();
- return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index f2defe1c380c..689b6379188c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
#endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 7a2e0762cb40..4c631a0a3609 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,6 +39,26 @@
extern unsigned long __icache_flags;
+#define CCSIDR_EL1_LINESIZE_MASK 0x7
+#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
+
+#define CCSIDR_EL1_NUMSETS_SHIFT 13
+#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS(x) \
+ (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+
+extern u64 __attribute_const__ icache_get_ccsidr(void);
+
+static inline int icache_get_linesize(void)
+{
+ return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+}
+
+static inline int icache_get_numsets(void)
+{
+ return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+}
+
/*
* Whilst the D-side always behaves as PIPT on AArch64, aliasing is
* permitted in the I-cache.
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 253e33bc94fb..56de5aadede2 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -37,8 +37,8 @@ typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
-typedef u32 __compat_uid_t;
-typedef u32 __compat_gid_t;
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
typedef u16 __compat_uid16_t;
typedef u16 __compat_gid16_t;
typedef u32 __compat_uid32_t;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index d7b4b38a8e86..6f8e2ef9094a 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,6 +28,8 @@ struct device_node;
* enable-method property.
* @cpu_init: Reads any data necessary for a specific enable-method from the
* devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ * devicetree, for a given cpu node and proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@@ -47,6 +49,7 @@ struct device_node;
struct cpu_operations {
const char *name;
int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@@ -61,7 +64,7 @@ struct cpu_operations {
};
extern const struct cpu_operations *cpu_ops[NR_CPUS];
-extern int __init cpu_read_ops(struct device_node *dn, int cpu);
-extern void __init cpu_read_bootcpu_ops(void);
+int __init cpu_read_ops(struct device_node *dn, int cpu);
+void __init cpu_read_bootcpu_ops(void);
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
new file mode 100644
index 000000000000..b52a9932e2b1
--- /dev/null
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_CPUIDLE_H
+#define __ASM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int cpu_init_idle(unsigned int cpu);
+#else
+static inline int cpu_init_idle(unsigned int cpu)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 6e9b5b36921c..40ec68aa6870 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,15 @@
#ifdef __KERNEL__
+/* Low-level stepping controls. */
+#define DBG_MDSCR_SS (1 << 0)
+#define DBG_SPSR_SS (1 << 21)
+
+/* MDSCR_EL1 enabling bits */
+#define DBG_MDSCR_KDE (1 << 13)
+#define DBG_MDSCR_MDE (1 << 15)
+#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
+
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
/* AArch64 */
@@ -39,11 +48,13 @@
/*
* #imm16 values used for BRK instruction generation
* Allowed values for kgbd are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
*/
-#define KGDB_DYN_DGB_BRK_IMM 0x400
-#define KDBG_COMPILED_DBG_BRK_IMM 0x401
+#define FAULT_BRK_IMM 0x100
+#define KGDB_DYN_DBG_BRK_IMM 0x400
+#define KGDB_COMPILED_DBG_BRK_IMM 0x401
/*
* BRK instruction encoding
@@ -52,32 +63,33 @@
#define AARCH64_BREAK_MON 0xd4200000
/*
+ * BRK instruction for provoking a fault on purpose
+ * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
+ */
+#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
+
+/*
* Extract byte from BRK instruction
*/
-#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
/*
* Extract byte from BRK #imm16
*/
-#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
- (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
+ (((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
-#define KGDB_DYN_DGB_BRK_BYTE(x) \
- (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+#define KGDB_DYN_DBG_BRK_BYTE(x) \
+ (KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
-#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
-#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
-#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
-#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
+#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0)
+#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1)
+#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2)
+#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3)
#define CACHE_FLUSH_IS_SAFE 1
-enum debug_el {
- DBG_ACTIVE_EL0 = 0,
- DBG_ACTIVE_EL1,
-};
-
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
#define DBG_ESR_EVT_VECC 0x5
@@ -115,6 +127,11 @@ void unregister_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);
+enum debug_el {
+ DBG_ACTIVE_EL0 = 0,
+ DBG_ACTIVE_EL1,
+};
+
void enable_debug_monitors(enum debug_el el);
void disable_debug_monitors(enum debug_el el);
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
deleted file mode 100644
index 14c4c0ca7f2a..000000000000
--- a/arch/arm64/include/asm/dma-contiguous.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ASM_DMA_CONTIGUOUS_H
-#define _ASM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_DMA_CMA
-
-#include <linux/types.h>
-
-static inline void
-dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
-
-#endif
-#endif
-
-#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index dc82e52acdb3..adeae3f6f0fc 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
+static inline int set_arch_dma_coherent_ops(struct device *dev)
+{
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+ return 0;
+}
+#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 01d3aab64b79..1f65be393139 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -126,7 +126,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* that it will "exec", and that there is sufficient room for the brk.
*/
extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_64 / 3))
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
@@ -169,7 +169,7 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define COMPAT_ELF_PLATFORM ("v8l")
#endif
-#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
+#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 0be67821f9ce..e8a3268a891c 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -47,8 +47,6 @@ static inline void ack_bad_irq(unsigned int irq)
irq_err_count++;
}
-extern void handle_IRQ(unsigned int, struct pt_regs *);
-
/*
* No arch-specific IRQ flags.
*/
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index d064047612b1..52b484b6aa1a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
*/
#define ARM_MAX_BRP 16
#define ARM_MAX_WRP 16
-#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
/* Virtual debug register bases. */
#define AARCH64_DBG_REG_BVR 0
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index dc1f73b13e74..56a9e63b6c33 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -64,12 +66,155 @@ enum aarch64_insn_imm_type {
AARCH64_INSN_IMM_14,
AARCH64_INSN_IMM_12,
AARCH64_INSN_IMM_9,
+ AARCH64_INSN_IMM_7,
+ AARCH64_INSN_IMM_6,
+ AARCH64_INSN_IMM_S,
+ AARCH64_INSN_IMM_R,
AARCH64_INSN_IMM_MAX
};
+enum aarch64_insn_register_type {
+ AARCH64_INSN_REGTYPE_RT,
+ AARCH64_INSN_REGTYPE_RN,
+ AARCH64_INSN_REGTYPE_RT2,
+ AARCH64_INSN_REGTYPE_RM,
+ AARCH64_INSN_REGTYPE_RD,
+ AARCH64_INSN_REGTYPE_RA,
+};
+
+enum aarch64_insn_register {
+ AARCH64_INSN_REG_0 = 0,
+ AARCH64_INSN_REG_1 = 1,
+ AARCH64_INSN_REG_2 = 2,
+ AARCH64_INSN_REG_3 = 3,
+ AARCH64_INSN_REG_4 = 4,
+ AARCH64_INSN_REG_5 = 5,
+ AARCH64_INSN_REG_6 = 6,
+ AARCH64_INSN_REG_7 = 7,
+ AARCH64_INSN_REG_8 = 8,
+ AARCH64_INSN_REG_9 = 9,
+ AARCH64_INSN_REG_10 = 10,
+ AARCH64_INSN_REG_11 = 11,
+ AARCH64_INSN_REG_12 = 12,
+ AARCH64_INSN_REG_13 = 13,
+ AARCH64_INSN_REG_14 = 14,
+ AARCH64_INSN_REG_15 = 15,
+ AARCH64_INSN_REG_16 = 16,
+ AARCH64_INSN_REG_17 = 17,
+ AARCH64_INSN_REG_18 = 18,
+ AARCH64_INSN_REG_19 = 19,
+ AARCH64_INSN_REG_20 = 20,
+ AARCH64_INSN_REG_21 = 21,
+ AARCH64_INSN_REG_22 = 22,
+ AARCH64_INSN_REG_23 = 23,
+ AARCH64_INSN_REG_24 = 24,
+ AARCH64_INSN_REG_25 = 25,
+ AARCH64_INSN_REG_26 = 26,
+ AARCH64_INSN_REG_27 = 27,
+ AARCH64_INSN_REG_28 = 28,
+ AARCH64_INSN_REG_29 = 29,
+ AARCH64_INSN_REG_FP = 29, /* Frame pointer */
+ AARCH64_INSN_REG_30 = 30,
+ AARCH64_INSN_REG_LR = 30, /* Link register */
+ AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
+ AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */
+};
+
+enum aarch64_insn_variant {
+ AARCH64_INSN_VARIANT_32BIT,
+ AARCH64_INSN_VARIANT_64BIT
+};
+
+enum aarch64_insn_condition {
+ AARCH64_INSN_COND_EQ = 0x0, /* == */
+ AARCH64_INSN_COND_NE = 0x1, /* != */
+ AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */
+ AARCH64_INSN_COND_CC = 0x3, /* unsigned < */
+ AARCH64_INSN_COND_MI = 0x4, /* < 0 */
+ AARCH64_INSN_COND_PL = 0x5, /* >= 0 */
+ AARCH64_INSN_COND_VS = 0x6, /* overflow */
+ AARCH64_INSN_COND_VC = 0x7, /* no overflow */
+ AARCH64_INSN_COND_HI = 0x8, /* unsigned > */
+ AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */
+ AARCH64_INSN_COND_GE = 0xa, /* signed >= */
+ AARCH64_INSN_COND_LT = 0xb, /* signed < */
+ AARCH64_INSN_COND_GT = 0xc, /* signed > */
+ AARCH64_INSN_COND_LE = 0xd, /* signed <= */
+ AARCH64_INSN_COND_AL = 0xe, /* always */
+};
+
enum aarch64_insn_branch_type {
AARCH64_INSN_BRANCH_NOLINK,
AARCH64_INSN_BRANCH_LINK,
+ AARCH64_INSN_BRANCH_RETURN,
+ AARCH64_INSN_BRANCH_COMP_ZERO,
+ AARCH64_INSN_BRANCH_COMP_NONZERO,
+};
+
+enum aarch64_insn_size_type {
+ AARCH64_INSN_SIZE_8,
+ AARCH64_INSN_SIZE_16,
+ AARCH64_INSN_SIZE_32,
+ AARCH64_INSN_SIZE_64,
+};
+
+enum aarch64_insn_ldst_type {
+ AARCH64_INSN_LDST_LOAD_REG_OFFSET,
+ AARCH64_INSN_LDST_STORE_REG_OFFSET,
+ AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
+ AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
+ AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
+ AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
+};
+
+enum aarch64_insn_adsb_type {
+ AARCH64_INSN_ADSB_ADD,
+ AARCH64_INSN_ADSB_SUB,
+ AARCH64_INSN_ADSB_ADD_SETFLAGS,
+ AARCH64_INSN_ADSB_SUB_SETFLAGS
+};
+
+enum aarch64_insn_movewide_type {
+ AARCH64_INSN_MOVEWIDE_ZERO,
+ AARCH64_INSN_MOVEWIDE_KEEP,
+ AARCH64_INSN_MOVEWIDE_INVERSE
+};
+
+enum aarch64_insn_bitfield_type {
+ AARCH64_INSN_BITFIELD_MOVE,
+ AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
+ AARCH64_INSN_BITFIELD_MOVE_SIGNED
+};
+
+enum aarch64_insn_data1_type {
+ AARCH64_INSN_DATA1_REVERSE_16,
+ AARCH64_INSN_DATA1_REVERSE_32,
+ AARCH64_INSN_DATA1_REVERSE_64,
+};
+
+enum aarch64_insn_data2_type {
+ AARCH64_INSN_DATA2_UDIV,
+ AARCH64_INSN_DATA2_SDIV,
+ AARCH64_INSN_DATA2_LSLV,
+ AARCH64_INSN_DATA2_LSRV,
+ AARCH64_INSN_DATA2_ASRV,
+ AARCH64_INSN_DATA2_RORV,
+};
+
+enum aarch64_insn_data3_type {
+ AARCH64_INSN_DATA3_MADD,
+ AARCH64_INSN_DATA3_MSUB,
+};
+
+enum aarch64_insn_logic_type {
+ AARCH64_INSN_LOGIC_AND,
+ AARCH64_INSN_LOGIC_BIC,
+ AARCH64_INSN_LOGIC_ORR,
+ AARCH64_INSN_LOGIC_ORN,
+ AARCH64_INSN_LOGIC_EOR,
+ AARCH64_INSN_LOGIC_EON,
+ AARCH64_INSN_LOGIC_AND_SETFLAGS,
+ AARCH64_INSN_LOGIC_BIC_SETFLAGS
};
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
@@ -78,13 +223,58 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
+__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
+__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
+__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
+__AARCH64_INSN_FUNCS(ldp_pre, 0x7FC00000, 0x29C00000)
+__AARCH64_INSN_FUNCS(add_imm, 0x7F000000, 0x11000000)
+__AARCH64_INSN_FUNCS(adds_imm, 0x7F000000, 0x31000000)
+__AARCH64_INSN_FUNCS(sub_imm, 0x7F000000, 0x51000000)
+__AARCH64_INSN_FUNCS(subs_imm, 0x7F000000, 0x71000000)
+__AARCH64_INSN_FUNCS(movn, 0x7F800000, 0x12800000)
+__AARCH64_INSN_FUNCS(sbfm, 0x7F800000, 0x13000000)
+__AARCH64_INSN_FUNCS(bfm, 0x7F800000, 0x33000000)
+__AARCH64_INSN_FUNCS(movz, 0x7F800000, 0x52800000)
+__AARCH64_INSN_FUNCS(ubfm, 0x7F800000, 0x53000000)
+__AARCH64_INSN_FUNCS(movk, 0x7F800000, 0x72800000)
+__AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000)
+__AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000)
+__AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000)
+__AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000)
+__AARCH64_INSN_FUNCS(madd, 0x7FE08000, 0x1B000000)
+__AARCH64_INSN_FUNCS(msub, 0x7FE08000, 0x1B008000)
+__AARCH64_INSN_FUNCS(udiv, 0x7FE0FC00, 0x1AC00800)
+__AARCH64_INSN_FUNCS(sdiv, 0x7FE0FC00, 0x1AC00C00)
+__AARCH64_INSN_FUNCS(lslv, 0x7FE0FC00, 0x1AC02000)
+__AARCH64_INSN_FUNCS(lsrv, 0x7FE0FC00, 0x1AC02400)
+__AARCH64_INSN_FUNCS(asrv, 0x7FE0FC00, 0x1AC02800)
+__AARCH64_INSN_FUNCS(rorv, 0x7FE0FC00, 0x1AC02C00)
+__AARCH64_INSN_FUNCS(rev16, 0x7FFFFC00, 0x5AC00400)
+__AARCH64_INSN_FUNCS(rev32, 0x7FFFFC00, 0x5AC00800)
+__AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00)
+__AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000)
+__AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000)
+__AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000)
+__AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000)
+__AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000)
+__AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000)
+__AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000)
__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000)
__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
+__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
+__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
+__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
#undef __AARCH64_INSN_FUNCS
@@ -97,8 +287,67 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_condition cond);
u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
u32 aarch64_insn_gen_nop(void);
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register offset,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_register base,
+ int offset,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int imm, enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int immr, int imms,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_bitfield_type type);
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+ int imm, int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_movewide_type type);
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data1_type type);
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data2_type type);
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data3_type type);
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_logic_type type);
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
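The helpers declared above pair a mask/value test for decoding with a set of gen_*() builders for encoding; a minimal, hypothetical caller (illustrative sketch only, not part of this series, using nothing beyond the prototypes shown) could combine them like this:

#include <linux/types.h>
#include <asm/insn.h>

static u32 example_gen(unsigned long pc, unsigned long target)
{
	/*
	 * Decode side: the predicates test (insn & mask) == val.
	 * 0xd65f03c0 is "ret x30", so aarch64_insn_is_ret() returns true.
	 */
	bool is_ret = aarch64_insn_is_ret(0xd65f03c0);

	/* Encode side: "add w0, w1, #16" built from enum parameters. */
	u32 add = aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_0,
					       AARCH64_INSN_REG_1, 16,
					       AARCH64_INSN_VARIANT_32BIT,
					       AARCH64_INSN_ADSB_ADD);

	/* PC-relative "bl target", as the ftrace changes below now request. */
	return (is_ret && add) ? aarch64_insn_gen_branch_imm(pc, target,
					AARCH64_INSN_BRANCH_LINK) : 0;
}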
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index e0ecdcf6632d..79f1d519221f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,7 +121,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
/*
* I/O port access primitives.
*/
-#define IO_SPACE_LIMIT 0xffff
+#define arch_has_dev_port() (1)
+#define IO_SPACE_LIMIT (SZ_32M - 1)
#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
static inline u8 inb(unsigned long addr)
@@ -243,7 +244,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
* (PHYS_OFFSET and PHYS_MASK taken into account).
*/
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(unsigned long addr, size_t size);
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
new file mode 100644
index 000000000000..b4f6b19a8a68
--- /dev/null
+++ b/arch/arm64/include/asm/irq_work.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_IRQ_WORK_H
+#define __ASM_IRQ_WORK_H
+
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return !!__smp_cross_call;
+}
+
+#else
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return false;
+}
+
+#endif
+
+#endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index 3c8aafc1082f..f69f69c8120c 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -29,7 +29,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+ asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM));
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3d6903006a8a..7fd3e27e3ccc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -76,9 +76,10 @@
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
- HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_SWIO | HCR_TIDCP | HCR_RW)
+ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
+
/* Hyp System Control Register (SCTLR_EL2) bits */
#define SCTLR_EL2_EE (1 << 25)
@@ -121,6 +122,17 @@
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
+/*
+ * We configure the Stage-2 page tables to always restrict the IPA space to be
+ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
+ * not known to exist and will break with this configuration.
+ *
+ * Note that when using 4K pages, we concatenate two first level page tables
+ * together.
+ *
+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
+ * D4-23 and D4-25 in ARM DDI 0487A.b.
+ */
#ifdef CONFIG_ARM64_64K_PAGES
/*
* Stage2 translation configuration:
@@ -148,7 +160,7 @@
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_VMID_SHIFT (48LLU)
#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 9fcd54b1e16d..483842180f8f 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -18,6 +18,8 @@
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
+#include <asm/virt.h>
+
/*
* 0 is reserved as an invalid value.
* Order *must* be kept in sync with the hyp switch code.
@@ -43,14 +45,25 @@
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
#define PAR_EL1 21 /* Physical Address Register */
+#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
+#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
+#define DBGBCR15_EL1 38
+#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
+#define DBGBVR15_EL1 54
+#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
+#define DBGWCR15_EL1 70
+#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
+#define DBGWVR15_EL1 86
+#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 22 /* Domain Access Control Register */
-#define IFSR32_EL2 23 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
-#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 28
+#define DACR32_EL2 88 /* Domain Access Control Register */
+#define IFSR32_EL2 89 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
+#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 94
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -82,11 +95,23 @@
#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-#define NR_CP15_REGS (NR_SYS_REGS * 2)
+
+#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS (NR_SYS_REGS * 2)
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
+#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
@@ -96,13 +121,21 @@ extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_vector[];
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
+#define __kvm_hyp_code_start __hyp_text_start
+#define __kvm_hyp_code_end __hyp_text_end
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+
+extern char __save_vgic_v2_state[];
+extern char __restore_vgic_v2_state[];
+extern char __save_vgic_v3_state[];
+extern char __restore_vgic_v3_state[];
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
index 9a59301cd014..0b52377a6c11 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -39,7 +39,8 @@ void kvm_register_target_sys_reg_table(unsigned int target,
struct kvm_sys_reg_target_table *table);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index dd8ecfc3f995..5674a55b5518 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -174,6 +174,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
+ return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+{
return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
}
@@ -213,6 +218,17 @@ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
default:
return be64_to_cpu(data);
}
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return le16_to_cpu(data & 0xffff);
+ case 4:
+ return le32_to_cpu(data & 0xffffffff);
+ default:
+ return le64_to_cpu(data);
+ }
}
return data; /* Leave LE untouched */
@@ -233,6 +249,17 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
default:
return cpu_to_be64(data);
}
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_le16(data & 0xffff);
+ case 4:
+ return cpu_to_le32(data & 0xffffffff);
+ default:
+ return cpu_to_le64(data);
+ }
}
return data; /* Leave LE untouched */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 92242ce06309..2012c4ba8d67 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -22,6 +22,8 @@
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__
+#include <linux/types.h>
+#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -41,8 +43,7 @@
#define KVM_VCPU_MAX_FEATURES 3
-struct kvm_vcpu;
-int kvm_target_cpu(void);
+int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -86,7 +87,7 @@ struct kvm_cpu_context {
struct kvm_regs gp_regs;
union {
u64 sys_regs[NR_SYS_REGS];
- u32 cp15[NR_CP15_REGS];
+ u32 copro[NR_COPRO_REGS];
};
};
@@ -101,6 +102,9 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
+ /* Debug state */
+ u64 debug_flags;
+
/* Pointer to host CPU context */
kvm_cpu_context_t *host_cpu_context;
@@ -138,7 +142,20 @@ struct kvm_vcpu_arch {
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
-#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)])
+/*
+ * CP14 and CP15 live in the same array, as they are backed by the
+ * same system registers.
+ */
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r))
+#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1)
+#else
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1)
+#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r))
+#endif
struct kvm_vm_stat {
u32 remote_tlb_flush;
@@ -148,25 +165,23 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-struct kvm_one_reg;
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-struct kvm;
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
+ unsigned long end)
{
return 0;
}
@@ -176,8 +191,13 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
+static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+ unsigned long address)
+{
+}
+
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
@@ -200,4 +220,38 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
+struct vgic_sr_vectors {
+ void *save_vgic;
+ void *restore_vgic;
+};
+
+static inline void vgic_arch_setup(const struct vgic_params *vgic)
+{
+ extern struct vgic_sr_vectors __vgic_sr_vectors;
+
+ switch(vgic->type)
+ {
+ case VGIC_V2:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
+ break;
+
+#ifdef CONFIG_ARM_GIC_V3
+ case VGIC_V3:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
+ break;
+#endif
+
+ default:
+ BUG();
+ }
+}
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
#endif /* __ARM64_KVM_HOST_H__ */
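A short illustrative aside (not part of the patch) on how the doubled copro[] index space above lines up with sys_regs[]:

/*
 * sys_regs[] holds u64 values and copro[] u32 values over the same union, so
 * 64-bit system register index N overlays 32-bit slots 2N and 2N + 1.  For
 * example MDSCR_EL1 = 22 gives cp14_DBGDSCRext = 44, and the
 * vcpu_cp15_64_low()/vcpu_cp15_64_high() accessors pick which of the two
 * adjacent 32-bit slots to use according to the host byte order.
 */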
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7d29847a893b..0caf7a59f6a1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -41,6 +41,18 @@
*/
#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
+ * levels in addition to the PGD and potentially the PUD which are
+ * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
+ * tables use one level fewer than the kernel).
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define KVM_MMU_CACHE_MIN_PAGES 1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES 2
+#endif
+
#ifdef __ASSEMBLY__
/*
@@ -53,23 +65,19 @@
#else
+#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
/*
- * Align KVM with the kernel's view of physical memory. Should be
- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
+ * We currently only support a 40bit IPA.
*/
-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
+#define KVM_PHYS_SHIFT (40)
#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
-/* Make sure we get the right size, and thus the right alignment */
-#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
-
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
@@ -78,7 +86,7 @@ void free_hyp_pgds(void);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size);
+ phys_addr_t pa, unsigned long size, bool writable);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -93,20 +101,8 @@ void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
-static inline bool kvm_is_write_fault(unsigned long esr)
-{
- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
-
- if (esr_ec == ESR_EL2_EC_IABT)
- return false;
-
- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
- return false;
-
- return true;
-}
-
static inline void kvm_clean_pgd(pgd_t *pgd) {}
+static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}
@@ -125,6 +121,118 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
+/*
+ * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, a single pgd
+ * entry can address the entire IPA input range. Note that in this case, the
+ * pgd is actually not used by
+ * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
+ * structure for the kernel pgtable macros to work.
+ */
+#if PGDIR_SHIFT > KVM_PHYS_SHIFT
+#define PTRS_PER_S2_PGD_SHIFT 0
+#else
+#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
+#endif
+#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
+#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+/*
+ * If we are concatenating first level stage-2 page tables, we would have at
+ * most 16 pointers in the fake PGD, because that's what the architecture
+ * allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) represents the
+ * first level for the host, and we add 1 to go to the next level (which uses
+ * concatenation) for the stage-2 tables.
+ */
+#if PTRS_PER_S2_PGD <= 16
+#define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#else
+#define KVM_PREALLOC_LEVEL (0)
+#endif
+
+/**
+ * kvm_prealloc_hwpgd - allocate initial table for VTTBR
+ * @kvm: The KVM struct pointer for the VM.
+ * @pgd: The kernel pseudo pgd
+ *
+ * When the kernel uses more levels of page tables than the guest, we allocate
+ * a fake PGD and pre-populate it to point to the next-level page table, which
+ * will be the real initial page table pointed to by the VTTBR.
+ *
+ * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
+ * the kernel will use a folded pud. When KVM_PREALLOC_LEVEL==1, we
+ * allocate 2 consecutive PUD pages.
+ */
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+ unsigned int i;
+ unsigned long hwpgd;
+
+ if (KVM_PREALLOC_LEVEL == 0)
+ return 0;
+
+ hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
+ if (!hwpgd)
+ return -ENOMEM;
+
+ for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+ if (KVM_PREALLOC_LEVEL == 1)
+ pgd_populate(NULL, pgd + i,
+ (pud_t *)hwpgd + i * PTRS_PER_PUD);
+ else if (KVM_PREALLOC_LEVEL == 2)
+ pud_populate(NULL, pud_offset(pgd, 0) + i,
+ (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+ }
+
+ return 0;
+}
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+ pgd_t *pgd = kvm->arch.pgd;
+ pud_t *pud;
+
+ if (KVM_PREALLOC_LEVEL == 0)
+ return pgd;
+
+ pud = pud_offset(pgd, 0);
+ if (KVM_PREALLOC_LEVEL == 1)
+ return pud;
+
+ BUG_ON(KVM_PREALLOC_LEVEL != 2);
+ return pmd_offset(pud, 0);
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm)
+{
+ if (KVM_PREALLOC_LEVEL > 0) {
+ unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
+ free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
+ }
+}
+
+static inline bool kvm_page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
+
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define kvm_pmd_table_empty(kvm, pmdp) (0)
+#else
+#define kvm_pmd_table_empty(kvm, pmdp) \
+ (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
+#endif
+
+#ifdef __PAGETABLE_PUD_FOLDED
+#define kvm_pud_table_empty(kvm, pudp) (0)
+#else
+#define kvm_pud_table_empty(kvm, pudp) \
+ (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
+#endif
+
+
struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
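To make the KVM_PREALLOC_LEVEL selection above concrete, a worked set of numbers (assuming KVM_PHYS_SHIFT = 40 and the usual arm64 PGDIR_SHIFT values; illustrative only, not part of the patch):

/*
 * 4K pages, 4 levels:  PGDIR_SHIFT = 39, PTRS_PER_S2_PGD = 1 << 1 = 2 (<= 16)
 *                      KVM_PREALLOC_LEVEL = 4 - 4 + 1 = 1
 *                      -> kvm_prealloc_hwpgd() hands two PUD pages to VTTBR
 *
 * 4K pages, 3 levels:  PGDIR_SHIFT = 30, PTRS_PER_S2_PGD = 1 << 10 = 1024
 *                      KVM_PREALLOC_LEVEL = 0, the real pgd is used directly
 *
 * 64K pages, 3 levels: PGDIR_SHIFT = 42 > KVM_PHYS_SHIFT, PTRS_PER_S2_PGD = 1
 *                      KVM_PREALLOC_LEVEL = 4 - 3 + 1 = 2
 *                      -> a single PMD page backs the VTTBR
 */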
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ccc7087d3c4e..a62cd077457b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
* virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid
*/
-#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 7a3f462133b0..22b16232bd60 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -28,9 +28,6 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA 1
-
/*
* The idmap and swapper page tables need some space reserved in the kernel
* image. Both require pgd, pud (4 levels only) and pmd tables to (section)
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
new file mode 100644
index 000000000000..872ba939fcb2
--- /dev/null
+++ b/arch/arm64/include/asm/pci.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_PCI_H
+#define __ASM_PCI_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm-generic/pci-bridge.h>
+#include <asm-generic/pci-dma-compat.h>
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0
+
+/*
+ * Set to 1 if the kernel should re-assign all PCI bus numbers
+ */
+#define pcibios_assign_all_busses() \
+ (pci_has_flag(PCI_REASSIGN_ALL_BUS))
+
+/*
+ * PCI address space differs from physical memory address space
+ */
+#define PCI_DMA_BUS_IS_PHYS (0)
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return 1;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 453a179469a3..5279e5733386 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- register unsigned long *sp asm ("sp");
/*
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+ asm("mrs %0, tpidr_el1" : "=r" (off) :
+ "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffe1ba0506d1..41a43bf26492 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
@@ -149,46 +149,51 @@ extern struct page *empty_zero_page;
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
- pte_val(pte) &= ~PTE_WRITE;
+ pte_val(pte) &= ~pgprot_val(prot);
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
- pte_val(pte) |= PTE_WRITE;
+ pte_val(pte) |= pgprot_val(prot);
return pte;
}
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
- pte_val(pte) &= ~PTE_DIRTY;
- return pte;
+ return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- pte_val(pte) |= PTE_DIRTY;
- return pte;
+ return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkold(pte_t pte)
{
- pte_val(pte) &= ~PTE_AF;
- return pte;
+ return clear_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
- pte_val(pte) |= PTE_AF;
- return pte;
+ return set_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
- pte_val(pte) |= PTE_SPECIAL;
- return pte;
+ return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}
static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -239,6 +244,16 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_PTE_SPECIAL
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte(pud_val(pud));
+}
+
+static inline pmd_t pud_pmd(pud_t pud)
+{
+ return __pmd(pud_val(pud));
+}
+
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
@@ -256,7 +271,13 @@ static inline pmd_t pte_pmd(pte_t pte)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
-#endif
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+struct vm_area_struct;
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
@@ -277,6 +298,7 @@ static inline pmd_t pte_pmd(pte_t pte)
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_write(pud) pte_write(pud_pte(pud))
#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
@@ -296,6 +318,8 @@ static inline int has_transparent_hugepage(void)
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_device(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -376,6 +400,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
+#define pud_page(pud) pmd_page(pud_pmd(pud))
+
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 0c657bb54597..9a8fd84f8fb2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -32,6 +32,8 @@ extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+ unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3df21feeabdd..286b1bec547c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -139,7 +139,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
#define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk) ((unsigned long)task_pt_regs(tsk)->sp)
+#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk))
/*
* Prefetching support
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 501000fadb6f..41ed9e13795e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -137,7 +137,7 @@ struct pt_regs {
(!((regs)->pstate & PSR_F_BIT))
#define user_stack_pointer(regs) \
- (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
+ (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index 7c275e3b640f..eeaa97559bab 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -24,22 +24,21 @@
extern const compat_ulong_t aarch32_sigret_code[6];
-int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
+int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
-int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs);
+int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs);
void compat_setup_restart_syscall(struct pt_regs *regs);
#else
-static inline int compat_setup_frame(int usid, struct k_sigaction *ka,
+static inline int compat_setup_frame(int usig, struct ksignal *ksig,
sigset_t *set, struct pt_regs *regs)
{
return -ENOSYS;
}
-static inline int compat_setup_rt_frame(int usig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *set,
+static inline int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
return -ENOSYS;
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a498f2cd2c2a..780f82c827b6 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -48,6 +48,8 @@ extern void smp_init_cpus(void);
*/
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+
/*
* Called from the secondary holding pen, this is the secondary CPU entry point.
*/
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 1be62bcb9d47..74a9d301819f 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -17,7 +17,7 @@
#define __ASM_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS 40
+#define MAX_PHYSMEM_BITS 48
#define SECTION_SIZE_BITS 30
#endif
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index e9c149c042e0..456d67c1f0fa 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,7 @@ struct sleep_save_sp {
phys_addr_t save_ptr_stash_phys;
};
+extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 45108d802f5e..459bf8e53208 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -69,14 +69,19 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
static inline struct thread_info *current_thread_info(void)
{
- register unsigned long sp asm ("sp");
- return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 62731ef9749a..a82c0c5c8b52 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -23,6 +23,20 @@
#include <asm-generic/tlb.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+ free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
/*
 * There are three ways the TLB shootdown code is used:
* 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
@@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
pgtable_page_dtor(pte);
tlb_add_flush(tlb, addr);
- tlb_remove_page(tlb, pte);
+ tlb_remove_entry(tlb, pte);
}
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
@@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
tlb_add_flush(tlb, addr);
- tlb_remove_page(tlb, virt_to_page(pmdp));
+ tlb_remove_entry(tlb, virt_to_page(pmdp));
}
#endif
@@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
tlb_add_flush(tlb, addr);
- tlb_remove_page(tlb, virt_to_page(pudp));
+ tlb_remove_entry(tlb, virt_to_page(pudp));
}
#endif
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 4bc95d27e063..6d2bf419431d 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -41,7 +41,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 383
+#define __NR_compat_syscalls 386
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index e242600c4046..9dfdac4a74a1 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -787,3 +787,10 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
#define __NR_renameat2 382
__SYSCALL(__NR_renameat2, sys_renameat2)
+ /* 383 for seccomp */
+#define __NR_getrandom 384
+__SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 385
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 386
+__SYSCALL(__NR_bpf, sys_bpf)
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 215ad4649dd7..7a5df5252dd7 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -50,6 +50,10 @@ static inline bool is_hyp_mode_mismatched(void)
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
+
#endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index e633ff8cdec8..8e38878c87c6 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -37,6 +37,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -159,6 +160,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index df7ef8768fc2..5bd029b43644 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -26,9 +26,11 @@ arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
+arm64-obj-$(CONFIG_PCI) += pci.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 646f888387cd..9a9fce090d58 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -120,6 +120,7 @@ int main(void)
DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
+ DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
@@ -129,13 +130,24 @@ int main(void)
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
+ DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
+ DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
+ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+ DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
+ DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
+ DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
+ DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
+ DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
+ DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
+ DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
+ DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
new file mode 100644
index 000000000000..19d17f51db37
--- /dev/null
+++ b/arch/arm64/kernel/cpuidle.c
@@ -0,0 +1,31 @@
+/*
+ * ARM64 CPU idle arch support
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cpu_ops.h>
+
+int cpu_init_idle(unsigned int cpu)
+{
+ int ret = -EOPNOTSUPP;
+ struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+
+ if (!cpu_node)
+ return -ENODEV;
+
+ if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
+ ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+ return ret;
+}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index f798f66634af..504fdaa8367e 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -20,8 +20,10 @@
#include <asm/cputype.h>
#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>
@@ -47,9 +49,19 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
unsigned int cpu = smp_processor_id();
u32 l1ip = CTR_L1IP(info->reg_ctr);
- if (l1ip != ICACHE_POLICY_PIPT)
- set_bit(ICACHEF_ALIASING, &__icache_flags);
- if (l1ip == ICACHE_POLICY_AIVIVT);
+ if (l1ip != ICACHE_POLICY_PIPT) {
+ /*
+ * VIPT caches are non-aliasing if the VA always equals the PA
+ * in all bit positions that are covered by the index. This is
+ * the case if the size of a way (# of sets * line size) does
+ * not exceed PAGE_SIZE.
+ */
+ u32 waysize = icache_get_numsets() * icache_get_linesize();
+
+ if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
+ set_bit(ICACHEF_ALIASING, &__icache_flags);
+ }
+ if (l1ip == ICACHE_POLICY_AIVIVT)
set_bit(ICACHEF_AIVIVT, &__icache_flags);
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
@@ -190,3 +202,15 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
}
+
+u64 __attribute_const__ icache_get_ccsidr(void)
+{
+ u64 ccsidr;
+
+ WARN_ON(preemptible());
+
+ /* Select L1 I-cache and read its size ID register */
+ asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
+ : "=r"(ccsidr) : "r"(1L));
+ return ccsidr;
+}
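The VIPT aliasing test added above is plain arithmetic; a hypothetical cache geometry (assumed figures, not taken from the patch) illustrates it:

/*
 * A 32 KB, 2-way VIPT I-cache with 64-byte lines has 32768 / (2 * 64) = 256
 * sets, so one way spans 256 * 64 = 16 KB; with 4 KB pages that exceeds
 * PAGE_SIZE and ICACHEF_ALIASING is set.  A 16 KB, 4-way cache has a 4 KB
 * way and stays non-aliasing.
 */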
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index fe5b94078d82..b056369fd47d 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -30,15 +30,6 @@
#include <asm/cputype.h>
#include <asm/system_misc.h>
-/* Low-level stepping controls. */
-#define DBG_MDSCR_SS (1 << 0)
-#define DBG_SPSR_SS (1 << 21)
-
-/* MDSCR_EL1 enabling bits */
-#define DBG_MDSCR_KDE (1 << 13)
-#define DBG_MDSCR_MDE (1 << 15)
-#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
-
/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 619b1dd7bcde..d18a44940968 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -54,18 +54,17 @@ ENTRY(efi_stub_entry)
b.eq efi_load_fail
/*
- * efi_entry() will have relocated the kernel image if necessary
- * and we return here with device tree address in x0 and the kernel
- * entry point stored at *image_addr. Save those values in registers
- * which are callee preserved.
+ * efi_entry() will have copied the kernel image if necessary and we
+ * return here with device tree address in x0 and the kernel entry
+ * point stored at *image_addr. Save those values in registers which
+ * are callee preserved.
*/
mov x20, x0 // DTB address
ldr x0, [sp, #16] // relocated _text address
mov x21, x0
/*
- * Flush dcache covering current runtime addresses
- * of kernel text/data. Then flush all of icache.
+ * Calculate size of the kernel Image (same for original and copy).
*/
adrp x1, _text
add x1, x1, #:lo12:_text
@@ -73,9 +72,24 @@ ENTRY(efi_stub_entry)
add x2, x2, #:lo12:_edata
sub x1, x2, x1
+ /*
+ * Flush the copied Image to the PoC, and ensure it is not shadowed by
+ * stale icache entries from before relocation.
+ */
bl __flush_dcache_area
ic ialluis
+ /*
+ * Ensure that the rest of this function (in the original Image) is
+ * visible when the caches are disabled. The I-cache can't have stale
+ * entries for the VA range of the current image, so no maintenance is
+ * necessary.
+ */
+ adr x0, efi_stub_entry
+ adr x1, efi_stub_entry_end
+ sub x1, x1, x0
+ bl __flush_dcache_area
+
/* Turn off Dcache and MMU */
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
@@ -105,4 +119,5 @@ efi_load_fail:
ldp x29, x30, [sp], #32
ret
+efi_stub_entry_end:
ENDPROC(efi_stub_entry)
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 1317fef8dde9..d27dd982ff26 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -28,20 +28,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) {
kernel_memsize = kernel_size + (_end - _edata);
- status = efi_relocate_kernel(sys_table, image_addr,
- kernel_size, kernel_memsize,
- dram_base + TEXT_OFFSET,
- PAGE_SIZE);
+ status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
+ SZ_2M, reserve_addr);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to relocate kernel\n");
return status;
}
- if (*image_addr != (dram_base + TEXT_OFFSET)) {
- pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
- efi_free(sys_table, kernel_memsize, *image_addr);
- return EFI_LOAD_ERROR;
- }
- *image_size = kernel_memsize;
+ memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
+ kernel_size);
+ *image_addr = *reserve_addr + TEXT_OFFSET;
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
}
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e72f3100958f..95c49ebc660d 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -89,7 +89,8 @@ static int __init uefi_init(void)
*/
if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
pr_err("System table signature incorrect\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto out;
}
if ((efi.systab->hdr.revision >> 16) < 2)
pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
@@ -103,6 +104,7 @@ static int __init uefi_init(void)
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = c16[i];
vendor[i] = '\0';
+ early_memunmap(c16, sizeof(vendor));
}
pr_info("EFI v%u.%.02u by %s\n",
@@ -113,29 +115,11 @@ static int __init uefi_init(void)
if (retval == 0)
set_bit(EFI_CONFIG_TABLES, &efi.flags);
- early_memunmap(c16, sizeof(vendor));
+out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
-
return retval;
}
-static __initdata char memory_type_name[][32] = {
- {"Reserved"},
- {"Loader Code"},
- {"Loader Data"},
- {"Boot Code"},
- {"Boot Data"},
- {"Runtime Code"},
- {"Runtime Data"},
- {"Conventional Memory"},
- {"Unusable Memory"},
- {"ACPI Reclaim Memory"},
- {"ACPI Memory NVS"},
- {"Memory Mapped I/O"},
- {"MMIO Port Space"},
- {"PAL Code"},
-};
-
/*
* Return true for RAM regions we want to permanently reserve.
*/
@@ -166,10 +150,13 @@ static __init void reserve_regions(void)
paddr = md->phys_addr;
npages = md->num_pages;
- if (uefi_debug)
- pr_info(" 0x%012llx-0x%012llx [%s]",
+ if (uefi_debug) {
+ char buf[64];
+
+ pr_info(" 0x%012llx-0x%012llx %s",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
- memory_type_name[md->type]);
+ efi_md_typeattr_format(buf, sizeof(buf), md));
+ }
memrange_efi_to_native(&paddr, &npages);
size = npages << PAGE_SHIFT;
@@ -188,6 +175,8 @@ static __init void reserve_regions(void)
if (uefi_debug)
pr_cont("\n");
}
+
+ set_bit(EFI_MEMMAP, &efi.flags);
}
@@ -391,11 +380,16 @@ static int __init arm64_enter_virtual_mode(void)
return -1;
}
- pr_info("Remapping and enabling EFI services.\n");
-
- /* replace early memmap mapping with permanent mapping */
mapsize = memmap.map_end - memmap.map;
early_memunmap(memmap.map, mapsize);
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return -1;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+ /* replace early memmap mapping with permanent mapping */
memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
mapsize);
memmap.map_end = memmap.map + mapsize;
@@ -463,6 +457,8 @@ static int __init arm64_enter_virtual_mode(void)
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ efi.runtime_version = efi.systab->hdr.revision;
+
return 0;
err_unmap:
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f0b5e5120a87..726b910fe6ec 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -324,7 +324,6 @@ el1_dbg:
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
- enable_dbg
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ad8aebb1cdef..3dca15634e69 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -270,6 +270,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
case CPU_PM_ENTER:
if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
+ this_cpu_write(fpsimd_last_state, NULL);
break;
case CPU_PM_EXIT:
if (current->mm)
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 7924d73b6476..cf8556ae09d0 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -58,7 +58,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
u32 new;
pc = (unsigned long)&ftrace_call;
- new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
+ AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, 0, new, false);
}
@@ -72,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
u32 old, new;
old = aarch64_insn_gen_nop();
- new = aarch64_insn_gen_branch_imm(pc, addr, true);
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, old, new, true);
}
@@ -86,7 +87,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old, new;
- old = aarch64_insn_gen_branch_imm(pc, addr, true);
+ old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
@@ -154,7 +155,8 @@ static int ftrace_modify_graph_caller(bool enable)
u32 branch, nop;
branch = aarch64_insn_gen_branch_imm(pc,
- (unsigned long)ftrace_graph_caller, false);
+ (unsigned long)ftrace_graph_caller,
+ AARCH64_INSN_BRANCH_LINK);
nop = aarch64_insn_gen_nop();
if (enable)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 144f10567f82..0a6e4f924df8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -38,11 +38,11 @@
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
-#if (TEXT_OFFSET & 0xf) != 0
-#error TEXT_OFFSET must be at least 16B aligned
-#elif (PAGE_OFFSET & 0xfffff) != 0
+#if (TEXT_OFFSET & 0xfff) != 0
+#error TEXT_OFFSET must be at least 4KB aligned
+#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
-#elif TEXT_OFFSET > 0xfffff
+#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif
@@ -151,7 +151,7 @@ optional_header:
.short 0x20b // PE32+ format
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
- .long _edata - stext // SizeOfCode
+ .long _end - stext // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
.long efi_stub_entry - efi_head // AddressOfEntryPoint
@@ -169,7 +169,7 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
- .long _edata - efi_head // SizeOfImage
+ .long _end - efi_head // SizeOfImage
// Everything before the kernel image is considered part of the header
.long stext - efi_head // SizeOfHeaders
@@ -216,7 +216,7 @@ section_table:
.byte 0
.byte 0
.byte 0 // end of 0 padding of section name
- .long _edata - stext // VirtualSize
+ .long _end - stext // VirtualSize
.long stext - efi_head // VirtualAddress
.long _edata - stext // SizeOfRawData
.long stext - efi_head // PointerToRawData
@@ -373,10 +373,6 @@ ENTRY(__boot_cpu_mode)
.long 0
.popsection
- .align 3
-2: .quad .
- .quad PAGE_OFFSET
-
#ifdef CONFIG_SMP
.align 3
1: .quad .
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 92f36835486b..8cd27fedc8b6 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -20,9 +22,14 @@
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>
+
#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
#include <asm/insn.h>
+#define AARCH64_INSN_SF_BIT BIT(31)
+#define AARCH64_INSN_N_BIT BIT(22)
+
static int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
@@ -156,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
* which ends with "dsb; isb" pair guaranteeing global
* visibility.
*/
- atomic_set(&pp->cpu_count, -1);
+ /* Notify other processors with an additional increment. */
+ atomic_inc(&pp->cpu_count);
} else {
- while (atomic_read(&pp->cpu_count) != -1)
+ while (atomic_read(&pp->cpu_count) <= num_online_cpus())
cpu_relax();
isb();
}
@@ -251,6 +259,19 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
mask = BIT(9) - 1;
shift = 12;
break;
+ case AARCH64_INSN_IMM_7:
+ mask = BIT(7) - 1;
+ shift = 15;
+ break;
+ case AARCH64_INSN_IMM_6:
+ case AARCH64_INSN_IMM_S:
+ mask = BIT(6) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_R:
+ mask = BIT(6) - 1;
+ shift = 16;
+ break;
default:
pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
type);
@@ -264,10 +285,76 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
return insn;
}
-u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
- enum aarch64_insn_branch_type type)
+static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
+ u32 insn,
+ enum aarch64_insn_register reg)
+{
+ int shift;
+
+ if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
+ pr_err("%s: unknown register encoding %d\n", __func__, reg);
+ return 0;
+ }
+
+ switch (type) {
+ case AARCH64_INSN_REGTYPE_RT:
+ case AARCH64_INSN_REGTYPE_RD:
+ shift = 0;
+ break;
+ case AARCH64_INSN_REGTYPE_RN:
+ shift = 5;
+ break;
+ case AARCH64_INSN_REGTYPE_RT2:
+ case AARCH64_INSN_REGTYPE_RA:
+ shift = 10;
+ break;
+ case AARCH64_INSN_REGTYPE_RM:
+ shift = 16;
+ break;
+ default:
+ pr_err("%s: unknown register type encoding %d\n", __func__,
+ type);
+ return 0;
+ }
+
+ insn &= ~(GENMASK(4, 0) << shift);
+ insn |= reg << shift;
+
+ return insn;
+}
+
+static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
+ u32 insn)
+{
+ u32 size;
+
+ switch (type) {
+ case AARCH64_INSN_SIZE_8:
+ size = 0;
+ break;
+ case AARCH64_INSN_SIZE_16:
+ size = 1;
+ break;
+ case AARCH64_INSN_SIZE_32:
+ size = 2;
+ break;
+ case AARCH64_INSN_SIZE_64:
+ size = 3;
+ break;
+ default:
+ pr_err("%s: unknown size encoding %d\n", __func__, type);
+ return 0;
+ }
+
+ insn &= ~GENMASK(31, 30);
+ insn |= size << 30;
+
+ return insn;
+}
+
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+ long range)
{
- u32 insn;
long offset;
/*
@@ -276,23 +363,97 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
*/
BUG_ON((pc & 0x3) || (addr & 0x3));
+ offset = ((long)addr - (long)pc);
+ BUG_ON(offset < -range || offset >= range);
+
+ return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
/*
* B/BL support [-128M, 128M) offset
* ARM64 virtual address arrangement guarantees all kernel and module
* texts are within +/-128M.
*/
- offset = ((long)addr - (long)pc);
- BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+ offset = branch_imm_common(pc, addr, SZ_128M);
- if (type == AARCH64_INSN_BRANCH_LINK)
+ switch (type) {
+ case AARCH64_INSN_BRANCH_LINK:
insn = aarch64_insn_get_bl_value();
- else
+ break;
+ case AARCH64_INSN_BRANCH_NOLINK:
insn = aarch64_insn_get_b_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
offset >> 2);
}
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ offset = branch_imm_common(pc, addr, SZ_1M);
+
+ switch (type) {
+ case AARCH64_INSN_BRANCH_COMP_ZERO:
+ insn = aarch64_insn_get_cbz_value();
+ break;
+ case AARCH64_INSN_BRANCH_COMP_NONZERO:
+ insn = aarch64_insn_get_cbnz_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+}
+
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_condition cond)
+{
+ u32 insn;
+ long offset;
+
+ offset = branch_imm_common(pc, addr, SZ_1M);
+
+ insn = aarch64_insn_get_bcond_value();
+
+ BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
+ insn |= cond;
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+}
+
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
return aarch64_insn_get_hint_value() | op;
@@ -302,3 +463,500 @@ u32 __kprobes aarch64_insn_gen_nop(void)
{
return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
+
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_BRANCH_NOLINK:
+ insn = aarch64_insn_get_br_value();
+ break;
+ case AARCH64_INSN_BRANCH_LINK:
+ insn = aarch64_insn_get_blr_value();
+ break;
+ case AARCH64_INSN_BRANCH_RETURN:
+ insn = aarch64_insn_get_ret_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
+}
+
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register offset,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
+ insn = aarch64_insn_get_ldr_reg_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_REG_OFFSET:
+ insn = aarch64_insn_get_str_reg_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+ offset);
+}
+
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_register base,
+ int offset,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
+ insn = aarch64_insn_get_ldp_pre_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
+ insn = aarch64_insn_get_stp_pre_value();
+ break;
+ case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
+ insn = aarch64_insn_get_ldp_post_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
+ insn = aarch64_insn_get_stp_post_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ /* offset must be multiples of 4 in the range [-256, 252] */
+ BUG_ON(offset & 0x3);
+ BUG_ON(offset < -256 || offset > 252);
+ shift = 2;
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ /* offset must be multiples of 8 in the range [-512, 504] */
+ BUG_ON(offset & 0x7);
+ BUG_ON(offset < -512 || offset > 504);
+ shift = 3;
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ reg1);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
+ reg2);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
+ offset >> shift);
+}
+
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int imm, enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_ADSB_ADD:
+ insn = aarch64_insn_get_add_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB:
+ insn = aarch64_insn_get_sub_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+ insn = aarch64_insn_get_adds_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+ insn = aarch64_insn_get_subs_imm_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ BUG_ON(imm & ~(SZ_4K - 1));
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
+}
+
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int immr, int imms,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_bitfield_type type)
+{
+ u32 insn;
+ u32 mask;
+
+ switch (type) {
+ case AARCH64_INSN_BITFIELD_MOVE:
+ insn = aarch64_insn_get_bfm_value();
+ break;
+ case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
+ insn = aarch64_insn_get_ubfm_value();
+ break;
+ case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
+ insn = aarch64_insn_get_sbfm_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ mask = GENMASK(4, 0);
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
+ mask = GENMASK(5, 0);
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ BUG_ON(immr & ~mask);
+ BUG_ON(imms & ~mask);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
+}
+
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+ int imm, int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_movewide_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_MOVEWIDE_ZERO:
+ insn = aarch64_insn_get_movz_value();
+ break;
+ case AARCH64_INSN_MOVEWIDE_KEEP:
+ insn = aarch64_insn_get_movk_value();
+ break;
+ case AARCH64_INSN_MOVEWIDE_INVERSE:
+ insn = aarch64_insn_get_movn_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ BUG_ON(imm & ~(SZ_64K - 1));
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ BUG_ON(shift != 0 && shift != 16);
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
+ shift != 48);
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn |= (shift >> 4) << 21;
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+}
+
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_ADSB_ADD:
+ insn = aarch64_insn_get_add_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB:
+ insn = aarch64_insn_get_sub_value();
+ break;
+ case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+ insn = aarch64_insn_get_adds_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+ insn = aarch64_insn_get_subs_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ BUG_ON(shift & ~(SZ_32 - 1));
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ BUG_ON(shift & ~(SZ_64 - 1));
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
+
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data1_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA1_REVERSE_16:
+ insn = aarch64_insn_get_rev16_value();
+ break;
+ case AARCH64_INSN_DATA1_REVERSE_32:
+ insn = aarch64_insn_get_rev32_value();
+ break;
+ case AARCH64_INSN_DATA1_REVERSE_64:
+ BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
+ insn = aarch64_insn_get_rev64_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+}
+
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data2_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA2_UDIV:
+ insn = aarch64_insn_get_udiv_value();
+ break;
+ case AARCH64_INSN_DATA2_SDIV:
+ insn = aarch64_insn_get_sdiv_value();
+ break;
+ case AARCH64_INSN_DATA2_LSLV:
+ insn = aarch64_insn_get_lslv_value();
+ break;
+ case AARCH64_INSN_DATA2_LSRV:
+ insn = aarch64_insn_get_lsrv_value();
+ break;
+ case AARCH64_INSN_DATA2_ASRV:
+ insn = aarch64_insn_get_asrv_value();
+ break;
+ case AARCH64_INSN_DATA2_RORV:
+ insn = aarch64_insn_get_rorv_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+}
+
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data3_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA3_MADD:
+ insn = aarch64_insn_get_madd_value();
+ break;
+ case AARCH64_INSN_DATA3_MSUB:
+ insn = aarch64_insn_get_msub_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ reg1);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+ reg2);
+}
+
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_logic_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LOGIC_AND:
+ insn = aarch64_insn_get_and_value();
+ break;
+ case AARCH64_INSN_LOGIC_BIC:
+ insn = aarch64_insn_get_bic_value();
+ break;
+ case AARCH64_INSN_LOGIC_ORR:
+ insn = aarch64_insn_get_orr_value();
+ break;
+ case AARCH64_INSN_LOGIC_ORN:
+ insn = aarch64_insn_get_orn_value();
+ break;
+ case AARCH64_INSN_LOGIC_EOR:
+ insn = aarch64_insn_get_eor_value();
+ break;
+ case AARCH64_INSN_LOGIC_EON:
+ insn = aarch64_insn_get_eon_value();
+ break;
+ case AARCH64_INSN_LOGIC_AND_SETFLAGS:
+ insn = aarch64_insn_get_ands_value();
+ break;
+ case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
+ insn = aarch64_insn_get_bics_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ BUG_ON(shift & ~(SZ_32 - 1));
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ BUG_ON(shift & ~(SZ_64 - 1));
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..071a6ec13bd8 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -40,33 +40,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-/*
- * handle_IRQ handles all hardware IRQ's. Decoded IRQs should
- * not come via this function. Instead, they should provide their
- * own 'handler'. Used by platform code implementing C-based 1st
- * level decoding.
- */
-void handle_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
-
- /*
- * Some hardware gives randomly wrong interrupts. Rather
- * than crashing, do something sensible.
- */
- if (unlikely(irq >= nr_irqs)) {
- pr_warn_ratelimited("Bad IRQ%u\n", irq);
- ack_bad_irq(irq);
- } else {
- generic_handle_irq(irq);
- }
-
- irq_exit();
- set_irq_regs(old_regs);
-}
-
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
if (handle_arch_irq)
@@ -97,19 +70,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
ret = true;
+ }
- /*
- * when using forced irq_set_affinity we must ensure that the cpu
- * being offlined is not present in the affinity mask, it may be
- * selected as the target CPU otherwise
- */
- affinity = cpu_online_mask;
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret;
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 75c9cf1aafee..a0d10c55f307 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
- .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
+ .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
- .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
+ .esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
new file mode 100644
index 000000000000..ce5836c14ec1
--- /dev/null
+++ b/arch/arm64/kernel/pci.c
@@ -0,0 +1,70 @@
+/*
+ * Code borrowed from powerpc/kernel/pci-common.c
+ *
+ * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#include <asm/pci-bridge.h>
+
+/*
+ * Called after each bus is probed, but before its children are examined
+ */
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* nothing to do, expected to be removed in the future */
+}
+
+/*
+ * We don't have to worry about legacy ISA devices, so nothing to do here
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return res->start;
+}
+
+/*
+ * Try to assign the IRQ number from DT when adding a new device
+ */
+int pcibios_add_device(struct pci_dev *dev)
+{
+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static bool dt_domain_found = false;
+
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+ int domain = of_get_pci_domain_nr(parent->of_node);
+
+ if (domain >= 0) {
+ dt_domain_found = true;
+ } else if (dt_domain_found == true) {
+ dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
+ parent->of_node->full_name);
+ return;
+ } else {
+ domain = pci_get_new_domain_nr();
+ }
+
+ bus->domain_nr = domain;
+}
+#endif
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index baf5afb7e6a0..aa29ecb4f800 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1276,7 +1276,7 @@ arch_initcall(cpu_pmu_reset);
/*
* PMU platform driver and devicetree bindings.
*/
-static struct of_device_id armpmu_of_device_ids[] = {
+static const struct of_device_id armpmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3"},
{},
};
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 422ebd63b619..6762ad705587 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -24,6 +24,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return regs->compat_lr;
}
+ if ((u32)idx == PERF_REG_ARM64_SP)
+ return regs->sp;
+
+ if ((u32)idx == PERF_REG_ARM64_PC)
+ return regs->pc;
+
return regs->regs[idx];
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1309d64aa926..fde9923af859 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -57,36 +57,10 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
-static void setup_restart(void)
-{
- /*
- * Tell the mm system that we are going to reboot -
- * we may need it to insert some 1:1 mappings so that
- * soft boot works.
- */
- setup_mm_for_reboot();
-
- /* Clean and invalidate caches */
- flush_cache_all();
-
- /* Turn D-cache off */
- cpu_cache_off();
-
- /* Push out any further dirty data, and ensure cache is empty */
- flush_cache_all();
-}
-
void soft_restart(unsigned long addr)
{
- typedef void (*phys_reset_t)(unsigned long);
- phys_reset_t phys_reset;
-
- setup_restart();
-
- /* Switch to the identity mapping */
- phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
- phys_reset(addr);
-
+ setup_mm_for_reboot();
+ cpu_soft_restart(virt_to_phys(cpu_reset), addr);
/* Should never get here */
BUG();
}
@@ -98,7 +72,6 @@ void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-EXPORT_SYMBOL_GPL(arm_pm_restart);
/*
* This is our default idle handler.
@@ -180,6 +153,8 @@ void machine_restart(char *cmd)
/* Now call the architecture specific reboot code. */
if (arm_pm_restart)
arm_pm_restart(reboot_mode, cmd);
+ else
+ do_kernel_restart(cmd);
/*
* Whoops - the architecture was unable to reboot.
@@ -230,9 +205,27 @@ void exit_thread(void)
{
}
+static void tls_thread_flush(void)
+{
+ asm ("msr tpidr_el0, xzr");
+
+ if (is_compat_task()) {
+ current->thread.tp_value = 0;
+
+ /*
+ * We need to ensure ordering between the shadow state and the
+ * hardware state, so that we don't corrupt the hardware state
+ * with a stale shadow state during context switch.
+ */
+ barrier();
+ asm ("msr tpidrro_el0, xzr");
+ }
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
+ tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
}
@@ -385,8 +378,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
{
return randomize_base(mm->brk);
}
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
- return randomize_base(base);
-}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 553954771a67..663da771580a 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
@@ -28,6 +29,7 @@
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
+#include <asm/suspend.h>
#include <asm/system_misc.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
@@ -65,6 +67,8 @@ enum psci_function {
PSCI_FN_MAX,
};
+static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
+
static u32 psci_function_id[PSCI_FN_MAX];
static int psci_to_linux_errno(int errno)
@@ -93,6 +97,18 @@ static u32 psci_power_state_pack(struct psci_power_state state)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
}
+static void psci_power_state_unpack(u32 power_state,
+ struct psci_power_state *state)
+{
+ state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
+ PSCI_0_2_POWER_STATE_ID_SHIFT;
+ state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
+ PSCI_0_2_POWER_STATE_TYPE_SHIFT;
+ state->affinity_level =
+ (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
+ PSCI_0_2_POWER_STATE_AFFL_SHIFT;
+}
+
/*
* The following two functions are invoked via the invoke_psci_fn pointer
* and will not be inlined, allowing us to piggyback on the AAPCS.
@@ -199,6 +215,63 @@ static int psci_migrate_info_type(void)
return err;
}
+static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
+ unsigned int cpu)
+{
+ int i, ret, count = 0;
+ struct psci_power_state *psci_states;
+ struct device_node *state_node;
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /* Count idle states */
+ while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+ count))) {
+ count++;
+ of_node_put(state_node);
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ u32 psci_power_state;
+
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+ ret = of_property_read_u32(state_node,
+ "arm,psci-suspend-param",
+ &psci_power_state);
+ if (ret) {
+ pr_warn(" * %s missing arm,psci-suspend-param property\n",
+ state_node->full_name);
+ of_node_put(state_node);
+ goto free_mem;
+ }
+
+ of_node_put(state_node);
+ pr_debug("psci-power-state %#x index %d\n", psci_power_state,
+ i);
+ psci_power_state_unpack(psci_power_state, &psci_states[i]);
+ }
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+
+free_mem:
+ kfree(psci_states);
+ return ret;
+}
+
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
@@ -436,8 +509,39 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
#endif
#endif
+static int psci_suspend_finisher(unsigned long index)
+{
+ struct psci_power_state *state = __get_cpu_var(psci_power_state);
+
+ return psci_ops.cpu_suspend(state[index - 1],
+ virt_to_phys(cpu_resume));
+}
+
+static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
+{
+ int ret;
+ struct psci_power_state *state = __get_cpu_var(psci_power_state);
+ /*
+ * idle state index 0 corresponds to wfi, should never be called
+ * from the cpu_suspend operations
+ */
+ if (WARN_ON_ONCE(!index))
+ return -EINVAL;
+
+ if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
+ ret = psci_ops.cpu_suspend(state[index - 1], 0);
+ else
+ ret = __cpu_suspend(index, psci_suspend_finisher);
+
+ return ret;
+}
+
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
+#ifdef CONFIG_CPU_IDLE
+ .cpu_init_idle = cpu_psci_cpu_init_idle,
+ .cpu_suspend = cpu_psci_cpu_suspend,
+#endif
#ifdef CONFIG_SMP
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 0310811bd77d..8a4ae8e73213 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -87,7 +87,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
break;
}
}
- for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+ for (i = 0; i < ARM_MAX_WRP; ++i) {
if (current->thread.debug.hbp_watch[i] == bp) {
info.si_errno = -((i << 1) + 1);
break;
@@ -662,8 +663,10 @@ static int compat_gpr_get(struct task_struct *target,
kbuf += sizeof(reg);
} else {
ret = copy_to_user(ubuf, &reg, sizeof(reg));
- if (ret)
+ if (ret) {
+ ret = -EFAULT;
break;
+ }
ubuf += sizeof(reg);
}
@@ -701,8 +704,10 @@ static int compat_gpr_set(struct task_struct *target,
kbuf += sizeof(reg);
} else {
ret = copy_from_user(&reg, ubuf, sizeof(reg));
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
ubuf += sizeof(reg);
}
@@ -1115,19 +1120,15 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->syscallno);
-#ifdef CONFIG_AUDITSYSCALL
- audit_syscall_entry(syscall_get_arch(), regs->syscallno,
- regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
-#endif
+ audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
+ regs->regs[2], regs->regs[3]);
return regs->syscallno;
}
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
-#ifdef CONFIG_AUDITSYSCALL
audit_syscall_exit(regs);
-#endif
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, regs_return_value(regs));
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 89102a6ffad5..6c4fd2810ecb 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -36,13 +36,12 @@ void *return_address(unsigned int level)
{
struct return_address_data data;
struct stackframe frame;
- register unsigned long current_sp asm ("sp");
data.level = level + 2;
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
walk_stackframe(&frame, save_return_addr, &data);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f6f0ccf35ae6..2437196cc5d4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -78,6 +78,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
#endif
static const char *cpu_name;
+static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
/*
@@ -309,6 +310,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
while (true)
cpu_relax();
}
+
+ machine_name = of_flat_dt_get_machine_name();
}
/*
@@ -362,11 +365,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
- /*
- * Unmask asynchronous aborts early to catch possible system errors.
- */
- local_async_enable();
-
setup_processor();
setup_machine_fdt(__fdt_pointer);
@@ -382,6 +380,12 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+ /*
+ * Unmask asynchronous aborts after bringing up possible earlycon.
+ * (Report possible System Errors once we can report this occurred)
+ */
+ local_async_enable();
+
efi_init();
arm64_memblock_init();
@@ -447,21 +451,10 @@ static int c_show(struct seq_file *m, void *v)
{
int i;
- /*
- * Dump out the common processor features in a single line. Userspace
- * should read the hwcaps with getauxval(AT_HWCAP) rather than
- * attempting to parse this.
- */
- seq_puts(m, "features\t:");
- for (i = 0; hwcap_str[i]; i++)
- if (elf_hwcap & (1 << i))
- seq_printf(m, " %s", hwcap_str[i]);
- seq_puts(m, "\n\n");
+ seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+ cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
for_each_online_cpu(i) {
- struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
- u32 midr = cpuinfo->reg_midr;
-
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
@@ -470,13 +463,25 @@ static int c_show(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- seq_printf(m, "implementer\t: 0x%02x\n",
- MIDR_IMPLEMENTOR(midr));
- seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
- seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
- seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
}
+ /* dump out the processor features */
+ seq_puts(m, "Features\t: ");
+
+ for (i = 0; hwcap_str[i]; i++)
+ if (elf_hwcap & (1 << i))
+ seq_printf(m, "%s ", hwcap_str[i]);
+
+ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+ seq_printf(m, "CPU architecture: AArch64\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+ seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+ seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Hardware\t: %s\n", machine_name);
+
return 0;
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6357b9c6c90e..6fa792137eda 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -209,19 +209,13 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
return err;
}
-static struct rt_sigframe __user *get_sigframe(struct k_sigaction *ka,
+static struct rt_sigframe __user *get_sigframe(struct ksignal *ksig,
struct pt_regs *regs)
{
unsigned long sp, sp_top;
struct rt_sigframe __user *frame;
- sp = sp_top = regs->sp;
-
- /*
- * This is the X/Open sanctioned signal stack switching.
- */
- if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
- sp = sp_top = current->sas_ss_sp + current->sas_ss_size;
+ sp = sp_top = sigsp(regs->sp, ksig);
sp = (sp - sizeof(struct rt_sigframe)) & ~15;
frame = (struct rt_sigframe __user *)sp;
@@ -253,13 +247,13 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
regs->regs[30] = (unsigned long)sigtramp;
}
-static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs)
+static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0;
- frame = get_sigframe(ka, regs);
+ frame = get_sigframe(ksig, regs);
if (!frame)
return 1;
@@ -269,9 +263,9 @@ static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigframe(frame, regs, set);
if (err == 0) {
- setup_return(regs, ka, frame, usig);
- if (ka->sa.sa_flags & SA_SIGINFO) {
- err |= copy_siginfo_to_user(&frame->info, info);
+ setup_return(regs, &ksig->ka, frame, usig);
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
regs->regs[1] = (unsigned long)&frame->info;
regs->regs[2] = (unsigned long)&frame->uc;
}
@@ -291,13 +285,12 @@ static void setup_restart_syscall(struct pt_regs *regs)
/*
* OK, we're invoking a handler
*/
-static void handle_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, struct pt_regs *regs)
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
sigset_t *oldset = sigmask_to_save();
- int usig = sig;
+ int usig = ksig->sig;
int ret;
/*
@@ -310,13 +303,12 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka,
* Set up the stack frame
*/
if (is_compat_task()) {
- if (ka->sa.sa_flags & SA_SIGINFO)
- ret = compat_setup_rt_frame(usig, ka, info, oldset,
- regs);
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
else
- ret = compat_setup_frame(usig, ka, oldset, regs);
+ ret = compat_setup_frame(usig, ksig, oldset, regs);
} else {
- ret = setup_rt_frame(usig, ka, info, oldset, regs);
+ ret = setup_rt_frame(usig, ksig, oldset, regs);
}
/*
@@ -324,18 +316,14 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka,
*/
ret |= !valid_user_regs(&regs->user_regs);
- if (ret != 0) {
- force_sigsegv(sig, tsk);
- return;
- }
-
/*
* Fast forward the stepping logic so we step into the signal
* handler.
*/
- user_fastforward_single_step(tsk);
+ if (!ret)
+ user_fastforward_single_step(tsk);
- signal_delivered(sig, info, ka, regs, 0);
+ signal_setup_done(ret, ksig, 0);
}
/*
@@ -350,10 +338,9 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka,
static void do_signal(struct pt_regs *regs)
{
unsigned long continue_addr = 0, restart_addr = 0;
- struct k_sigaction ka;
- siginfo_t info;
- int signr, retval = 0;
+ int retval = 0;
int syscall = (int)regs->syscallno;
+ struct ksignal ksig;
/*
* If we were from a system call, check for system call restarting...
@@ -387,8 +374,7 @@ static void do_signal(struct pt_regs *regs)
* Get the signal to deliver. When running under ptrace, at this point
* the debugger may change all of our registers.
*/
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
+ if (get_signal(&ksig)) {
/*
* Depending on the signal settings, we may need to revert the
* decision to restart the system call, but skip this if a
@@ -398,12 +384,12 @@ static void do_signal(struct pt_regs *regs)
(retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK ||
(retval == -ERESTARTSYS &&
- !(ka.sa.sa_flags & SA_RESTART)))) {
+ !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
regs->regs[0] = -EINTR;
regs->pc = continue_addr;
}
- handle_signal(signr, &ka, &info, regs);
+ handle_signal(&ksig, regs);
return;
}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index c5ee208321c3..1b9ad02837cf 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -407,20 +407,14 @@ badframe:
return 0;
}
-static void __user *compat_get_sigframe(struct k_sigaction *ka,
+static void __user *compat_get_sigframe(struct ksignal *ksig,
struct pt_regs *regs,
int framesize)
{
- compat_ulong_t sp = regs->compat_sp;
+ compat_ulong_t sp = sigsp(regs->compat_sp, ksig);
void __user *frame;
/*
- * This is the X/Open sanctioned signal stack switching.
- */
- if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
- sp = current->sas_ss_sp + current->sas_ss_size;
-
- /*
* ATPCS B01 mandates 8-byte alignment
*/
frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7));
@@ -520,18 +514,18 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
/*
* 32-bit signal handling routines called from signal.c
*/
-int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
+int compat_setup_rt_frame(int usig, struct ksignal *ksig,
sigset_t *set, struct pt_regs *regs)
{
struct compat_rt_sigframe __user *frame;
int err = 0;
- frame = compat_get_sigframe(ka, regs, sizeof(*frame));
+ frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
if (!frame)
return 1;
- err |= copy_siginfo_to_user32(&frame->info, info);
+ err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
__put_user_error(0, &frame->sig.uc.uc_flags, err);
__put_user_error(0, &frame->sig.uc.uc_link, err);
@@ -541,7 +535,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
err |= compat_setup_sigframe(&frame->sig, regs, set);
if (err == 0) {
- compat_setup_return(regs, ka, frame->sig.retcode, frame, usig);
+ compat_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig);
regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
}
@@ -549,13 +543,13 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
return err;
}
-int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
+int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct compat_sigframe __user *frame;
int err = 0;
- frame = compat_get_sigframe(ka, regs, sizeof(*frame));
+ frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
if (!frame)
return 1;
@@ -564,7 +558,7 @@ int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
err |= compat_setup_sigframe(frame, regs, set);
if (err == 0)
- compat_setup_return(regs, ka, frame->retcode, frame, usig);
+ compat_setup_return(regs, &ksig->ka, frame->retcode, frame, usig);
return err;
}
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b1925729c692..a564b440416a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -49,28 +49,39 @@
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
.endm
/*
- * Save CPU state for a suspend. This saves callee registers, and allocates
- * space on the kernel stack to save the CPU specific registers + some
- * other data for resume.
+ * Save CPU state for a suspend and execute the suspend finisher.
+ * On success it will return 0 through cpu_resume - ie through a CPU
+ * soft/hard reboot from the reset vector.
+ * On failure it returns the suspend finisher return value or force
+ * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
+ * is not allowed to return; if it does, this must be considered a failure).
+ * It saves callee registers, and allocates space on the kernel stack
+ * to save the CPU specific registers + some other data for resume.
*
* x0 = suspend finisher argument
+ * x1 = suspend finisher function pointer
*/
-ENTRY(__cpu_suspend)
+ENTRY(__cpu_suspend_enter)
stp x29, lr, [sp, #-96]!
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
+ /*
+ * Stash suspend finisher and its argument in x20 and x19
+ */
+ mov x19, x0
+ mov x20, x1
mov x2, sp
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
- mov x1, sp
+ mov x0, sp
/*
- * x1 now points to struct cpu_suspend_ctx allocated on the stack
+ * x0 now points to struct cpu_suspend_ctx allocated on the stack
*/
- str x2, [x1, #CPU_CTX_SP]
- ldr x2, =sleep_save_sp
- ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
+ str x2, [x0, #CPU_CTX_SP]
+ ldr x1, =sleep_save_sp
+ ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
#ifdef CONFIG_SMP
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend)
ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
- add x2, x2, x8, lsl #3
+ add x1, x1, x8, lsl #3
#endif
- bl __cpu_suspend_finisher
+ bl __cpu_suspend_save
+ /*
+ * Grab suspend finisher in x20 and its argument in x19
+ */
+ mov x0, x19
+ mov x1, x20
+ /*
+ * We are ready for power down, fire off the suspend finisher
+ * in x1, with argument in x0
+ */
+ blr x1
/*
- * Never gets here, unless suspend fails.
+ * Never gets here, unless suspend finisher fails.
* Successful cpu_suspend should return from cpu_resume, returning
* through this code path is considered an error
* If the return value is set to 0 force x0 = -EOPNOTSUPP
@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend)
ldp x27, x28, [sp, #80]
ldp x29, lr, [sp], #96
ret
-ENDPROC(__cpu_suspend)
+ENDPROC(__cpu_suspend_enter)
.ltorg
/*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 3e2f5ebbf63e..b06d1d90ee8c 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -51,6 +51,9 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -313,8 +316,6 @@ void __init smp_prepare_boot_cpu(void)
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -469,32 +470,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
+void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
- smp_cross_call = fn;
+ __smp_cross_call = fn;
}
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
- if (smp_cross_call)
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
-}
-#endif
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x - IPI_RESCHEDULE] = s
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s) [x] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
@@ -503,12 +487,18 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_IRQ_WORK, "IRQ work interrupts"),
};
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __smp_cross_call(target, ipinr);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
@@ -528,6 +518,24 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ if (__smp_cross_call)
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
@@ -559,8 +567,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
- __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
+ if ((unsigned)ipinr < NR_IPI) {
+ trace_ipi_entry(ipi_types[ipinr]);
+ __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ }
switch (ipinr) {
case IPI_RESCHEDULE:
@@ -605,6 +615,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
}
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_exit(ipi_types[ipinr]);
set_irq_regs(old_regs);
}
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 0347d38eea29..4f93c67e63de 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
+#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
@@ -65,12 +66,21 @@ static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
- void **release_addr;
+ __le64 __iomem *release_addr;
if (!cpu_release_addr[cpu])
return -ENODEV;
- release_addr = __va(cpu_release_addr[cpu]);
+ /*
+ * The cpu-release-addr may or may not be inside the linear mapping.
+ * As ioremap_cache will either give us a new mapping or reuse the
+ * existing linear mapping, we can use it to cover both cases. In
+ * either case the memory will be MT_NORMAL.
+ */
+ release_addr = ioremap_cache(cpu_release_addr[cpu],
+ sizeof(*release_addr));
+ if (!release_addr)
+ return -ENOMEM;
/*
* We write the release address as LE regardless of the native
@@ -79,15 +89,17 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
- release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
-
- __flush_dcache_area(release_addr, sizeof(release_addr[0]));
+ writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+ __flush_dcache_area((__force void *)release_addr,
+ sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
*/
sev();
+ iounmap(release_addr);
+
return 0;
}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 55437ba1f5a4..407991bf79f5 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -111,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
- register unsigned long current_sp asm("sp");
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 55a99b9a97e0..13ad4dbb1615 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -9,22 +9,19 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-extern int __cpu_suspend(unsigned long);
+extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
- * This is called by __cpu_suspend() to save the state, and do whatever
+ * This is called by __cpu_suspend_enter() to save the state, and do whatever
* flushing is required to ensure that when the CPU goes to sleep we have
* the necessary data available when the caches are not searched.
*
- * @arg: Argument to pass to suspend operations
- * @ptr: CPU context virtual address
- * @save_ptr: address of the location where the context physical address
- * must be saved
+ * ptr: CPU context virtual address
+ * save_ptr: address of the location where the context physical address
+ * must be saved
*/
-int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
- phys_addr_t *save_ptr)
+void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
+ phys_addr_t *save_ptr)
{
- int cpu = smp_processor_id();
-
*save_ptr = virt_to_phys(ptr);
cpu_do_suspend(ptr);
@@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
*/
__flush_dcache_area(ptr, sizeof(*ptr));
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
-
- return cpu_ops[cpu]->cpu_suspend(arg);
}
/*
@@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
}
/**
- * cpu_suspend
+ * cpu_suspend() - function to enter a low-power state
+ * @arg: argument to pass to CPU suspend operations
*
- * @arg: argument to pass to the finisher function
+ * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
+ * operations back-end error code otherwise.
*/
int cpu_suspend(unsigned long arg)
{
- struct mm_struct *mm = current->active_mm;
- int ret, cpu = smp_processor_id();
- unsigned long flags;
+ int cpu = smp_processor_id();
/*
* If cpu_ops have not been registered or suspend
@@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg)
*/
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
return -EOPNOTSUPP;
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/*
+ * __cpu_suspend
+ *
+ * arg: argument to pass to the finisher function
+ * fn: finisher function pointer
+ *
+ */
+int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+ struct mm_struct *mm = current->active_mm;
+ int ret;
+ unsigned long flags;
/*
* From this point debug exceptions are disabled to prevent
@@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg)
* page tables, so that the thread address space is properly
* set-up on function return.
*/
- ret = __cpu_suspend(arg);
+ ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
@@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg)
* Restore per-cpu offset before any kernel
* subsystem relying on it has a chance to run.
*/
- set_my_cpu_offset(per_cpu_offset(cpu));
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
/*
* Restore HW breakpoint registers to sane values
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index de2b0226e06d..dc47e53e9e28 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
case __ARM_NR_compat_set_tls:
current->thread.tp_value = regs->regs[0];
+
+ /*
+ * Protect against register corruption from context switch.
+ * See comment in tls_thread_flush.
+ */
+ barrier();
asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
return 0;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 02cd3f023e9a..de1b085e7963 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -132,7 +132,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- const register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -145,7 +144,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = regs->pc;
} else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a81a446a5786..32aeea083d93 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -195,25 +195,6 @@ up_fail:
}
/*
- * We define AT_SYSINFO_EHDR, so we need these function stubs to keep
- * Linux happy.
- */
-int in_gate_area_no_mm(unsigned long addr)
-{
- return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
- return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
- return NULL;
-}
-
-/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
void update_vsyscall(struct timekeeper *tk)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 97f0c0429dfa..edf8715ba39b 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -97,9 +97,9 @@ SECTIONS
PERCPU_SECTION(64)
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
- . = ALIGN(PAGE_SIZE);
_data = .;
_sdata = .;
RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 72a9fd583ad3..32a096174b94 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,4 +20,8 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 60b5c31f3c10..76794692c20b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -136,13 +136,67 @@ static unsigned long num_core_regs(void)
}
/**
+ * ARM64 versions of the TIMER registers, always available on arm64
+ */
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+ switch (index) {
+ case KVM_REG_ARM_TIMER_CTL:
+ case KVM_REG_ARM_TIMER_CNT:
+ case KVM_REG_ARM_TIMER_CVAL:
+ return true;
+ }
+ return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+ int ret;
+
+ ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+ if (ret != 0)
+ return -EFAULT;
+
+ return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+
+ val = kvm_arm_timer_get_reg(vcpu, reg->id);
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
+/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
* This is for all registers.
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu);
+ return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+ + NUM_TIMER_REGS;
}
/**
@@ -154,6 +208,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+ int ret;
for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
if (put_user(core_reg | i, uindices))
@@ -161,6 +216,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
+ ret = copy_timer_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += NUM_TIMER_REGS;
+
return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
@@ -174,6 +234,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
+ if (is_timer_reg(reg->id))
+ return get_timer_reg(vcpu, reg);
+
return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
@@ -187,6 +250,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
+ if (is_timer_reg(reg->id))
+ return set_timer_reg(vcpu, reg);
+
return kvm_arm_sys_reg_set_reg(vcpu, reg);
}
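From userspace the three new indices go through the usual ONE_REG ioctls; a hedged sketch of reading the guest's virtual counter (struct layout and ioctl as in the standard <linux/kvm.h> interface, error handling omitted):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    /* Read KVM_REG_ARM_TIMER_CNT for one vcpu. */
    static int read_guest_timer_cnt(int vcpu_fd, uint64_t *cnt)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM_TIMER_CNT,
                    .addr = (uintptr_t)cnt,
            };
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }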
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 182415e1a952..34b8bd0711e9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -66,6 +66,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
else
kvm_vcpu_block(vcpu);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 1;
}
@@ -73,9 +75,9 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_EL2_EC_WFI] = kvm_handle_wfx,
[ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
- [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
+ [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
[ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access,
+ [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
[ESR_EL2_EC_HVC32] = handle_hvc,
[ESR_EL2_EC_SMC32] = handle_smc,
[ESR_EL2_EC_HVC64] = handle_hvc,
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index d968796f4b2d..c3191168a994 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -80,6 +80,10 @@ __do_hyp_init:
msr mair_el2, x4
isb
+ /* Invalidate the stale TLBs from Bootloader */
+ tlbi alle2
+ dsb sy
+
mrs x4, sctlr_el2
and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
ldr x5, =SCTLR_EL2_FLAGS
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index b0d1512acf08..b72aa9f9215c 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -16,11 +16,11 @@
*/
#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
+#include <asm/debug-monitors.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
@@ -36,9 +36,6 @@
.pushsection .hyp.text, "ax"
.align PAGE_SHIFT
-__kvm_hyp_code_start:
- .globl __kvm_hyp_code_start
-
.macro save_common_regs
// x2: base address for cpu context
// x3: tmp register
@@ -215,6 +212,7 @@ __kvm_hyp_code_start:
mrs x22, amair_el1
mrs x23, cntkctl_el1
mrs x24, par_el1
+ mrs x25, mdscr_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
@@ -226,7 +224,202 @@ __kvm_hyp_code_start:
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
- str x24, [x3, #160]
+ stp x24, x25, [x3, #160]
+.endm
+
+.macro save_debug
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ mrs x20, dbgbcr15_el1
+ mrs x19, dbgbcr14_el1
+ mrs x18, dbgbcr13_el1
+ mrs x17, dbgbcr12_el1
+ mrs x16, dbgbcr11_el1
+ mrs x15, dbgbcr10_el1
+ mrs x14, dbgbcr9_el1
+ mrs x13, dbgbcr8_el1
+ mrs x12, dbgbcr7_el1
+ mrs x11, dbgbcr6_el1
+ mrs x10, dbgbcr5_el1
+ mrs x9, dbgbcr4_el1
+ mrs x8, dbgbcr3_el1
+ mrs x7, dbgbcr2_el1
+ mrs x6, dbgbcr1_el1
+ mrs x5, dbgbcr0_el1
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ mrs x20, dbgbvr15_el1
+ mrs x19, dbgbvr14_el1
+ mrs x18, dbgbvr13_el1
+ mrs x17, dbgbvr12_el1
+ mrs x16, dbgbvr11_el1
+ mrs x15, dbgbvr10_el1
+ mrs x14, dbgbvr9_el1
+ mrs x13, dbgbvr8_el1
+ mrs x12, dbgbvr7_el1
+ mrs x11, dbgbvr6_el1
+ mrs x10, dbgbvr5_el1
+ mrs x9, dbgbvr4_el1
+ mrs x8, dbgbvr3_el1
+ mrs x7, dbgbvr2_el1
+ mrs x6, dbgbvr1_el1
+ mrs x5, dbgbvr0_el1
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ mrs x20, dbgwcr15_el1
+ mrs x19, dbgwcr14_el1
+ mrs x18, dbgwcr13_el1
+ mrs x17, dbgwcr12_el1
+ mrs x16, dbgwcr11_el1
+ mrs x15, dbgwcr10_el1
+ mrs x14, dbgwcr9_el1
+ mrs x13, dbgwcr8_el1
+ mrs x12, dbgwcr7_el1
+ mrs x11, dbgwcr6_el1
+ mrs x10, dbgwcr5_el1
+ mrs x9, dbgwcr4_el1
+ mrs x8, dbgwcr3_el1
+ mrs x7, dbgwcr2_el1
+ mrs x6, dbgwcr1_el1
+ mrs x5, dbgwcr0_el1
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ mrs x20, dbgwvr15_el1
+ mrs x19, dbgwvr14_el1
+ mrs x18, dbgwvr13_el1
+ mrs x17, dbgwvr12_el1
+ mrs x16, dbgwvr11_el1
+ mrs x15, dbgwvr10_el1
+ mrs x14, dbgwvr9_el1
+ mrs x13, dbgwvr8_el1
+ mrs x12, dbgwvr7_el1
+ mrs x11, dbgwvr6_el1
+ mrs x10, dbgwvr5_el1
+ mrs x9, dbgwvr4_el1
+ mrs x8, dbgwvr3_el1
+ mrs x7, dbgwvr2_el1
+ mrs x6, dbgwvr1_el1
+ mrs x5, dbgwvr0_el1
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ mrs x21, mdccint_el1
+ str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm
.macro restore_sysregs
@@ -245,7 +438,7 @@ __kvm_hyp_code_start:
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
- ldr x24, [x3, #160]
+ ldp x24, x25, [x3, #160]
msr vmpidr_el2, x4
msr csselr_el1, x5
@@ -268,6 +461,198 @@ __kvm_hyp_code_start:
msr amair_el1, x22
msr cntkctl_el1, x23
msr par_el1, x24
+ msr mdscr_el1, x25
+.endm
+
+.macro restore_debug
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ msr dbgbcr15_el1, x20
+ msr dbgbcr14_el1, x19
+ msr dbgbcr13_el1, x18
+ msr dbgbcr12_el1, x17
+ msr dbgbcr11_el1, x16
+ msr dbgbcr10_el1, x15
+ msr dbgbcr9_el1, x14
+ msr dbgbcr8_el1, x13
+ msr dbgbcr7_el1, x12
+ msr dbgbcr6_el1, x11
+ msr dbgbcr5_el1, x10
+ msr dbgbcr4_el1, x9
+ msr dbgbcr3_el1, x8
+ msr dbgbcr2_el1, x7
+ msr dbgbcr1_el1, x6
+ msr dbgbcr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ msr dbgbvr15_el1, x20
+ msr dbgbvr14_el1, x19
+ msr dbgbvr13_el1, x18
+ msr dbgbvr12_el1, x17
+ msr dbgbvr11_el1, x16
+ msr dbgbvr10_el1, x15
+ msr dbgbvr9_el1, x14
+ msr dbgbvr8_el1, x13
+ msr dbgbvr7_el1, x12
+ msr dbgbvr6_el1, x11
+ msr dbgbvr5_el1, x10
+ msr dbgbvr4_el1, x9
+ msr dbgbvr3_el1, x8
+ msr dbgbvr2_el1, x7
+ msr dbgbvr1_el1, x6
+ msr dbgbvr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ msr dbgwcr15_el1, x20
+ msr dbgwcr14_el1, x19
+ msr dbgwcr13_el1, x18
+ msr dbgwcr12_el1, x17
+ msr dbgwcr11_el1, x16
+ msr dbgwcr10_el1, x15
+ msr dbgwcr9_el1, x14
+ msr dbgwcr8_el1, x13
+ msr dbgwcr7_el1, x12
+ msr dbgwcr6_el1, x11
+ msr dbgwcr5_el1, x10
+ msr dbgwcr4_el1, x9
+ msr dbgwcr3_el1, x8
+ msr dbgwcr2_el1, x7
+ msr dbgwcr1_el1, x6
+ msr dbgwcr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ msr dbgwvr15_el1, x20
+ msr dbgwvr14_el1, x19
+ msr dbgwvr13_el1, x18
+ msr dbgwvr12_el1, x17
+ msr dbgwvr11_el1, x16
+ msr dbgwvr10_el1, x15
+ msr dbgwvr9_el1, x14
+ msr dbgwvr8_el1, x13
+ msr dbgwvr7_el1, x12
+ msr dbgwvr6_el1, x11
+ msr dbgwvr5_el1, x10
+ msr dbgwvr4_el1, x9
+ msr dbgwvr3_el1, x8
+ msr dbgwvr2_el1, x7
+ msr dbgwvr1_el1, x6
+ msr dbgwvr0_el1, x5
+
+ ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ msr mdccint_el1, x21
.endm
.macro skip_32bit_state tmp, target
@@ -282,6 +667,35 @@ __kvm_hyp_code_start:
tbz \tmp, #12, \target
.endm
+.macro skip_debug_state tmp, target
+ ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
+ tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
+.endm
+
+.macro compute_debug_state target
+ // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
+ // is set, we do a full save/restore cycle and disable trapping.
+ add x25, x0, #VCPU_CONTEXT
+
+ // Check the state of MDSCR_EL1
+ ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
+ and x26, x25, #DBG_MDSCR_KDE
+ and x25, x25, #DBG_MDSCR_MDE
+ adds xzr, x25, x26
+ b.eq 9998f // Nothing to see there
+
+ // If any interesting bits were set, we must set the flag
+ mov x26, #KVM_ARM64_DEBUG_DIRTY
+ str x26, [x0, #VCPU_DEBUG_FLAGS]
+ b 9999f // Don't skip restore
+
+9998:
+ // Otherwise load the flags from memory in case we recently
+ // trapped
+ skip_debug_state x25, \target
+9999:
+.endm
+
.macro save_guest_32bit_state
skip_32bit_state x3, 1f
@@ -297,10 +711,13 @@ __kvm_hyp_code_start:
mrs x4, dacr32_el2
mrs x5, ifsr32_el2
mrs x6, fpexc32_el2
- mrs x7, dbgvcr32_el2
stp x4, x5, [x3]
- stp x6, x7, [x3, #16]
+ str x6, [x3, #16]
+ skip_debug_state x8, 2f
+ mrs x7, dbgvcr32_el2
+ str x7, [x3, #24]
+2:
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -323,12 +740,15 @@ __kvm_hyp_code_start:
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
ldp x4, x5, [x3]
- ldp x6, x7, [x3, #16]
+ ldr x6, [x3, #16]
msr dacr32_el2, x4
msr ifsr32_el2, x5
msr fpexc32_el2, x6
- msr dbgvcr32_el2, x7
+ skip_debug_state x8, 2f
+ ldr x7, [x3, #24]
+ msr dbgvcr32_el2, x7
+2:
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -339,11 +759,8 @@ __kvm_hyp_code_start:
.endm
.macro activate_traps
- ldr x2, [x0, #VCPU_IRQ_LINES]
- ldr x1, [x0, #VCPU_HCR_EL2]
- orr x2, x2, x1
- msr hcr_el2, x2
-
+ ldr x2, [x0, #VCPU_HCR_EL2]
+ msr hcr_el2, x2
ldr x2, =(CPTR_EL2_TTA)
msr cptr_el2, x2
@@ -353,6 +770,14 @@ __kvm_hyp_code_start:
mrs x2, mdcr_el2
and x2, x2, #MDCR_EL2_HPMN_MASK
orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
+ orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
+
+ // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
+ // if not dirty.
+ ldr x3, [x0, #VCPU_DEBUG_FLAGS]
+ tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
+ orr x2, x2, #MDCR_EL2_TDA
+1:
msr mdcr_el2, x2
.endm
@@ -379,100 +804,33 @@ __kvm_hyp_code_start:
.endm
/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
+ * Call into the vgic backend for state saving
*/
.macro save_vgic_state
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* Save all interesting registers */
- ldr w4, [x2, #GICH_HCR]
- ldr w5, [x2, #GICH_VMCR]
- ldr w6, [x2, #GICH_MISR]
- ldr w7, [x2, #GICH_EISR0]
- ldr w8, [x2, #GICH_EISR1]
- ldr w9, [x2, #GICH_ELRSR0]
- ldr w10, [x2, #GICH_ELRSR1]
- ldr w11, [x2, #GICH_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-CPU_BE( rev w7, w7 )
-CPU_BE( rev w8, w8 )
-CPU_BE( rev w9, w9 )
-CPU_BE( rev w10, w10 )
-CPU_BE( rev w11, w11 )
-
- str w4, [x3, #VGIC_CPU_HCR]
- str w5, [x3, #VGIC_CPU_VMCR]
- str w6, [x3, #VGIC_CPU_MISR]
- str w7, [x3, #VGIC_CPU_EISR]
- str w8, [x3, #(VGIC_CPU_EISR + 4)]
- str w9, [x3, #VGIC_CPU_ELRSR]
- str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
- str w11, [x3, #VGIC_CPU_APR]
-
- /* Clear GICH_HCR */
- str wzr, [x2, #GICH_HCR]
-
- /* Save list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_CPU_LR
-1: ldr w5, [x2], #4
-CPU_BE( rev w5, w5 )
- str w5, [x3], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, VGIC_SAVE_FN]
+ kern_hyp_va x24
+ blr x24
+ mrs x24, hcr_el2
+ mov x25, #HCR_INT_OVERRIDE
+ neg x25, x25
+ and x24, x24, x25
+ msr hcr_el2, x24
.endm
/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
+ * Call into the vgic backend for state restoring
*/
.macro restore_vgic_state
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* We only restore a minimal set of registers */
- ldr w4, [x3, #VGIC_CPU_HCR]
- ldr w5, [x3, #VGIC_CPU_VMCR]
- ldr w6, [x3, #VGIC_CPU_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-
- str w4, [x2, #GICH_HCR]
- str w5, [x2, #GICH_VMCR]
- str w6, [x2, #GICH_APR]
-
- /* Restore list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_CPU_LR
-1: ldr w5, [x3], #4
-CPU_BE( rev w5, w5 )
- str w5, [x2], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
+ mrs x24, hcr_el2
+ ldr x25, [x0, #VCPU_IRQ_LINES]
+ orr x24, x24, #HCR_INT_OVERRIDE
+ orr x24, x24, x25
+ msr hcr_el2, x24
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, #VGIC_RESTORE_FN]
+ kern_hyp_va x24
+ blr x24
.endm
.macro save_timer_state
@@ -537,6 +895,14 @@ __restore_sysregs:
restore_sysregs
ret
+__save_debug:
+ save_debug
+ ret
+
+__restore_debug:
+ restore_debug
+ ret
+
__save_fpsimd:
save_fpsimd
ret
@@ -568,6 +934,9 @@ ENTRY(__kvm_vcpu_run)
bl __save_fpsimd
bl __save_sysregs
+ compute_debug_state 1f
+ bl __save_debug
+1:
activate_traps
activate_vm
@@ -579,6 +948,10 @@ ENTRY(__kvm_vcpu_run)
bl __restore_sysregs
bl __restore_fpsimd
+
+ skip_debug_state x3, 1f
+ bl __restore_debug
+1:
restore_guest_32bit_state
restore_guest_regs
@@ -595,6 +968,10 @@ __kvm_vcpu_return:
save_guest_regs
bl __save_fpsimd
bl __save_sysregs
+
+ skip_debug_state x3, 1f
+ bl __save_debug
+1:
save_guest_32bit_state
save_timer_state
@@ -609,6 +986,14 @@ __kvm_vcpu_return:
bl __restore_sysregs
bl __restore_fpsimd
+
+ skip_debug_state x3, 1f
+ // Clear the dirty flag for the next run, as all the state has
+ // already been saved. Note that we nuke the whole 64bit word.
+ // If we ever add more flags, we'll have to be more careful...
+ str xzr, [x0, #VCPU_DEBUG_FLAGS]
+ bl __restore_debug
+1:
restore_host_regs
mov x0, x1
@@ -653,6 +1038,12 @@ ENTRY(__kvm_flush_vm_context)
ret
ENDPROC(__kvm_flush_vm_context)
+ // struct vgic_sr_vectors __vgic_sr_vectors;
+ .align 3
+ENTRY(__vgic_sr_vectors)
+ .skip VGIC_SR_VECTOR_SZ
+ENDPROC(__vgic_sr_vectors)
+
__kvm_hyp_panic:
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
@@ -830,7 +1221,7 @@ el1_trap:
mrs x2, far_el2
2: mrs x0, tpidr_el2
- str x1, [x0, #VCPU_ESR_EL2]
+ str w1, [x0, #VCPU_ESR_EL2]
str x2, [x0, #VCPU_FAR_EL2]
str x3, [x0, #VCPU_HPFAR_EL2]
@@ -880,7 +1271,4 @@ ENTRY(__kvm_hyp_vector)
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
-__kvm_hyp_code_end:
- .globl __kvm_hyp_code_end
-
.popsection
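The save_debug/restore_debug macros above avoid touching breakpoint/watchpoint registers the CPU does not implement: they read ID_AA64DFR0_EL1, work out how many of the 16 possible entries to skip, and branch that many instructions into the mrs/str (or ldr/msr) lists. A C restatement of that arithmetic, for illustration only (field positions per the ARMv8 ID_AA64DFR0_EL1 layout, "minus one" encoded):

    #include <stdint.h>

    /* shift == 12 for breakpoints (BRPs), 20 for watchpoints (WRPs). */
    static inline unsigned int dbg_entries_to_skip(uint64_t dfr0, unsigned int shift)
    {
            unsigned int implemented = ((dfr0 >> shift) & 0xf) + 1;  /* 1..16 */
            return 16 - implemented;        /* the "mov #15; sub" in the asm */
    }

    /* Each skipped entry is one 4-byte instruction, hence the
     * "add x26, x26, xN, lsl #2" before each computed branch. */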
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c59a1bdab5eb..4cc3b719208e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -30,6 +30,7 @@
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/debug-monitors.h>
#include <trace/events/kvm.h>
#include "sys_regs.h"
@@ -137,10 +138,11 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
if (!p->is_aarch32) {
vcpu_sys_reg(vcpu, r->reg) = val;
} else {
- vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
if (!p->is_32bit)
- vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
+ vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
+ vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
}
+
return true;
}
@@ -163,18 +165,9 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
return true;
}
-/*
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- */
-static bool pm_fake(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
- const struct sys_reg_desc *r)
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
if (p->is_write)
return ignore_write(vcpu, p);
@@ -182,6 +175,73 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
+static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = (1 << 3);
+ return true;
+ }
+}
+
+static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ u32 val;
+ asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
+ *vcpu_reg(vcpu, p->Rt) = val;
+ return true;
+ }
+}
+
+/*
+ * We want to avoid world-switching all the DBG registers all the
+ * time:
+ *
+ * - If we've touched any debug register, it is likely that we're
+ * going to touch more of them. It then makes sense to disable the
+ * traps and start doing the save/restore dance
+ * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
+ * then mandatory to save/restore the registers, as the guest
+ * depends on them.
+ *
+ * For this, we use a DIRTY bit, indicating the guest has modified the
+ * debug registers, used as follow:
+ *
+ * On guest entry:
+ * - If the dirty bit is set (because we're coming back from trapping),
+ * disable the traps, save host registers, restore guest registers.
+ * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
+ * set the dirty bit, disable the traps, save host registers,
+ * restore guest registers.
+ * - Otherwise, enable the traps
+ *
+ * On guest exit:
+ * - If the dirty bit is set, save guest registers, restore host
+ * registers and clear the dirty bit. This ensures that the host can
+ * now use the debug registers.
+ */
+static bool trap_debug_regs(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ }
+
+ return true;
+}
+
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 amair;
@@ -198,9 +258,39 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
+/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
+#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
+ /* DBGBVRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
+ trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
+ /* DBGBCRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
+ trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
+ /* DBGWVRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
+ trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
+ /* DBGWCRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
+ trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ *
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters. Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ *
+ * Debug handling: We do trap most, if not all, debug-related system
+ * registers. The implementation is good enough to ensure that a guest
+ * can use these with minimal performance degradation. The drawback is
+ * that we don't implement any of the external debug, nor the
+ * OSlock protocol. This should be revisited if we ever encounter a
+ * more demanding guest...
*/
static const struct sys_reg_desc sys_reg_descs[] = {
/* DC ISW */
@@ -213,12 +303,71 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
access_dcsw },
+ DBG_BCR_BVR_WCR_WVR_EL1(0),
+ DBG_BCR_BVR_WCR_WVR_EL1(1),
+ /* MDCCINT_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
+ trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
+ /* MDSCR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
+ trap_debug_regs, reset_val, MDSCR_EL1, 0 },
+ DBG_BCR_BVR_WCR_WVR_EL1(2),
+ DBG_BCR_BVR_WCR_WVR_EL1(3),
+ DBG_BCR_BVR_WCR_WVR_EL1(4),
+ DBG_BCR_BVR_WCR_WVR_EL1(5),
+ DBG_BCR_BVR_WCR_WVR_EL1(6),
+ DBG_BCR_BVR_WCR_WVR_EL1(7),
+ DBG_BCR_BVR_WCR_WVR_EL1(8),
+ DBG_BCR_BVR_WCR_WVR_EL1(9),
+ DBG_BCR_BVR_WCR_WVR_EL1(10),
+ DBG_BCR_BVR_WCR_WVR_EL1(11),
+ DBG_BCR_BVR_WCR_WVR_EL1(12),
+ DBG_BCR_BVR_WCR_WVR_EL1(13),
+ DBG_BCR_BVR_WCR_WVR_EL1(14),
+ DBG_BCR_BVR_WCR_WVR_EL1(15),
+
+ /* MDRAR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+ trap_raz_wi },
+ /* OSLAR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
+ trap_raz_wi },
+ /* OSLSR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
+ trap_oslsr_el1 },
+ /* OSDLR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
+ trap_raz_wi },
+ /* DBGPRCR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
+ trap_raz_wi },
+ /* DBGCLAIMSET_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
+ trap_raz_wi },
+ /* DBGCLAIMCLR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
+ trap_raz_wi },
+ /* DBGAUTHSTATUS_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
+ trap_dbgauthstatus_el1 },
+
/* TEECR32_EL1 */
{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
NULL, reset_val, TEECR32_EL1, 0 },
/* TEEHBR32_EL1 */
{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
NULL, reset_val, TEEHBR32_EL1, 0 },
+
+ /* MDCCSR_EL1 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
+ trap_raz_wi },
+ /* DBGDTR_EL0 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
+ trap_raz_wi },
+ /* DBGDTR[TR]X_EL0 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
+ trap_raz_wi },
+
/* DBGVCR32_EL2 */
{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
NULL, reset_val, DBGVCR32_EL2, 0 },
@@ -260,10 +409,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
- pm_fake },
+ trap_raz_wi },
/* PMINTENCLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
- pm_fake },
+ trap_raz_wi },
/* MAIR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -292,43 +441,43 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* PMCR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
- pm_fake },
+ trap_raz_wi },
/* PMCNTENSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
- pm_fake },
+ trap_raz_wi },
/* PMCNTENCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
- pm_fake },
+ trap_raz_wi },
/* PMOVSCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
- pm_fake },
+ trap_raz_wi },
/* PMSWINC_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
- pm_fake },
+ trap_raz_wi },
/* PMSELR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
- pm_fake },
+ trap_raz_wi },
/* PMCEID0_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
- pm_fake },
+ trap_raz_wi },
/* PMCEID1_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
- pm_fake },
+ trap_raz_wi },
/* PMCCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
- pm_fake },
+ trap_raz_wi },
/* PMXEVTYPER_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
- pm_fake },
+ trap_raz_wi },
/* PMXEVCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
- pm_fake },
+ trap_raz_wi },
/* PMUSERENR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
- pm_fake },
+ trap_raz_wi },
/* PMOVSSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
- pm_fake },
+ trap_raz_wi },
/* TPIDR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -348,13 +497,161 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_val, FPEXC32_EL2, 0x70 },
};
+static bool trap_dbgidr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
+ u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
+ u32 el3 = !!((pfr >> 12) & 0xf);
+
+ *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
+ (((dfr >> 12) & 0xf) << 24) |
+ (((dfr >> 28) & 0xf) << 20) |
+ (6 << 16) | (el3 << 14) | (el3 << 12));
+ return true;
+ }
+}
+
+static bool trap_debug32(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+ }
+
+ return true;
+}
+
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
+ NULL, (cp14_DBGBVR0 + (n) * 2) }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
+ NULL, (cp14_DBGBCR0 + (n) * 2) }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
+ NULL, (cp14_DBGWVR0 + (n) * 2) }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
+ NULL, (cp14_DBGWCR0 + (n) * 2) }
+
+#define DBGBXVR(n) \
+ { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
+ NULL, cp14_DBGBXVR0 + n * 2 }
+
+/*
+ * Trapped cp14 registers. We generally ignore most of the external
+ * debug, on the principle that they don't really make sense to a
+ * guest. Revisit this one day, should this principle change.
+ */
+static const struct sys_reg_desc cp14_regs[] = {
+ /* DBGIDR */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
+ /* DBGDTRRXext */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
+
+ DBG_BCR_BVR_WCR_WVR(0),
+ /* DBGDSCRint */
+ { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(1),
+ /* DBGDCCINT */
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
+ /* DBGDSCRext */
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
+ DBG_BCR_BVR_WCR_WVR(2),
+ /* DBGDTR[RT]Xint */
+ { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
+ /* DBGDTR[RT]Xext */
+ { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(3),
+ DBG_BCR_BVR_WCR_WVR(4),
+ DBG_BCR_BVR_WCR_WVR(5),
+ /* DBGWFAR */
+ { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
+ /* DBGOSECCR */
+ { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(6),
+ /* DBGVCR */
+ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
+ DBG_BCR_BVR_WCR_WVR(7),
+ DBG_BCR_BVR_WCR_WVR(8),
+ DBG_BCR_BVR_WCR_WVR(9),
+ DBG_BCR_BVR_WCR_WVR(10),
+ DBG_BCR_BVR_WCR_WVR(11),
+ DBG_BCR_BVR_WCR_WVR(12),
+ DBG_BCR_BVR_WCR_WVR(13),
+ DBG_BCR_BVR_WCR_WVR(14),
+ DBG_BCR_BVR_WCR_WVR(15),
+
+ /* DBGDRAR (32bit) */
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
+
+ DBGBXVR(0),
+ /* DBGOSLAR */
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
+ DBGBXVR(1),
+ /* DBGOSLSR */
+ { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
+ DBGBXVR(2),
+ DBGBXVR(3),
+ /* DBGOSDLR */
+ { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
+ DBGBXVR(4),
+ /* DBGPRCR */
+ { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
+ DBGBXVR(5),
+ DBGBXVR(6),
+ DBGBXVR(7),
+ DBGBXVR(8),
+ DBGBXVR(9),
+ DBGBXVR(10),
+ DBGBXVR(11),
+ DBGBXVR(12),
+ DBGBXVR(13),
+ DBGBXVR(14),
+ DBGBXVR(15),
+
+ /* DBGDSAR (32bit) */
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
+
+ /* DBGDEVID2 */
+ { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
+ /* DBGDEVID1 */
+ { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
+ /* DBGDEVID */
+ { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
+ /* DBGCLAIMSET */
+ { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
+ /* DBGCLAIMCLR */
+ { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
+ /* DBGAUTHSTATUS */
+ { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
+};
+
+/* Trapped cp14 64bit registers */
+static const struct sys_reg_desc cp14_64_regs[] = {
+ /* DBGDRAR (64bit) */
+ { Op1( 0), CRm( 1), .access = trap_raz_wi },
+
+ /* DBGDSAR (64bit) */
+ { Op1( 0), CRm( 2), .access = trap_raz_wi },
+};
+
/*
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
* depending on the way they are accessed (as a 32bit or a 64bit
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
- { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -374,26 +671,30 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
- { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
- { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
- { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
- { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
- { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
- { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
- { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
+ /* PMU */
+ { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+};
+static const struct sys_reg_desc cp15_64_regs[] = {
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
@@ -454,26 +755,29 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-static void emulate_cp15(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *params)
+/*
+ * emulate_cp -- tries to match a sys_reg access in a handling table, and
+ * call the corresponding trap handler.
+ *
+ * @params: pointer to the descriptor of the access
+ * @table: array of trap descriptors
+ * @num: size of the trap descriptor array
+ *
+ * Return 0 if the access has been handled, and -1 if not.
+ */
+static int emulate_cp(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *params,
+ const struct sys_reg_desc *table,
+ size_t num)
{
- size_t num;
- const struct sys_reg_desc *table, *r;
+ const struct sys_reg_desc *r;
- table = get_target_table(vcpu->arch.target, false, &num);
+ if (!table)
+ return -1; /* Not handled */
- /* Search target-specific then generic table. */
r = find_reg(params, table, num);
- if (!r)
- r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
- if (likely(r)) {
+ if (r) {
/*
* Not having an accessor means that we have
* configured a trap that we don't know how to
@@ -485,22 +789,51 @@ static void emulate_cp15(struct kvm_vcpu *vcpu,
if (likely(r->access(vcpu, params, r))) {
/* Skip instruction, since it was emulated */
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return;
}
- /* If access function fails, it should complain. */
+
+ /* Handled */
+ return 0;
+ }
+
+ /* Not handled */
+ return -1;
+}
+
+static void unhandled_cp_access(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *params)
+{
+ u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ int cp;
+
+ switch(hsr_ec) {
+ case ESR_EL2_EC_CP15_32:
+ case ESR_EL2_EC_CP15_64:
+ cp = 15;
+ break;
+ case ESR_EL2_EC_CP14_MR:
+ case ESR_EL2_EC_CP14_64:
+ cp = 14;
+ break;
+ default:
+ WARN_ON((cp = -1));
}
- kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
+ kvm_err("Unsupported guest CP%d access at: %08lx\n",
+ cp, *vcpu_pc(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
}
/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *global,
+ size_t nr_global,
+ const struct sys_reg_desc *target_specific,
+ size_t nr_specific)
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -529,8 +862,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
*vcpu_reg(vcpu, params.Rt) = val;
}
- emulate_cp15(vcpu, &params);
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+ goto out;
+ if (!emulate_cp(vcpu, &params, global, nr_global))
+ goto out;
+
+ unhandled_cp_access(vcpu, &params);
+out:
/* Do the opposite hack for the read side */
if (!params.is_write) {
u64 val = *vcpu_reg(vcpu, params.Rt);
@@ -546,7 +885,11 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *global,
+ size_t nr_global,
+ const struct sys_reg_desc *target_specific,
+ size_t nr_specific)
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -561,10 +904,51 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.Op1 = (hsr >> 14) & 0x7;
params.Op2 = (hsr >> 17) & 0x7;
- emulate_cp15(vcpu, &params);
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+ return 1;
+ if (!emulate_cp(vcpu, &params, global, nr_global))
+ return 1;
+
+ unhandled_cp_access(vcpu, &params);
return 1;
}
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ const struct sys_reg_desc *target_specific;
+ size_t num;
+
+ target_specific = get_target_table(vcpu->arch.target, false, &num);
+ return kvm_handle_cp_64(vcpu,
+ cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
+ target_specific, num);
+}
+
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ const struct sys_reg_desc *target_specific;
+ size_t num;
+
+ target_specific = get_target_table(vcpu->arch.target, false, &num);
+ return kvm_handle_cp_32(vcpu,
+ cp15_regs, ARRAY_SIZE(cp15_regs),
+ target_specific, num);
+}
+
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ return kvm_handle_cp_64(vcpu,
+ cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
+ NULL, 0);
+}
+
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ return kvm_handle_cp_32(vcpu,
+ cp14_regs, ARRAY_SIZE(cp14_regs),
+ NULL, 0);
+}
+
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params)
{
@@ -776,17 +1160,15 @@ static struct sys_reg_desc invariant_sys_regs[] = {
NULL, get_ctr_el0 },
};
-static int reg_from_user(void *val, const void __user *uaddr, u64 id)
+static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
- /* This Just Works because we are little endian. */
if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
-static int reg_to_user(void __user *uaddr, const void *val, u64 id)
+static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
- /* This Just Works because we are little endian. */
if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
@@ -836,7 +1218,7 @@ static bool is_valid_cache(u32 val)
u32 level, ctype;
if (val >= CSSELR_MAX)
- return -ENOENT;
+ return false;
/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
level = (val >> 1);
@@ -962,7 +1344,7 @@ static unsigned int num_demux_regs(void)
static int write_demux_regids(u64 __user *uindices)
{
- u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+ u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
unsigned int i;
val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
@@ -1069,14 +1451,32 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
return write_demux_regids(uindices);
}
+static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 1; i < n; i++) {
+ if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
+ kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
void kvm_sys_reg_table_init(void)
{
unsigned int i;
struct sys_reg_desc clidr;
/* Make sure tables are unique and in order. */
- for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
- BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);
+ BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
+ BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
+ BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
+ BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+ BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
+ BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
/* We abuse the reset function to overwrite the table itself. */
for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
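check_sysreg_table() depends on every descriptor array being sorted by encoding; the comparison it uses, cmp_sys_reg() from sys_regs.h, orders entries field by field. An illustrative restatement of that ordering (not the kernel's code):

    /* (Op0, Op1, CRn, CRm, Op2) lexicographic order, as enforced by the
     * BUG_ON(check_sysreg_table(...)) calls above. */
    static int example_cmp_sys_reg(const struct sys_reg_desc *a,
                                   const struct sys_reg_desc *b)
    {
            if (a->Op0 != b->Op0) return a->Op0 - b->Op0;
            if (a->Op1 != b->Op1) return a->Op1 - b->Op1;
            if (a->CRn != b->CRn) return a->CRn - b->CRn;
            if (a->CRm != b->CRm) return a->CRm - b->CRm;
            return a->Op2 - b->Op2;
    }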
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
new file mode 100644
index 000000000000..f002fe1c3700
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+ENTRY(__save_vgic_v2_state)
+__save_vgic_v2_state:
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* Save all interesting registers */
+ ldr w4, [x2, #GICH_HCR]
+ ldr w5, [x2, #GICH_VMCR]
+ ldr w6, [x2, #GICH_MISR]
+ ldr w7, [x2, #GICH_EISR0]
+ ldr w8, [x2, #GICH_EISR1]
+ ldr w9, [x2, #GICH_ELRSR0]
+ ldr w10, [x2, #GICH_ELRSR1]
+ ldr w11, [x2, #GICH_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+CPU_BE( rev w7, w7 )
+CPU_BE( rev w8, w8 )
+CPU_BE( rev w9, w9 )
+CPU_BE( rev w10, w10 )
+CPU_BE( rev w11, w11 )
+
+ str w4, [x3, #VGIC_V2_CPU_HCR]
+ str w5, [x3, #VGIC_V2_CPU_VMCR]
+ str w6, [x3, #VGIC_V2_CPU_MISR]
+CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] )
+CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] )
+CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] )
+CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] )
+ str w11, [x3, #VGIC_V2_CPU_APR]
+
+ /* Clear GICH_HCR */
+ str wzr, [x2, #GICH_HCR]
+
+ /* Save list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x2], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x3], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+ ret
+ENDPROC(__save_vgic_v2_state)
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+ENTRY(__restore_vgic_v2_state)
+__restore_vgic_v2_state:
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* We only restore a minimal set of registers */
+ ldr w4, [x3, #VGIC_V2_CPU_HCR]
+ ldr w5, [x3, #VGIC_V2_CPU_VMCR]
+ ldr w6, [x3, #VGIC_V2_CPU_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+
+ str w4, [x2, #GICH_HCR]
+ str w5, [x2, #GICH_VMCR]
+ str w6, [x2, #GICH_APR]
+
+ /* Restore list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x3], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x2], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+ ret
+ENDPROC(__restore_vgic_v2_state)
+
+ .popsection
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
new file mode 100644
index 000000000000..d16046999e06
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * We store LRs in reverse order to let the CPU deal with streaming
+ * access. Use this macro to make it look saner...
+ */
+#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8)
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+.macro save_vgic_v3_state
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Make sure stores to the GIC via the memory mapped interface
+ // are now visible to the system register interface
+ dsb st
+
+ // Save all interesting registers
+ mrs_s x4, ICH_HCR_EL2
+ mrs_s x5, ICH_VMCR_EL2
+ mrs_s x6, ICH_MISR_EL2
+ mrs_s x7, ICH_EISR_EL2
+ mrs_s x8, ICH_ELSR_EL2
+
+ str w4, [x3, #VGIC_V3_CPU_HCR]
+ str w5, [x3, #VGIC_V3_CPU_VMCR]
+ str w6, [x3, #VGIC_V3_CPU_MISR]
+ str w7, [x3, #VGIC_V3_CPU_EISR]
+ str w8, [x3, #VGIC_V3_CPU_ELRSR]
+
+ msr_s ICH_HCR_EL2, xzr
+
+ mrs_s x21, ICH_VTR_EL2
+ mvn w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ mrs_s x20, ICH_LR15_EL2
+ mrs_s x19, ICH_LR14_EL2
+ mrs_s x18, ICH_LR13_EL2
+ mrs_s x17, ICH_LR12_EL2
+ mrs_s x16, ICH_LR11_EL2
+ mrs_s x15, ICH_LR10_EL2
+ mrs_s x14, ICH_LR9_EL2
+ mrs_s x13, ICH_LR8_EL2
+ mrs_s x12, ICH_LR7_EL2
+ mrs_s x11, ICH_LR6_EL2
+ mrs_s x10, ICH_LR5_EL2
+ mrs_s x9, ICH_LR4_EL2
+ mrs_s x8, ICH_LR3_EL2
+ mrs_s x7, ICH_LR2_EL2
+ mrs_s x6, ICH_LR1_EL2
+ mrs_s x5, ICH_LR0_EL2
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ str x20, [x3, #LR_OFFSET(15)]
+ str x19, [x3, #LR_OFFSET(14)]
+ str x18, [x3, #LR_OFFSET(13)]
+ str x17, [x3, #LR_OFFSET(12)]
+ str x16, [x3, #LR_OFFSET(11)]
+ str x15, [x3, #LR_OFFSET(10)]
+ str x14, [x3, #LR_OFFSET(9)]
+ str x13, [x3, #LR_OFFSET(8)]
+ str x12, [x3, #LR_OFFSET(7)]
+ str x11, [x3, #LR_OFFSET(6)]
+ str x10, [x3, #LR_OFFSET(5)]
+ str x9, [x3, #LR_OFFSET(4)]
+ str x8, [x3, #LR_OFFSET(3)]
+ str x7, [x3, #LR_OFFSET(2)]
+ str x6, [x3, #LR_OFFSET(1)]
+ str x5, [x3, #LR_OFFSET(0)]
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs_s x20, ICH_AP0R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ mrs_s x19, ICH_AP0R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+6: mrs_s x18, ICH_AP0R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+5: mrs_s x17, ICH_AP0R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP0R]
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs_s x20, ICH_AP1R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ mrs_s x19, ICH_AP1R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+6: mrs_s x18, ICH_AP1R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+5: mrs_s x17, ICH_AP1R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP1R]
+
+ // Restore SRE_EL1 access and re-enable SRE at EL1.
+ mrs_s x5, ICC_SRE_EL2
+ orr x5, x5, #ICC_SRE_EL2_ENABLE
+ msr_s ICC_SRE_EL2, x5
+ isb
+ mov x5, #1
+ msr_s ICC_SRE_EL1, x5
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+.macro restore_vgic_v3_state
+ // Disable SRE_EL1 access. Necessary, otherwise
+ // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
+ msr_s ICC_SRE_EL1, xzr
+ isb
+
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Restore all interesting registers
+ ldr w4, [x3, #VGIC_V3_CPU_HCR]
+ ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+
+ msr_s ICH_HCR_EL2, x4
+ msr_s ICH_VMCR_EL2, x5
+
+ mrs_s x21, ICH_VTR_EL2
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ msr_s ICH_AP1R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+ msr_s ICH_AP1R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+ msr_s ICH_AP1R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
+ msr_s ICH_AP1R0_EL2, x17
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ msr_s ICH_AP0R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+ msr_s ICH_AP0R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+ msr_s ICH_AP0R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
+ msr_s ICH_AP0R0_EL2, x17
+
+ and w22, w21, #0xf
+ mvn w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ ldr x20, [x3, #LR_OFFSET(15)]
+ ldr x19, [x3, #LR_OFFSET(14)]
+ ldr x18, [x3, #LR_OFFSET(13)]
+ ldr x17, [x3, #LR_OFFSET(12)]
+ ldr x16, [x3, #LR_OFFSET(11)]
+ ldr x15, [x3, #LR_OFFSET(10)]
+ ldr x14, [x3, #LR_OFFSET(9)]
+ ldr x13, [x3, #LR_OFFSET(8)]
+ ldr x12, [x3, #LR_OFFSET(7)]
+ ldr x11, [x3, #LR_OFFSET(6)]
+ ldr x10, [x3, #LR_OFFSET(5)]
+ ldr x9, [x3, #LR_OFFSET(4)]
+ ldr x8, [x3, #LR_OFFSET(3)]
+ ldr x7, [x3, #LR_OFFSET(2)]
+ ldr x6, [x3, #LR_OFFSET(1)]
+ ldr x5, [x3, #LR_OFFSET(0)]
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ msr_s ICH_LR15_EL2, x20
+ msr_s ICH_LR14_EL2, x19
+ msr_s ICH_LR13_EL2, x18
+ msr_s ICH_LR12_EL2, x17
+ msr_s ICH_LR11_EL2, x16
+ msr_s ICH_LR10_EL2, x15
+ msr_s ICH_LR9_EL2, x14
+ msr_s ICH_LR8_EL2, x13
+ msr_s ICH_LR7_EL2, x12
+ msr_s ICH_LR6_EL2, x11
+ msr_s ICH_LR5_EL2, x10
+ msr_s ICH_LR4_EL2, x9
+ msr_s ICH_LR3_EL2, x8
+ msr_s ICH_LR2_EL2, x7
+ msr_s ICH_LR1_EL2, x6
+ msr_s ICH_LR0_EL2, x5
+
+ // Ensure that the above will have reached the
+ // (re)distributors. This ensures the guest will read
+ // the correct values from the memory-mapped interface.
+ isb
+ dsb sy
+
+ // Prevent the guest from touching the GIC system registers
+ mrs_s x5, ICC_SRE_EL2
+ and x5, x5, #~ICC_SRE_EL2_ENABLE
+ msr_s ICC_SRE_EL2, x5
+.endm
+
+ENTRY(__save_vgic_v3_state)
+ save_vgic_v3_state
+ ret
+ENDPROC(__save_vgic_v3_state)
+
+ENTRY(__restore_vgic_v3_state)
+ restore_vgic_v3_state
+ ret
+ENDPROC(__restore_vgic_v3_state)
+
+ENTRY(__vgic_v3_get_ich_vtr_el2)
+ mrs_s x0, ICH_VTR_EL2
+ ret
+ENDPROC(__vgic_v3_get_ich_vtr_el2)
+
+ .popsection
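
The list-register handling above never touches LRs the CPU does not implement: ICH_VTR_EL2[3:0] holds the number of implemented list registers minus one, and the save/restore macros turn that into a byte offset into a table of sixteen 4-byte instructions, branching past the entries for absent LRs (on restore the computed branch is taken twice, once for the loads from struct vgic_cpu and once for the writes to ICH_LRn_EL2). A minimal C model of the offset computed into w23, illustrative only and not part of the patch:

	static inline unsigned int vgic_v3_lr_skip_bytes(unsigned int ich_vtr)
	{
		unsigned int list_regs = ich_vtr & 0xf;	/* LRs implemented - 1 */

		return (15 - list_regs) * 4;		/* 4 bytes per skipped insn */
	}
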
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 6e0ed93d51fe..c17967fdf5f6 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
- strb wzr, [x0]
+USER(9f, strb wzr, [x0] )
5: mov x0, #0
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3ecb56c624d3..c56179ed2c09 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
- context.o proc.o
+ context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4164c5ace9f8..d92094203913 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -20,13 +20,11 @@
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
-#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
@@ -41,6 +39,54 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
return prot;
}
+static struct gen_pool *atomic_pool;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+ atomic_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+ unsigned long val;
+ void *ptr = NULL;
+
+ if (!atomic_pool) {
+ WARN(1, "coherent pool not initialised!\n");
+ return NULL;
+ }
+
+ val = gen_pool_alloc(atomic_pool, size);
+ if (val) {
+ phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+ *ret_page = phys_to_page(phys);
+ ptr = (void *)val;
+ }
+
+ return ptr;
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+ return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+ if (!__in_atomic_pool(start, size))
+ return 0;
+
+ gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+ return 1;
+}
+
static void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
@@ -53,7 +99,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA;
- if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
struct page *page;
size = PAGE_ALIGN(size);
@@ -73,50 +119,54 @@ static void __dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
+ bool freed;
+ phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
return;
}
- if (IS_ENABLED(CONFIG_DMA_CMA)) {
- phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
- dma_release_from_contiguous(dev,
+ freed = dma_release_from_contiguous(dev,
phys_to_page(paddr),
size >> PAGE_SHIFT);
- } else {
+ if (!freed)
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
- }
}
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
- struct page *page, **map;
+ struct page *page;
void *ptr, *coherent_ptr;
- int order, i;
size = PAGE_ALIGN(size);
- order = get_order(size);
+
+ if (!(flags & __GFP_WAIT)) {
+ struct page *page = NULL;
+ void *addr = __alloc_from_pool(size, &page);
+
+ if (addr)
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+
+ return addr;
+
+ }
ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
if (!ptr)
goto no_mem;
- map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
- if (!map)
- goto no_map;
/* remove any dirty cache lines on the kernel alias */
__dma_flush_range(ptr, ptr + size);
/* create a coherent mapping */
page = virt_to_page(ptr);
- for (i = 0; i < (size >> PAGE_SHIFT); i++)
- map[i] = page + i;
- coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
- __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
- kfree(map);
+ coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+ __get_dma_pgprot(attrs,
+ __pgprot(PROT_NORMAL_NC), false),
+ NULL);
if (!coherent_ptr)
goto no_map;
@@ -125,7 +175,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
- *dma_handle = ~0;
+ *dma_handle = DMA_ERROR_CODE;
return NULL;
}
@@ -135,6 +185,8 @@ static void __dma_free_noncoherent(struct device *dev, size_t size,
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+ if (__free_from_pool(vaddr, size))
+ return;
vunmap(vaddr);
__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
@@ -308,45 +360,88 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
-static int dma_bus_notifier(struct notifier_block *nb,
- unsigned long event, void *_dev)
-{
- struct device *dev = _dev;
-
- if (event != BUS_NOTIFY_ADD_DEVICE)
- return NOTIFY_DONE;
-
- if (of_property_read_bool(dev->of_node, "dma-coherent"))
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+extern int swiotlb_late_init_with_default_size(size_t default_size);
- return NOTIFY_OK;
+static int __init atomic_pool_init(void)
+{
+ pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+ unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+ struct page *page;
+ void *addr;
+ unsigned int pool_size_order = get_order(atomic_pool_size);
+
+ if (dev_get_cma_area(NULL))
+ page = dma_alloc_from_contiguous(NULL, nr_pages,
+ pool_size_order);
+ else
+ page = alloc_pages(GFP_DMA, pool_size_order);
+
+ if (page) {
+ int ret;
+ void *page_addr = page_address(page);
+
+ memset(page_addr, 0, atomic_pool_size);
+ __dma_flush_range(page_addr, page_addr + atomic_pool_size);
+
+ atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!atomic_pool)
+ goto free_page;
+
+ addr = dma_common_contiguous_remap(page, atomic_pool_size,
+ VM_USERMAP, prot, atomic_pool_init);
+
+ if (!addr)
+ goto destroy_genpool;
+
+ ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+ page_to_phys(page),
+ atomic_pool_size, -1);
+ if (ret)
+ goto remove_mapping;
+
+ gen_pool_set_algo(atomic_pool,
+ gen_pool_first_fit_order_align,
+ (void *)PAGE_SHIFT);
+
+ pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+ atomic_pool_size / 1024);
+ return 0;
+ }
+ goto out;
+
+remove_mapping:
+ dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+ gen_pool_destroy(atomic_pool);
+ atomic_pool = NULL;
+free_page:
+ if (!dma_release_from_contiguous(NULL, page, nr_pages))
+ __free_pages(page, pool_size_order);
+out:
+ pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+ atomic_pool_size / 1024);
+ return -ENOMEM;
}
-static struct notifier_block platform_bus_nb = {
- .notifier_call = dma_bus_notifier,
-};
-
-static struct notifier_block amba_bus_nb = {
- .notifier_call = dma_bus_notifier,
-};
-
-extern int swiotlb_late_init_with_default_size(size_t default_size);
-
static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
- /*
- * These must be registered before of_platform_populate().
- */
- bus_register_notifier(&platform_bus_type, &platform_bus_nb);
- bus_register_notifier(&amba_bustype, &amba_bus_nb);
-
dma_ops = &noncoherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
}
-arch_initcall(swiotlb_late_init);
+
+static int __init arm64_dma_init(void)
+{
+ int ret = 0;
+
+ ret |= swiotlb_late_init();
+ ret |= atomic_pool_init();
+
+ return ret;
+}
+arch_initcall(arm64_dma_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
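
With the pool wired up, atomic coherent allocations for devices using the default non-cache-coherent DMA ops no longer have to remap memory or dip into CMA at allocation time: when __GFP_WAIT is clear, __dma_alloc_noncoherent() hands out a chunk of the pre-remapped (Normal non-cacheable) gen_pool, and __dma_free_noncoherent() returns it via __free_from_pool(). The pool defaults to 256 KiB and can be resized on the kernel command line, e.g. coherent_pool=512K, parsed by early_coherent_pool() above. A hedged usage sketch, where my_dev is a placeholder device and not something defined in this patch:

	#include <linux/dma-mapping.h>

	dma_addr_t dma;
	void *buf;

	/* atomic context: served from the gen_pool via __alloc_from_pool() */
	buf = dma_alloc_coherent(my_dev, SZ_4K, &dma, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	dma_free_coherent(my_dev, SZ_4K, buf, dma);
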
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 0d64089d28b5..b6f14e8d2121 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -104,3 +104,19 @@ EXPORT_SYMBOL(flush_dcache_page);
*/
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = pmd_mksplitting(*pmdp);
+
+ VM_BUG_ON(address & ~PMD_MASK);
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+
+ /* dummy IPI to serialise against fast_gup */
+ kick_all_cpus_sync();
+}
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5b4526ee3a01..494297c698ca 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -32,6 +32,7 @@
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
+#include <linux/efi.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
@@ -254,7 +255,7 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
- max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+ set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
#ifndef CONFIG_SPARSEMEM_VMEMMAP
free_unused_memmap();
@@ -332,8 +333,14 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd)
+ if (!keep_initrd) {
+ if (start == initrd_start)
+ start = round_down(start, PAGE_SIZE);
+ if (end == initrd_end)
+ end = round_up(end, PAGE_SIZE);
+
free_reserved_area((void *)start, (void *)end, 0, "initrd");
+ }
}
static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index fa324bd5a5c4..4a07630a6616 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -105,10 +105,10 @@ EXPORT_SYMBOL(ioremap_cache);
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
-static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
-static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
static inline pud_t * __init early_ioremap_pud(unsigned long addr)
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 8ed6cb1a900f..1d73662f00ff 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
*/
-int valid_phys_addr_range(unsigned long addr, size_t size)
+int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
if (addr < PHYS_OFFSET)
return 0;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c55567283cde..f4f8b500f74c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p)
*/
asm volatile(
" mrs %0, mair_el1\n"
- " bfi %0, %1, #%2, #8\n"
+ " bfi %0, %1, %2, #8\n"
" msr mair_el1, %0\n"
" isb\n"
: "=&r" (tmp)
@@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys,
+ unsigned long end, phys_addr_t phys,
int map_io)
{
pud_t *pud;
@@ -297,11 +297,15 @@ static void __init map_mem(void)
* create_mapping requires puds, pmds and ptes to be allocated from
* memory addressable from the initial direct kernel mapping.
*
- * The initial direct kernel mapping, located at swapper_pg_dir,
- * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
- * aligned to 2MB as per Documentation/arm64/booting.txt).
+ * The initial direct kernel mapping, located at swapper_pg_dir, gives
+ * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
+ * PHYS_OFFSET (which must be aligned to 2MB as per
+ * Documentation/arm64/booting.txt).
*/
- limit = PHYS_OFFSET + PUD_SIZE;
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ limit = PHYS_OFFSET + PMD_SIZE;
+ else
+ limit = PHYS_OFFSET + PUD_SIZE;
memblock_set_current_limit(limit);
/* map all the memory banks */
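
For reference, the two limits work out as follows with the usual arm64 granule setups (treat the exact figures as illustrative, since the shifts depend on the configured number of page-table levels):

	4K pages:  PUD_SIZE = 1UL << 30	/* 1 GB of early-mappable memory above PHYS_OFFSET */
	64K pages: PMD_SIZE = 1UL << 29	/* 512 MB of early-mappable memory above PHYS_OFFSET */
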
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
new file mode 100644
index 000000000000..bb0ea94c4ba1
--- /dev/null
+++ b/arch/arm64/mm/pageattr.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page_change_data *cdata = data;
+ pte_t pte = *ptep;
+
+ pte = clear_pte_bit(pte, cdata->clear_mask);
+ pte = set_pte_bit(pte, cdata->set_mask);
+
+ set_pte(ptep, pte);
+ return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+ pgprot_t set_mask, pgprot_t clear_mask)
+{
+ unsigned long start = addr;
+ unsigned long size = PAGE_SIZE*numpages;
+ unsigned long end = start + size;
+ int ret;
+ struct page_change_data data;
+
+ if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+ start &= PAGE_MASK;
+ end = start + size;
+ WARN_ON_ONCE(1);
+ }
+
+ if (!is_module_address(start) || !is_module_address(end - 1))
+ return -EINVAL;
+
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, end);
+ return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_RDONLY),
+ __pgprot(PTE_WRITE));
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_WRITE),
+ __pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_PXN),
+ __pgprot(0));
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(0),
+ __pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_x);
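
These helpers only act on the module area: change_memory_common() returns -EINVAL when is_module_address() fails for the range, and an unaligned start address is rounded down to a page boundary with a one-time warning. A hedged usage sketch, where mod_buf is a placeholder for a page-aligned allocation living in module memory, not something defined in this file:

	int err;

	err = set_memory_ro((unsigned long)mod_buf, 1);	/* one page read-only */
	if (err)
		return err;
	/* ... later, make it writable again ... */
	err = set_memory_rw((unsigned long)mod_buf, 1);
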
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 62c6101df260..6682b361d3ac 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -30,12 +30,14 @@
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+static struct kmem_cache *pgd_cache;
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
if (PGD_SIZE == PAGE_SIZE)
return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- return kzalloc(PGD_SIZE, GFP_KERNEL);
+ return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
if (PGD_SIZE == PAGE_SIZE)
free_page((unsigned long)pgd);
else
- kfree(pgd);
+ kmem_cache_free(pgd_cache, pgd);
+}
+
+static int __init pgd_cache_init(void)
+{
+ /*
+ * Naturally aligned pgds required by the architecture.
+ */
+ if (PGD_SIZE != PAGE_SIZE)
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ SLAB_PANIC, NULL);
+ return 0;
}
+core_initcall(pgd_cache_init);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 7736779c9809..4e778b13291b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -76,6 +76,21 @@ ENTRY(cpu_reset)
ret x0
ENDPROC(cpu_reset)
+ENTRY(cpu_soft_restart)
+ /* Save address of cpu_reset() and reset address */
+ mov x19, x0
+ mov x20, x1
+
+ /* Turn D-cache off */
+ bl cpu_cache_off
+
+ /* Push out all dirty data, and ensure cache is empty */
+ bl flush_cache_all
+
+ mov x0, x20
+ ret x19
+ENDPROC(cpu_soft_restart)
+
/*
* cpu_do_idle()
*
diff --git a/arch/arm64/net/Makefile b/arch/arm64/net/Makefile
new file mode 100644
index 000000000000..da9763378284
--- /dev/null
+++ b/arch/arm64/net/Makefile
@@ -0,0 +1,4 @@
+#
+# ARM64 networking code
+#
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
new file mode 100644
index 000000000000..de0a81a539a0
--- /dev/null
+++ b/arch/arm64/net/bpf_jit.h
@@ -0,0 +1,173 @@
+/*
+ * BPF JIT compiler for ARM64
+ *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#include <asm/insn.h>
+
+/* 5-bit Register Operand */
+#define A64_R(x) AARCH64_INSN_REG_##x
+#define A64_FP AARCH64_INSN_REG_FP
+#define A64_LR AARCH64_INSN_REG_LR
+#define A64_ZR AARCH64_INSN_REG_ZR
+#define A64_SP AARCH64_INSN_REG_SP
+
+#define A64_VARIANT(sf) \
+ ((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)
+
+/* Compare & branch (immediate) */
+#define A64_COMP_BRANCH(sf, Rt, offset, type) \
+ aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
+ AARCH64_INSN_BRANCH_COMP_##type)
+#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+
+/* Conditional branch (immediate) */
+#define A64_COND_BRANCH(cond, offset) \
+ aarch64_insn_gen_cond_branch_imm(0, offset, cond)
+#define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */
+#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
+#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
+#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
+#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
+#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
+#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
+
+/* Unconditional branch (immediate) */
+#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
+ AARCH64_INSN_BRANCH_##type)
+#define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK)
+#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
+
+/* Unconditional branch (register) */
+#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
+#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
+
+/* Load/store register (register offset) */
+#define A64_LS_REG(Rt, Rn, Rm, size, type) \
+ aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
+ AARCH64_INSN_SIZE_##size, \
+ AARCH64_INSN_LDST_##type##_REG_OFFSET)
+#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
+#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
+#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
+#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
+#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
+#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
+#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
+#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
+
+/* Load/store register pair */
+#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
+ aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
+ AARCH64_INSN_VARIANT_64BIT, \
+ AARCH64_INSN_LDST_##ls##_PAIR_##type)
+/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
+#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
+/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
+#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
+
+/* Add/subtract (immediate) */
+#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
+ aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
+ A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
+/* Rd = Rn OP imm12 */
+#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
+#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
+/* Rd = Rn */
+#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
+
+/* Bitfield move */
+#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
+ aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
+ A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
+/* Signed, with sign replication to left and zeros to right */
+#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
+/* Unsigned, with zeros to left and right */
+#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)
+
+/* Rd = Rn << shift */
+#define A64_LSL(sf, Rd, Rn, shift) ({ \
+ int sz = (sf) ? 64 : 32; \
+ A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
+})
+/* Rd = Rn >> shift */
+#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
+/* Rd = Rn >> shift; signed */
+#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
+
+/* Move wide (immediate) */
+#define A64_MOVEW(sf, Rd, imm16, shift, type) \
+ aarch64_insn_gen_movewide(Rd, imm16, shift, \
+ A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
+/* Rd = Zeros (for MOVZ);
+ * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
+ * Rd = ~Rd; (for MOVN); */
+#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
+#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
+#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
+
+/* Add/subtract (shifted register) */
+#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
+ aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
+ A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
+/* Rd = Rn OP Rm */
+#define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
+#define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
+#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
+/* Rd = -Rm */
+#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
+/* Rn - Rm; set condition flags */
+#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
+
+/* Data-processing (1 source) */
+#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
+ A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
+/* Rd = BSWAPx(Rn) */
+#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
+#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
+#define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64)
+
+/* Data-processing (2 source) */
+/* Rd = Rn OP Rm */
+#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
+ A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
+#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
+#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
+#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
+#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)
+
+/* Data-processing (3 source) */
+/* Rd = Ra + Rn * Rm */
+#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
+ A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
+/* Rd = Rn * Rm */
+#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
+
+/* Logical (shifted register) */
+#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
+ aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
+ A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
+/* Rd = Rn OP Rm */
+#define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
+#define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
+#define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
+#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
+/* Rn & Rm; set condition flags */
+#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
+
+#endif /* _BPF_JIT_H */
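
Each of these macros is a thin wrapper over the instruction encoder in arch/arm64/kernel/insn.c (extended earlier in this series): the macro selects the variant and operation enums, and the encoder returns the 32-bit opcode that the JIT writes into its image. For instance, A64_ADD(1, r_dst, r_dst, r_src), with r_dst and r_src standing in for any A64_R() operands, expands to:

	aarch64_insn_gen_add_sub_shifted_reg(r_dst, r_dst, r_src, 0,
					     AARCH64_INSN_VARIANT_64BIT,
					     AARCH64_INSN_ADSB_ADD);
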
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..41f1e3e2ea24
--- /dev/null
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -0,0 +1,743 @@
+/*
+ * BPF JIT compiler for ARM64
+ *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "bpf_jit: " fmt
+
+#include <linux/filter.h>
+#include <linux/printk.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
+
+#include "bpf_jit.h"
+
+int bpf_jit_enable __read_mostly;
+
+#define TMP_REG_1 (MAX_BPF_REG + 0)
+#define TMP_REG_2 (MAX_BPF_REG + 1)
+
+/* Map BPF registers to A64 registers */
+static const int bpf2a64[] = {
+ /* return value from in-kernel function, and exit value from eBPF */
+ [BPF_REG_0] = A64_R(7),
+ /* arguments from eBPF program to in-kernel function */
+ [BPF_REG_1] = A64_R(0),
+ [BPF_REG_2] = A64_R(1),
+ [BPF_REG_3] = A64_R(2),
+ [BPF_REG_4] = A64_R(3),
+ [BPF_REG_5] = A64_R(4),
+ /* callee saved registers that in-kernel function will preserve */
+ [BPF_REG_6] = A64_R(19),
+ [BPF_REG_7] = A64_R(20),
+ [BPF_REG_8] = A64_R(21),
+ [BPF_REG_9] = A64_R(22),
+ /* read-only frame pointer to access stack */
+ [BPF_REG_FP] = A64_FP,
+ /* temporary register for internal BPF JIT */
+ [TMP_REG_1] = A64_R(23),
+ [TMP_REG_2] = A64_R(24),
+};
+
+struct jit_ctx {
+ const struct bpf_prog *prog;
+ int idx;
+ int tmp_used;
+ int body_offset;
+ int *offset;
+ u32 *image;
+};
+
+static inline void emit(const u32 insn, struct jit_ctx *ctx)
+{
+ if (ctx->image != NULL)
+ ctx->image[ctx->idx] = cpu_to_le32(insn);
+
+ ctx->idx++;
+}
+
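+/*
+ * Build a 64-bit immediate 16 bits at a time: MOVZ sets the low
+ * half-word and clears the rest, then MOVK patches in each remaining
+ * non-zero half-word. For example, val = 0x1234deadbeef emits:
+ *   MOVZ reg, #0xbeef
+ *   MOVK reg, #0xdead, lsl #16
+ *   MOVK reg, #0x1234, lsl #32
+ */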
+static inline void emit_a64_mov_i64(const int reg, const u64 val,
+ struct jit_ctx *ctx)
+{
+ u64 tmp = val;
+ int shift = 0;
+
+ emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
+ tmp >>= 16;
+ shift += 16;
+ while (tmp) {
+ if (tmp & 0xffff)
+ emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
+ tmp >>= 16;
+ shift += 16;
+ }
+}
+
+static inline void emit_a64_mov_i(const int is64, const int reg,
+ const s32 val, struct jit_ctx *ctx)
+{
+ u16 hi = val >> 16;
+ u16 lo = val & 0xffff;
+
+ if (hi & 0x8000) {
+ if (hi == 0xffff) {
+ emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
+ } else {
+ emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
+ emit(A64_MOVK(is64, reg, lo, 0), ctx);
+ }
+ } else {
+ emit(A64_MOVZ(is64, reg, lo, 0), ctx);
+ if (hi)
+ emit(A64_MOVK(is64, reg, hi, 16), ctx);
+ }
+}
+
+static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+ const struct jit_ctx *ctx)
+{
+ int to = ctx->offset[bpf_to + 1];
+ /* -1 to account for the Branch instruction */
+ int from = ctx->offset[bpf_from + 1] - 1;
+
+ return to - from;
+}
+
+static void jit_fill_hole(void *area, unsigned int size)
+{
+ u32 *ptr;
+ /* We are guaranteed to have aligned memory. */
+ for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+ *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
+}
+
+static inline int epilogue_offset(const struct jit_ctx *ctx)
+{
+ int to = ctx->offset[ctx->prog->len - 1];
+ int from = ctx->idx - ctx->body_offset;
+
+ return to - from;
+}
+
+/* Stack must be multiples of 16B */
+#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+ const u8 r6 = bpf2a64[BPF_REG_6];
+ const u8 r7 = bpf2a64[BPF_REG_7];
+ const u8 r8 = bpf2a64[BPF_REG_8];
+ const u8 r9 = bpf2a64[BPF_REG_9];
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 ra = bpf2a64[BPF_REG_A];
+ const u8 rx = bpf2a64[BPF_REG_X];
+ const u8 tmp1 = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ int stack_size = MAX_BPF_STACK;
+
+ stack_size += 4; /* extra for skb_copy_bits buffer */
+ stack_size = STACK_ALIGN(stack_size);
+
+ /* Save callee-saved registers */
+ emit(A64_PUSH(r6, r7, A64_SP), ctx);
+ emit(A64_PUSH(r8, r9, A64_SP), ctx);
+ if (ctx->tmp_used)
+ emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
+
+ /* Set up BPF stack */
+ emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+
+ /* Set up frame pointer */
+ emit(A64_MOV(1, fp, A64_SP), ctx);
+
+ /* Clear registers A and X */
+ emit_a64_mov_i64(ra, 0, ctx);
+ emit_a64_mov_i64(rx, 0, ctx);
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+ const u8 r0 = bpf2a64[BPF_REG_0];
+ const u8 r6 = bpf2a64[BPF_REG_6];
+ const u8 r7 = bpf2a64[BPF_REG_7];
+ const u8 r8 = bpf2a64[BPF_REG_8];
+ const u8 r9 = bpf2a64[BPF_REG_9];
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 tmp1 = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ int stack_size = MAX_BPF_STACK;
+
+ stack_size += 4; /* extra for skb_copy_bits buffer */
+ stack_size = STACK_ALIGN(stack_size);
+
+ /* We're done with BPF stack */
+ emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
+
+ /* Restore callee-saved registers */
+ if (ctx->tmp_used)
+ emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
+ emit(A64_POP(r8, r9, A64_SP), ctx);
+ emit(A64_POP(r6, r7, A64_SP), ctx);
+
+ /* Restore frame pointer */
+ emit(A64_MOV(1, fp, A64_SP), ctx);
+
+ /* Set return value */
+ emit(A64_MOV(1, A64_R(0), r0), ctx);
+
+ emit(A64_RET(A64_LR), ctx);
+}
+
+/* JITs an eBPF instruction.
+ * Returns:
+ * 0 - successfully JITed an 8-byte eBPF instruction.
+ * >0 - successfully JITed a 16-byte eBPF instruction.
+ * <0 - failed to JIT.
+ */
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 code = insn->code;
+ const u8 dst = bpf2a64[insn->dst_reg];
+ const u8 src = bpf2a64[insn->src_reg];
+ const u8 tmp = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const s16 off = insn->off;
+ const s32 imm = insn->imm;
+ const int i = insn - ctx->prog->insnsi;
+ const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+ u8 jmp_cond;
+ s32 jmp_offset;
+
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ emit(A64_MOV(is64, dst, src), ctx);
+ break;
+ /* dst = dst OP src */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ emit(A64_ADD(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ emit(A64_SUB(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ emit(A64_AND(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ emit(A64_ORR(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ emit(A64_EOR(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ emit(A64_MUL(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ ctx->tmp_used = 1;
+ emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ emit(A64_MUL(is64, tmp, tmp, src), ctx);
+ emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ emit(A64_LSLV(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ emit(A64_LSRV(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ emit(A64_ASRV(is64, dst, dst, src), ctx);
+ break;
+ /* dst = -dst */
+ case BPF_ALU | BPF_NEG:
+ case BPF_ALU64 | BPF_NEG:
+ emit(A64_NEG(is64, dst, dst), ctx);
+ break;
+ /* dst = BSWAP##imm(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (BPF_SRC(code) == BPF_FROM_BE)
+ break;
+#else /* !CONFIG_CPU_BIG_ENDIAN */
+ if (BPF_SRC(code) == BPF_FROM_LE)
+ break;
+#endif
+ switch (imm) {
+ case 16:
+ emit(A64_REV16(is64, dst, dst), ctx);
+ break;
+ case 32:
+ emit(A64_REV32(is64, dst, dst), ctx);
+ break;
+ case 64:
+ emit(A64_REV64(dst, dst), ctx);
+ break;
+ }
+ break;
+ /* dst = imm */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ emit_a64_mov_i(is64, dst, imm, ctx);
+ break;
+ /* dst = dst OP imm */
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_ADD(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_AND(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_ORR(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_EOR(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_MUL(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_UDIV(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp2, imm, ctx);
+ emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
+ emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
+ emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ emit(A64_LSL(is64, dst, dst, imm), ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ emit(A64_LSR(is64, dst, dst, imm), ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ emit(A64_ASR(is64, dst, dst, imm), ctx);
+ break;
+
+#define check_imm(bits, imm) do { \
+ if ((((imm) > 0) && ((imm) >> (bits))) || \
+ (((imm) < 0) && (~(imm) >> (bits)))) { \
+ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+ i, imm, imm); \
+ return -EINVAL; \
+ } \
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ check_imm26(jmp_offset);
+ emit(A64_B(jmp_offset), ctx);
+ break;
+ /* IF (dst COND src) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ emit(A64_CMP(1, dst, src), ctx);
+emit_cond_jmp:
+ jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ check_imm19(jmp_offset);
+ switch (BPF_OP(code)) {
+ case BPF_JEQ:
+ jmp_cond = A64_COND_EQ;
+ break;
+ case BPF_JGT:
+ jmp_cond = A64_COND_HI;
+ break;
+ case BPF_JGE:
+ jmp_cond = A64_COND_CS;
+ break;
+ case BPF_JNE:
+ jmp_cond = A64_COND_NE;
+ break;
+ case BPF_JSGT:
+ jmp_cond = A64_COND_GT;
+ break;
+ case BPF_JSGE:
+ jmp_cond = A64_COND_GE;
+ break;
+ default:
+ return -EFAULT;
+ }
+ emit(A64_B_(jmp_cond, jmp_offset), ctx);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ emit(A64_TST(1, dst, src), ctx);
+ goto emit_cond_jmp;
+ /* IF (dst COND imm) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, imm, ctx);
+ emit(A64_CMP(1, dst, tmp), ctx);
+ goto emit_cond_jmp;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, imm, ctx);
+ emit(A64_TST(1, dst, tmp), ctx);
+ goto emit_cond_jmp;
+ /* function call */
+ case BPF_JMP | BPF_CALL:
+ {
+ const u8 r0 = bpf2a64[BPF_REG_0];
+ const u64 func = (u64)__bpf_call_base + imm;
+
+ ctx->tmp_used = 1;
+ emit_a64_mov_i64(tmp, func, ctx);
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+ emit(A64_BLR(tmp), ctx);
+ emit(A64_MOV(1, r0, A64_R(0)), ctx);
+ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+ break;
+ }
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+ if (i == ctx->prog->len - 1)
+ break;
+ jmp_offset = epilogue_offset(ctx);
+ check_imm26(jmp_offset);
+ emit(A64_B(jmp_offset), ctx);
+ break;
+
+ /* dst = imm64 */
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ const struct bpf_insn insn1 = insn[1];
+ u64 imm64;
+
+ if (insn1.code != 0 || insn1.src_reg != 0 ||
+ insn1.dst_reg != 0 || insn1.off != 0) {
+ /* Note: verifier in BPF core must catch invalid
+ * instructions.
+ */
+ pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
+ return -EINVAL;
+ }
+
+ imm64 = (u64)insn1.imm << 32 | imm;
+ emit_a64_mov_i64(dst, imm64, ctx);
+
+ return 1;
+ }
+
+ /* LDX: dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, off, ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(A64_LDR32(dst, src, tmp), ctx);
+ break;
+ case BPF_H:
+ emit(A64_LDRH(dst, src, tmp), ctx);
+ break;
+ case BPF_B:
+ emit(A64_LDRB(dst, src, tmp), ctx);
+ break;
+ case BPF_DW:
+ emit(A64_LDR64(dst, src, tmp), ctx);
+ break;
+ }
+ break;
+
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_DW:
+ goto notyet;
+
+ /* STX: *(size *)(dst + off) = src */
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, off, ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(A64_STR32(src, dst, tmp), ctx);
+ break;
+ case BPF_H:
+ emit(A64_STRH(src, dst, tmp), ctx);
+ break;
+ case BPF_B:
+ emit(A64_STRB(src, dst, tmp), ctx);
+ break;
+ case BPF_DW:
+ emit(A64_STR64(src, dst, tmp), ctx);
+ break;
+ }
+ break;
+ /* STX XADD: lock *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* STX XADD: lock *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ goto notyet;
+
+ /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
+ case BPF_LD | BPF_ABS | BPF_W:
+ case BPF_LD | BPF_ABS | BPF_H:
+ case BPF_LD | BPF_ABS | BPF_B:
+ /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
+ case BPF_LD | BPF_IND | BPF_W:
+ case BPF_LD | BPF_IND | BPF_H:
+ case BPF_LD | BPF_IND | BPF_B:
+ {
+ const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
+ const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
+ const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
+ const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
+ const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
+ const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
+ int size;
+
+ emit(A64_MOV(1, r1, r6), ctx);
+ emit_a64_mov_i(0, r2, imm, ctx);
+ if (BPF_MODE(code) == BPF_IND)
+ emit(A64_ADD(0, r2, r2, src), ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ size = 4;
+ break;
+ case BPF_H:
+ size = 2;
+ break;
+ case BPF_B:
+ size = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ emit_a64_mov_i64(r3, size, ctx);
+ emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
+ emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+ emit(A64_BLR(r5), ctx);
+ emit(A64_MOV(1, r0, A64_R(0)), ctx);
+ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+
+ jmp_offset = epilogue_offset(ctx);
+ check_imm19(jmp_offset);
+ emit(A64_CBZ(1, r0, jmp_offset), ctx);
+ emit(A64_MOV(1, r5, r0), ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(A64_LDR32(r0, r5, A64_ZR), ctx);
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ emit(A64_REV32(0, r0, r0), ctx);
+#endif
+ break;
+ case BPF_H:
+ emit(A64_LDRH(r0, r5, A64_ZR), ctx);
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ emit(A64_REV16(0, r0, r0), ctx);
+#endif
+ break;
+ case BPF_B:
+ emit(A64_LDRB(r0, r5, A64_ZR), ctx);
+ break;
+ }
+ break;
+ }
+notyet:
+ pr_info_once("*** NOT YET: opcode %02x ***\n", code);
+ return -EFAULT;
+
+ default:
+ pr_err_once("unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int build_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->prog;
+ int i;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ int ret;
+
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
+
+ ret = build_insn(insn, ctx);
+ if (ret > 0) {
+ i++;
+ continue;
+ }
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+void bpf_jit_compile(struct bpf_prog *prog)
+{
+ /* Nothing to do here. We support Internal BPF. */
+}
+
+void bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ struct bpf_binary_header *header;
+ struct jit_ctx ctx;
+ int image_size;
+ u8 *image_ptr;
+
+ if (!bpf_jit_enable)
+ return;
+
+ if (!prog || !prog->len)
+ return;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.prog = prog;
+
+ ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ if (ctx.offset == NULL)
+ return;
+
+ /* 1. Initial fake pass to compute ctx->idx. */
+
+ /* Fake pass to fill in ctx->offset. */
+ if (build_body(&ctx))
+ goto out;
+
+ build_prologue(&ctx);
+ build_epilogue(&ctx);
+
+ /* Now we know the actual image size. */
+ image_size = sizeof(u32) * ctx.idx;
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ if (header == NULL)
+ goto out;
+
+ /* 2. Now, the actual pass. */
+
+ ctx.image = (u32 *)image_ptr;
+ ctx.idx = 0;
+
+ build_prologue(&ctx);
+
+ ctx.body_offset = ctx.idx;
+ if (build_body(&ctx)) {
+ bpf_jit_binary_free(header);
+ goto out;
+ }
+
+ build_epilogue(&ctx);
+
+ /* And we're done. */
+ if (bpf_jit_enable > 1)
+ bpf_jit_dump(prog->len, image_size, 2, ctx.image);
+
+ bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+
+ set_memory_ro((unsigned long)header, header->pages);
+ prog->bpf_func = (void *)ctx.image;
+ prog->jited = true;
+out:
+ kfree(ctx.offset);
+}
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+ unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
+ struct bpf_binary_header *header = (void *)addr;
+
+ if (!prog->jited)
+ goto free_filter;
+
+ set_memory_rw(addr, header->pages);
+ bpf_jit_binary_free(header);
+
+free_filter:
+ bpf_prog_unlock_free(prog);
+}
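
The JIT is built around two passes over the program, as laid out in bpf_int_jit_compile(): the first pass runs with ctx.image == NULL, so emit() only advances ctx.idx, which records per-instruction offsets and fixes the final image size; the second pass emits into the buffer obtained from bpf_jit_binary_alloc(), after which the image is flushed from the caches and its pages are made read-only. Once prog->jited is set, the core enters the generated code directly through prog->bpf_func; roughly, and assuming the bpf_func signature of this kernel generation rather than anything defined in this patch:

	/* what BPF_PROG_RUN() boils down to for a jited socket filter */
	u32 ret = prog->bpf_func(skb, prog->insnsi);

The JIT remains gated by the existing net.core.bpf_jit_enable sysctl; a value greater than 1 additionally dumps the generated image through bpf_jit_dump(), as wired up above.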