Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig | 53
-rw-r--r--  arch/riscv/Kconfig.erratas | 2
-rw-r--r--  arch/riscv/Kconfig.socs | 4
-rw-r--r--  arch/riscv/Makefile | 3
-rw-r--r--  arch/riscv/boot/dts/canaan/Makefile | 10
-rw-r--r--  arch/riscv/boot/dts/canaan/canaan_kd233.dts | 8
-rw-r--r--  arch/riscv/boot/dts/canaan/k210.dtsi | 85
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts | 4
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts | 4
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_go.dts | 8
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maixduino.dts | 4
-rw-r--r--  arch/riscv/boot/dts/microchip/mpfs.dtsi | 58
-rw-r--r--  arch/riscv/boot/dts/sifive/fu540-c000.dtsi | 24
-rw-r--r--  arch/riscv/boot/dts/sifive/fu740-c000.dtsi | 24
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts | 2
-rw-r--r--  arch/riscv/boot/dts/starfive/jh7100.dtsi | 16
-rw-r--r--  arch/riscv/configs/32-bit.config | 2
-rw-r--r--  arch/riscv/configs/defconfig | 65
-rw-r--r--  arch/riscv/configs/nommu_k210_defconfig | 1
-rw-r--r--  arch/riscv/configs/nommu_k210_sdcard_defconfig | 1
-rw-r--r--  arch/riscv/configs/nommu_virt_defconfig | 1
-rw-r--r--  arch/riscv/configs/rv32_defconfig | 1
-rw-r--r--  arch/riscv/errata/sifive/errata.c | 3
-rw-r--r--  arch/riscv/include/asm/asm.h | 15
-rw-r--r--  arch/riscv/include/asm/barrier.h | 2
-rw-r--r--  arch/riscv/include/asm/csr.h | 16
-rw-r--r--  arch/riscv/include/asm/efi.h | 2
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 22
-rw-r--r--  arch/riscv/include/asm/kvm_host.h | 24
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_fp.h | 8
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_insn.h | 48
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_timer.h | 2
-rw-r--r--  arch/riscv/include/asm/page.h | 1
-rw-r--r--  arch/riscv/include/asm/pci.h | 32
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h | 12
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 26
-rw-r--r--  arch/riscv/include/asm/processor.h | 4
-rw-r--r--  arch/riscv/include/asm/smp.h | 4
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h | 1
-rw-r--r--  arch/riscv/kernel/Makefile | 2
-rw-r--r--  arch/riscv/kernel/cpu.c | 26
-rw-r--r--  arch/riscv/kernel/cpu_ops_sbi.c | 4
-rw-r--r--  arch/riscv/kernel/cpu_ops_spinwait.c | 4
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 6
-rw-r--r--  arch/riscv/kernel/elf_kexec.c | 2
-rw-r--r--  arch/riscv/kernel/entry.S | 12
-rw-r--r--  arch/riscv/kernel/jump_label.c | 12
-rw-r--r--  arch/riscv/kernel/smp.c | 4
-rw-r--r--  arch/riscv/kernel/smpboot.c | 9
-rw-r--r--  arch/riscv/kernel/sys_riscv.c | 5
-rw-r--r--  arch/riscv/kernel/traps.c | 4
-rw-r--r--  arch/riscv/kvm/Makefile | 1
-rw-r--r--  arch/riscv/kvm/mmu.c | 37
-rw-r--r--  arch/riscv/kvm/vcpu.c | 205
-rw-r--r--  arch/riscv/kvm/vcpu_exit.c | 496
-rw-r--r--  arch/riscv/kvm/vcpu_fp.c | 27
-rw-r--r--  arch/riscv/kvm/vcpu_insn.c | 752
-rw-r--r--  arch/riscv/kvm/vcpu_timer.c | 4
-rw-r--r--  arch/riscv/kvm/vm.c | 4
-rw-r--r--  arch/riscv/kvm/vmid.c | 2
-rw-r--r--  arch/riscv/lib/uaccess.S | 4
-rw-r--r--  arch/riscv/mm/dma-noncoherent.c | 8
-rw-r--r--  arch/riscv/mm/fault.c | 4
-rw-r--r--  arch/riscv/mm/init.c | 20
-rw-r--r--  arch/riscv/net/bpf_jit.h | 1
-rw-r--r--  arch/riscv/net/bpf_jit_core.c | 8
66 files changed, 1469 insertions(+), 796 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 897ae28abf81..3d49317f5019 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -38,7 +38,7 @@ config RISCV
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGETLBFS if MMU
- select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -86,7 +86,7 @@ config RISCV
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
select HAVE_ASM_MODVERSIONS
- select HAVE_CONTEXT_TRACKING
+ select HAVE_CONTEXT_TRACKING_USER
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_EBPF_JIT if MMU
@@ -232,6 +232,21 @@ source "arch/riscv/Kconfig.erratas"
menu "Platform type"
+config NONPORTABLE
+ bool "Allow configurations that result in non-portable kernels"
+ help
+ RISC-V kernel binaries are compatible between all known systems
+ whenever possible, but there are some use cases that can only be
+ satisfied by configurations that result in kernel binaries that are
+ not portable between systems.
+
+ Selecting N does not guarantee kernels will be portable to all known
+ systems. Selecting any of the options guarded by NONPORTABLE will
+ result in kernel binaries that are unlikely to be portable between
+ systems.
+
+ If unsure, say N.
+
choice
prompt "Base ISA"
default ARCH_RV64I
@@ -241,6 +256,7 @@ choice
config ARCH_RV32I
bool "RV32I"
+ depends on NONPORTABLE
select 32BIT
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
@@ -361,11 +377,11 @@ config RISCV_ISA_C
bool "Emit compressed instructions when building Linux"
default y
help
- Adds "C" to the ISA subsets that the toolchain is allowed to emit
- when building Linux, which results in compressed instructions in the
- Linux binary.
+ Adds "C" to the ISA subsets that the toolchain is allowed to emit
+ when building Linux, which results in compressed instructions in the
+ Linux binary.
- If you don't know what to do here, say Y.
+ If you don't know what to do here, say Y.
config RISCV_ISA_SVPBMT
bool "SVPBMT extension support"
@@ -393,7 +409,7 @@ config CC_HAS_ZICBOM
config RISCV_ISA_ZICBOM
bool "Zicbom extension support for non-coherent DMA operation"
depends on CC_HAS_ZICBOM
- depends on !XIP_KERNEL
+ depends on !XIP_KERNEL && MMU
select RISCV_DMA_NONCOHERENT
select RISCV_ALTERNATIVE
default y
@@ -416,7 +432,7 @@ config FPU
If you don't know what to do here, say Y.
-endmenu
+endmenu # "Platform type"
menu "Kernel features"
@@ -505,7 +521,7 @@ config COMPAT
If you want to execute 32-bit userspace applications, say Y.
-endmenu
+endmenu # "Kernel features"
menu "Boot options"
@@ -541,7 +557,6 @@ config CMDLINE_EXTEND
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
-
config CMDLINE_FORCE
bool "Always use the default kernel command string"
help
@@ -584,6 +599,7 @@ config STACKPROTECTOR_PER_TASK
config PHYS_RAM_BASE_FIXED
bool "Explicitly specified physical RAM address"
+ depends on NONPORTABLE
default n
config PHYS_RAM_BASE
@@ -597,7 +613,7 @@ config PHYS_RAM_BASE
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
- depends on MMU && SPARSEMEM
+ depends on MMU && SPARSEMEM && NONPORTABLE
# This prevents XIP from being enabled by all{yes,mod}config, which
# fail to build since XIP doesn't support large kernels.
depends on !COMPILE_TEST
@@ -633,23 +649,30 @@ config XIP_PHYS_ADDR
be linked for and stored to. This address is dependent on your
own flash usage.
-endmenu
+endmenu # "Boot options"
config BUILTIN_DTB
bool
- depends on OF
+ depends on OF && NONPORTABLE
default y if XIP_KERNEL
+config PORTABLE
+ bool
+ default !NONPORTABLE
+ select EFI
+ select OF
+ select MMU
+
menu "Power management options"
source "kernel/power/Kconfig"
-endmenu
+endmenu # "Power management options"
menu "CPU Power Management"
source "drivers/cpuidle/Kconfig"
-endmenu
+endmenu # "CPU Power Management"
source "arch/riscv/kvm/Kconfig"
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
index 3223e533fd87..6850e9389930 100644
--- a/arch/riscv/Kconfig.erratas
+++ b/arch/riscv/Kconfig.erratas
@@ -66,4 +66,4 @@ config ERRATA_THEAD_CMO
If you don't know what to do here, say "Y".
-endmenu
+endmenu # "CPU errata selection"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 85670dc9fe95..69774bb362d6 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -78,6 +78,6 @@ config SOC_CANAAN_K210_DTB_SOURCE
for the DTS file that will be used to produce the DTB linked into the
kernel.
-endif
+endif # SOC_CANAAN
-endmenu
+endmenu # "SoC selection"
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index fbaabc98b3d2..42d7ff8730aa 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -77,6 +77,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
endif
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses. While unaligned accesses are explicitly allowed in the
@@ -114,7 +115,7 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
- $(build)=arch/riscv/kernel/compat_vdso $@)
+ $(build)=arch/riscv/kernel/compat_vdso compat_$@)
ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
diff --git a/arch/riscv/boot/dts/canaan/Makefile b/arch/riscv/boot/dts/canaan/Makefile
index c61b08ac8554..befe4eb7527b 100644
--- a/arch/riscv/boot/dts/canaan/Makefile
+++ b/arch/riscv/boot/dts/canaan/Makefile
@@ -1,3 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .dtb, $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))
-obj-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
+dtb-$(CONFIG_SOC_CANAAN) += canaan_kd233.dtb
+dtb-$(CONFIG_SOC_CANAAN) += k210_generic.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maix_bit.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maix_dock.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maix_go.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maixduino.dtb
+
+obj-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .dtb.o, $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))
diff --git a/arch/riscv/boot/dts/canaan/canaan_kd233.dts b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
index 039b92abf046..8df4cf3656f2 100644
--- a/arch/riscv/boot/dts/canaan/canaan_kd233.dts
+++ b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
@@ -35,7 +35,7 @@
gpio-keys {
compatible = "gpio-keys";
- key0 {
+ key {
label = "KEY0";
linux,code = <BTN_0>;
gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
@@ -127,10 +127,10 @@
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
panel@0 {
- compatible = "ilitek,ili9341";
+ compatible = "canaan,kd233-tft", "ilitek,ili9341";
reg = <0>;
dc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
- spi-max-frequency = <15000000>;
+ spi-max-frequency = <10000000>;
status = "disabled";
};
};
@@ -142,7 +142,7 @@
cs-gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi
index 44d338514761..07e2e2649604 100644
--- a/arch/riscv/boot/dts/canaan/k210.dtsi
+++ b/arch/riscv/boot/dts/canaan/k210.dtsi
@@ -65,15 +65,29 @@
compatible = "riscv,cpu-intc";
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+ };
+ };
};
sram: memory@80000000 {
device_type = "memory";
+ reg = <0x80000000 0x400000>, /* sram0 4 MiB */
+ <0x80400000 0x200000>, /* sram1 2 MiB */
+ <0x80600000 0x200000>; /* aisram 2 MiB */
+ };
+
+ sram_controller: memory-controller {
compatible = "canaan,k210-sram";
- reg = <0x80000000 0x400000>,
- <0x80400000 0x200000>,
- <0x80600000 0x200000>;
- reg-names = "sram0", "sram1", "aisram";
clocks = <&sysclk K210_CLK_SRAM0>,
<&sysclk K210_CLK_SRAM1>,
<&sysclk K210_CLK_AI>;
@@ -161,7 +175,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-pm-bus";
- ranges;
+ ranges = <0x50200000 0x50200000 0x200000>;
clocks = <&sysclk K210_CLK_APB0>;
gpio1: gpio@50200000 {
@@ -249,7 +263,7 @@
};
i2s0: i2s@50250000 {
- compatible = "snps,designware-i2s";
+ compatible = "canaan,k210-i2s", "snps,designware-i2s";
reg = <0x50250000 0x200>;
interrupts = <5>;
clocks = <&sysclk K210_CLK_I2S0>;
@@ -258,7 +272,7 @@
};
i2s1: i2s@50260000 {
- compatible = "snps,designware-i2s";
+ compatible = "canaan,k210-i2s", "snps,designware-i2s";
reg = <0x50260000 0x200>;
interrupts = <6>;
clocks = <&sysclk K210_CLK_I2S1>;
@@ -267,7 +281,7 @@
};
i2s2: i2s@50270000 {
- compatible = "snps,designware-i2s";
+ compatible = "canaan,k210-i2s", "snps,designware-i2s";
reg = <0x50270000 0x200>;
interrupts = <7>;
clocks = <&sysclk K210_CLK_I2S2>;
@@ -317,28 +331,58 @@
timer0: timer@502d0000 {
compatible = "snps,dw-apb-timer";
- reg = <0x502D0000 0x100>;
- interrupts = <14>, <15>;
+ reg = <0x502D0000 0x14>;
+ interrupts = <14>;
clocks = <&sysclk K210_CLK_TIMER0>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
resets = <&sysrst K210_RST_TIMER0>;
};
- timer1: timer@502e0000 {
+ timer1: timer@502d0014 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502D0014 0x14>;
+ interrupts = <15>;
+ clocks = <&sysclk K210_CLK_TIMER0>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER0>;
+ };
+
+ timer2: timer@502e0000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502E0000 0x14>;
+ interrupts = <16>;
+ clocks = <&sysclk K210_CLK_TIMER1>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER1>;
+ };
+
+ timer3: timer@502e0014 {
compatible = "snps,dw-apb-timer";
- reg = <0x502E0000 0x100>;
- interrupts = <16>, <17>;
+	reg = <0x502E0014 0x14>;
+ interrupts = <17>;
clocks = <&sysclk K210_CLK_TIMER1>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
resets = <&sysrst K210_RST_TIMER1>;
};
- timer2: timer@502f0000 {
+ timer4: timer@502f0000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502F0000 0x14>;
+ interrupts = <18>;
+ clocks = <&sysclk K210_CLK_TIMER2>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER2>;
+ };
+
+ timer5: timer@502f0014 {
compatible = "snps,dw-apb-timer";
- reg = <0x502F0000 0x100>;
- interrupts = <18>, <19>;
+ reg = <0x502F0014 0x14>;
+ interrupts = <19>;
clocks = <&sysclk K210_CLK_TIMER2>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
@@ -350,7 +394,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-pm-bus";
- ranges;
+ ranges = <0x50400000 0x50400000 0x40100>;
clocks = <&sysclk K210_CLK_APB1>;
wdt0: watchdog@50400000 {
@@ -405,7 +449,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-pm-bus";
- ranges;
+ ranges = <0x52000000 0x52000000 0x2000200>;
clocks = <&sysclk K210_CLK_APB2>;
spi0: spi@52000000 {
@@ -419,7 +463,6 @@
clock-names = "ssi_clk", "pclk";
resets = <&sysrst K210_RST_SPI0>;
reset-names = "spi";
- spi-max-frequency = <25000000>;
num-cs = <4>;
reg-io-width = <4>;
};
@@ -435,7 +478,6 @@
clock-names = "ssi_clk", "pclk";
resets = <&sysrst K210_RST_SPI1>;
reset-names = "spi";
- spi-max-frequency = <25000000>;
num-cs = <4>;
reg-io-width = <4>;
};
@@ -451,8 +493,7 @@
clock-names = "ssi_clk", "pclk";
resets = <&sysrst K210_RST_SPI3>;
reset-names = "spi";
- /* Could possibly go up to 200 MHz */
- spi-max-frequency = <100000000>;
+
num-cs = <4>;
reg-io-width = <4>;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
index b9e30df127fe..6d25bf07481a 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -47,7 +47,7 @@
gpio-keys {
compatible = "gpio-keys";
- boot {
+ key-boot {
label = "BOOT";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
@@ -189,7 +189,7 @@
cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
index 8d23401b0bbb..f4f4d8d5e8b8 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -52,7 +52,7 @@
gpio-keys {
compatible = "gpio-keys";
- boot {
+ key-boot {
label = "BOOT";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
@@ -191,7 +191,7 @@
cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
index 24fd83b43d9d..0d86df47e1ed 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -46,19 +46,19 @@
gpio-keys {
compatible = "gpio-keys";
- up {
+ key-up {
label = "UP";
linux,code = <BTN_1>;
gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>;
};
- press {
+ key-press {
label = "PRESS";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
};
- down {
+ key-down {
label = "DOWN";
linux,code = <BTN_2>;
gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
@@ -199,7 +199,7 @@
cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
index 25341f38292a..5c05c498e2b8 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -23,7 +23,7 @@
gpio-keys {
compatible = "gpio-keys";
- boot {
+ key-boot {
label = "BOOT";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
@@ -164,7 +164,7 @@
cs-gpios = <&gpio1_0 2 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
index 8c3259134194..499c2e63ad35 100644
--- a/arch/riscv/boot/dts/microchip/mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
@@ -50,6 +50,7 @@
riscv,isa = "rv64imafdc";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
+ next-level-cache = <&cctrllr>;
status = "okay";
cpu1_intc: interrupt-controller {
@@ -77,6 +78,7 @@
riscv,isa = "rv64imafdc";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
+ next-level-cache = <&cctrllr>;
status = "okay";
cpu2_intc: interrupt-controller {
@@ -104,6 +106,7 @@
riscv,isa = "rv64imafdc";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
+ next-level-cache = <&cctrllr>;
status = "okay";
cpu3_intc: interrupt-controller {
@@ -131,6 +134,7 @@
riscv,isa = "rv64imafdc";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
+ next-level-cache = <&cctrllr>;
status = "okay";
cpu4_intc: interrupt-controller {
#interrupt-cells = <1>;
@@ -138,6 +142,30 @@
interrupt-controller;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+ };
+ };
};
refclk: mssrefclk {
@@ -192,6 +220,15 @@
riscv,ndev = <186>;
};
+ pdma: dma-controller@3000000 {
+ compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
+ reg = <0x0 0x3000000 0x0 0x8000>;
+ interrupt-parent = <&plic>;
+ interrupts = <5 6>, <7 8>, <9 10>, <11 12>;
+ dma-channels = <4>;
+ #dma-cells = <1>;
+ };
+
clkcfg: clkcfg@20002000 {
compatible = "microchip,mpfs-clkcfg";
reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>;
@@ -278,7 +315,6 @@
interrupt-parent = <&plic>;
interrupts = <54>;
clocks = <&clkcfg CLK_SPI0>;
- spi-max-frequency = <25000000>;
status = "disabled";
};
@@ -290,7 +326,6 @@
interrupt-parent = <&plic>;
interrupts = <55>;
clocks = <&clkcfg CLK_SPI1>;
- spi-max-frequency = <25000000>;
status = "disabled";
};
@@ -302,7 +337,6 @@
interrupt-parent = <&plic>;
interrupts = <85>;
clocks = <&clkcfg CLK_QSPI>;
- spi-max-frequency = <25000000>;
status = "disabled";
};
@@ -330,6 +364,24 @@
status = "disabled";
};
+ can0: can@2010c000 {
+ compatible = "microchip,mpfs-can";
+ reg = <0x0 0x2010c000 0x0 0x1000>;
+ clocks = <&clkcfg CLK_CAN0>;
+ interrupt-parent = <&plic>;
+ interrupts = <56>;
+ status = "disabled";
+ };
+
+ can1: can@2010d000 {
+ compatible = "microchip,mpfs-can";
+ reg = <0x0 0x2010d000 0x0 0x1000>;
+ clocks = <&clkcfg CLK_CAN1>;
+ interrupt-parent = <&plic>;
+ interrupts = <57>;
+ status = "disabled";
+ };
+
mac0: ethernet@20110000 {
compatible = "cdns,macb";
reg = <0x0 0x20110000 0x0 0x2000>;
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index e3172d0ffac4..24bba83bec77 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -133,6 +133,30 @@
interrupt-controller;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+ };
+ };
};
soc {
#address-cells = <2>;
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index 7b77c13496d8..43bed6c0a84f 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -134,6 +134,30 @@
interrupt-controller;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+ };
+ };
};
soc {
#address-cells = <2>;
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index c4ed9efdff03..1f386b07a832 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -90,7 +90,7 @@
compatible = "dlg,da9063-rtc";
};
- wdt {
+ watchdog {
compatible = "dlg,da9063-watchdog";
};
diff --git a/arch/riscv/boot/dts/starfive/jh7100.dtsi b/arch/riscv/boot/dts/starfive/jh7100.dtsi
index 69f22f9aad9d..c617a61e26e2 100644
--- a/arch/riscv/boot/dts/starfive/jh7100.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7100.dtsi
@@ -17,7 +17,7 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ U74_0: cpu@0 {
compatible = "sifive,u74-mc", "riscv";
reg = <0>;
d-cache-block-size = <64>;
@@ -42,7 +42,7 @@
};
};
- cpu@1 {
+ U74_1: cpu@1 {
compatible = "sifive,u74-mc", "riscv";
reg = <1>;
d-cache-block-size = <64>;
@@ -66,6 +66,18 @@
#interrupt-cells = <1>;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&U74_0>;
+ };
+
+ core1 {
+ cpu = <&U74_1>;
+ };
+ };
+ };
};
osc_sys: osc_sys {
diff --git a/arch/riscv/configs/32-bit.config b/arch/riscv/configs/32-bit.config
index 43f41323b67e..f6af0f708df4 100644
--- a/arch/riscv/configs/32-bit.config
+++ b/arch/riscv/configs/32-bit.config
@@ -1,2 +1,4 @@
CONFIG_ARCH_RV32I=y
CONFIG_32BIT=y
+# CONFIG_PORTABLE is not set
+CONFIG_NONPORTABLE=y
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 0cc17db8aaba..aed332a9d4ea 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -6,8 +6,17 @@ CONFIG_BPF_SYSCALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
@@ -18,6 +27,7 @@ CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SOC_SIFIVE=y
+CONFIG_SOC_STARFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
@@ -28,9 +38,11 @@ CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_BLK_DEV_THROTTLING=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -38,7 +50,43 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+CONFIG_INET_ESP=m
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_LOG_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=m
CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_PCI=y
@@ -57,7 +105,15 @@ CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_IPVLAN=m
+CONFIG_VXLAN=m
+CONFIG_VETH=m
CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
@@ -105,7 +161,11 @@ CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
+CONFIG_OVERLAY_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
@@ -119,6 +179,10 @@ CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=m
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
@@ -137,7 +201,6 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_RWSEMS=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_PLIST=y
CONFIG_DEBUG_SG=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 2438fa39f8ae..96fe8def644c 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -28,6 +28,7 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_CANAAN=y
+CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index 9a133e63ae5b..379740654373 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -20,6 +20,7 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_CANAAN=y
+CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index 5269fbb6b4fc..1a56eda5ce46 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -25,6 +25,7 @@ CONFIG_EXPERT=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_VIRT=y
+CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
CONFIG_CMDLINE_FORCE=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 6cd9d84d3e13..38760e4296cf 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -18,6 +18,7 @@ CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
+CONFIG_NONPORTABLE=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
index 672f02b21ce0..1031038423e7 100644
--- a/arch/riscv/errata/sifive/errata.c
+++ b/arch/riscv/errata/sifive/errata.c
@@ -111,6 +111,7 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
cpu_apply_errata |= tmp;
}
}
- if (cpu_apply_errata != cpu_req_errata)
+ if (stage != RISCV_ALTERNATIVES_MODULE &&
+ cpu_apply_errata != cpu_req_errata)
warn_miss_errata(cpu_req_errata - cpu_apply_errata);
}
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 618d7c5af1a2..1b471ff73178 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -67,4 +67,19 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif
+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+/*
+ * NOP sequence
+ */
+.macro nops, num
+ .rept \num
+ nop
+ .endr
+.endm
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index d0e24aaa2aa0..110752594228 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -13,6 +13,8 @@
#ifndef __ASSEMBLY__
#define nop() __asm__ __volatile__ ("nop")
+#define __nops(n) ".rept " #n "\nnop\n.endr\n"
+#define nops(n) __asm__ __volatile__ (__nops(n))
#define RISCV_FENCE(p, s) \
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 6d85655e7edf..17516afc389a 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -156,6 +156,18 @@
(_AC(1, UL) << IRQ_S_TIMER) | \
(_AC(1, UL) << IRQ_S_EXT))
+/* xENVCFG flags */
+#define ENVCFG_STCE (_AC(1, ULL) << 63)
+#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
+#define ENVCFG_CBZE (_AC(1, UL) << 7)
+#define ENVCFG_CBCFE (_AC(1, UL) << 6)
+#define ENVCFG_CBIE_SHIFT 4
+#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT)
+#define ENVCFG_CBIE_ILL _AC(0x0, UL)
+#define ENVCFG_CBIE_FLUSH _AC(0x1, UL)
+#define ENVCFG_CBIE_INV _AC(0x3, UL)
+#define ENVCFG_FIOM _AC(0x1, UL)
+
/* symbolic CSR names: */
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
@@ -252,7 +264,9 @@
#define CSR_HTIMEDELTA 0x605
#define CSR_HCOUNTEREN 0x606
#define CSR_HGEIE 0x607
+#define CSR_HENVCFG 0x60a
#define CSR_HTIMEDELTAH 0x615
+#define CSR_HENVCFGH 0x61a
#define CSR_HTVAL 0x643
#define CSR_HIP 0x644
#define CSR_HVIP 0x645
@@ -264,6 +278,8 @@
#define CSR_MISA 0x301
#define CSR_MIE 0x304
#define CSR_MTVEC 0x305
+#define CSR_MENVCFG 0x30a
+#define CSR_MENVCFGH 0x31a
#define CSR_MSCRATCH 0x340
#define CSR_MEPC 0x341
#define CSR_MCAUSE 0x342
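[Note: a sketch of how the new xENVCFG bit definitions compose into a CSR write. The usage below is illustrative only, assuming RV64 with the H-extension; this patch adds only the definitions:]

	/* Illustrative only: enable Svpbmt for guests and allow cbo.zero,
	 * with cbo.inval demoted to a flush. RV64 single-CSR layout. */
	#include <asm/csr.h>

	static void henvcfg_example(void)
	{
		unsigned long cfg = ENVCFG_PBMTE | ENVCFG_CBZE |
				    (ENVCFG_CBIE_FLUSH << ENVCFG_CBIE_SHIFT);

		csr_write(CSR_HENVCFG, cfg);
	}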
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index cc4f6787f937..f74879a8f1ea 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -23,8 +23,6 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
-#define arch_efi_call_virt(p, f, args...) p->f(args)
-
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
/* Load initrd anywhere in system RAM */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 0f66e368e351..19a771085781 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -70,27 +70,21 @@ asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
*/
#define ALT_THEAD_PMA(_val) \
asm volatile(ALTERNATIVE( \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop", \
- "li t3, %2\n\t" \
- "slli t3, t3, %4\n\t" \
+ __nops(7), \
+ "li t3, %1\n\t" \
+ "slli t3, t3, %3\n\t" \
"and t3, %0, t3\n\t" \
"bne t3, zero, 2f\n\t" \
- "li t3, %3\n\t" \
- "slli t3, t3, %4\n\t" \
+ "li t3, %2\n\t" \
+ "slli t3, t3, %3\n\t" \
"or %0, %0, t3\n\t" \
"2:", THEAD_VENDOR_ID, \
ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
: "+r"(_val) \
- : "0"(_val), \
- "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
"I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
- "I"(ALT_THEAD_PBMT_SHIFT))
+ "I"(ALT_THEAD_PBMT_SHIFT) \
+ : "t3")
#else
#define ALT_THEAD_PMA(_val)
#endif
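[Note: the constraint cleanup above is a correctness fix, not just brevity: the old asm used t3 without declaring it and passed _val twice via a matching "0" constraint. A minimal illustration, not from this patch, of why the clobber matters:]

	/* Without the "t3" clobber, the compiler is free to keep a live
	 * value in t3 across the asm and read back garbage afterwards. */
	static inline unsigned long scratch_t3(unsigned long v)
	{
		asm volatile("li t3, 1\n\t"
			     "or %0, %0, t3"
			     : "+r"(v) : : "t3");
		return v;
	}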
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 319c8aeb42af..60c517e4d576 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -14,7 +14,9 @@
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/csr.h>
+#include <asm/hwcap.h>
#include <asm/kvm_vcpu_fp.h>
+#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_timer.h>
#define KVM_MAX_VCPUS 1024
@@ -63,6 +65,8 @@ struct kvm_vcpu_stat {
u64 wfi_exit_stat;
u64 mmio_exit_user;
u64 mmio_exit_kernel;
+ u64 csr_exit_user;
+ u64 csr_exit_kernel;
u64 exits;
};
@@ -90,14 +94,6 @@ struct kvm_arch {
struct kvm_guest_timer timer;
};
-struct kvm_mmio_decode {
- unsigned long insn;
- int insn_len;
- int len;
- int shift;
- int return_handled;
-};
-
struct kvm_sbi_context {
int return_handled;
};
@@ -170,7 +166,7 @@ struct kvm_vcpu_arch {
int last_exit_cpu;
/* ISA feature bits (similar to MISA) */
- unsigned long isa;
+ DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
unsigned long host_sscratch;
@@ -216,6 +212,9 @@ struct kvm_vcpu_arch {
/* MMIO instruction details */
struct kvm_mmio_decode mmio_decode;
+ /* CSR instruction details */
+ struct kvm_csr_decode csr_decode;
+
/* SBI context */
struct kvm_sbi_context sbi_context;
@@ -285,6 +284,11 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask);
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+ phys_addr_t hpa, unsigned long size,
+ bool writable, bool in_atomic);
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
+ unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write);
@@ -303,14 +307,12 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
void __kvm_riscv_unpriv_trap(void);
-void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
bool read_insn,
unsigned long guest_addr,
struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
struct kvm_cpu_trap *trap);
-int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap);
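[Note: with vcpu->arch.isa now a bitmap keyed by RISCV_ISA_EXT_* IDs rather than a single mask word, feature tests become bit tests. A sketch of a converted caller, assuming the existing riscv_isa_extension_available() helper from <asm/hwcap.h>; the wrapper function is illustrative:]

	/* Old: vcpu->arch.isa & riscv_isa_extension_mask(f)
	 * New: bit test against the per-VCPU ISA bitmap. */
	#include <asm/hwcap.h>

	static void fp_save_example(struct kvm_vcpu *vcpu)
	{
		if (riscv_isa_extension_available(vcpu->arch.isa, f))
			kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
						     vcpu->arch.isa);
	}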
diff --git a/arch/riscv/include/asm/kvm_vcpu_fp.h b/arch/riscv/include/asm/kvm_vcpu_fp.h
index 4da9b8e0f050..b5540147409f 100644
--- a/arch/riscv/include/asm/kvm_vcpu_fp.h
+++ b/arch/riscv/include/asm/kvm_vcpu_fp.h
@@ -22,9 +22,9 @@ void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
- unsigned long isa);
+ const unsigned long *isa);
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
- unsigned long isa);
+ const unsigned long *isa);
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
#else
@@ -32,12 +32,12 @@ static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
- unsigned long isa)
+ const unsigned long *isa)
{
}
static inline void kvm_riscv_vcpu_guest_fp_restore(
struct kvm_cpu_context *cntx,
- unsigned long isa)
+ const unsigned long *isa)
{
}
static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
diff --git a/arch/riscv/include/asm/kvm_vcpu_insn.h b/arch/riscv/include/asm/kvm_vcpu_insn.h
new file mode 100644
index 000000000000..350011c83581
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_insn.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef __KVM_VCPU_RISCV_INSN_H
+#define __KVM_VCPU_RISCV_INSN_H
+
+struct kvm_vcpu;
+struct kvm_run;
+struct kvm_cpu_trap;
+
+struct kvm_mmio_decode {
+ unsigned long insn;
+ int insn_len;
+ int len;
+ int shift;
+ int return_handled;
+};
+
+struct kvm_csr_decode {
+ unsigned long insn;
+ int return_handled;
+};
+
+/* Return values used by function emulating a particular instruction */
+enum kvm_insn_return {
+ KVM_INSN_EXIT_TO_USER_SPACE = 0,
+ KVM_INSN_CONTINUE_NEXT_SEPC,
+ KVM_INSN_CONTINUE_SAME_SEPC,
+ KVM_INSN_ILLEGAL_TRAP,
+ KVM_INSN_VIRTUAL_TRAP
+};
+
+void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_cpu_trap *trap);
+
+int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst);
+int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst);
+int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+#endif
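[Note: the kvm_insn_return values define the contract between an emulation callback and the exit handler. A hedged sketch of a handler honoring that contract; the function and the insn_is_wfi() helper are assumptions for illustration, not part of this header:]

	/* Hypothetical callback: emulate WFI, reject everything else. */
	static int example_insn_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
					ulong insn)
	{
		if (insn_is_wfi(insn)) {	/* assumed helper */
			kvm_riscv_vcpu_wfi(vcpu);
			return KVM_INSN_CONTINUE_NEXT_SEPC; /* sepc += insn_len */
		}
		return KVM_INSN_ILLEGAL_TRAP;	/* redirect to guest as illegal */
	}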
diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h
index 375281eb49e0..50138e2eb91b 100644
--- a/arch/riscv/include/asm/kvm_vcpu_timer.h
+++ b/arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -39,6 +39,6 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
-int kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_guest_timer_init(struct kvm *kvm);
#endif
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 1526e410e802..ac70b0fd9a9a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -167,7 +167,6 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
-#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
index 7fd52a30e605..cc2a184cfc2e 100644
--- a/arch/riscv/include/asm/pci.h
+++ b/arch/riscv/include/asm/pci.h
@@ -12,31 +12,10 @@
#include <asm/io.h>
-#define PCIBIOS_MIN_IO 0
-#define PCIBIOS_MIN_MEM 0
-
-/* RISC-V shim does not initialize PCI bus */
-#define pcibios_assign_all_busses() 1
-
-#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
-
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- /* no legacy IRQ on risc-v */
- return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- /* always show the domain in /proc */
- return 1;
-}
-
-#ifdef CONFIG_NUMA
+#define PCIBIOS_MIN_IO 4
+#define PCIBIOS_MIN_MEM 16
+#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
static inline int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);
@@ -46,8 +25,9 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
#endif
-#endif /* CONFIG_NUMA */
+#endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
#endif /* _ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 5c2aba5efbd0..dc42375c2357 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -175,7 +175,7 @@ static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
static inline unsigned long _pud_pfn(pud_t pud)
{
- return pud_val(pud) >> _PAGE_PFN_SHIFT;
+ return __page_val_to_pfn(pud_val(pud));
}
static inline pmd_t *pud_pgtable(pud_t pud)
@@ -278,13 +278,13 @@ static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
static inline unsigned long _p4d_pfn(p4d_t p4d)
{
- return p4d_val(p4d) >> _PAGE_PFN_SHIFT;
+ return __page_val_to_pfn(p4d_val(p4d));
}
static inline pud_t *p4d_pgtable(p4d_t p4d)
{
if (pgtable_l4_enabled)
- return (pud_t *)pfn_to_virt(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+ return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));
return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
}
@@ -292,7 +292,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
static inline struct page *p4d_page(p4d_t p4d)
{
- return pfn_to_page(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+ return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
}
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
@@ -347,7 +347,7 @@ static inline void pgd_clear(pgd_t *pgd)
static inline p4d_t *pgd_pgtable(pgd_t pgd)
{
if (pgtable_l5_enabled)
- return (p4d_t *)pfn_to_virt(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+ return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd)));
return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
}
@@ -355,7 +355,7 @@ static inline p4d_t *pgd_pgtable(pgd_t pgd)
static inline struct page *pgd_page(pgd_t pgd)
{
- return pfn_to_page(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+ return pfn_to_page(__page_val_to_pfn(pgd_val(pgd)));
}
#define pgd_page(pgd) pgd_page(pgd)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 1d1be9d9419c..7ec936910a96 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -186,26 +186,6 @@ extern struct pt_alloc_ops pt_ops __initdata;
extern pgd_t swapper_pg_dir[];
-/* MAP_PRIVATE permissions: xwr (copy-on-write) */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_EXEC
-#define __P101 PAGE_READ_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_READ_EXEC
-
-/* MAP_SHARED permissions: xwr */
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXEC
-#define __S101 PAGE_READ_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
@@ -261,7 +241,7 @@ static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
static inline unsigned long _pgd_pfn(pgd_t pgd)
{
- return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
+ return __page_val_to_pfn(pgd_val(pgd));
}
static inline struct page *pmd_page(pmd_t pmd)
@@ -590,14 +570,14 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}
-#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+#define __pmd_to_phys(pmd) (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}
-#define __pud_to_phys(pud) (pud_val(pud) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
static inline unsigned long pud_pfn(pud_t pud)
{
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 21c8072dce17..19eedd4af4cd 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -79,8 +79,8 @@ static inline void wait_for_interrupt(void)
}
struct device_node;
-int riscv_of_processor_hartid(struct device_node *node);
-int riscv_of_parent_hartid(struct device_node *node);
+int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
+int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);
extern void riscv_fill_hwcap(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 23170c933d73..d3443be7eedc 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -42,7 +42,7 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask);
/* Hook for the generic smp_call_function_single() routine. */
void arch_send_call_function_single_ipi(int cpu);
-int riscv_hartid_to_cpuid(int hartid);
+int riscv_hartid_to_cpuid(unsigned long hartid);
/* Set custom IPI operations */
void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
@@ -70,7 +70,7 @@ static inline void show_ipi_stats(struct seq_file *p, int prec)
{
}
-static inline int riscv_hartid_to_cpuid(int hartid)
+static inline int riscv_hartid_to_cpuid(unsigned long hartid)
{
if (hartid == boot_cpu_hartid)
return 0;
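[Note: hart IDs are XLEN bits wide, so an int can truncate them on rv64. A small sketch of the failure mode this widening avoids; the hart ID value is hypothetical:]

	/* Hypothetical rv64 platform exposing a hart ID above 2^31: */
	unsigned long hartid = 0x100000000UL;	/* from the DT "reg" property */
	int truncated = (int)hartid;		/* == 0: collides with hart 0 */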
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 6119368ba6d5..24b2a6e27698 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -96,6 +96,7 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_H,
KVM_RISCV_ISA_EXT_I,
KVM_RISCV_ISA_EXT_M,
+ KVM_RISCV_ISA_EXT_SVPBMT,
KVM_RISCV_ISA_EXT_MAX,
};
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index c71d6591d539..33bb60a354cd 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -78,7 +78,7 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o
endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o
obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 0365557f7122..76a2a225e3d9 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -14,37 +14,36 @@
* Returns the hart ID of the given device tree node, or -ENODEV if the node
* isn't an enabled and valid RISC-V hart node.
*/
-int riscv_of_processor_hartid(struct device_node *node)
+int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
const char *isa;
- u32 hart;
if (!of_device_is_compatible(node, "riscv")) {
pr_warn("Found incompatible CPU\n");
return -ENODEV;
}
- hart = of_get_cpu_hwid(node, 0);
- if (hart == ~0U) {
+ *hart = (unsigned long) of_get_cpu_hwid(node, 0);
+ if (*hart == ~0UL) {
pr_warn("Found CPU without hart ID\n");
return -ENODEV;
}
if (!of_device_is_available(node)) {
- pr_info("CPU with hartid=%d is not available\n", hart);
+ pr_info("CPU with hartid=%lu is not available\n", *hart);
return -ENODEV;
}
if (of_property_read_string(node, "riscv,isa", &isa)) {
- pr_warn("CPU with hartid=%d has no \"riscv,isa\" property\n", hart);
+ pr_warn("CPU with hartid=%lu has no \"riscv,isa\" property\n", *hart);
return -ENODEV;
}
if (isa[0] != 'r' || isa[1] != 'v') {
- pr_warn("CPU with hartid=%d has an invalid ISA of \"%s\"\n", hart, isa);
+ pr_warn("CPU with hartid=%lu has an invalid ISA of \"%s\"\n", *hart, isa);
return -ENODEV;
}
- return hart;
+ return 0;
}
/*
@@ -53,11 +52,16 @@ int riscv_of_processor_hartid(struct device_node *node)
* To achieve this, we walk up the DT tree until we find an active
* RISC-V core (HART) node and extract the cpuid from it.
*/
-int riscv_of_parent_hartid(struct device_node *node)
+int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
{
+ int rc;
+
for (; node; node = node->parent) {
- if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hartid(node);
+ if (of_device_is_compatible(node, "riscv")) {
+ rc = riscv_of_processor_hartid(node, hartid);
+ if (!rc)
+ return 0;
+ }
}
return -1;
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 4f5a6f84e2a4..efa0f0816634 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -65,7 +65,7 @@ static int sbi_hsm_hart_get_status(unsigned long hartid)
static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
- int hartid = cpuid_to_hartid_map(cpuid);
+ unsigned long hartid = cpuid_to_hartid_map(cpuid);
unsigned long hsm_data;
struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
@@ -107,7 +107,7 @@ static void sbi_cpu_stop(void)
static int sbi_cpu_is_stopped(unsigned int cpuid)
{
int rc;
- int hartid = cpuid_to_hartid_map(cpuid);
+ unsigned long hartid = cpuid_to_hartid_map(cpuid);
rc = sbi_hsm_hart_get_status(hartid);
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index 346847f6c41c..3ade9152a3c7 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -18,7 +18,7 @@ void *__cpu_spinwait_task_pointer[NR_CPUS] __section(".data");
static void cpu_update_secondary_bootdata(unsigned int cpuid,
struct task_struct *tidle)
{
- int hartid = cpuid_to_hartid_map(cpuid);
+ unsigned long hartid = cpuid_to_hartid_map(cpuid);
/*
* The hartid must be less than NR_CPUS to avoid out-of-bound access
@@ -27,7 +27,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
* spinwait booting is not the recommended approach for any platforms
* booting Linux in S-mode and can be disabled in the future.
*/
- if (hartid == INVALID_HARTID || hartid >= NR_CPUS)
+ if (hartid == INVALID_HARTID || hartid >= (unsigned long) NR_CPUS)
return;
/* Make sure tidle is updated */
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 1e33beda4f01..f914e8da157a 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -73,8 +73,9 @@ void __init riscv_fill_hwcap(void)
struct device_node *node;
const char *isa;
char print_str[NUM_ALPHA_EXTS + 1];
- int i, j;
+ int i, j, rc;
static unsigned long isa2hwcap[256] = {0};
+ unsigned long hartid;
isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
@@ -92,7 +93,8 @@ void __init riscv_fill_hwcap(void)
DECLARE_BITMAP(this_isa, RISCV_ISA_EXT_MAX);
const char *temp;
- if (riscv_of_processor_hartid(node) < 0)
+ rc = riscv_of_processor_hartid(node, &hartid);
+ if (rc < 0)
continue;
if (of_property_read_string(node, "riscv,isa", &isa)) {
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 9cb85095fd45..0cb94992c15b 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -349,7 +349,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
{
const char *strtab, *name, *shstrtab;
const Elf_Shdr *sechdrs;
- Elf_Rela *relas;
+ Elf64_Rela *relas;
int i, r_type;
/* String & section header string table */
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 2e5b88ca11ce..b9eda3fcbd6d 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -111,12 +111,12 @@ _save_context:
call __trace_hardirqs_off
#endif
-#ifdef CONFIG_CONTEXT_TRACKING
- /* If previous state is in user mode, call context_tracking_user_exit. */
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ /* If previous state is in user mode, call user_exit_callable(). */
li a0, SR_PP
and a0, s1, a0
bnez a0, skip_context_tracking
- call context_tracking_user_exit
+ call user_exit_callable
skip_context_tracking:
#endif
@@ -176,7 +176,7 @@ handle_syscall:
*/
csrs CSR_STATUS, SR_IE
#endif
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
/* Recover a0 - a7 for system calls */
REG_L a0, PT_A0(sp)
REG_L a1, PT_A1(sp)
@@ -269,8 +269,8 @@ resume_userspace:
andi s1, s0, _TIF_WORK_MASK
bnez s1, work_pending
-#ifdef CONFIG_CONTEXT_TRACKING
- call context_tracking_user_enter
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ call user_enter_callable
#endif
/* Save unwound kernel stack pointer in thread_info */
diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
index 20e09056d141..e6694759dbd0 100644
--- a/arch/riscv/kernel/jump_label.c
+++ b/arch/riscv/kernel/jump_label.c
@@ -39,15 +39,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
patch_text_nosync(addr, &insn, sizeof(insn));
mutex_unlock(&text_mutex);
}
-
-void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type)
-{
- /*
- * We use the same instructions in the arch_static_branch and
- * arch_static_branch_jump inline functions, so there's no
- * need to patch them up here.
- * The core will call arch_jump_label_transform when those
- * instructions need to be replaced.
- */
-}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index b5d30ea92292..018e7dc45df6 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -47,7 +47,7 @@ static struct {
unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
-int riscv_hartid_to_cpuid(int hartid)
+int riscv_hartid_to_cpuid(unsigned long hartid)
{
int i;
@@ -55,7 +55,7 @@ int riscv_hartid_to_cpuid(int hartid)
if (cpuid_to_hartid_map(i) == hartid)
return i;
- pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+ pr_err("Couldn't find cpu id for hartid [%lu]\n", hartid);
return -ENOENT;
}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index f1e4948a4b52..a752c7b41683 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -72,15 +72,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __init setup_smp(void)
{
struct device_node *dn;
- int hart;
+ unsigned long hart;
bool found_boot_cpu = false;
int cpuid = 1;
+ int rc;
cpu_set_ops(0);
for_each_of_cpu_node(dn) {
- hart = riscv_of_processor_hartid(dn);
- if (hart < 0)
+ rc = riscv_of_processor_hartid(dn, &hart);
+ if (rc < 0)
continue;
if (hart == cpuid_to_hartid_map(0)) {
@@ -90,7 +91,7 @@ void __init setup_smp(void)
continue;
}
if (cpuid >= NR_CPUS) {
- pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
+ pr_warn("Invalid cpuid [%d] for hartid [%lu]\n",
cpuid, hart);
continue;
}
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 9c0194f176fc..571556bb9261 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -18,9 +18,8 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
- if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
- if (unlikely(!(prot & PROT_READ)))
- return -EINVAL;
+ if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
+ return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index b40426509244..39d0f8bba4b4 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
+#include <linux/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/bug.h>
@@ -44,6 +45,9 @@ void die(struct pt_regs *regs, const char *str)
ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV);
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index e5c56182f48f..019df9208bdd 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -17,6 +17,7 @@ kvm-y += mmu.o
kvm-y += vcpu.o
kvm-y += vcpu_exit.o
kvm-y += vcpu_fp.o
+kvm-y += vcpu_insn.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_sbi.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 1c00695ebee7..3a35b2d95697 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -54,7 +54,7 @@ static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
{
- return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
+ return (unsigned long)pfn_to_virt(__page_val_to_pfn(pte_val(pte)));
}
static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
@@ -343,23 +343,24 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
kvm_flush_remote_tlbs(kvm);
}
-static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
- unsigned long size, bool writable)
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+ phys_addr_t hpa, unsigned long size,
+ bool writable, bool in_atomic)
{
pte_t pte;
int ret = 0;
unsigned long pfn;
phys_addr_t addr, end;
- struct kvm_mmu_memory_cache pcache;
-
- memset(&pcache, 0, sizeof(pcache));
- pcache.gfp_zero = __GFP_ZERO;
+ struct kvm_mmu_memory_cache pcache = {
+ .gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
+ .gfp_zero = __GFP_ZERO,
+ };
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
for (addr = gpa; addr < end; addr += PAGE_SIZE) {
- pte = pfn_pte(pfn, PAGE_KERNEL);
+ pte = pfn_pte(pfn, PAGE_KERNEL_IO);
if (!writable)
pte = pte_wrprotect(pte);
@@ -382,6 +383,13 @@ out:
return ret;
}
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
+{
+ spin_lock(&kvm->mmu_lock);
+ gstage_unmap_range(kvm, gpa, size, false);
+ spin_unlock(&kvm->mmu_lock);
+}
+
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
@@ -517,8 +525,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
goto out;
}
- ret = gstage_ioremap(kvm, gpa, pa,
- vm_end - vm_start, writable);
+ ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
+ vm_end - vm_start,
+ writable, false);
if (ret)
break;
}
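[Note: kvm_riscv_gstage_ioremap() is now a public helper with an in_atomic flag. A hedged sketch of a caller that cannot sleep; the wrapper itself is hypothetical:]

	/* Hypothetical: map one read-only page into the guest physical
	 * address space from atomic context. in_atomic = true makes the
	 * page-table memory cache use GFP_ATOMIC | __GFP_ACCOUNT. */
	static int map_device_window(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa)
	{
		return kvm_riscv_gstage_ioremap(kvm, gpa, hpa, PAGE_SIZE,
						false, true);
	}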
@@ -611,7 +620,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
{
int ret;
kvm_pfn_t hfn;
- bool writeable;
+ bool writable;
short vma_pageshift;
gfn_t gfn = gpa >> PAGE_SHIFT;
struct vm_area_struct *vma;
@@ -659,7 +668,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
- hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
+ hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
vma_pageshift, current);
@@ -673,14 +682,14 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
* for write faults.
*/
if (logging && !is_write)
- writeable = false;
+ writable = false;
spin_lock(&kvm->mmu_lock);
if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock;
- if (writeable) {
+ if (writable) {
kvm_set_pfn_dirty(hfn);
mark_page_dirty(kvm, gfn);
ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 7f4ad5e4373a..5d271b597613 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -26,6 +26,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
STATS_DESC_COUNTER(VCPU, mmio_exit_user),
STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, csr_exit_user),
+ STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
STATS_DESC_COUNTER(VCPU, exits)
};
@@ -38,16 +40,58 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
-#define KVM_RISCV_ISA_DISABLE_ALLOWED (riscv_isa_extension_mask(d) | \
- riscv_isa_extension_mask(f))
+#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
-#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED (riscv_isa_extension_mask(a) | \
- riscv_isa_extension_mask(c) | \
- riscv_isa_extension_mask(i) | \
- riscv_isa_extension_mask(m))
+/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
+static const unsigned long kvm_isa_ext_arr[] = {
+ RISCV_ISA_EXT_a,
+ RISCV_ISA_EXT_c,
+ RISCV_ISA_EXT_d,
+ RISCV_ISA_EXT_f,
+ RISCV_ISA_EXT_h,
+ RISCV_ISA_EXT_i,
+ RISCV_ISA_EXT_m,
+ RISCV_ISA_EXT_SVPBMT,
+};
-#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
- KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
+static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
+{
+ unsigned long i;
+
+ for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+ if (kvm_isa_ext_arr[i] == base_ext)
+ return i;
+ }
+
+ return KVM_RISCV_ISA_EXT_MAX;
+}
+
+static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
+{
+ switch (ext) {
+ case KVM_RISCV_ISA_EXT_H:
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
+{
+ switch (ext) {
+ case KVM_RISCV_ISA_EXT_A:
+ case KVM_RISCV_ISA_EXT_C:
+ case KVM_RISCV_ISA_EXT_I:
+ case KVM_RISCV_ISA_EXT_M:
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
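
KVM_RISCV_BASE_ISA_MASK is GENMASK(25, 0) because the base ISA word exposes one bit per single-letter extension, 'a' through 'z'. A quick stand-alone check of that letter-to-bit mapping (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long base_isa_mask = (1UL << 26) - 1;	/* GENMASK(25, 0) */
		const char *exts = "acdfhim";
		const char *p;

		for (p = exts; *p; p++)
			printf("'%c' -> bit %d (0x%lx)\n",
			       *p, *p - 'a', 1UL << (*p - 'a'));
		printf("base ISA mask = 0x%lx\n", base_isa_mask);
		return 0;
	}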
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
@@ -99,13 +143,20 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
+ unsigned long host_isa, i;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+ bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
/* Setup ISA features available to VCPU */
- vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
+ for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
+ host_isa = kvm_isa_ext_arr[i];
+ if (__riscv_isa_extension_available(NULL, host_isa) &&
+ kvm_riscv_vcpu_isa_enable_allowed(i))
+ set_bit(host_isa, vcpu->arch.isa);
+ }
/* Setup VCPU hfence queue */
spin_lock_init(&vcpu->arch.hfence_lock);
@@ -199,7 +250,7 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
- reg_val = vcpu->arch.isa;
+ reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
break;
default:
return -EINVAL;
@@ -219,7 +270,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
- unsigned long reg_val;
+ unsigned long i, isa_ext, reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
@@ -227,13 +278,32 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
+ /* This ONE REG interface is only defined for single letter extensions */
+ if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+ return -EINVAL;
+
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
if (!vcpu->arch.ran_atleast_once) {
- /* Ignore the disable request for these extensions */
- vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
- vcpu->arch.isa &= riscv_isa_extension_base(NULL);
- vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+ /* Ignore the enable/disable request for certain extensions */
+ for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
+ isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
+ if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
+ reg_val &= ~BIT(i);
+ continue;
+ }
+ if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
+ if (reg_val & BIT(i))
+ reg_val &= ~BIT(i);
+ if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
+ if (!(reg_val & BIT(i)))
+ reg_val |= BIT(i);
+ }
+ reg_val &= riscv_isa_extension_base(NULL);
+ /* Do not modify anything beyond single letter extensions */
+ reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
+ (reg_val & KVM_RISCV_BASE_ISA_MASK);
+ vcpu->arch.isa[0] = reg_val;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
@@ -374,17 +444,6 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
return 0;
}
-/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
-static unsigned long kvm_isa_ext_arr[] = {
- RISCV_ISA_EXT_a,
- RISCV_ISA_EXT_c,
- RISCV_ISA_EXT_d,
- RISCV_ISA_EXT_f,
- RISCV_ISA_EXT_h,
- RISCV_ISA_EXT_i,
- RISCV_ISA_EXT_m,
-};
-
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -399,11 +458,12 @@ static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+ if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+ reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
host_isa_ext = kvm_isa_ext_arr[reg_num];
- if (__riscv_isa_extension_available(&vcpu->arch.isa, host_isa_ext))
+ if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
reg_val = 1; /* Mark the given extension as available */
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
@@ -422,12 +482,12 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val;
unsigned long host_isa_ext;
- unsigned long host_isa_ext_mask;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+ if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+ reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
@@ -437,30 +497,19 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
return -EOPNOTSUPP;
- if (host_isa_ext >= RISCV_ISA_EXT_BASE &&
- host_isa_ext < RISCV_ISA_EXT_MAX) {
+ if (!vcpu->arch.ran_atleast_once) {
/*
- * Multi-letter ISA extension. Currently there is no provision
- * to enable/disable the multi-letter ISA extensions for guests.
- * Return success if the request is to enable any ISA extension
- * that is available in the hardware.
- * Return -EOPNOTSUPP otherwise.
+			 * All multi-letter extensions and a few single letter
+			 * extensions can be disabled
*/
- if (!reg_val)
- return -EOPNOTSUPP;
+ if (reg_val == 1 &&
+ kvm_riscv_vcpu_isa_enable_allowed(reg_num))
+ set_bit(host_isa_ext, vcpu->arch.isa);
+ else if (!reg_val &&
+ kvm_riscv_vcpu_isa_disable_allowed(reg_num))
+ clear_bit(host_isa_ext, vcpu->arch.isa);
else
- return 0;
- }
-
- /* Single letter base ISA extension */
- if (!vcpu->arch.ran_atleast_once) {
- host_isa_ext_mask = BIT_MASK(host_isa_ext);
- if (!reg_val && (host_isa_ext_mask & KVM_RISCV_ISA_DISABLE_ALLOWED))
- vcpu->arch.isa &= ~host_isa_ext_mask;
- else
- vcpu->arch.isa |= host_isa_ext_mask;
- vcpu->arch.isa &= riscv_isa_extension_base(NULL);
- vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+ return -EINVAL;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
@@ -729,6 +778,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
+{
+ u64 henvcfg = 0;
+
+ if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
+ henvcfg |= ENVCFG_PBMTE;
+
+ csr_write(CSR_HENVCFG, henvcfg);
+#ifdef CONFIG_32BIT
+ csr_write(CSR_HENVCFGH, henvcfg >> 32);
+#endif
+}
+
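
henvcfg is a 64-bit CSR, so on rv32 its upper half is written through the separate HENVCFGH register, which is what the CONFIG_32BIT branch above does. The split itself is plain shift arithmetic (sketch; ENVCFG_PBMTE is taken here to be bit 62, and plain variables stand in for the CSRs):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t henvcfg = 1ULL << 62;	/* assumed ENVCFG_PBMTE position */
		uint32_t lo = (uint32_t)henvcfg;		/* -> CSR_HENVCFG */
		uint32_t hi = (uint32_t)(henvcfg >> 32);	/* -> CSR_HENVCFGH */

		printf("HENVCFG=0x%08x HENVCFGH=0x%08x\n", lo, hi);
		return 0;
	}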
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
@@ -743,6 +805,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_HVIP, csr->hvip);
csr_write(CSR_VSATP, csr->vsatp);
+ kvm_riscv_vcpu_update_config(vcpu->arch.isa);
+
kvm_riscv_gstage_update_hgatp(vcpu);
kvm_riscv_vcpu_timer_restore(vcpu);
@@ -781,9 +845,11 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
+ kvm_vcpu_srcu_read_unlock(vcpu);
rcuwait_wait_event(wait,
(!vcpu->arch.power_off) && (!vcpu->arch.pause),
TASK_INTERRUPTIBLE);
+ kvm_vcpu_srcu_read_lock(vcpu);
if (vcpu->arch.power_off || vcpu->arch.pause) {
/*
@@ -851,22 +917,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_vcpu_srcu_read_lock(vcpu);
- /* Process MMIO value returned from user-space */
- if (run->exit_reason == KVM_EXIT_MMIO) {
+ switch (run->exit_reason) {
+ case KVM_EXIT_MMIO:
+ /* Process MMIO value returned from user-space */
ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
- if (ret) {
- kvm_vcpu_srcu_read_unlock(vcpu);
- return ret;
- }
- }
-
- /* Process SBI value returned from user-space */
- if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
+ break;
+ case KVM_EXIT_RISCV_SBI:
+ /* Process SBI value returned from user-space */
ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
- if (ret) {
- kvm_vcpu_srcu_read_unlock(vcpu);
- return ret;
- }
+ break;
+ case KVM_EXIT_RISCV_CSR:
+ /* Process CSR value returned from user-space */
+ ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ if (ret) {
+ kvm_vcpu_srcu_read_unlock(vcpu);
+ return ret;
}
if (run->immediate_exit) {
@@ -888,8 +958,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_riscv_check_vcpu_requests(vcpu);
- preempt_disable();
-
local_irq_disable();
/*
@@ -926,7 +994,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
- preempt_enable();
kvm_vcpu_srcu_read_lock(vcpu);
continue;
}
@@ -960,6 +1027,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Syncup interrupts state with HW */
kvm_riscv_vcpu_sync_interrupts(vcpu);
+ preempt_disable();
+
/*
* We must ensure that any pending interrupts are taken before
* we exit guest timing so that timer ticks are accounted as
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index dbb09afd7546..d5c36386878a 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -6,435 +6,34 @@
* Anup Patel <anup.patel@wdc.com>
*/
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
-#define INSN_OPCODE_MASK 0x007c
-#define INSN_OPCODE_SHIFT 2
-#define INSN_OPCODE_SYSTEM 28
-
-#define INSN_MASK_WFI 0xffffffff
-#define INSN_MATCH_WFI 0x10500073
-
-#define INSN_MATCH_LB 0x3
-#define INSN_MASK_LB 0x707f
-#define INSN_MATCH_LH 0x1003
-#define INSN_MASK_LH 0x707f
-#define INSN_MATCH_LW 0x2003
-#define INSN_MASK_LW 0x707f
-#define INSN_MATCH_LD 0x3003
-#define INSN_MASK_LD 0x707f
-#define INSN_MATCH_LBU 0x4003
-#define INSN_MASK_LBU 0x707f
-#define INSN_MATCH_LHU 0x5003
-#define INSN_MASK_LHU 0x707f
-#define INSN_MATCH_LWU 0x6003
-#define INSN_MASK_LWU 0x707f
-#define INSN_MATCH_SB 0x23
-#define INSN_MASK_SB 0x707f
-#define INSN_MATCH_SH 0x1023
-#define INSN_MASK_SH 0x707f
-#define INSN_MATCH_SW 0x2023
-#define INSN_MASK_SW 0x707f
-#define INSN_MATCH_SD 0x3023
-#define INSN_MASK_SD 0x707f
-
-#define INSN_MATCH_C_LD 0x6000
-#define INSN_MASK_C_LD 0xe003
-#define INSN_MATCH_C_SD 0xe000
-#define INSN_MASK_C_SD 0xe003
-#define INSN_MATCH_C_LW 0x4000
-#define INSN_MASK_C_LW 0xe003
-#define INSN_MATCH_C_SW 0xc000
-#define INSN_MASK_C_SW 0xe003
-#define INSN_MATCH_C_LDSP 0x6002
-#define INSN_MASK_C_LDSP 0xe003
-#define INSN_MATCH_C_SDSP 0xe002
-#define INSN_MASK_C_SDSP 0xe003
-#define INSN_MATCH_C_LWSP 0x4002
-#define INSN_MASK_C_LWSP 0xe003
-#define INSN_MATCH_C_SWSP 0xc002
-#define INSN_MASK_C_SWSP 0xe003
-
-#define INSN_16BIT_MASK 0x3
-
-#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
-
-#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
-
-#ifdef CONFIG_64BIT
-#define LOG_REGBYTES 3
-#else
-#define LOG_REGBYTES 2
-#endif
-#define REGBYTES (1 << LOG_REGBYTES)
-
-#define SH_RD 7
-#define SH_RS1 15
-#define SH_RS2 20
-#define SH_RS2C 2
-
-#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
-#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
- (RV_X(x, 10, 3) << 3) | \
- (RV_X(x, 5, 1) << 6))
-#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
- (RV_X(x, 5, 2) << 6))
-#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
- (RV_X(x, 12, 1) << 5) | \
- (RV_X(x, 2, 2) << 6))
-#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
- (RV_X(x, 12, 1) << 5) | \
- (RV_X(x, 2, 3) << 6))
-#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
- (RV_X(x, 7, 2) << 6))
-#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
- (RV_X(x, 7, 3) << 6))
-#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
-#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
-#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
-
-#define SHIFT_RIGHT(x, y) \
- ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
-
-#define REG_MASK \
- ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
-
-#define REG_OFFSET(insn, pos) \
- (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
-
-#define REG_PTR(insn, pos, regs) \
- ((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
-
-#define GET_RM(insn) (((insn) >> 12) & 7)
-
-#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
-#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
-#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
-#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
-#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
-#define GET_SP(regs) (*REG_PTR(2, 0, regs))
-#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
-#define IMM_I(insn) ((s32)(insn) >> 20)
-#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
- (s32)(((insn) >> 7) & 0x1f))
-#define MASK_FUNCT3 0x7000
-
-static int truly_illegal_insn(struct kvm_vcpu *vcpu,
- struct kvm_run *run,
- ulong insn)
-{
- struct kvm_cpu_trap utrap = { 0 };
-
- /* Redirect trap to Guest VCPU */
- utrap.sepc = vcpu->arch.guest_context.sepc;
- utrap.scause = EXC_INST_ILLEGAL;
- utrap.stval = insn;
- kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-
- return 1;
-}
-
-static int system_opcode_insn(struct kvm_vcpu *vcpu,
- struct kvm_run *run,
- ulong insn)
-{
- if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
- vcpu->stat.wfi_exit_stat++;
- kvm_riscv_vcpu_wfi(vcpu);
- vcpu->arch.guest_context.sepc += INSN_LEN(insn);
- return 1;
- }
-
- return truly_illegal_insn(vcpu, run, insn);
-}
-
-static int virtual_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_cpu_trap *trap)
-{
- unsigned long insn = trap->stval;
- struct kvm_cpu_trap utrap = { 0 };
- struct kvm_cpu_context *ct;
-
- if (unlikely(INSN_IS_16BIT(insn))) {
- if (insn == 0) {
- ct = &vcpu->arch.guest_context;
- insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
- ct->sepc,
- &utrap);
- if (utrap.scause) {
- utrap.sepc = ct->sepc;
- kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
- return 1;
- }
- }
- if (INSN_IS_16BIT(insn))
- return truly_illegal_insn(vcpu, run, insn);
- }
-
- switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
- case INSN_OPCODE_SYSTEM:
- return system_opcode_insn(vcpu, run, insn);
- default:
- return truly_illegal_insn(vcpu, run, insn);
- }
-}
-
-static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
- unsigned long fault_addr, unsigned long htinst)
-{
- u8 data_buf[8];
- unsigned long insn;
- int shift = 0, len = 0, insn_len = 0;
- struct kvm_cpu_trap utrap = { 0 };
- struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
-
- /* Determine trapped instruction */
- if (htinst & 0x1) {
- /*
- * Bit[0] == 1 implies trapped instruction value is
- * transformed instruction or custom instruction.
- */
- insn = htinst | INSN_16BIT_MASK;
- insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
- } else {
- /*
- * Bit[0] == 0 implies trapped instruction value is
- * zero or special value.
- */
- insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
- &utrap);
- if (utrap.scause) {
- /* Redirect trap if we failed to read instruction */
- utrap.sepc = ct->sepc;
- kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
- return 1;
- }
- insn_len = INSN_LEN(insn);
- }
-
- /* Decode length of MMIO and shift */
- if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
- len = 4;
- shift = 8 * (sizeof(ulong) - len);
- } else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
- len = 1;
- shift = 8 * (sizeof(ulong) - len);
- } else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
- len = 1;
- shift = 8 * (sizeof(ulong) - len);
-#ifdef CONFIG_64BIT
- } else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
- len = 8;
- shift = 8 * (sizeof(ulong) - len);
- } else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
- len = 4;
-#endif
- } else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
- len = 2;
- shift = 8 * (sizeof(ulong) - len);
- } else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
- len = 2;
-#ifdef CONFIG_64BIT
- } else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
- len = 8;
- shift = 8 * (sizeof(ulong) - len);
- insn = RVC_RS2S(insn) << SH_RD;
- } else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
- ((insn >> SH_RD) & 0x1f)) {
- len = 8;
- shift = 8 * (sizeof(ulong) - len);
-#endif
- } else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
- len = 4;
- shift = 8 * (sizeof(ulong) - len);
- insn = RVC_RS2S(insn) << SH_RD;
- } else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
- ((insn >> SH_RD) & 0x1f)) {
- len = 4;
- shift = 8 * (sizeof(ulong) - len);
- } else {
- return -EOPNOTSUPP;
- }
-
- /* Fault address should be aligned to length of MMIO */
- if (fault_addr & (len - 1))
- return -EIO;
-
- /* Save instruction decode info */
- vcpu->arch.mmio_decode.insn = insn;
- vcpu->arch.mmio_decode.insn_len = insn_len;
- vcpu->arch.mmio_decode.shift = shift;
- vcpu->arch.mmio_decode.len = len;
- vcpu->arch.mmio_decode.return_handled = 0;
-
- /* Update MMIO details in kvm_run struct */
- run->mmio.is_write = false;
- run->mmio.phys_addr = fault_addr;
- run->mmio.len = len;
-
- /* Try to handle MMIO access in the kernel */
- if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
- /* Successfully handled MMIO access in the kernel so resume */
- memcpy(run->mmio.data, data_buf, len);
- vcpu->stat.mmio_exit_kernel++;
- kvm_riscv_vcpu_mmio_return(vcpu, run);
- return 1;
- }
-
- /* Exit to userspace for MMIO emulation */
- vcpu->stat.mmio_exit_user++;
- run->exit_reason = KVM_EXIT_MMIO;
-
- return 0;
-}
-
-static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
- unsigned long fault_addr, unsigned long htinst)
-{
- u8 data8;
- u16 data16;
- u32 data32;
- u64 data64;
- ulong data;
- unsigned long insn;
- int len = 0, insn_len = 0;
- struct kvm_cpu_trap utrap = { 0 };
- struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
-
- /* Determine trapped instruction */
- if (htinst & 0x1) {
- /*
- * Bit[0] == 1 implies trapped instruction value is
- * transformed instruction or custom instruction.
- */
- insn = htinst | INSN_16BIT_MASK;
- insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
- } else {
- /*
- * Bit[0] == 0 implies trapped instruction value is
- * zero or special value.
- */
- insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
- &utrap);
- if (utrap.scause) {
- /* Redirect trap if we failed to read instruction */
- utrap.sepc = ct->sepc;
- kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
- return 1;
- }
- insn_len = INSN_LEN(insn);
- }
-
- data = GET_RS2(insn, &vcpu->arch.guest_context);
- data8 = data16 = data32 = data64 = data;
-
- if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
- len = 4;
- } else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
- len = 1;
-#ifdef CONFIG_64BIT
- } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
- len = 8;
-#endif
- } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
- len = 2;
-#ifdef CONFIG_64BIT
- } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
- len = 8;
- data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
- } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
- ((insn >> SH_RD) & 0x1f)) {
- len = 8;
- data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
-#endif
- } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
- len = 4;
- data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
- } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
- ((insn >> SH_RD) & 0x1f)) {
- len = 4;
- data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
- } else {
- return -EOPNOTSUPP;
- }
-
- /* Fault address should be aligned to length of MMIO */
- if (fault_addr & (len - 1))
- return -EIO;
-
- /* Save instruction decode info */
- vcpu->arch.mmio_decode.insn = insn;
- vcpu->arch.mmio_decode.insn_len = insn_len;
- vcpu->arch.mmio_decode.shift = 0;
- vcpu->arch.mmio_decode.len = len;
- vcpu->arch.mmio_decode.return_handled = 0;
-
- /* Copy data to kvm_run instance */
- switch (len) {
- case 1:
- *((u8 *)run->mmio.data) = data8;
- break;
- case 2:
- *((u16 *)run->mmio.data) = data16;
- break;
- case 4:
- *((u32 *)run->mmio.data) = data32;
- break;
- case 8:
- *((u64 *)run->mmio.data) = data64;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- /* Update MMIO details in kvm_run struct */
- run->mmio.is_write = true;
- run->mmio.phys_addr = fault_addr;
- run->mmio.len = len;
-
- /* Try to handle MMIO access in the kernel */
- if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
- fault_addr, len, run->mmio.data)) {
- /* Successfully handled MMIO access in the kernel so resume */
- vcpu->stat.mmio_exit_kernel++;
- kvm_riscv_vcpu_mmio_return(vcpu, run);
- return 1;
- }
-
- /* Exit to userspace for MMIO emulation */
- vcpu->stat.mmio_exit_user++;
- run->exit_reason = KVM_EXIT_MMIO;
-
- return 0;
-}
-
static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
struct kvm_memory_slot *memslot;
unsigned long hva, fault_addr;
- bool writeable;
+ bool writable;
gfn_t gfn;
int ret;
fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
gfn = fault_addr >> PAGE_SHIFT;
memslot = gfn_to_memslot(vcpu->kvm, gfn);
- hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
+ hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
if (kvm_is_error_hva(hva) ||
- (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writeable)) {
+ (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
switch (trap->scause) {
case EXC_LOAD_GUEST_PAGE_FAULT:
- return emulate_load(vcpu, run, fault_addr,
- trap->htinst);
+ return kvm_riscv_vcpu_mmio_load(vcpu, run,
+ fault_addr,
+ trap->htinst);
case EXC_STORE_GUEST_PAGE_FAULT:
- return emulate_store(vcpu, run, fault_addr,
- trap->htinst);
+ return kvm_riscv_vcpu_mmio_store(vcpu, run,
+ fault_addr,
+ trap->htinst);
default:
return -EOPNOTSUPP;
};
@@ -449,21 +48,6 @@ static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
/**
- * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
- *
- * @vcpu: The VCPU pointer
- */
-void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
-{
- if (!kvm_arch_vcpu_runnable(vcpu)) {
- kvm_vcpu_srcu_read_unlock(vcpu);
- kvm_vcpu_halt(vcpu);
- kvm_vcpu_srcu_read_lock(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- }
-}
-
-/**
* kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
*
* @vcpu: The VCPU pointer
@@ -601,66 +185,6 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
}
-/**
- * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
- * or in-kernel IO emulation
- *
- * @vcpu: The VCPU pointer
- * @run: The VCPU run struct containing the mmio data
- */
-int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- u8 data8;
- u16 data16;
- u32 data32;
- u64 data64;
- ulong insn;
- int len, shift;
-
- if (vcpu->arch.mmio_decode.return_handled)
- return 0;
-
- vcpu->arch.mmio_decode.return_handled = 1;
- insn = vcpu->arch.mmio_decode.insn;
-
- if (run->mmio.is_write)
- goto done;
-
- len = vcpu->arch.mmio_decode.len;
- shift = vcpu->arch.mmio_decode.shift;
-
- switch (len) {
- case 1:
- data8 = *((u8 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data8 << shift >> shift);
- break;
- case 2:
- data16 = *((u16 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data16 << shift >> shift);
- break;
- case 4:
- data32 = *((u32 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data32 << shift >> shift);
- break;
- case 8:
- data64 = *((u64 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data64 << shift >> shift);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
-done:
- /* Move to next instruction */
- vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
-
- return 0;
-}
-
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
@@ -680,7 +204,7 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
switch (trap->scause) {
case EXC_VIRTUAL_INST_FAULT:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
- ret = virtual_inst_fault(vcpu, run, trap);
+ ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
break;
case EXC_INST_GUEST_PAGE_FAULT:
case EXC_LOAD_GUEST_PAGE_FAULT:
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index d4308c512007..9d8cbc42057a 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -16,12 +16,11 @@
#ifdef CONFIG_FPU
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
- unsigned long isa = vcpu->arch.isa;
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
cntx->sstatus &= ~SR_FS;
- if (riscv_isa_extension_available(&isa, f) ||
- riscv_isa_extension_available(&isa, d))
+ if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
+ riscv_isa_extension_available(vcpu->arch.isa, d))
cntx->sstatus |= SR_FS_INITIAL;
else
cntx->sstatus |= SR_FS_OFF;
@@ -34,24 +33,24 @@ static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
}
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
- unsigned long isa)
+ const unsigned long *isa)
{
if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
- if (riscv_isa_extension_available(&isa, d))
+ if (riscv_isa_extension_available(isa, d))
__kvm_riscv_fp_d_save(cntx);
- else if (riscv_isa_extension_available(&isa, f))
+ else if (riscv_isa_extension_available(isa, f))
__kvm_riscv_fp_f_save(cntx);
kvm_riscv_vcpu_fp_clean(cntx);
}
}
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
- unsigned long isa)
+ const unsigned long *isa)
{
if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
- if (riscv_isa_extension_available(&isa, d))
+ if (riscv_isa_extension_available(isa, d))
__kvm_riscv_fp_d_restore(cntx);
- else if (riscv_isa_extension_available(&isa, f))
+ else if (riscv_isa_extension_available(isa, f))
__kvm_riscv_fp_f_restore(cntx);
kvm_riscv_vcpu_fp_clean(cntx);
}
@@ -80,7 +79,6 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
unsigned long rtype)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- unsigned long isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
@@ -89,7 +87,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
void *reg_val;
if ((rtype == KVM_REG_RISCV_FP_F) &&
- riscv_isa_extension_available(&isa, f)) {
+ riscv_isa_extension_available(vcpu->arch.isa, f)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
@@ -100,7 +98,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
else
return -EINVAL;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
- riscv_isa_extension_available(&isa, d)) {
+ riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
@@ -126,7 +124,6 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
unsigned long rtype)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- unsigned long isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
@@ -135,7 +132,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
void *reg_val;
if ((rtype == KVM_REG_RISCV_FP_F) &&
- riscv_isa_extension_available(&isa, f)) {
+ riscv_isa_extension_available(vcpu->arch.isa, f)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
@@ -146,7 +143,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
else
return -EINVAL;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
- riscv_isa_extension_available(&isa, d)) {
+ riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
new file mode 100644
index 000000000000..7eb90a47b571
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kvm_host.h>
+
+#define INSN_OPCODE_MASK 0x007c
+#define INSN_OPCODE_SHIFT 2
+#define INSN_OPCODE_SYSTEM 28
+
+#define INSN_MASK_WFI 0xffffffff
+#define INSN_MATCH_WFI 0x10500073
+
+#define INSN_MATCH_CSRRW 0x1073
+#define INSN_MASK_CSRRW 0x707f
+#define INSN_MATCH_CSRRS 0x2073
+#define INSN_MASK_CSRRS 0x707f
+#define INSN_MATCH_CSRRC 0x3073
+#define INSN_MASK_CSRRC 0x707f
+#define INSN_MATCH_CSRRWI 0x5073
+#define INSN_MASK_CSRRWI 0x707f
+#define INSN_MATCH_CSRRSI 0x6073
+#define INSN_MASK_CSRRSI 0x707f
+#define INSN_MATCH_CSRRCI 0x7073
+#define INSN_MASK_CSRRCI 0x707f
+
+#define INSN_MATCH_LB 0x3
+#define INSN_MASK_LB 0x707f
+#define INSN_MATCH_LH 0x1003
+#define INSN_MASK_LH 0x707f
+#define INSN_MATCH_LW 0x2003
+#define INSN_MASK_LW 0x707f
+#define INSN_MATCH_LD 0x3003
+#define INSN_MASK_LD 0x707f
+#define INSN_MATCH_LBU 0x4003
+#define INSN_MASK_LBU 0x707f
+#define INSN_MATCH_LHU 0x5003
+#define INSN_MASK_LHU 0x707f
+#define INSN_MATCH_LWU 0x6003
+#define INSN_MASK_LWU 0x707f
+#define INSN_MATCH_SB 0x23
+#define INSN_MASK_SB 0x707f
+#define INSN_MATCH_SH 0x1023
+#define INSN_MASK_SH 0x707f
+#define INSN_MATCH_SW 0x2023
+#define INSN_MASK_SW 0x707f
+#define INSN_MATCH_SD 0x3023
+#define INSN_MASK_SD 0x707f
+
+#define INSN_MATCH_C_LD 0x6000
+#define INSN_MASK_C_LD 0xe003
+#define INSN_MATCH_C_SD 0xe000
+#define INSN_MASK_C_SD 0xe003
+#define INSN_MATCH_C_LW 0x4000
+#define INSN_MASK_C_LW 0xe003
+#define INSN_MATCH_C_SW 0xc000
+#define INSN_MASK_C_SW 0xe003
+#define INSN_MATCH_C_LDSP 0x6002
+#define INSN_MASK_C_LDSP 0xe003
+#define INSN_MATCH_C_SDSP 0xe002
+#define INSN_MASK_C_SDSP 0xe003
+#define INSN_MATCH_C_LWSP 0x4002
+#define INSN_MASK_C_LWSP 0xe003
+#define INSN_MATCH_C_SWSP 0xc002
+#define INSN_MASK_C_SWSP 0xe003
+
+#define INSN_16BIT_MASK 0x3
+
+#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
+
+#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
+
+#ifdef CONFIG_64BIT
+#define LOG_REGBYTES 3
+#else
+#define LOG_REGBYTES 2
+#endif
+#define REGBYTES (1 << LOG_REGBYTES)
+
+#define SH_RD 7
+#define SH_RS1 15
+#define SH_RS2 20
+#define SH_RS2C 2
+#define MASK_RX 0x1f
+
+#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
+#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
+ (RV_X(x, 10, 3) << 3) | \
+ (RV_X(x, 5, 1) << 6))
+#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
+ (RV_X(x, 5, 2) << 6))
+#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
+ (RV_X(x, 12, 1) << 5) | \
+ (RV_X(x, 2, 2) << 6))
+#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
+ (RV_X(x, 12, 1) << 5) | \
+ (RV_X(x, 2, 3) << 6))
+#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
+ (RV_X(x, 7, 2) << 6))
+#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
+ (RV_X(x, 7, 3) << 6))
+#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
+#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
+#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
+
+#define SHIFT_RIGHT(x, y) \
+ ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
+
+#define REG_MASK \
+ ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
+
+#define REG_OFFSET(insn, pos) \
+ (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
+
+#define REG_PTR(insn, pos, regs) \
+ ((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
+
+#define GET_FUNCT3(insn) (((insn) >> 12) & 7)
+
+#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
+#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
+#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
+#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
+#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
+#define GET_SP(regs) (*REG_PTR(2, 0, regs))
+#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
+#define IMM_I(insn) ((s32)(insn) >> 20)
+#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
+ (s32)(((insn) >> 7) & 0x1f))
+
+struct insn_func {
+ unsigned long mask;
+ unsigned long match;
+ /*
+ * Possible return values are as follows:
+ * 1) Returns < 0 for error case
+ * 2) Returns 0 for exit to user-space
+ * 3) Returns 1 to continue with next sepc
+ * 4) Returns 2 to continue with same sepc
+ * 5) Returns 3 to inject illegal instruction trap and continue
+ * 6) Returns 4 to inject virtual instruction trap and continue
+ *
+ * Use enum kvm_insn_return for return values
+ */
+ int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
+};
+
+static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ ulong insn)
+{
+ struct kvm_cpu_trap utrap = { 0 };
+
+ /* Redirect trap to Guest VCPU */
+ utrap.sepc = vcpu->arch.guest_context.sepc;
+ utrap.scause = EXC_INST_ILLEGAL;
+ utrap.stval = insn;
+ utrap.htval = 0;
+ utrap.htinst = 0;
+ kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+
+ return 1;
+}
+
+static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ ulong insn)
+{
+ struct kvm_cpu_trap utrap = { 0 };
+
+ /* Redirect trap to Guest VCPU */
+ utrap.sepc = vcpu->arch.guest_context.sepc;
+ utrap.scause = EXC_VIRTUAL_INST_FAULT;
+ utrap.stval = insn;
+ utrap.htval = 0;
+ utrap.htinst = 0;
+ kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+
+ return 1;
+}
+
+/**
+ * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
+ *
+ * @vcpu: The VCPU pointer
+ */
+void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_arch_vcpu_runnable(vcpu)) {
+ kvm_vcpu_srcu_read_unlock(vcpu);
+ kvm_vcpu_halt(vcpu);
+ kvm_vcpu_srcu_read_lock(vcpu);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ }
+}
+
+static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
+{
+ vcpu->stat.wfi_exit_stat++;
+ kvm_riscv_vcpu_wfi(vcpu);
+ return KVM_INSN_CONTINUE_NEXT_SEPC;
+}
+
+struct csr_func {
+ unsigned int base;
+ unsigned int count;
+ /*
+	 * Possible return values are the same as the "func" callback in
+ * "struct insn_func".
+ */
+ int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+};
+
+static const struct csr_func csr_funcs[] = { };
+
+/**
+ * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
+ * emulation or in-kernel emulation
+ *
+ * @vcpu: The VCPU pointer
+ * @run: The VCPU run struct containing the CSR data
+ *
+ * Returns > 0 upon failure and 0 upon success
+ */
+int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ ulong insn;
+
+ if (vcpu->arch.csr_decode.return_handled)
+ return 0;
+ vcpu->arch.csr_decode.return_handled = 1;
+
+ /* Update destination register for CSR reads */
+ insn = vcpu->arch.csr_decode.insn;
+ if ((insn >> SH_RD) & MASK_RX)
+ SET_RD(insn, &vcpu->arch.guest_context,
+ run->riscv_csr.ret_value);
+
+ /* Move to next instruction */
+ vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+
+ return 0;
+}
+
+static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
+{
+ int i, rc = KVM_INSN_ILLEGAL_TRAP;
+ unsigned int csr_num = insn >> SH_RS2;
+ unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
+ ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
+ const struct csr_func *tcfn, *cfn = NULL;
+ ulong val = 0, wr_mask = 0, new_val = 0;
+
+ /* Decode the CSR instruction */
+ switch (GET_FUNCT3(insn)) {
+ case GET_FUNCT3(INSN_MATCH_CSRRW):
+ wr_mask = -1UL;
+ new_val = rs1_val;
+ break;
+ case GET_FUNCT3(INSN_MATCH_CSRRS):
+ wr_mask = rs1_val;
+ new_val = -1UL;
+ break;
+ case GET_FUNCT3(INSN_MATCH_CSRRC):
+ wr_mask = rs1_val;
+ new_val = 0;
+ break;
+ case GET_FUNCT3(INSN_MATCH_CSRRWI):
+ wr_mask = -1UL;
+ new_val = rs1_num;
+ break;
+ case GET_FUNCT3(INSN_MATCH_CSRRSI):
+ wr_mask = rs1_num;
+ new_val = -1UL;
+ break;
+ case GET_FUNCT3(INSN_MATCH_CSRRCI):
+ wr_mask = rs1_num;
+ new_val = 0;
+ break;
+ default:
+ return rc;
+ }
+
+ /* Save instruction decode info */
+ vcpu->arch.csr_decode.insn = insn;
+ vcpu->arch.csr_decode.return_handled = 0;
+
+ /* Update CSR details in kvm_run struct */
+ run->riscv_csr.csr_num = csr_num;
+ run->riscv_csr.new_value = new_val;
+ run->riscv_csr.write_mask = wr_mask;
+ run->riscv_csr.ret_value = 0;
+
+ /* Find in-kernel CSR function */
+ for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
+ tcfn = &csr_funcs[i];
+ if ((tcfn->base <= csr_num) &&
+ (csr_num < (tcfn->base + tcfn->count))) {
+ cfn = tcfn;
+ break;
+ }
+ }
+
+ /* First try in-kernel CSR emulation */
+ if (cfn && cfn->func) {
+ rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
+ if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
+ if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
+ run->riscv_csr.ret_value = val;
+ vcpu->stat.csr_exit_kernel++;
+ kvm_riscv_vcpu_csr_return(vcpu, run);
+ rc = KVM_INSN_CONTINUE_SAME_SEPC;
+ }
+ return rc;
+ }
+ }
+
+ /* Exit to user-space for CSR emulation */
+ if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
+ vcpu->stat.csr_exit_user++;
+ run->exit_reason = KVM_EXIT_RISCV_CSR;
+ }
+
+ return rc;
+}
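
Worked example of the decode above: `rdtime t0` assembles to csrrs t0, time, x0 (0xc01022f3). funct3 selects the CSRRS rule, so wr_mask is the rs1 value — zero for x0 — making it a pure read. A stand-alone decode of that encoding, using the same field extraction as csr_insn():

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t insn = 0xc01022f3;	/* csrrs t0, time, x0 */
		unsigned int csr_num = insn >> 20;		/* bits 31:20 */
		unsigned int rs1_num = (insn >> 15) & 0x1f;
		unsigned int funct3  = (insn >> 12) & 0x7;
		unsigned int rd      = (insn >> 7) & 0x1f;

		/* funct3 == 2 selects CSRRS: wr_mask = rs1 value,
		 * new_val = -1UL; rs1 = x0 makes wr_mask 0, a pure read. */
		printf("csr=0x%x rs1=x%u funct3=%u rd=x%u\n",
		       csr_num, rs1_num, funct3, rd);
		return 0;
	}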
+
+static const struct insn_func system_opcode_funcs[] = {
+ {
+ .mask = INSN_MASK_CSRRW,
+ .match = INSN_MATCH_CSRRW,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRS,
+ .match = INSN_MATCH_CSRRS,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRC,
+ .match = INSN_MATCH_CSRRC,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRWI,
+ .match = INSN_MATCH_CSRRWI,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRSI,
+ .match = INSN_MATCH_CSRRSI,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_CSRRCI,
+ .match = INSN_MATCH_CSRRCI,
+ .func = csr_insn,
+ },
+ {
+ .mask = INSN_MASK_WFI,
+ .match = INSN_MATCH_WFI,
+ .func = wfi_insn,
+ },
+};
+
+static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ ulong insn)
+{
+ int i, rc = KVM_INSN_ILLEGAL_TRAP;
+ const struct insn_func *ifn;
+
+ for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
+ ifn = &system_opcode_funcs[i];
+ if ((insn & ifn->mask) == ifn->match) {
+ rc = ifn->func(vcpu, run, insn);
+ break;
+ }
+ }
+
+ switch (rc) {
+ case KVM_INSN_ILLEGAL_TRAP:
+ return truly_illegal_insn(vcpu, run, insn);
+ case KVM_INSN_VIRTUAL_TRAP:
+ return truly_virtual_insn(vcpu, run, insn);
+ case KVM_INSN_CONTINUE_NEXT_SEPC:
+ vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+ break;
+ default:
+ break;
+ }
+
+ return (rc <= 0) ? rc : 1;
+}
+
+/**
+ * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
+ *
+ * @vcpu: The VCPU pointer
+ * @run: The VCPU run struct containing the mmio data
+ * @trap: Trap details
+ *
+ * Returns > 0 to continue run-loop
+ * Returns 0 to exit run-loop and handle in user-space.
+ * Returns < 0 to report failure and exit run-loop
+ */
+int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_cpu_trap *trap)
+{
+ unsigned long insn = trap->stval;
+ struct kvm_cpu_trap utrap = { 0 };
+ struct kvm_cpu_context *ct;
+
+ if (unlikely(INSN_IS_16BIT(insn))) {
+ if (insn == 0) {
+ ct = &vcpu->arch.guest_context;
+ insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
+ ct->sepc,
+ &utrap);
+ if (utrap.scause) {
+ utrap.sepc = ct->sepc;
+ kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+ return 1;
+ }
+ }
+ if (INSN_IS_16BIT(insn))
+ return truly_illegal_insn(vcpu, run, insn);
+ }
+
+ switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
+ case INSN_OPCODE_SYSTEM:
+ return system_opcode_insn(vcpu, run, insn);
+ default:
+ return truly_illegal_insn(vcpu, run, insn);
+ }
+}
+
+/**
+ * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
+ *
+ * @vcpu: The VCPU pointer
+ * @run: The VCPU run struct containing the mmio data
+ * @fault_addr: Guest physical address to load
+ * @htinst: Transformed encoding of the load instruction
+ *
+ * Returns > 0 to continue run-loop
+ * Returns 0 to exit run-loop and handle in user-space.
+ * Returns < 0 to report failure and exit run-loop
+ */
+int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst)
+{
+ u8 data_buf[8];
+ unsigned long insn;
+ int shift = 0, len = 0, insn_len = 0;
+ struct kvm_cpu_trap utrap = { 0 };
+ struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
+
+ /* Determine trapped instruction */
+ if (htinst & 0x1) {
+ /*
+ * Bit[0] == 1 implies trapped instruction value is
+ * transformed instruction or custom instruction.
+ */
+ insn = htinst | INSN_16BIT_MASK;
+ insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
+ } else {
+ /*
+ * Bit[0] == 0 implies trapped instruction value is
+ * zero or special value.
+ */
+ insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
+ &utrap);
+ if (utrap.scause) {
+ /* Redirect trap if we failed to read instruction */
+ utrap.sepc = ct->sepc;
+ kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+ return 1;
+ }
+ insn_len = INSN_LEN(insn);
+ }
+
+ /* Decode length of MMIO and shift */
+ if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
+ len = 4;
+ shift = 8 * (sizeof(ulong) - len);
+ } else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
+ len = 1;
+ shift = 8 * (sizeof(ulong) - len);
+ } else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
+ len = 1;
+ shift = 8 * (sizeof(ulong) - len);
+#ifdef CONFIG_64BIT
+ } else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
+ len = 8;
+ shift = 8 * (sizeof(ulong) - len);
+ } else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
+ len = 4;
+#endif
+ } else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
+ len = 2;
+ shift = 8 * (sizeof(ulong) - len);
+ } else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
+ len = 2;
+#ifdef CONFIG_64BIT
+ } else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
+ len = 8;
+ shift = 8 * (sizeof(ulong) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 8;
+ shift = 8 * (sizeof(ulong) - len);
+#endif
+ } else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
+ len = 4;
+ shift = 8 * (sizeof(ulong) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 4;
+ shift = 8 * (sizeof(ulong) - len);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ /* Fault address should be aligned to length of MMIO */
+ if (fault_addr & (len - 1))
+ return -EIO;
+
+ /* Save instruction decode info */
+ vcpu->arch.mmio_decode.insn = insn;
+ vcpu->arch.mmio_decode.insn_len = insn_len;
+ vcpu->arch.mmio_decode.shift = shift;
+ vcpu->arch.mmio_decode.len = len;
+ vcpu->arch.mmio_decode.return_handled = 0;
+
+ /* Update MMIO details in kvm_run struct */
+ run->mmio.is_write = false;
+ run->mmio.phys_addr = fault_addr;
+ run->mmio.len = len;
+
+ /* Try to handle MMIO access in the kernel */
+ if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
+ /* Successfully handled MMIO access in the kernel so resume */
+ memcpy(run->mmio.data, data_buf, len);
+ vcpu->stat.mmio_exit_kernel++;
+ kvm_riscv_vcpu_mmio_return(vcpu, run);
+ return 1;
+ }
+
+ /* Exit to userspace for MMIO emulation */
+ vcpu->stat.mmio_exit_user++;
+ run->exit_reason = KVM_EXIT_MMIO;
+
+ return 0;
+}
+
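
Both MMIO emulators start with the same htinst decode: bit[0] set means the hardware supplied a transformed instruction (with bit[1] telling a 32-bit original apart from a compressed one), while bit[0] clear means the instruction has to be re-read from guest memory via kvm_riscv_vcpu_unpriv_read(). A stand-alone sketch of that branch (the sample htinst value is hypothetical):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical transformed 'sw a1, 0(a0)': bits [1:0] set */
		unsigned long htinst = 0x00b52023;
		unsigned long insn;
		int insn_len;

		if (htinst & 1) {
			insn = htinst | 0x3;	/* force 32-bit encoding bits */
			insn_len = (htinst & 2) ? 4 : 2;
		} else {
			insn = 0;	/* would be re-read from guest memory */
			insn_len = 0;
		}
		printf("insn=0x%lx len=%d\n", insn, insn_len);
		return 0;
	}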
+/**
+ * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
+ *
+ * @vcpu: The VCPU pointer
+ * @run: The VCPU run struct containing the mmio data
+ * @fault_addr: Guest physical address to store
+ * @htinst: Transformed encoding of the store instruction
+ *
+ * Returns > 0 to continue run-loop
+ * Returns 0 to exit run-loop and handle in user-space.
+ * Returns < 0 to report failure and exit run-loop
+ */
+int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst)
+{
+ u8 data8;
+ u16 data16;
+ u32 data32;
+ u64 data64;
+ ulong data;
+ unsigned long insn;
+ int len = 0, insn_len = 0;
+ struct kvm_cpu_trap utrap = { 0 };
+ struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
+
+ /* Determine trapped instruction */
+ if (htinst & 0x1) {
+ /*
+ * Bit[0] == 1 implies trapped instruction value is
+ * transformed instruction or custom instruction.
+ */
+ insn = htinst | INSN_16BIT_MASK;
+ insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
+ } else {
+ /*
+ * Bit[0] == 0 implies trapped instruction value is
+ * zero or special value.
+ */
+ insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
+ &utrap);
+ if (utrap.scause) {
+ /* Redirect trap if we failed to read instruction */
+ utrap.sepc = ct->sepc;
+ kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+ return 1;
+ }
+ insn_len = INSN_LEN(insn);
+ }
+
+ data = GET_RS2(insn, &vcpu->arch.guest_context);
+ data8 = data16 = data32 = data64 = data;
+
+ if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
+ len = 4;
+ } else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
+ len = 1;
+#ifdef CONFIG_64BIT
+ } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
+ len = 8;
+#endif
+ } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
+ len = 2;
+#ifdef CONFIG_64BIT
+ } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+ len = 8;
+ data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
+ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 8;
+ data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
+#endif
+ } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+ len = 4;
+ data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
+ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 4;
+ data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ /* Fault address should be aligned to length of MMIO */
+ if (fault_addr & (len - 1))
+ return -EIO;
+
+ /* Save instruction decode info */
+ vcpu->arch.mmio_decode.insn = insn;
+ vcpu->arch.mmio_decode.insn_len = insn_len;
+ vcpu->arch.mmio_decode.shift = 0;
+ vcpu->arch.mmio_decode.len = len;
+ vcpu->arch.mmio_decode.return_handled = 0;
+
+ /* Copy data to kvm_run instance */
+ switch (len) {
+ case 1:
+ *((u8 *)run->mmio.data) = data8;
+ break;
+ case 2:
+ *((u16 *)run->mmio.data) = data16;
+ break;
+ case 4:
+ *((u32 *)run->mmio.data) = data32;
+ break;
+ case 8:
+ *((u64 *)run->mmio.data) = data64;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Update MMIO details in kvm_run struct */
+ run->mmio.is_write = true;
+ run->mmio.phys_addr = fault_addr;
+ run->mmio.len = len;
+
+ /* Try to handle MMIO access in the kernel */
+ if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
+ fault_addr, len, run->mmio.data)) {
+ /* Successfully handled MMIO access in the kernel so resume */
+ vcpu->stat.mmio_exit_kernel++;
+ kvm_riscv_vcpu_mmio_return(vcpu, run);
+ return 1;
+ }
+
+ /* Exit to userspace for MMIO emulation */
+ vcpu->stat.mmio_exit_user++;
+ run->exit_reason = KVM_EXIT_MMIO;
+
+ return 0;
+}
+
+/**
+ * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
+ * or in-kernel IO emulation
+ *
+ * @vcpu: The VCPU pointer
+ * @run: The VCPU run struct containing the mmio data
+ */
+int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u8 data8;
+ u16 data16;
+ u32 data32;
+ u64 data64;
+ ulong insn;
+ int len, shift;
+
+ if (vcpu->arch.mmio_decode.return_handled)
+ return 0;
+
+ vcpu->arch.mmio_decode.return_handled = 1;
+ insn = vcpu->arch.mmio_decode.insn;
+
+ if (run->mmio.is_write)
+ goto done;
+
+ len = vcpu->arch.mmio_decode.len;
+ shift = vcpu->arch.mmio_decode.shift;
+
+ switch (len) {
+ case 1:
+ data8 = *((u8 *)run->mmio.data);
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data8 << shift >> shift);
+ break;
+ case 2:
+ data16 = *((u16 *)run->mmio.data);
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data16 << shift >> shift);
+ break;
+ case 4:
+ data32 = *((u32 *)run->mmio.data);
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data32 << shift >> shift);
+ break;
+ case 8:
+ data64 = *((u64 *)run->mmio.data);
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data64 << shift >> shift);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+done:
+ /* Move to next instruction */
+ vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
+
+ return 0;
+}
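
Two small idioms recur in the file above: kvm_riscv_vcpu_mmio_return() masks the returned register-sized value down to the access width with a shift pair, and the emulators test alignment with the power-of-two trick fault_addr & (len - 1). Both in stand-alone form (assuming a 64-bit host):

	#include <stdio.h>

	int main(void)
	{
		unsigned long val = 0xdeadbeefcafef00dUL;
		int len = 2;	/* 16-bit access */
		int shift = 8 * (sizeof(unsigned long) - len);
		unsigned long fault_addr = 0x80001002;

		printf("masked: 0x%lx\n", val << shift >> shift);	/* 0xf00d */
		printf("aligned for len=%d: %s\n", len,
		       (fault_addr & (len - 1)) ? "no" : "yes");	/* yes */
		return 0;
	}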
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 5c4c37ff2d48..595043857049 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -214,12 +214,10 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
#endif
}
-int kvm_riscv_guest_timer_init(struct kvm *kvm)
+void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
struct kvm_guest_timer *gt = &kvm->arch.timer;
riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
gt->time_delta = -get_cycles64();
-
- return 0;
}
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 945a2bf5e3f6..65a964d7e70d 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -41,7 +41,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return r;
}
- return kvm_riscv_guest_timer_init(kvm);
+ kvm_riscv_guest_timer_init(kvm);
+
+ return 0;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c
index 9f764df125db..6cd93995fb65 100644
--- a/arch/riscv/kvm/vmid.c
+++ b/arch/riscv/kvm/vmid.c
@@ -97,7 +97,7 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
* We ran out of VMIDs so we increment vmid_version and
* start assigning VMIDs from 1.
*
- * This also means existing VMIDs assignement to all Guest
+ * This also means existing VMIDs assignment to all Guest
	 * instances is invalid and we have to force VMID re-assignment
	 * for all Guest instances. The Guest instances that were not
	 * running will automatically pick up new VMIDs because they will
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 8c475f4da308..ec486e5369d9 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -175,7 +175,7 @@ ENTRY(__asm_copy_from_user)
/* Exception fixup code */
10:
/* Disable access to user memory */
- csrs CSR_STATUS, t6
+ csrc CSR_STATUS, t6
mv a0, t5
ret
ENDPROC(__asm_copy_to_user)
@@ -227,7 +227,7 @@ ENTRY(__clear_user)
/* Exception fixup code */
11:
/* Disable access to user memory */
- csrs CSR_STATUS, t6
+ csrc CSR_STATUS, t6
mv a0, a1
ret
ENDPROC(__clear_user)
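
The uaccess.S change is a genuine bug fix, not a cleanup: csrs sets the CSR bits given in the source register while csrc clears them, and the exception fixup path needs to clear the SUM bit in sstatus so user-memory access is disabled again — with csrs, the fixup left it enabled. The distinction modeled in C (t6 holds the SUM mask in this code; SR_SUM is sstatus bit 18):

	#include <stdio.h>

	#define SR_SUM (1UL << 18)	/* sstatus.SUM: permit S-mode user access */

	int main(void)
	{
		unsigned long sstatus = SR_SUM;	/* user access currently on */
		unsigned long after_csrs = sstatus | SR_SUM;	/* old: still set */
		unsigned long after_csrc = sstatus & ~SR_SUM;	/* fixed: cleared */

		printf("csrs -> 0x%lx, csrc -> 0x%lx\n", after_csrs, after_csrc);
		return 0;
	}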
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index a8dc0bd9078d..cd2225304c82 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -83,9 +83,13 @@ void riscv_init_cbom_blocksize(void)
u32 val;
for_each_of_cpu_node(node) {
- int hartid = riscv_of_processor_hartid(node);
+ unsigned long hartid;
int cbom_hartid;
+ ret = riscv_of_processor_hartid(node, &hartid);
+ if (ret)
+ continue;
+
if (hartid < 0)
continue;
@@ -99,7 +103,7 @@ void riscv_init_cbom_blocksize(void)
cbom_hartid = hartid;
} else {
if (riscv_cbom_block_size != val)
- pr_warn("cbom-block-size mismatched between harts %d and %d\n",
+ pr_warn("cbom-block-size mismatched between harts %d and %lu\n",
cbom_hartid, hartid);
}
}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 40694f0cab9e..f2fbd1400b7c 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -326,6 +326,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index d466ec670e1f..a88b7dc31a68 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -288,6 +288,26 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READ,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READ_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+
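
protection_map is indexed by the low four VM flag bits (read, write, exec, shared), and DECLARE_VM_GET_PAGE_PROT emits the generic vm_get_page_prot() that performs the lookup, replacing the old __P000/__S000 macro tables. A sketch of the lookup (strings stand in for the pgprot_t values, and only a few entries are shown):

	#include <stdio.h>

	#define VM_NONE   0x0
	#define VM_READ   0x1
	#define VM_WRITE  0x2
	#define VM_EXEC   0x4
	#define VM_SHARED 0x8

	static const char *prot_map[16] = {
		[VM_NONE]                        = "PAGE_NONE",
		[VM_READ]                        = "PAGE_READ",
		[VM_WRITE]                       = "PAGE_COPY",
		[VM_WRITE | VM_READ]             = "PAGE_COPY",
		[VM_SHARED | VM_WRITE]           = "PAGE_SHARED",
		[VM_SHARED | VM_WRITE | VM_READ] = "PAGE_SHARED",
		/* remaining entries elided */
	};

	int main(void)
	{
		unsigned long vm_flags = VM_SHARED | VM_WRITE | VM_READ;

		printf("pgprot = %s\n", prot_map[vm_flags & 0xf]);
		return 0;
	}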
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long addr = __fix_to_virt(idx);
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index 2a3715bf29fe..d926e0f7ef57 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -69,6 +69,7 @@ struct rv_jit_context {
struct bpf_prog *prog;
u16 *insns; /* RV insns */
int ninsns;
+ int body_len;
int epilogue_offset;
int *offset; /* BPF to RV */
int nexentries;
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index be743d700aa7..737baf8715da 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
unsigned int prog_size = 0, extable_size = 0;
bool tmp_blinded = false, extra_pass = false;
struct bpf_prog *tmp, *orig_prog = prog;
- int pass = 0, prev_ninsns = 0, i;
+ int pass = 0, prev_ninsns = 0, prologue_len, i;
struct rv_jit_data *jit_data;
struct rv_jit_context *ctx;
@@ -95,6 +95,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = orig_prog;
goto out_offset;
}
+ ctx->body_len = ctx->ninsns;
bpf_jit_build_prologue(ctx);
ctx->epilogue_offset = ctx->ninsns;
bpf_jit_build_epilogue(ctx);
@@ -161,6 +162,11 @@ skip_init_ctx:
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(jit_data->header);
+ prologue_len = ctx->epilogue_offset - ctx->body_len;
+ for (i = 0; i < prog->len; i++)
+ ctx->offset[i] = ninsns_rvoff(prologue_len +
+ ctx->offset[i]);
+ bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
kfree(ctx->offset);
kfree(jit_data);
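
The last hunk fixes BPF line info: ctx->offset[] is recorded relative to the program body, so each entry is shifted by the prologue length and converted to a byte offset with ninsns_rvoff() before bpf_prog_fill_jited_linfo() consumes it. A stand-alone model of the adjustment (ninsns_rvoff() is assumed to double the count, since this JIT tracks instructions in 16-bit units; the lengths are hypothetical):

	#include <stdio.h>

	/* assumed: convert a 16-bit-unit instruction count to a byte offset */
	static int ninsns_rvoff(int ninsns)
	{
		return ninsns << 1;
	}

	int main(void)
	{
		int body_len = 30, epilogue_offset = 38;	/* hypothetical */
		int prologue_len = epilogue_offset - body_len;	/* 8 units */
		int offset[3] = { 0, 4, 9 };	/* per-BPF-insn body offsets */
		int i;

		for (i = 0; i < 3; i++)
			printf("bpf insn %d -> jited byte offset %d\n",
			       i, ninsns_rvoff(prologue_len + offset[i]));
		return 0;
	}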