Diffstat (limited to 'arch/riscv/include')
151 files changed, 8154 insertions, 1703 deletions
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 504f8b7e72d4..bd5fc9403295 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -1,11 +1,18 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += syscall_table_32.h +syscall-y += syscall_table_64.h + generic-y += early_ioremap.h generic-y += flat.h +generic-y += fprobe.h generic-y += kvm_para.h +generic-y += mmzone.h +generic-y += mcs_spinlock.h generic-y += parport.h -generic-y += spinlock.h generic-y += spinlock_types.h +generic-y += ticket_spinlock.h generic-y += qrwlock.h generic-y += qrwlock_types.h +generic-y += qspinlock.h generic-y += user.h generic-y += vmlinux.lds.h diff --git a/arch/riscv/include/asm/acenv.h b/arch/riscv/include/asm/acenv.h new file mode 100644 index 000000000000..43ae2e32c779 --- /dev/null +++ b/arch/riscv/include/asm/acenv.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * RISC-V specific ACPICA environments and implementation + */ + +#ifndef _ASM_ACENV_H +#define _ASM_ACENV_H + +/* This header is required unconditionally by the ACPI core */ + +#endif /* _ASM_ACENV_H */ diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h new file mode 100644 index 000000000000..6e13695120bc --- /dev/null +++ b/arch/riscv/include/asm/acpi.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013-2014, Linaro Ltd. + * Author: Al Stone <al.stone@linaro.org> + * Author: Graeme Gregory <graeme.gregory@linaro.org> + * Author: Hanjun Guo <hanjun.guo@linaro.org> + * + * Copyright (C) 2021-2023, Ventana Micro Systems Inc. + * Author: Sunil V L <sunilvl@ventanamicro.com> + */ + +#ifndef _ASM_ACPI_H +#define _ASM_ACPI_H + +/* Basic configuration for ACPI */ +#ifdef CONFIG_ACPI + +typedef u64 phys_cpuid_t; +#define PHYS_CPUID_INVALID INVALID_HARTID + +/* ACPI table mapping after acpi_permanent_mmap is set */ +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); +#define acpi_os_ioremap acpi_os_ioremap + +#define acpi_strict 1 /* No out-of-spec workarounds on RISC-V */ +extern int acpi_disabled; +extern int acpi_noirq; +extern int acpi_pci_disabled; + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +static inline void enable_acpi(void) +{ + acpi_disabled = 0; + acpi_pci_disabled = 0; + acpi_noirq = 0; +} + +/* + * The ACPI processor driver for ACPI core code needs this macro + * to find out whether this cpu was already mapped (mapping from CPU hardware + * ID to CPU logical ID) or not. + */ +#define cpu_physical_id(cpu) cpuid_to_hartid_map(cpu) + +/* + * Since MADT must provide at least one RINTC structure, the + * CPU will be always available in MADT on RISC-V. 
+ */ +static inline bool acpi_has_cpu_in_madt(void) +{ + return true; +} + +static inline void arch_fix_phys_package_id(int num, u32 slot) { } + +void acpi_init_rintc_map(void); +struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu); +static inline u32 get_acpi_id_for_cpu(int cpu) +{ + return acpi_cpu_get_madt_rintc(cpu)->uid; +} + +int acpi_get_riscv_isa(struct acpi_table_header *table, + unsigned int cpu, const char **isa); + +void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, + u32 *cboz_size, u32 *cbop_size); +#else +static inline void acpi_init_rintc_map(void) { } +static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) +{ + return NULL; +} + +static inline int acpi_get_riscv_isa(struct acpi_table_header *table, + unsigned int cpu, const char **isa) +{ + return -EINVAL; +} + +static inline void acpi_get_cbo_block_size(struct acpi_table_header *table, + u32 *cbom_size, u32 *cboz_size, + u32 *cbop_size) { } + +#endif /* CONFIG_ACPI */ + +#ifdef CONFIG_ACPI_NUMA +void acpi_map_cpus_to_nodes(void); +#else +static inline void acpi_map_cpus_to_nodes(void) { } +#endif /* CONFIG_ACPI_NUMA */ + +#endif /*_ASM_ACPI_H*/ diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index ec2f3f1b836f..231d777d936c 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -6,18 +6,18 @@ #ifdef __ASSEMBLY__ -.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len - RISCV_PTR \oldptr - RISCV_PTR \newptr - REG_ASM \vendor_id - REG_ASM \new_len - .word \errata_id +.macro ALT_ENTRY oldptr newptr vendor_id patch_id new_len + .4byte \oldptr - . + .4byte \newptr - . + .2byte \vendor_id + .2byte \new_len + .4byte \patch_id .endm -.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg +.macro ALT_NEW_CONTENT vendor_id, patch_id, enable = 1, new_c .if \enable .pushsection .alternative, "a" - ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f + ALT_ENTRY 886b, 888f, \vendor_id, \patch_id, 889f - 888f .popsection .subsection 1 888 : @@ -33,7 +33,7 @@ .endif .endm -.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable +.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, patch_id, enable 886 : .option push .option norvc @@ -41,50 +41,34 @@ \old_c .option pop 887 : - ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c + ALT_NEW_CONTENT \vendor_id, \patch_id, \enable, "\new_c" .endm -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k) - -.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ - new_c_2, vendor_id_2, errata_id_2, enable_2 -886 : - .option push - .option norvc - .option norelax - \old_c - .option pop -887 : - ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1 - ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 +.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \ + new_c_2, vendor_id_2, patch_id_2, enable_2 + ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \patch_id_1, \enable_1 + ALT_NEW_CONTENT \vendor_id_2, \patch_id_2, \enable_2, "\new_c_2" .endm -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \ - IS_ENABLED(CONFIG_k_1), \ - new_c_2, vendor_id_2, errata_id_2, \ - IS_ENABLED(CONFIG_k_2) 
+#define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__ +#define __ALTERNATIVE_CFG_2(...) ALTERNATIVE_CFG_2 __VA_ARGS__ #else /* !__ASSEMBLY__ */ #include <asm/asm.h> #include <linux/stringify.h> -#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \ - RISCV_PTR " " oldptr "\n" \ - RISCV_PTR " " newptr "\n" \ - REG_ASM " " vendor_id "\n" \ - REG_ASM " " newlen "\n" \ - ".word " errata_id "\n" +#define ALT_ENTRY(oldptr, newptr, vendor_id, patch_id, newlen) \ + ".4byte ((" oldptr ") - .) \n" \ + ".4byte ((" newptr ") - .) \n" \ + ".2byte " vendor_id "\n" \ + ".2byte " newlen "\n" \ + ".4byte " patch_id "\n" -#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \ +#define ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c) \ ".if " __stringify(enable) " == 1\n" \ ".pushsection .alternative, \"a\"\n" \ - ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \ + ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(patch_id), "889f - 888f") \ ".popsection\n" \ ".subsection 1\n" \ "888 :\n" \ @@ -99,7 +83,7 @@ ".previous\n" \ ".endif\n" -#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \ +#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, enable) \ "886 :\n" \ ".option push\n" \ ".option norvc\n" \ @@ -107,84 +91,60 @@ old_c "\n" \ ".option pop\n" \ "887 :\n" \ - ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) + ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c) -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) - -#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - enable_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - enable_2) \ - "886 :\n" \ - ".option push\n" \ - ".option norvc\n" \ - ".option norelax\n" \ - old_c "\n" \ - ".option pop\n" \ - "887 :\n" \ - ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \ - ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2) - -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - IS_ENABLED(CONFIG_k_1), \ - new_c_2, vendor_id_2, errata_id_2, \ - IS_ENABLED(CONFIG_k_2)) +#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \ + new_c_2, vendor_id_2, patch_id_2, enable_2) \ + __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1) \ + ALT_NEW_CONTENT(vendor_id_2, patch_id_2, enable_2, new_c_2) #endif /* __ASSEMBLY__ */ +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, IS_ENABLED(CONFIG_k)) + +#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, CONFIG_k_1, \ + new_c_2, vendor_id_2, patch_id_2, CONFIG_k_2) \ + __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, IS_ENABLED(CONFIG_k_1), \ + new_c_2, vendor_id_2, patch_id_2, IS_ENABLED(CONFIG_k_2)) + #else /* CONFIG_RISCV_ALTERNATIVE */ #ifdef __ASSEMBLY__ -.macro __ALTERNATIVE_CFG old_c +.macro ALTERNATIVE_CFG old_c \old_c .endm -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG old_c - -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG old_c +#define __ALTERNATIVE_CFG(old_c, ...) 
ALTERNATIVE_CFG old_c +#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c #else /* !__ASSEMBLY__ */ -#define __ALTERNATIVE_CFG(old_c) \ - old_c "\n" +#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n" +#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n" -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG(old_c) +#endif /* __ASSEMBLY__ */ -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG(old_c) +#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c) +#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c) -#endif /* __ASSEMBLY__ */ #endif /* CONFIG_RISCV_ALTERNATIVE */ /* * Usage: - * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) + * ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) * in the assembly code. Otherwise, - * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)); + * asm(ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k)); * * old_content: The old content which is probably replaced with new content. * new_content: The new content. * vendor_id: The CPU vendor ID. - * errata_id: The errata ID. - * CONFIG_k: The Kconfig of this errata. When Kconfig is disabled, the old - * content will alwyas be executed. + * patch_id: The patch ID (erratum ID or cpufeature ID). + * CONFIG_k: The Kconfig of this patch ID. When Kconfig is disabled, the old + * content will always be executed. */ -#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \ - _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k) +#define ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) \ + _ALTERNATIVE_CFG(old_content, new_content, vendor_id, patch_id, CONFIG_k) /* * A vendor wants to replace an old_content, but another vendor has used @@ -193,13 +153,9 @@ * on the following sample code and then replace ALTERNATIVE() with * ALTERNATIVE_2() to append its customized content. 
*/ -#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, \ - errata_id_1, CONFIG_k_1, \ - new_content_2, vendor_id_2, \ - errata_id_2, CONFIG_k_2) \ - _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, \ - errata_id_1, CONFIG_k_1, \ - new_content_2, vendor_id_2, \ - errata_id_2, CONFIG_k_2) +#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2) \ + _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2) #endif diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h index 6511dd73e812..3c2b59b25017 100644 --- a/arch/riscv/include/asm/alternative.h +++ b/arch/riscv/include/asm/alternative.h @@ -6,8 +6,6 @@ #ifndef __ASM_ALTERNATIVE_H #define __ASM_ALTERNATIVE_H -#define ERRATA_STRING_LENGTH_MAX 32 - #include <asm/alternative-macros.h> #ifndef __ASSEMBLY__ @@ -15,31 +13,41 @@ #ifdef CONFIG_RISCV_ALTERNATIVE #include <linux/init.h> +#include <linux/kernel.h> #include <linux/types.h> #include <linux/stddef.h> #include <asm/hwcap.h> +#define PATCH_ID_CPUFEATURE_ID(p) lower_16_bits(p) +#define PATCH_ID_CPUFEATURE_VALUE(p) upper_16_bits(p) + #define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */ #define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */ #define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */ +/* add the relative offset to the address of the offset to get the absolute address */ +#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f) +#define ALT_OLD_PTR(a) __ALT_PTR(a, old_offset) +#define ALT_ALT_PTR(a) __ALT_PTR(a, alt_offset) + void __init apply_boot_alternatives(void); void __init apply_early_boot_alternatives(void); void apply_module_alternatives(void *start, size_t length); +void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len, + int patch_offset); + struct alt_entry { - void *old_ptr; /* address of original instruciton or data */ - void *alt_ptr; /* address of replacement instruction or data */ - unsigned long vendor_id; /* cpu vendor id */ - unsigned long alt_len; /* The replacement size */ - unsigned int errata_id; /* The errata id */ -} __packed; - -struct errata_checkfunc_id { - unsigned long vendor_id; - bool (*func)(struct alt_entry *alt); + s32 old_offset; /* offset relative to original instruction or data */ + s32 alt_offset; /* offset relative to replacement instruction or data */ + u16 vendor_id; /* CPU vendor ID */ + u16 alt_len; /* The replacement size */ + u32 patch_id; /* The patch ID (erratum ID or cpufeature ID) */ }; +void andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid, + unsigned int stage); void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, unsigned long archid, unsigned long impid, unsigned int stage); diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h new file mode 100644 index 000000000000..0e7cdbbec8ef --- /dev/null +++ b/arch/riscv/include/asm/arch_hweight.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Based on arch/x86/include/asm/arch_hweight.h + */ + +#ifndef _ASM_RISCV_HWEIGHT_H +#define _ASM_RISCV_HWEIGHT_H + +#include <asm/alternative-macros.h> +#include <asm/hwcap.h> + +#if (BITS_PER_LONG == 64) +#define CPOPW "cpopw " +#elif (BITS_PER_LONG == 32) +#define CPOPW "cpop " +#else +#error 
"Unexpected BITS_PER_LONG" +#endif + +static __always_inline unsigned int __arch_hweight32(unsigned int w) +{ +#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB) + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm (".option push\n" + ".option arch,+zbb\n" + CPOPW "%0, %1\n" + ".option pop\n" + : "=r" (w) : "r" (w) :); + + return w; + +legacy: +#endif + return __sw_hweight32(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight32(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight32(w & 0xff); +} + +#if BITS_PER_LONG == 64 +static __always_inline unsigned long __arch_hweight64(__u64 w) +{ +#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB) + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm (".option push\n" + ".option arch,+zbb\n" + "cpop %0, %1\n" + ".option pop\n" + : "=r" (w) : "r" (w) :); + + return w; + +legacy: +#endif + return __sw_hweight64(w); +} +#else /* BITS_PER_LONG == 64 */ +static inline unsigned long __arch_hweight64(__u64 w) +{ + return __arch_hweight32((u32)w) + + __arch_hweight32((u32)(w >> 32)); +} +#endif /* !(BITS_PER_LONG == 64) */ + +#endif /* _ASM_RISCV_HWEIGHT_H */ diff --git a/arch/riscv/include/asm/archrandom.h b/arch/riscv/include/asm/archrandom.h new file mode 100644 index 000000000000..5345360adfb9 --- /dev/null +++ b/arch/riscv/include/asm/archrandom.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel interface for the RISCV arch_random_* functions + * + * Copyright (c) 2023 Rivos Inc. + * + */ + +#ifndef ASM_RISCV_ARCHRANDOM_H +#define ASM_RISCV_ARCHRANDOM_H + +#include <asm/csr.h> +#include <asm/processor.h> + +#define SEED_RETRY_LOOPS 100 + +static inline bool __must_check csr_seed_long(unsigned long *v) +{ + unsigned int retry = SEED_RETRY_LOOPS, valid_seeds = 0; + const int needed_seeds = sizeof(long) / sizeof(u16); + u16 *entropy = (u16 *)v; + + do { + /* + * The SEED CSR must be accessed with a read-write instruction. + */ + unsigned long csr_seed = csr_swap(CSR_SEED, 0); + unsigned long opst = csr_seed & SEED_OPST_MASK; + + switch (opst) { + case SEED_OPST_ES16: + entropy[valid_seeds++] = csr_seed & SEED_ENTROPY_MASK; + if (valid_seeds == needed_seeds) + return true; + break; + + case SEED_OPST_DEAD: + pr_err_once("archrandom: Unrecoverable error\n"); + return false; + + case SEED_OPST_BIST: + case SEED_OPST_WAIT: + default: + cpu_relax(); + continue; + } + } while (--retry); + + return false; +} + +static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) +{ + return 0; +} + +static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) +{ + if (!max_longs) + return 0; + + /* + * If Zkr is supported and csr_seed_long succeeds, we return one long + * worth of entropy. 
+ */ + if (riscv_has_extension_likely(RISCV_ISA_EXT_ZKR) && csr_seed_long(v)) + return 1; + + return 0; +} + +#endif /* ASM_RISCV_ARCHRANDOM_H */ diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h index 14be0673f5b5..0c8bfd54fc4e 100644 --- a/arch/riscv/include/asm/asm-extable.h +++ b/arch/riscv/include/asm/asm-extable.h @@ -6,6 +6,9 @@ #define EX_TYPE_FIXUP 1 #define EX_TYPE_BPF 2 #define EX_TYPE_UACCESS_ERR_ZERO 3 +#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4 + +#ifdef CONFIG_MMU #ifdef __ASSEMBLY__ @@ -45,6 +48,11 @@ #define EX_DATA_REG_ZERO_SHIFT 5 #define EX_DATA_REG_ZERO GENMASK(9, 5) +#define EX_DATA_REG_DATA_SHIFT 0 +#define EX_DATA_REG_DATA GENMASK(4, 0) +#define EX_DATA_REG_ADDR_SHIFT 5 +#define EX_DATA_REG_ADDR GENMASK(9, 5) + #define EX_DATA_REG(reg, gpr) \ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" @@ -60,6 +68,19 @@ #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) +#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \ + __DEFINE_ASM_GPR_NUMS \ + __ASM_EXTABLE_RAW(#insn, #fixup, \ + __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD), \ + "(" \ + EX_DATA_REG(DATA, data) " | " \ + EX_DATA_REG(ADDR, addr) \ + ")") + #endif /* __ASSEMBLY__ */ +#else /* CONFIG_MMU */ + #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) +#endif /* CONFIG_MMU */ + #endif /* __ASM_ASM_EXTABLE_H */ diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index ef386fcf3939..a9988bf21ec8 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -9,6 +9,33 @@ long long __lshrti3(long long a, int b); long long __ashrti3(long long a, int b); long long __ashlti3(long long a, int b); +#ifdef CONFIG_RISCV_ISA_V + +#ifdef CONFIG_MMU +asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n, bool enable_sum); +#endif /* CONFIG_MMU */ + +void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +void xor_regs_4_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +void xor_regs_5_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs); +asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs); +#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */ + +#endif /* CONFIG_RISCV_ISA_V */ #define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs) @@ -25,7 +52,10 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s); DECLARE_DO_ERROR_INFO(do_trap_ecall_m); DECLARE_DO_ERROR_INFO(do_trap_break); -asmlinkage unsigned long get_overflow_stack(void); +asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs); +asmlinkage void ret_from_fork_user(struct pt_regs *regs); asmlinkage void handle_bad_stack(struct pt_regs *regs); +asmlinkage void do_page_fault(struct pt_regs *regs); +asmlinkage void do_irq(struct pt_regs *regs); #endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/riscv/include/asm/asm.h 
b/arch/riscv/include/asm/asm.h index 618d7c5af1a2..a8a2af6dfe9d 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -23,9 +23,11 @@ #define REG_L __REG_SEL(ld, lw) #define REG_S __REG_SEL(sd, sw) #define REG_SC __REG_SEL(sc.d, sc.w) +#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq) #define REG_ASM __REG_SEL(.dword, .word) #define SZREG __REG_SEL(8, 4) #define LGREG __REG_SEL(3, 2) +#define SRLI __REG_SEL(srliw, srli) #if __SIZEOF_POINTER__ == 8 #ifdef __ASSEMBLY__ @@ -67,4 +69,131 @@ #error "Unexpected __SIZEOF_SHORT__" #endif +#ifdef __ASSEMBLY__ +#include <asm/asm-offsets.h> + +/* Common assembly source macros */ + +/* + * NOP sequence + */ +.macro nops, num + .rept \num + nop + .endr +.endm + +#ifdef CONFIG_SMP +#ifdef CONFIG_32BIT +#define PER_CPU_OFFSET_SHIFT 2 +#else +#define PER_CPU_OFFSET_SHIFT 3 +#endif + +.macro asm_per_cpu dst sym tmp + REG_L \tmp, TASK_TI_CPU_NUM(tp) + slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT + la \dst, __per_cpu_offset + add \dst, \dst, \tmp + REG_L \tmp, 0(\dst) + la \dst, \sym + add \dst, \dst, \tmp +.endm +#else /* CONFIG_SMP */ +.macro asm_per_cpu dst sym tmp + la \dst, \sym +.endm +#endif /* CONFIG_SMP */ + +.macro load_per_cpu dst ptr tmp + asm_per_cpu \dst \ptr \tmp + REG_L \dst, 0(\dst) +.endm + +#ifdef CONFIG_SHADOW_CALL_STACK +/* gp is used as the shadow call stack pointer instead */ +.macro load_global_pointer +.endm +#else +/* load __global_pointer to gp */ +.macro load_global_pointer +.option push +.option norelax + la gp, __global_pointer$ +.option pop +.endm +#endif /* CONFIG_SHADOW_CALL_STACK */ + + /* save all GPs except x1 ~ x5 */ + .macro save_from_x6_to_x31 + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + .endm + + /* restore all GPs except x1 ~ x5 */ + .macro restore_from_x6_to_x31 + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x8, PT_S0(sp) + REG_L x9, PT_S1(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + REG_L x18, PT_S2(sp) + REG_L x19, PT_S3(sp) + REG_L x20, PT_S4(sp) + REG_L x21, PT_S5(sp) + REG_L x22, PT_S6(sp) + REG_L x23, PT_S7(sp) + REG_L x24, PT_S8(sp) + REG_L x25, PT_S9(sp) + REG_L x26, PT_S10(sp) + REG_L x27, PT_S11(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) + .endm + +/* Annotate a function as being unsuitable for kprobes. 
*/ +#ifdef CONFIG_KPROBES +#define ASM_NOKPROBE(name) \ + .pushsection "_kprobe_blacklist", "aw"; \ + RISCV_PTR name; \ + .popsection +#else +#define ASM_NOKPROBE(name) +#endif + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_RISCV_ASM_H */ diff --git a/arch/riscv/include/asm/assembler.h b/arch/riscv/include/asm/assembler.h new file mode 100644 index 000000000000..44b1457d3e95 --- /dev/null +++ b/arch/riscv/include/asm/assembler.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 StarFive Technology Co., Ltd. + * + * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com> + */ + +#ifndef __ASSEMBLY__ +#error "Only include this from assembly code" +#endif + +#ifndef __ASM_ASSEMBLER_H +#define __ASM_ASSEMBLER_H + +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/csr.h> + +/* + * suspend_restore_csrs - restore CSRs + */ + .macro suspend_restore_csrs + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0) + csrw CSR_EPC, t0 + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0) + csrw CSR_STATUS, t0 + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0) + csrw CSR_TVAL, t0 + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0) + csrw CSR_CAUSE, t0 + .endm + +/* + * suspend_restore_regs - Restore registers (except A0 and T0-T6) + */ + .macro suspend_restore_regs + REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0) + REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0) + REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0) + REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0) + REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0) + REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0) + REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0) + REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0) + REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0) + REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0) + REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0) + REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0) + REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0) + REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0) + REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0) + REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0) + REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0) + REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0) + REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0) + REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0) + REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0) + REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0) + REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0) + .endm + +/* + * copy_page - copy 1 page (4KB) of data from source to destination + * @a0 - destination + * @a1 - source + */ + .macro copy_page a0, a1 + lui a2, 0x1 + add a2, a2, a0 +1 : + REG_L t0, 0(a1) + REG_L t1, SZREG(a1) + + REG_S t0, 0(a0) + REG_S t1, SZREG(a0) + + addi a0, a0, 2 * SZREG + addi a1, a1, 2 * SZREG + bne a2, a0, 1b + .endm + +#endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index 0dfe9d857a76..5b96c2f61adb 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -17,7 +17,6 @@ #endif #include <asm/cmpxchg.h> -#include <asm/barrier.h> #define __atomic_acquire_fence() \ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") @@ -196,22 +195,28 @@ ATOMIC_OPS(xor, xor, i) #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN +#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " beq %[p], %[u], 1f\n" \ + " add %[rc], %[p], %[a]\n" \ + " sc." 
sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : [a]"r" (_a), [u]"r" (_u) \ + : "memory"); \ +}) + /* This is required to provide a full barrier on success. */ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " beq %[p], %[u], 1f\n" - " add %[rc], %[p], %[a]\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : [a]"r" (a), [u]"r" (u) - : "memory"); + _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w"); + return prev; } #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless @@ -222,149 +227,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " beq %[p], %[u], 1f\n" - " add %[rc], %[p], %[a]\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : [a]"r" (a), [u]"r" (u) - : "memory"); + _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d"); + return prev; } #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless #endif -/* - * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as - * {cmp,}xchg and the operations that return, so they need a full barrier. - */ -#define ATOMIC_OP(c_t, prefix, size) \ -static __always_inline \ -c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \ -{ \ - return __xchg_relaxed(&(v->counter), n, size); \ -} \ -static __always_inline \ -c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \ -{ \ - return __xchg_acquire(&(v->counter), n, size); \ -} \ -static __always_inline \ -c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \ -{ \ - return __xchg_release(&(v->counter), n, size); \ -} \ -static __always_inline \ -c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \ -{ \ - return __xchg(&(v->counter), n, size); \ -} \ -static __always_inline \ -c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \ - c_t o, c_t n) \ -{ \ - return __cmpxchg_relaxed(&(v->counter), o, n, size); \ -} \ -static __always_inline \ -c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \ - c_t o, c_t n) \ -{ \ - return __cmpxchg_acquire(&(v->counter), o, n, size); \ -} \ -static __always_inline \ -c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \ - c_t o, c_t n) \ -{ \ - return __cmpxchg_release(&(v->counter), o, n, size); \ -} \ -static __always_inline \ -c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \ -{ \ - return __cmpxchg(&(v->counter), o, n, size); \ -} - -#ifdef CONFIG_GENERIC_ATOMIC64 -#define ATOMIC_OPS() \ - ATOMIC_OP(int, , 4) -#else -#define ATOMIC_OPS() \ - ATOMIC_OP(int, , 4) \ - ATOMIC_OP(s64, 64, 8) -#endif - -ATOMIC_OPS() - -#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed -#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire -#define arch_atomic_xchg_release arch_atomic_xchg_release -#define arch_atomic_xchg arch_atomic_xchg -#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed -#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire -#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release -#define arch_atomic_cmpxchg arch_atomic_cmpxchg - -#undef 
ATOMIC_OPS -#undef ATOMIC_OP +#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " bltz %[p], 1f\n" \ + " addi %[rc], %[p], 1\n" \ + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : \ + : "memory"); \ +}) static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " bltz %[p], 1f\n" - " addi %[rc], %[p], 1\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w"); + return !(prev < 0); } #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative +#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " bgtz %[p], 1f\n" \ + " addi %[rc], %[p], -1\n" \ + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : \ + : "memory"); \ +}) + static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " bgtz %[p], 1f\n" - " addi %[rc], %[p], -1\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w"); + return !(prev > 0); } #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive +#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \ +({ \ + __asm__ __volatile__ ( \ + "0: lr." sfx " %[p], %[c]\n" \ + " addi %[rc], %[p], -1\n" \ + " bltz %[rc], 1f\n" \ + " sc." 
sfx ".rl %[rc], %[rc], %[c]\n" \ + " bnez %[rc], 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \ + : \ + : "memory"); \ +}) + static __always_inline int arch_atomic_dec_if_positive(atomic_t *v) { int prev, rc; - __asm__ __volatile__ ( - "0: lr.w %[p], %[c]\n" - " addi %[rc], %[p], -1\n" - " bltz %[rc], 1f\n" - " sc.w.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_if_positive(prev, rc, v->counter, "w"); + return prev - 1; } @@ -376,17 +318,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v) s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " bltz %[p], 1f\n" - " addi %[rc], %[p], 1\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d"); + return !(prev < 0); } @@ -397,17 +330,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v) s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " bgtz %[p], 1f\n" - " addi %[rc], %[p], -1\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d"); + return !(prev > 0); } @@ -418,17 +342,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) s64 prev; long rc; - __asm__ __volatile__ ( - "0: lr.d %[p], %[c]\n" - " addi %[rc], %[p], -1\n" - " bltz %[rc], 1f\n" - " sc.d.rl %[rc], %[rc], %[c]\n" - " bnez %[rc], 0b\n" - " fence rw, rw\n" - "1:\n" - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) - : - : "memory"); + _arch_atomic_dec_if_positive(prev, rc, v->counter, "d"); + return prev - 1; } diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index d0e24aaa2aa0..b8c5726d86ac 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -11,36 +11,18 @@ #define _ASM_RISCV_BARRIER_H #ifndef __ASSEMBLY__ - -#define nop() __asm__ __volatile__ ("nop") - -#define RISCV_FENCE(p, s) \ - __asm__ __volatile__ ("fence " #p "," #s : : : "memory") +#include <asm/cmpxchg.h> +#include <asm/fence.h> /* These barriers need to enforce ordering on both devices or memory. */ -#define mb() RISCV_FENCE(iorw,iorw) -#define rmb() RISCV_FENCE(ir,ir) -#define wmb() RISCV_FENCE(ow,ow) +#define __mb() RISCV_FENCE(iorw, iorw) +#define __rmb() RISCV_FENCE(ir, ir) +#define __wmb() RISCV_FENCE(ow, ow) /* These barriers do not need to enforce ordering on devices, just memory. */ -#define __smp_mb() RISCV_FENCE(rw,rw) -#define __smp_rmb() RISCV_FENCE(r,r) -#define __smp_wmb() RISCV_FENCE(w,w) - -#define __smp_store_release(p, v) \ -do { \ - compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(rw,w); \ - WRITE_ONCE(*p, v); \ -} while (0) - -#define __smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ - compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(r,rw); \ - ___p1; \ -}) +#define __smp_mb() RISCV_FENCE(rw, rw) +#define __smp_rmb() RISCV_FENCE(r, r) +#define __smp_wmb() RISCV_FENCE(w, w) /* * This is a very specific barrier: it's currently only used in two places in @@ -67,7 +49,36 @@ do { \ * instances the scheduler pairs this with an mb(), so nothing is necessary on * the new hart. 
*/ -#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) +#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw) + +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + RISCV_FENCE(rw, w); \ + WRITE_ONCE(*p, v); \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + RISCV_FENCE(r, rw); \ + ___p1; \ +}) + +#ifdef CONFIG_RISCV_ISA_ZAWRS +#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + __unqual_scalar_typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + __cmpwait_relaxed(ptr, VAL); \ + } \ + (typeof(*ptr))VAL; \ +}) +#endif #include <asm-generic/barrier.h> diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index 3540b690944b..d59310f74c2b 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -15,15 +15,175 @@ #include <asm/barrier.h> #include <asm/bitsperlong.h> +#if !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE) #include <asm-generic/bitops/__ffs.h> -#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/ffs.h> #include <asm-generic/bitops/fls.h> + +#else +#define __HAVE_ARCH___FFS +#define __HAVE_ARCH___FLS +#define __HAVE_ARCH_FFS +#define __HAVE_ARCH_FLS + +#include <asm-generic/bitops/__ffs.h> #include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/fls.h> + +#include <asm/alternative-macros.h> +#include <asm/hwcap.h> + +#if (BITS_PER_LONG == 64) +#define CTZW "ctzw " +#define CLZW "clzw " +#elif (BITS_PER_LONG == 32) +#define CTZW "ctz " +#define CLZW "clz " +#else +#error "Unexpected BITS_PER_LONG" +#endif + +static __always_inline unsigned long variable__ffs(unsigned long word) +{ + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm volatile (".option push\n" + ".option arch,+zbb\n" + "ctz %0, %1\n" + ".option pop\n" + : "=r" (word) : "r" (word) :); + + return word; + +legacy: + return generic___ffs(word); +} + +/** + * __ffs - find first set bit in a long word + * @word: The word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +#define __ffs(word) \ + (__builtin_constant_p(word) ? \ + (unsigned long)__builtin_ctzl(word) : \ + variable__ffs(word)) + +static __always_inline unsigned long variable__fls(unsigned long word) +{ + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm volatile (".option push\n" + ".option arch,+zbb\n" + "clz %0, %1\n" + ".option pop\n" + : "=r" (word) : "r" (word) :); + + return BITS_PER_LONG - 1 - word; + +legacy: + return generic___fls(word); +} + +/** + * __fls - find last set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +#define __fls(word) \ + (__builtin_constant_p(word) ? 
\ + (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \ + variable__fls(word)) + +static __always_inline int variable_ffs(int x) +{ + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + if (!x) + return 0; + + asm volatile (".option push\n" + ".option arch,+zbb\n" + CTZW "%0, %1\n" + ".option pop\n" + : "=r" (x) : "r" (x) :); + + return x + 1; + +legacy: + return generic_ffs(x); +} + +/** + * ffs - find first set bit in a word + * @x: the word to search + * + * This is defined the same way as the libc and compiler builtin ffs routines. + * + * ffs(value) returns 0 if value is 0 or the position of the first set bit if + * value is nonzero. The first (least significant) bit is at position 1. + */ +#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x)) + +static __always_inline int variable_fls(unsigned int x) +{ + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + if (!x) + return 0; + + asm volatile (".option push\n" + ".option arch,+zbb\n" + CLZW "%0, %1\n" + ".option pop\n" + : "=r" (x) : "r" (x) :); + + return 32 - x; + +legacy: + return generic_fls(x); +} + +/** + * fls - find last set bit in a word + * @x: the word to search + * + * This is defined in a similar way as ffs, but returns the position of the most + * significant set bit. + * + * fls(value) returns 0 if value is 0 or the position of the last set bit if + * value is nonzero. The last (most significant) bit is at position 32. + */ +#define fls(x) \ +({ \ + typeof(x) x_ = (x); \ + __builtin_constant_p(x_) ? \ + ((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \ + : \ + variable_fls(x_); \ +}) + +#endif /* !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE) */ + +#include <asm-generic/bitops/ffz.h> #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/ffs.h> -#include <asm-generic/bitops/hweight.h> +#include <asm/arch_hweight.h> + +#include <asm-generic/bitops/const_hweight.h> #if (BITS_PER_LONG == 64) #define __AMO(op) "amo" #op ".d" @@ -62,44 +222,44 @@ #define __NOT(x) (~(x)) /** - * test_and_set_bit - Set a bit and return its old value + * arch_test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * - * This operation may be reordered on other architectures than x86. + * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(or, __NOP, nr, addr); } /** - * test_and_clear_bit - Clear a bit and return its old value + * arch_test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * - * This operation can be reordered on other architectures other than x86. + * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(and, __NOT, nr, addr); } /** - * test_and_change_bit - Change a bit and return its old value + * arch_test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is atomic and cannot be reordered. 
* It also implies a memory barrier. */ -static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(xor, __NOP, nr, addr); } /** - * set_bit - Atomically set a bit in memory + * arch_set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -110,13 +270,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void set_bit(int nr, volatile unsigned long *addr) +static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr) { __op_bit(or, __NOP, nr, addr); } /** - * clear_bit - Clears a bit in memory + * arch_clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * @@ -124,13 +284,13 @@ static inline void set_bit(int nr, volatile unsigned long *addr) * on non x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees. */ -static inline void clear_bit(int nr, volatile unsigned long *addr) +static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr) { __op_bit(and, __NOT, nr, addr); } /** - * change_bit - Toggle a bit in memory + * arch_change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * @@ -138,40 +298,40 @@ static inline void clear_bit(int nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void change_bit(int nr, volatile unsigned long *addr) +static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr) { __op_bit(xor, __NOP, nr, addr); } /** - * test_and_set_bit_lock - Set a bit and return its old value, for lock + * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics. * It can be used to implement bit locks. */ -static inline int test_and_set_bit_lock( +static __always_inline int arch_test_and_set_bit_lock( unsigned long nr, volatile unsigned long *addr) { return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq); } /** - * clear_bit_unlock - Clear a bit in memory, for unlock + * arch_clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ -static inline void clear_bit_unlock( +static __always_inline void arch_clear_bit_unlock( unsigned long nr, volatile unsigned long *addr) { __op_bit_ord(and, __NOT, nr, addr, .rl); } /** - * __clear_bit_unlock - Clear a bit in memory, for unlock + * arch___clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * @@ -185,10 +345,22 @@ static inline void clear_bit_unlock( * non-atomic property here: it's a lot more instructions and we still have to * provide release semantics anyway. 
*/ -static inline void __clear_bit_unlock( +static __always_inline void arch___clear_bit_unlock( unsigned long nr, volatile unsigned long *addr) { - clear_bit_unlock(nr, addr); + arch_clear_bit_unlock(nr, addr); +} + +static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *addr) +{ + unsigned long res; + __asm__ __volatile__ ( + __AMO(xor) ".rl %0, %2, %1" + : "=r" (res), "+A" (*addr) + : "r" (__NOP(mask)) + : "memory"); + return (res & BIT(7)) != 0; } #undef __test_and_op_bit @@ -197,6 +369,9 @@ static inline void __clear_bit_unlock( #undef __NOT #undef __AMO +#include <asm-generic/bitops/instrumented-atomic.h> +#include <asm-generic/bitops/instrumented-lock.h> + #include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic.h> diff --git a/arch/riscv/include/asm/bugs.h b/arch/riscv/include/asm/bugs.h new file mode 100644 index 000000000000..17ca0a947730 --- /dev/null +++ b/arch/riscv/include/asm/bugs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Interface for managing mitigations for riscv vulnerabilities. + * + * Copyright (C) 2024 Rivos Inc. + */ + +#ifndef __ASM_BUGS_H +#define __ASM_BUGS_H + +/* Watch out, ordering is important here. */ +enum mitigation_state { + UNAFFECTED, + MITIGATED, + VULNERABLE, +}; + +void ghostwrite_set_vulnerable(void); +bool ghostwrite_enable_mitigation(void); +enum mitigation_state ghostwrite_get_state(void); + +#endif /* __ASM_BUGS_H */ diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h index 9b58b104559e..570e9d8acad1 100644 --- a/arch/riscv/include/asm/cache.h +++ b/arch/riscv/include/asm/cache.h @@ -11,6 +11,11 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#ifdef CONFIG_RISCV_DMA_NONCOHERENT +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#define ARCH_KMALLOC_MINALIGN (8) +#endif + /* * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that * the flat loader aligns it accordingly. @@ -19,4 +24,17 @@ #define ARCH_SLAB_MINALIGN 16 #endif +#ifndef __ASSEMBLY__ + +extern int dma_cache_alignment; +#ifdef CONFIG_RISCV_DMA_NONCOHERENT +#define dma_get_cache_alignment dma_get_cache_alignment +static inline int dma_get_cache_alignment(void) +{ + return dma_cache_alignment; +} +#endif + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_RISCV_CACHE_H */ diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 23ff70350992..6086b38d5427 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -13,22 +13,53 @@ static inline void local_flush_icache_all(void) asm volatile ("fence.i" ::: "memory"); } +static inline void local_flush_icache_range(unsigned long start, + unsigned long end) +{ + local_flush_icache_all(); +} + #define PG_dcache_clean PG_arch_1 -static inline void flush_dcache_page(struct page *page) +static inline void flush_dcache_folio(struct folio *folio) { - if (test_bit(PG_dcache_clean, &page->flags)) - clear_bit(PG_dcache_clean, &page->flags); + if (test_bit(PG_dcache_clean, &folio->flags)) + clear_bit(PG_dcache_clean, &folio->flags); } +#define flush_dcache_folio flush_dcache_folio #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 -/* - * RISC-V doesn't have an instruction to flush parts of the instruction cache, - * so instead we just flush the whole thing. 
- */ -#define flush_icache_range(start, end) flush_icache_all() -#define flush_icache_user_page(vma, pg, addr, len) \ - flush_icache_mm(vma->vm_mm, 0) +static inline void flush_dcache_page(struct page *page) +{ + flush_dcache_folio(page_folio(page)); +} + +#define flush_icache_user_page(vma, pg, addr, len) \ +do { \ + if (vma->vm_flags & VM_EXEC) \ + flush_icache_mm(vma->vm_mm, 0); \ +} while (0) + +#ifdef CONFIG_64BIT +extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; +extern char _end[]; +#define flush_cache_vmap flush_cache_vmap +static inline void flush_cache_vmap(unsigned long start, unsigned long end) +{ + if (is_vmalloc_or_module_addr((void *)start)) { + int i; + + /* + * We don't care if concurrently a cpu resets this value since + * the only place this can happen is in handle_exception() where + * an sfence.vma is emitted. + */ + for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i) + new_vmalloc[i] = -1ULL; + } +} +#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end) +#endif #ifndef CONFIG_SMP @@ -43,6 +74,29 @@ void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ /* + * RISC-V doesn't have an instruction to flush parts of the instruction cache, + * so instead we just flush the whole thing. + */ +#define flush_icache_range flush_icache_range +static inline void flush_icache_range(unsigned long start, unsigned long end) +{ + flush_icache_all(); +} + +extern unsigned int riscv_cbom_block_size; +extern unsigned int riscv_cboz_block_size; +extern unsigned int riscv_cbop_block_size; +void riscv_init_cbo_blocksizes(void); + +#ifdef CONFIG_RISCV_DMA_NONCOHERENT +void riscv_noncoherent_supported(void); +void __init riscv_set_dma_cache_alignment(void); +#else +static inline void riscv_noncoherent_supported(void) {} +static inline void riscv_set_dma_cache_alignment(void) {} +#endif + +/* * Bits in sys_riscv_flush_icache()'s flags argument. */ #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h new file mode 100644 index 000000000000..fb9696d7a3f2 --- /dev/null +++ b/arch/riscv/include/asm/cfi.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_CFI_H +#define _ASM_RISCV_CFI_H + +/* + * Clang Control Flow Integrity (CFI) support. + * + * Copyright (C) 2023 Google LLC + */ +#include <linux/bug.h> + +struct pt_regs; + +#ifdef CONFIG_CFI_CLANG +enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); +#define __bpfcall +static inline int cfi_get_offset(void) +{ + return 4; +} + +#define cfi_get_offset cfi_get_offset +extern u32 cfi_bpf_hash; +extern u32 cfi_bpf_subprog_hash; +extern u32 cfi_get_func_hash(void *func); +#else +static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) +{ + return BUG_TRAP_TYPE_NONE; +} + +#define cfi_bpf_hash 0U +#define cfi_bpf_subprog_hash 0U +static inline u32 cfi_get_func_hash(void *func) +{ + return 0; +} +#endif /* CONFIG_CFI_CLANG */ + +#endif /* _ASM_RISCV_CFI_H */ diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h new file mode 100644 index 000000000000..da378856f1d5 --- /dev/null +++ b/arch/riscv/include/asm/checksum.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Checksum routines + * + * Copyright (C) 2023 Rivos Inc. 
+ */ +#ifndef __ASM_RISCV_CHECKSUM_H +#define __ASM_RISCV_CHECKSUM_H + +#include <linux/in6.h> +#include <linux/uaccess.h> + +#define ip_fast_csum ip_fast_csum + +extern unsigned int do_csum(const unsigned char *buff, int len); +#define do_csum do_csum + +/* Default version is sufficient for 32 bit */ +#ifndef CONFIG_32BIT +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum); +#endif + +/* Define riscv versions of functions before importing asm-generic/checksum.h */ +#include <asm-generic/checksum.h> + +/** + * Quickly compute an IP checksum with the assumption that IPv4 headers will + * always be in multiples of 32-bits, and have an ihl of at least 5. + * + * @ihl: the number of 32 bit segments and must be greater than or equal to 5. + * @iph: assumed to be word aligned given that NET_IP_ALIGN is set to 2 on + * riscv, defining IP headers to be aligned. + */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned long csum = 0; + int pos = 0; + + do { + csum += ((const unsigned int *)iph)[pos]; + if (IS_ENABLED(CONFIG_32BIT)) + csum += csum < ((const unsigned int *)iph)[pos]; + } while (++pos < ihl); + + /* + * ZBB only saves three instructions on 32-bit and five on 64-bit so not + * worth checking if supported without Alternatives. + */ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) { + unsigned long fold_temp; + + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : + : + : + : no_zbb); + + if (IS_ENABLED(CONFIG_32BIT)) { + asm(".option push \n\ + .option arch,+zbb \n\ + not %[fold_temp], %[csum] \n\ + rori %[csum], %[csum], 16 \n\ + sub %[csum], %[fold_temp], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)); + } else { + asm(".option push \n\ + .option arch,+zbb \n\ + rori %[fold_temp], %[csum], 32 \n\ + add %[csum], %[fold_temp], %[csum] \n\ + srli %[csum], %[csum], 32 \n\ + not %[fold_temp], %[csum] \n\ + roriw %[csum], %[csum], 16 \n\ + subw %[csum], %[fold_temp], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)); + } + return (__force __sum16)(csum >> 16); + } +no_zbb: +#ifndef CONFIG_32BIT + csum += ror64(csum, 32); + csum >>= 32; +#endif + return csum_fold((__force __wsum)csum); +} + +#endif /* __ASM_RISCV_CHECKSUM_H */ diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 12debce235e5..0b749e710216 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -8,143 +8,106 @@ #include <linux/bug.h> -#include <asm/barrier.h> +#include <asm/alternative-macros.h> #include <asm/fence.h> +#include <asm/hwcap.h> +#include <asm/insn-def.h> +#include <asm/cpufeature-macros.h> +#include <asm/processor.h> -#define __xchg_relaxed(ptr, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - " amoswap.w %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - " amoswap.d %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ +#define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \ + swap_append, r, p, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + 
riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA)) { \ + __asm__ __volatile__ ( \ + prepend \ + " amoswap" swap_sfx " %0, %z2, %1\n" \ + swap_append \ + : "=&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + PREFETCHW_ASM(%5) \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z4\n" \ + " or %1, %1, %z3\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + sc_append \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" (__newx), "rJ" (~__mask), "rJ" (__ptr32b) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) -#define arch_xchg_relaxed(ptr, x) \ +#define __arch_xchg(sfx, prepend, append, r, p, n) \ ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_relaxed((ptr), \ - _x_, sizeof(*(ptr))); \ + __asm__ __volatile__ ( \ + prepend \ + " amoswap" sfx " %0, %2, %1\n" \ + append \ + : "=r" (r), "+A" (*(p)) \ + : "r" (n) \ + : "memory"); \ }) -#define __xchg_acquire(ptr, new, size) \ +#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \ + sc_append, swap_append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - " amoswap.w %0, %2, %1\n" \ - RISCV_ACQUIRE_BARRIER \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ + __typeof__(*(__ptr)) __new = (new); \ + __typeof__(*(__ptr)) __ret; \ + \ + switch (sizeof(*__ptr)) { \ + case 1: \ + __arch_xchg_masked(sc_sfx, ".b" swap_sfx, \ + prepend, sc_append, swap_append, \ + __ret, __ptr, __new); \ break; \ - case 8: \ - __asm__ __volatile__ ( \ - " amoswap.d %0, %2, %1\n" \ - RISCV_ACQUIRE_BARRIER \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ + case 2: \ + __arch_xchg_masked(sc_sfx, ".h" swap_sfx, \ + prepend, sc_append, swap_append, \ + __ret, __ptr, __new); \ break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) - -#define arch_xchg_acquire(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_acquire((ptr), \ - _x_, sizeof(*(ptr))); \ -}) - -#define __xchg_release(ptr, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ case 4: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - " amoswap.w %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ + __arch_xchg(".w" swap_sfx, prepend, swap_append, \ + __ret, __ptr, __new); \ break; \ case 8: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - " amoswap.d %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ + __arch_xchg(".d" swap_sfx, prepend, swap_append, \ + __ret, __ptr, __new); \ break; \ default: \ BUILD_BUG(); \ } \ - __ret; \ + (__typeof__(*(__ptr)))__ret; \ }) -#define arch_xchg_release(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_release((ptr), \ - _x_, sizeof(*(ptr))); \ -}) +#define arch_xchg_relaxed(ptr, x) \ + _arch_xchg(ptr, x, "", "", "", "", "") -#define __xchg(ptr, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ - case 4: \ - __asm__ 
__volatile__ ( \ - " amoswap.w.aqrl %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - " amoswap.d.aqrl %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) +#define arch_xchg_acquire(ptr, x) \ + _arch_xchg(ptr, x, "", "", "", \ + RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER) + +#define arch_xchg_release(ptr, x) \ + _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "") #define arch_xchg(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \ -}) + _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "") #define xchg32(ptr, x) \ ({ \ @@ -163,190 +126,164 @@ * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ -#define __cmpxchg_relaxed(ptr, old, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - register unsigned int __rc; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__ ( \ - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) - -#define arch_cmpxchg_relaxed(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, o, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ + r = o; \ + \ + __asm__ __volatile__ ( \ + cas_prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + cas_append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __oldx = (ulong)(o) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + sc_prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z5\n" \ + " bne %1, %z3, 1f\n" \ + " and %1, %0, %z6\n" \ + " or %1, %1, %z4\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + sc_append \ + "1:\n" \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" ((long)__oldx), "rJ" (__newx), \ + "rJ" (__mask), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) -#define __cmpxchg_acquire(ptr, old, new, size) \ +#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, co, o, n) \ ({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - register 
unsigned int __rc; \ - switch (size) { \ - case 4: \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ + r = o; \ + \ __asm__ __volatile__ ( \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - RISCV_ACQUIRE_BARRIER \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ + cas_prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + cas_append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ : "memory"); \ - break; \ - case 8: \ + } else { \ + register unsigned int __rc; \ + \ __asm__ __volatile__ ( \ - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d %1, %z4, %2\n" \ + sc_prepend \ + "0: lr" lr_sfx " %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc" sc_sfx " %1, %z4, %2\n" \ " bnez %1, 0b\n" \ - RISCV_ACQUIRE_BARRIER \ + sc_append \ "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ + : "rJ" (co o), "rJ" (n) \ : "memory"); \ - break; \ - default: \ - BUILD_BUG(); \ } \ - __ret; \ }) -#define arch_cmpxchg_acquire(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ -}) - -#define __cmpxchg_release(ptr, old, new, size) \ +#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - register unsigned int __rc; \ - switch (size) { \ - case 4: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ + __typeof__(*(__ptr)) __old = (old); \ + __typeof__(*(__ptr)) __new = (new); \ + __typeof__(*(__ptr)) __ret; \ + \ + switch (sizeof(*__ptr)) { \ + case 1: \ + __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ break; \ - case 8: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ + case 2: \ + __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ break; \ - default: \ - BUILD_BUG(); \ - } \ - __ret; \ -}) - -#define arch_cmpxchg_release(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_release((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ -}) - -#define __cmpxchg(ptr, old, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - register unsigned int __rc; \ - switch (size) { \ case 4: \ - __asm__ __volatile__ ( \ - "0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w.rl %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - " fence rw, rw\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ + __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, 
cas_append, \ + __ret, __ptr, (long)(int)(long), __old, __new); \ break; \ case 8: \ - __asm__ __volatile__ ( \ - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d.rl %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - " fence rw, rw\n" \ - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ + __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, /**/, __old, __new); \ break; \ default: \ BUILD_BUG(); \ } \ - __ret; \ + (__typeof__(*(__ptr)))__ret; \ }) +/* + * These macros are here to improve the readability of the arch_cmpxchg_XXX() + * macros. + */ +#define SC_SFX(x) x +#define CAS_SFX(x) x +#define SC_PREPEND(x) x +#define SC_APPEND(x) x +#define CAS_PREPEND(x) x +#define CAS_APPEND(x) x + +#define arch_cmpxchg_relaxed(ptr, o, n) \ + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(""), \ + CAS_PREPEND(""), CAS_APPEND("")) + +#define arch_cmpxchg_acquire(ptr, o, n) \ + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER)) + +#define arch_cmpxchg_release(ptr, o, n) \ + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \ + CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND("")) + #define arch_cmpxchg(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ -}) + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(".rl"), CAS_SFX(".aqrl"), \ + SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_local(ptr, o, n) \ - (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) + arch_cmpxchg_relaxed((ptr), (o), (n)) #define arch_cmpxchg64(ptr, o, n) \ ({ \ @@ -360,4 +297,152 @@ arch_cmpxchg_relaxed((ptr), (o), (n)); \ }) +#define arch_cmpxchg64_relaxed(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_relaxed((ptr), (o), (n)); \ +}) + +#define arch_cmpxchg64_acquire(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_acquire((ptr), (o), (n)); \ +}) + +#define arch_cmpxchg64_release(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_release((ptr), (o), (n)); \ +}) + +#if defined(CONFIG_64BIT) && defined(CONFIG_RISCV_ISA_ZACAS) + +#define system_has_cmpxchg128() riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS) + +union __u128_halves { + u128 full; + struct { + u64 low, high; + }; +}; + +#define __arch_cmpxchg128(p, o, n, cas_sfx) \ +({ \ + __typeof__(*(p)) __o = (o); \ + union __u128_halves __hn = { .full = (n) }; \ + union __u128_halves __ho = { .full = (__o) }; \ + register unsigned long t1 asm ("t1") = __hn.low; \ + register unsigned long t2 asm ("t2") = __hn.high; \ + register unsigned long t3 asm ("t3") = __ho.low; \ + register unsigned long t4 asm ("t4") = __ho.high; \ + \ + __asm__ __volatile__ ( \ + " amocas.q" cas_sfx " %0, %z3, %2" \ + : "+&r" (t3), "+&r" (t4), "+A" (*(p)) \ + : "rJ" (t1), "rJ" (t2) \ + : "memory"); \ + \ + ((u128)t4 << 64) | t3; \ +}) + +#define arch_cmpxchg128(ptr, o, n) \ + __arch_cmpxchg128((ptr), (o), (n), ".aqrl") + +#define arch_cmpxchg128_local(ptr, o, n) \ + __arch_cmpxchg128((ptr), (o), (n), "") + +#endif /* CONFIG_64BIT && CONFIG_RISCV_ISA_ZACAS */ + +#ifdef CONFIG_RISCV_ISA_ZAWRS +/* + * Despite wrs.nto being 
"WRS-with-no-timeout", in the absence of changes to + * @val we expect it to still terminate within a "reasonable" amount of time + * for an implementation-specific other reason, a pending, locally-enabled + * interrupt, or because it has been configured to raise an illegal + * instruction exception. + */ +static __always_inline void __cmpwait(volatile void *ptr, + unsigned long val, + int size) +{ + unsigned long tmp; + + u32 *__ptr32b; + ulong __s, __val, __mask; + + asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop", + 0, RISCV_ISA_EXT_ZAWRS, 1) + : : : : no_zawrs); + + switch (size) { + case 1: + __ptr32b = (u32 *)((ulong)(ptr) & ~0x3); + __s = ((ulong)(ptr) & 0x3) * BITS_PER_BYTE; + __val = val << __s; + __mask = 0xff << __s; + + asm volatile( + " lr.w %0, %1\n" + " and %0, %0, %3\n" + " xor %0, %0, %2\n" + " bnez %0, 1f\n" + ZAWRS_WRS_NTO "\n" + "1:" + : "=&r" (tmp), "+A" (*(__ptr32b)) + : "r" (__val), "r" (__mask) + : "memory"); + break; + case 2: + __ptr32b = (u32 *)((ulong)(ptr) & ~0x3); + __s = ((ulong)(ptr) & 0x2) * BITS_PER_BYTE; + __val = val << __s; + __mask = 0xffff << __s; + + asm volatile( + " lr.w %0, %1\n" + " and %0, %0, %3\n" + " xor %0, %0, %2\n" + " bnez %0, 1f\n" + ZAWRS_WRS_NTO "\n" + "1:" + : "=&r" (tmp), "+A" (*(__ptr32b)) + : "r" (__val), "r" (__mask) + : "memory"); + break; + case 4: + asm volatile( + " lr.w %0, %1\n" + " xor %0, %0, %2\n" + " bnez %0, 1f\n" + ZAWRS_WRS_NTO "\n" + "1:" + : "=&r" (tmp), "+A" (*(u32 *)ptr) + : "r" (val)); + break; +#if __riscv_xlen == 64 + case 8: + asm volatile( + " lr.d %0, %1\n" + " xor %0, %0, %2\n" + " bnez %0, 1f\n" + ZAWRS_WRS_NTO "\n" + "1:" + : "=&r" (tmp), "+A" (*(u64 *)ptr) + : "r" (val)); + break; +#endif + default: + BUILD_BUG(); + } + + return; + +no_zawrs: + asm volatile(RISCV_PAUSE : : : "memory"); +} + +#define __cmpwait_relaxed(ptr, val) \ + __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr))) +#endif + #endif /* _ASM_RISCV_CMPXCHG_H */ diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index 2ac955b51148..6081327e55f5 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -9,14 +9,32 @@ */ #include <linux/types.h> #include <linux/sched.h> -#include <linux/sched/task_stack.h> #include <asm-generic/compat.h> static inline int is_compat_task(void) { + if (!IS_ENABLED(CONFIG_COMPAT)) + return 0; + return test_thread_flag(TIF_32BIT); } +static inline int is_compat_thread(struct thread_info *thread) +{ + if (!IS_ENABLED(CONFIG_COMPAT)) + return 0; + + return test_ti_thread_flag(thread, TIF_32BIT); +} + +static inline void set_compat_task(bool is_compat) +{ + if (is_compat) + set_thread_flag(TIF_32BIT); + else + clear_thread_flag(TIF_32BIT); +} + struct compat_user_regs_struct { compat_ulong_t pc; compat_ulong_t ra; diff --git a/arch/riscv/include/asm/cpu.h b/arch/riscv/include/asm/cpu.h new file mode 100644 index 000000000000..28d45a6678ce --- /dev/null +++ b/arch/riscv/include/asm/cpu.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _ASM_CPU_H +#define _ASM_CPU_H + +/* This header is required unconditionally by the ACPI core */ + +#endif /* _ASM_CPU_H */ diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h index 134590f1b843..176b570ef982 100644 --- a/arch/riscv/include/asm/cpu_ops.h +++ b/arch/riscv/include/asm/cpu_ops.h @@ -13,32 +13,23 @@ /** * struct cpu_operations - Callback operations for hotplugging CPUs. * - * @name: Name of the boot protocol. 
- * @cpu_prepare: Early one-time preparation step for a cpu. If there - * is a mechanism for doing so, tests whether it is - * possible to boot the given HART. * @cpu_start: Boots a cpu into the kernel. - * @cpu_disable: Prepares a cpu to die. May fail for some - * mechanism-specific reason, which will cause the hot - * unplug to be aborted. Called from the cpu to be killed. * @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from * the cpu being stopped. * @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another * cpu. */ struct cpu_operations { - const char *name; - int (*cpu_prepare)(unsigned int cpu); int (*cpu_start)(unsigned int cpu, struct task_struct *tidle); #ifdef CONFIG_HOTPLUG_CPU - int (*cpu_disable)(unsigned int cpu); void (*cpu_stop)(void); int (*cpu_is_stopped)(unsigned int cpu); #endif }; -extern const struct cpu_operations *cpu_ops[NR_CPUS]; -void __init cpu_set_ops(int cpu); +extern const struct cpu_operations cpu_ops_spinwait; +extern const struct cpu_operations *cpu_ops; +void __init cpu_set_ops(void); #endif /* ifndef __ASM_CPU_OPS_H */ diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h index 56e4b76d09ff..d6e4665b3195 100644 --- a/arch/riscv/include/asm/cpu_ops_sbi.h +++ b/arch/riscv/include/asm/cpu_ops_sbi.h @@ -10,6 +10,8 @@ #include <linux/sched.h> #include <linux/threads.h> +extern const struct cpu_operations cpu_ops_sbi; + /** * struct sbi_hart_boot_data - Hart specific boot used during booting and * cpu hotplug. diff --git a/arch/riscv/include/asm/cpufeature-macros.h b/arch/riscv/include/asm/cpufeature-macros.h new file mode 100644 index 000000000000..a8103edbf51f --- /dev/null +++ b/arch/riscv/include/asm/cpufeature-macros.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2022-2024 Rivos, Inc + */ + +#ifndef _ASM_CPUFEATURE_MACROS_H +#define _ASM_CPUFEATURE_MACROS_H + +#include <asm/hwcap.h> +#include <asm/alternative-macros.h> + +#define STANDARD_EXT 0 + +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); +#define riscv_isa_extension_available(isa_bitmap, ext) \ + __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) + +static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, + const unsigned long ext) +{ + asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_no); + + return true; +l_no: + return false; +} + +static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, + const unsigned long ext) +{ + asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_unlikely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + +static __always_inline bool riscv_has_extension_likely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_likely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + +#endif /* _ASM_CPUFEATURE_MACROS_H */ diff --git 
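/*
 * Minimal usage sketch for the cpufeature-macros.h helpers above (not taken
 * from the patch; the wrapper name is invented). With CONFIG_RISCV_ALTERNATIVE
 * the check collapses to a patched branch at the ALTERNATIVE() site, otherwise
 * it falls back to the runtime ISA bitmap lookup.
 */
static inline bool example_can_use_zbb(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_ZBB);
}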
a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h new file mode 100644 index 000000000000..fbd0e4306c93 --- /dev/null +++ b/arch/riscv/include/asm/cpufeature.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2022-2024 Rivos, Inc + */ + +#ifndef _ASM_CPUFEATURE_H +#define _ASM_CPUFEATURE_H + +#include <linux/bitmap.h> +#include <linux/jump_label.h> +#include <linux/workqueue.h> +#include <linux/kconfig.h> +#include <linux/percpu-defs.h> +#include <linux/threads.h> +#include <asm/hwcap.h> +#include <asm/cpufeature-macros.h> + +/* + * These are probed via a device_initcall(), via either the SBI or directly + * from the corresponding CSRs. + */ +struct riscv_cpuinfo { + unsigned long mvendorid; + unsigned long marchid; + unsigned long mimpid; +}; + +struct riscv_isainfo { + DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX); +}; + +DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); + +/* Per-cpu ISA extensions. */ +extern struct riscv_isainfo hart_isa[NR_CPUS]; + +extern u32 thead_vlenb_of; + +void __init riscv_user_isa_enable(void); + +#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ + .name = #_name, \ + .property = #_name, \ + .id = _id, \ + .subset_ext_ids = _subset_exts, \ + .subset_ext_size = _subset_exts_size, \ + .validate = _validate \ +} + +#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, NULL) + +#define __RISCV_ISA_EXT_DATA_VALIDATE(_name, _id, _validate) \ + _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, _validate) + +/* Used to declare pure "lasso" extension (Zk for instance) */ +#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \ + _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \ + ARRAY_SIZE(_bundled_exts), NULL) +#define __RISCV_ISA_EXT_BUNDLE_VALIDATE(_name, _bundled_exts, _validate) \ + _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \ + ARRAY_SIZE(_bundled_exts), _validate) + +/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */ +#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \ + _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), NULL) +#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ + _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) + +bool __init check_unaligned_access_emulated_all_cpus(void); +void unaligned_access_init(void); +int cpu_online_unaligned_access_init(unsigned int cpu); +#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) +void unaligned_emulation_finish(void); +bool unaligned_ctl_available(void); +#else +static inline bool unaligned_ctl_available(void) +{ + return false; +} +#endif + +#if defined(CONFIG_RISCV_MISALIGNED) +DECLARE_PER_CPU(long, misaligned_access_speed); +bool misaligned_traps_can_delegate(void); +#else +static inline bool misaligned_traps_can_delegate(void) +{ + return false; +} +#endif + +bool __init check_vector_unaligned_access_emulated_all_cpus(void); +#if defined(CONFIG_RISCV_VECTOR_MISALIGNED) +void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused); +DECLARE_PER_CPU(long, vector_misaligned_access); +#endif + +#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) +DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key); + +static __always_inline bool has_fast_unaligned_accesses(void) +{ + return static_branch_likely(&fast_unaligned_access_speed_key); +} +#else +static __always_inline bool has_fast_unaligned_accesses(void) +{ + if 
(IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) + return true; + else + return false; +} +#endif + +unsigned long riscv_get_elf_hwcap(void); + +struct riscv_isa_ext_data { + const unsigned int id; + const char *name; + const char *property; + const unsigned int *subset_ext_ids; + const unsigned int subset_ext_size; + int (*validate)(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap); +}; + +extern const struct riscv_isa_ext_data riscv_isa_ext[]; +extern const size_t riscv_isa_ext_count; +extern bool riscv_isa_fallback; + +unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); +static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && + __riscv_has_extension_likely(STANDARD_EXT, ext)) + return true; + + return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); +} + +static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && + __riscv_has_extension_unlikely(STANDARD_EXT, ext)) + return true; + + return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); +} + +#endif diff --git a/arch/riscv/include/asm/crash_reserve.h b/arch/riscv/include/asm/crash_reserve.h new file mode 100644 index 000000000000..013962e63587 --- /dev/null +++ b/arch/riscv/include/asm/crash_reserve.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _RISCV_CRASH_RESERVE_H +#define _RISCV_CRASH_RESERVE_H + +#define CRASH_ALIGN PMD_SIZE + +#define CRASH_ADDR_LOW_MAX dma32_phys_limit +#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM() + +extern phys_addr_t memblock_end_of_DRAM(void); +#endif diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 6d85655e7edf..6fed42e37705 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -7,7 +7,7 @@ #define _ASM_RISCV_CSR_H #include <asm/asm.h> -#include <linux/const.h> +#include <linux/bits.h> /* Status register flags */ #define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */ @@ -24,29 +24,43 @@ #define SR_FS_CLEAN _AC(0x00004000, UL) #define SR_FS_DIRTY _AC(0x00006000, UL) +#define SR_VS _AC(0x00000600, UL) /* Vector Status */ +#define SR_VS_OFF _AC(0x00000000, UL) +#define SR_VS_INITIAL _AC(0x00000200, UL) +#define SR_VS_CLEAN _AC(0x00000400, UL) +#define SR_VS_DIRTY _AC(0x00000600, UL) + +#define SR_VS_THEAD _AC(0x01800000, UL) /* xtheadvector Status */ +#define SR_VS_OFF_THEAD _AC(0x00000000, UL) +#define SR_VS_INITIAL_THEAD _AC(0x00800000, UL) +#define SR_VS_CLEAN_THEAD _AC(0x01000000, UL) +#define SR_VS_DIRTY_THEAD _AC(0x01800000, UL) + #define SR_XS _AC(0x00018000, UL) /* Extension Status */ #define SR_XS_OFF _AC(0x00000000, UL) #define SR_XS_INITIAL _AC(0x00008000, UL) #define SR_XS_CLEAN _AC(0x00010000, UL) #define SR_XS_DIRTY _AC(0x00018000, UL) +#define SR_FS_VS (SR_FS | SR_VS) /* Vector and Floating-Point Unit */ + #ifndef CONFIG_64BIT -#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */ +#define SR_SD _AC(0x80000000, UL) /* FS/VS/XS dirty */ #else -#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */ +#define SR_SD _AC(0x8000000000000000, UL) /* FS/VS/XS dirty */ #endif #ifdef CONFIG_64BIT #define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */ #define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 
32 for U-mode */ #define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */ -#define SR_UXL_SHIFT 32 #endif /* SATP flags */ #ifndef CONFIG_64BIT #define SATP_PPN _AC(0x003FFFFF, UL) #define SATP_MODE_32 _AC(0x80000000, UL) +#define SATP_MODE_SHIFT 31 #define SATP_ASID_BITS 9 #define SATP_ASID_SHIFT 22 #define SATP_ASID_MASK _AC(0x1FF, UL) @@ -55,6 +69,7 @@ #define SATP_MODE_39 _AC(0x8000000000000000, UL) #define SATP_MODE_48 _AC(0x9000000000000000, UL) #define SATP_MODE_57 _AC(0xa000000000000000, UL) +#define SATP_MODE_SHIFT 60 #define SATP_ASID_BITS 16 #define SATP_ASID_SHIFT 44 #define SATP_ASID_MASK _AC(0xFFFF, UL) @@ -73,14 +88,19 @@ #define IRQ_S_EXT 9 #define IRQ_VS_EXT 10 #define IRQ_M_EXT 11 +#define IRQ_S_GEXT 12 #define IRQ_PMU_OVF 13 +#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1) +#define IRQ_LOCAL_MASK GENMASK((IRQ_LOCAL_MAX - 1), 0) /* Exception causes */ #define EXC_INST_MISALIGNED 0 #define EXC_INST_ACCESS 1 #define EXC_INST_ILLEGAL 2 #define EXC_BREAKPOINT 3 +#define EXC_LOAD_MISALIGNED 4 #define EXC_LOAD_ACCESS 5 +#define EXC_STORE_MISALIGNED 6 #define EXC_STORE_ACCESS 7 #define EXC_SYSCALL 8 #define EXC_HYPERVISOR_SYSCALL 9 @@ -105,6 +125,10 @@ /* HSTATUS flags */ #ifdef CONFIG_64BIT +#define HSTATUS_HUPMM _AC(0x3000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_0 _AC(0x0000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_7 _AC(0x2000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_16 _AC(0x3000000000000, UL) #define HSTATUS_VSXL _AC(0x300000000, UL) #define HSTATUS_VSXL_SHIFT 32 #endif @@ -128,25 +152,25 @@ #define HGATP32_MODE_SHIFT 31 #define HGATP32_VMID_SHIFT 22 -#define HGATP32_VMID_MASK _AC(0x1FC00000, UL) -#define HGATP32_PPN _AC(0x003FFFFF, UL) +#define HGATP32_VMID GENMASK(28, 22) +#define HGATP32_PPN GENMASK(21, 0) #define HGATP64_MODE_SHIFT 60 #define HGATP64_VMID_SHIFT 44 -#define HGATP64_VMID_MASK _AC(0x03FFF00000000000, UL) -#define HGATP64_PPN _AC(0x00000FFFFFFFFFFF, UL) +#define HGATP64_VMID GENMASK(57, 44) +#define HGATP64_PPN GENMASK(43, 0) #define HGATP_PAGE_SHIFT 12 #ifdef CONFIG_64BIT #define HGATP_PPN HGATP64_PPN #define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT -#define HGATP_VMID_MASK HGATP64_VMID_MASK +#define HGATP_VMID HGATP64_VMID #define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT #else #define HGATP_PPN HGATP32_PPN #define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT -#define HGATP_VMID_MASK HGATP32_VMID_MASK +#define HGATP_VMID HGATP32_VMID #define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT #endif @@ -154,7 +178,64 @@ #define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT) #define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \ (_AC(1, UL) << IRQ_S_TIMER) | \ - (_AC(1, UL) << IRQ_S_EXT)) + (_AC(1, UL) << IRQ_S_EXT) | \ + (_AC(1, UL) << IRQ_PMU_OVF)) + +/* AIA CSR bits */ +#define TOPI_IID_SHIFT 16 +#define TOPI_IID_MASK GENMASK(11, 0) +#define TOPI_IPRIO_MASK GENMASK(7, 0) +#define TOPI_IPRIO_BITS 8 + +#define TOPEI_ID_SHIFT 16 +#define TOPEI_ID_MASK GENMASK(10, 0) +#define TOPEI_PRIO_MASK GENMASK(10, 0) + +#define ISELECT_IPRIO0 0x30 +#define ISELECT_IPRIO15 0x3f +#define ISELECT_MASK GENMASK(8, 0) + +#define HVICTL_VTI BIT(30) +#define HVICTL_IID GENMASK(27, 16) +#define HVICTL_IID_SHIFT 16 +#define HVICTL_DPR BIT(9) +#define HVICTL_IPRIOM BIT(8) +#define HVICTL_IPRIO GENMASK(7, 0) + +/* xENVCFG flags */ +#define ENVCFG_STCE (_AC(1, ULL) << 63) +#define ENVCFG_PBMTE (_AC(1, ULL) << 62) +#define ENVCFG_ADUE (_AC(1, ULL) << 61) +#define ENVCFG_PMM (_AC(0x3, ULL) << 32) +#define ENVCFG_PMM_PMLEN_0 (_AC(0x0, ULL) << 32) +#define ENVCFG_PMM_PMLEN_7 (_AC(0x2, ULL) << 32) +#define 
ENVCFG_PMM_PMLEN_16 (_AC(0x3, ULL) << 32) +#define ENVCFG_CBZE (_AC(1, UL) << 7) +#define ENVCFG_CBCFE (_AC(1, UL) << 6) +#define ENVCFG_CBIE_SHIFT 4 +#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT) +#define ENVCFG_CBIE_ILL _AC(0x0, UL) +#define ENVCFG_CBIE_FLUSH _AC(0x1, UL) +#define ENVCFG_CBIE_INV _AC(0x3, UL) +#define ENVCFG_FIOM _AC(0x1, UL) + +/* Smstateen bits */ +#define SMSTATEEN0_AIA_IMSIC_SHIFT 58 +#define SMSTATEEN0_AIA_IMSIC (_ULL(1) << SMSTATEEN0_AIA_IMSIC_SHIFT) +#define SMSTATEEN0_AIA_SHIFT 59 +#define SMSTATEEN0_AIA (_ULL(1) << SMSTATEEN0_AIA_SHIFT) +#define SMSTATEEN0_AIA_ISEL_SHIFT 60 +#define SMSTATEEN0_AIA_ISEL (_ULL(1) << SMSTATEEN0_AIA_ISEL_SHIFT) +#define SMSTATEEN0_HSENVCFG_SHIFT 62 +#define SMSTATEEN0_HSENVCFG (_ULL(1) << SMSTATEEN0_HSENVCFG_SHIFT) +#define SMSTATEEN0_SSTATEEN0_SHIFT 63 +#define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT) + +/* mseccfg bits */ +#define MSECCFG_PMM ENVCFG_PMM +#define MSECCFG_PMM_PMLEN_0 ENVCFG_PMM_PMLEN_0 +#define MSECCFG_PMM_PMLEN_7 ENVCFG_PMM_PMLEN_7 +#define MSECCFG_PMM_PMLEN_16 ENVCFG_PMM_PMLEN_16 /* symbolic CSR names: */ #define CSR_CYCLE 0xc00 @@ -222,12 +303,14 @@ #define CSR_HPMCOUNTER30H 0xc9e #define CSR_HPMCOUNTER31H 0xc9f -#define CSR_SSCOUNTOVF 0xda0 +#define CSR_SCOUNTOVF 0xda0 #define CSR_SSTATUS 0x100 #define CSR_SIE 0x104 #define CSR_STVEC 0x105 #define CSR_SCOUNTEREN 0x106 +#define CSR_SENVCFG 0x10a +#define CSR_SSTATEEN0 0x10c #define CSR_SSCRATCH 0x140 #define CSR_SEPC 0x141 #define CSR_SCAUSE 0x142 @@ -235,6 +318,30 @@ #define CSR_SIP 0x144 #define CSR_SATP 0x180 +#define CSR_STIMECMP 0x14D +#define CSR_STIMECMPH 0x15D + +/* xtheadvector symbolic CSR names */ +#define CSR_VXSAT 0x9 +#define CSR_VXRM 0xa + +/* xtheadvector CSR masks */ +#define CSR_VXRM_MASK 3 +#define CSR_VXRM_SHIFT 1 +#define CSR_VXSAT_MASK 1 + +/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_SISELECT 0x150 +#define CSR_SIREG 0x151 + +/* Supervisor-Level Interrupts (AIA) */ +#define CSR_STOPEI 0x15c +#define CSR_STOPI 0xdb0 + +/* Supervisor-Level High-Half CSRs (AIA) */ +#define CSR_SIEH 0x114 +#define CSR_SIPH 0x154 + #define CSR_VSSTATUS 0x200 #define CSR_VSIE 0x204 #define CSR_VSTVEC 0x205 @@ -244,6 +351,8 @@ #define CSR_VSTVAL 0x243 #define CSR_VSIP 0x244 #define CSR_VSATP 0x280 +#define CSR_VSTIMECMP 0x24D +#define CSR_VSTIMECMPH 0x25D #define CSR_HSTATUS 0x600 #define CSR_HEDELEG 0x602 @@ -252,7 +361,9 @@ #define CSR_HTIMEDELTA 0x605 #define CSR_HCOUNTEREN 0x606 #define CSR_HGEIE 0x607 +#define CSR_HENVCFG 0x60a #define CSR_HTIMEDELTAH 0x615 +#define CSR_HENVCFGH 0x61a #define CSR_HTVAL 0x643 #define CSR_HIP 0x644 #define CSR_HVIP 0x645 @@ -260,10 +371,40 @@ #define CSR_HGATP 0x680 #define CSR_HGEIP 0xe12 +/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ +#define CSR_HVIEN 0x608 +#define CSR_HVICTL 0x609 +#define CSR_HVIPRIO1 0x646 +#define CSR_HVIPRIO2 0x647 + +/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */ +#define CSR_VSISELECT 0x250 +#define CSR_VSIREG 0x251 + +/* VS-Level Interrupts (H-extension with AIA) */ +#define CSR_VSTOPEI 0x25c +#define CSR_VSTOPI 0xeb0 + +/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ +#define CSR_HIDELEGH 0x613 +#define CSR_HVIENH 0x618 +#define CSR_HVIPH 0x655 +#define CSR_HVIPRIO1H 0x656 +#define CSR_HVIPRIO2H 0x657 +#define CSR_VSIEH 0x214 +#define CSR_VSIPH 0x254 + +/* Hypervisor stateen CSRs */ +#define CSR_HSTATEEN0 0x60c +#define CSR_HSTATEEN0H 0x61c + #define 
CSR_MSTATUS 0x300 #define CSR_MISA 0x301 +#define CSR_MIDELEG 0x303 #define CSR_MIE 0x304 #define CSR_MTVEC 0x305 +#define CSR_MENVCFG 0x30a +#define CSR_MENVCFGH 0x31a #define CSR_MSCRATCH 0x340 #define CSR_MEPC 0x341 #define CSR_MCAUSE 0x342 @@ -271,21 +412,65 @@ #define CSR_MIP 0x344 #define CSR_PMPCFG0 0x3a0 #define CSR_PMPADDR0 0x3b0 +#define CSR_MSECCFG 0x747 +#define CSR_MSECCFGH 0x757 #define CSR_MVENDORID 0xf11 #define CSR_MARCHID 0xf12 #define CSR_MIMPID 0xf13 #define CSR_MHARTID 0xf14 +/* Machine-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_MISELECT 0x350 +#define CSR_MIREG 0x351 + +/* Machine-Level Interrupts (AIA) */ +#define CSR_MTOPEI 0x35c +#define CSR_MTOPI 0xfb0 + +/* Virtual Interrupts for Supervisor Level (AIA) */ +#define CSR_MVIEN 0x308 +#define CSR_MVIP 0x309 + +/* Machine-Level High-Half CSRs (AIA) */ +#define CSR_MIDELEGH 0x313 +#define CSR_MIEH 0x314 +#define CSR_MVIENH 0x318 +#define CSR_MVIPH 0x319 +#define CSR_MIPH 0x354 + +#define CSR_VSTART 0x8 +#define CSR_VCSR 0xf +#define CSR_VL 0xc20 +#define CSR_VTYPE 0xc21 +#define CSR_VLENB 0xc22 + +/* Scalar Crypto Extension - Entropy */ +#define CSR_SEED 0x015 +#define SEED_OPST_MASK _AC(0xC0000000, UL) +#define SEED_OPST_BIST _AC(0x00000000, UL) +#define SEED_OPST_WAIT _AC(0x40000000, UL) +#define SEED_OPST_ES16 _AC(0x80000000, UL) +#define SEED_OPST_DEAD _AC(0xC0000000, UL) +#define SEED_ENTROPY_MASK _AC(0xFFFF, UL) + #ifdef CONFIG_RISCV_M_MODE # define CSR_STATUS CSR_MSTATUS # define CSR_IE CSR_MIE # define CSR_TVEC CSR_MTVEC +# define CSR_ENVCFG CSR_MENVCFG # define CSR_SCRATCH CSR_MSCRATCH # define CSR_EPC CSR_MEPC # define CSR_CAUSE CSR_MCAUSE # define CSR_TVAL CSR_MTVAL # define CSR_IP CSR_MIP +# define CSR_IEH CSR_MIEH +# define CSR_ISELECT CSR_MISELECT +# define CSR_IREG CSR_MIREG +# define CSR_IPH CSR_MIPH +# define CSR_TOPEI CSR_MTOPEI +# define CSR_TOPI CSR_MTOPI + # define SR_IE SR_MIE # define SR_PIE SR_MPIE # define SR_PP SR_MPP @@ -297,12 +482,20 @@ # define CSR_STATUS CSR_SSTATUS # define CSR_IE CSR_SIE # define CSR_TVEC CSR_STVEC +# define CSR_ENVCFG CSR_SENVCFG # define CSR_SCRATCH CSR_SSCRATCH # define CSR_EPC CSR_SEPC # define CSR_CAUSE CSR_SCAUSE # define CSR_TVAL CSR_STVAL # define CSR_IP CSR_SIP +# define CSR_IEH CSR_SIEH +# define CSR_ISELECT CSR_SISELECT +# define CSR_IREG CSR_SIREG +# define CSR_IPH CSR_SIPH +# define CSR_TOPEI CSR_STOPEI +# define CSR_TOPI CSR_STOPI + # define SR_IE SR_SIE # define SR_PIE SR_SPIE # define SR_PP SR_SPP diff --git a/arch/riscv/include/asm/dma-noncoherent.h b/arch/riscv/include/asm/dma-noncoherent.h new file mode 100644 index 000000000000..312cfa0858fb --- /dev/null +++ b/arch/riscv/include/asm/dma-noncoherent.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 Renesas Electronics Corp. 
+ */ + +#ifndef __ASM_DMA_NONCOHERENT_H +#define __ASM_DMA_NONCOHERENT_H + +#include <linux/dma-direct.h> + +/* + * struct riscv_nonstd_cache_ops - Structure for non-standard CMO function pointers + * + * @wback: Function pointer for cache writeback + * @inv: Function pointer for invalidating cache + * @wback_inv: Function pointer for flushing the cache (writeback + invalidating) + */ +struct riscv_nonstd_cache_ops { + void (*wback)(phys_addr_t paddr, size_t size); + void (*inv)(phys_addr_t paddr, size_t size); + void (*wback_inv)(phys_addr_t paddr, size_t size); +}; + +extern struct riscv_nonstd_cache_ops noncoherent_cache_ops; + +void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops); + +#endif /* __ASM_DMA_NONCOHERENT_H */ diff --git a/arch/riscv/include/asm/dmi.h b/arch/riscv/include/asm/dmi.h new file mode 100644 index 000000000000..ca7cce557ef7 --- /dev/null +++ b/arch/riscv/include/asm/dmi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Intel Corporation + * + * based on arch/arm64/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef __ASM_DMI_H +#define __ASM_DMI_H + +#include <linux/io.h> +#include <linux/slab.h> + +#define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB) +#define dmi_early_unmap(x, l) memunmap(x) +#define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB) +#define dmi_unmap(x) memunmap(x) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h index f74879a8f1ea..46a355913b27 100644 --- a/arch/riscv/include/asm/efi.h +++ b/arch/riscv/include/asm/efi.h @@ -10,6 +10,7 @@ #include <asm/mmu_context.h> #include <asm/ptrace.h> #include <asm/tlbflush.h> +#include <asm/pgalloc.h> #ifdef CONFIG_EFI extern void efi_init(void); @@ -18,10 +19,7 @@ extern void efi_init(void); #endif int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); -int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); - -#define arch_efi_call_virt_setup() efi_virtmap_load() -#define arch_efi_call_virt_teardown() efi_virtmap_unload() +int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool); #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE) @@ -31,13 +29,22 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) return ULONG_MAX; } -#define alloc_screen_info(x...) (&screen_info) - -static inline void free_screen_info(struct screen_info *si) +static inline unsigned long efi_get_kimg_min_align(void) { + /* + * RISC-V requires the kernel image to placed 2 MB aligned base for 64 + * bit and 4MB for 32 bit. + */ + return IS_ENABLED(CONFIG_64BIT) ? 
SZ_2M : SZ_4M; } -void efi_virtmap_load(void); -void efi_virtmap_unload(void); +#define EFI_KIMG_PREFERRED_ADDRESS efi_get_kimg_min_align() + +void arch_efi_call_virt_setup(void); +void arch_efi_call_virt_teardown(void); + +unsigned long stext_offset(void); + +void efi_icache_sync(unsigned long start, unsigned long end); #endif /* _ASM_EFI_H */ diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index 14fc7342490b..c7aea7886d22 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -14,6 +14,7 @@ #include <asm/auxvec.h> #include <asm/byteorder.h> #include <asm/cacheinfo.h> +#include <asm/cpufeature.h> /* * These are used to set parameters in the core dumps. @@ -40,6 +41,7 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr); #define compat_elf_check_arch compat_elf_check_arch #define CORE_DUMP_USE_REGSET +#define ELF_FDPIC_CORE_EFLAGS 0 #define ELF_EXEC_PAGESIZE (PAGE_SIZE) /* @@ -48,25 +50,29 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr); * the loader. We need to make sure that it is out of the way of the program * that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2) +#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2) #ifdef CONFIG_64BIT -#ifdef CONFIG_COMPAT -#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ +#define STACK_RND_MASK (is_compat_task() ? \ 0x7ff >> (PAGE_SHIFT - 12) : \ 0x3ffff >> (PAGE_SHIFT - 12)) -#else -#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) -#endif #endif + /* - * This yields a mask that user programs can use to figure out what - * instruction set this CPU supports. This could be done in user space, - * but it's not easy, and we've already done it here. + * Provides information on the availiable set of ISA extensions to userspace, + * via a bitmap that coorespends to each single-letter ISA extension. This is + * essentially defunct, but will remain for compatibility with userspace. */ -#define ELF_HWCAP (elf_hwcap) +#define ELF_HWCAP riscv_get_elf_hwcap() extern unsigned long elf_hwcap; +#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \ + do { \ + (_r)->a1 = _exec_map_addr; \ + (_r)->a2 = _interp_map_addr; \ + (_r)->a3 = dynamic_addr; \ + } while (0) + /* * This yields a string that ld.so will use to load implementation * specific libraries for optimization. This is more specific in @@ -76,7 +82,6 @@ extern unsigned long elf_hwcap; #define COMPAT_ELF_PLATFORM (NULL) -#ifdef CONFIG_MMU #define ARCH_DLINFO \ do { \ /* \ @@ -99,7 +104,22 @@ do { \ get_cache_size(2, CACHE_TYPE_UNIFIED)); \ NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, \ get_cache_geometry(2, CACHE_TYPE_UNIFIED)); \ + NEW_AUX_ENT(AT_L3_CACHESIZE, \ + get_cache_size(3, CACHE_TYPE_UNIFIED)); \ + NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, \ + get_cache_geometry(3, CACHE_TYPE_UNIFIED)); \ + /* \ + * Should always be nonzero unless there's a kernel bug. 
\ + * If we haven't determined a sensible value to give to \ + * userspace, omit the entry: \ + */ \ + if (likely(signal_minsigstksz)) \ + NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz); \ + else \ + NEW_AUX_ENT(AT_IGNORE, 0); \ } while (0) + +#ifdef CONFIG_MMU #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, @@ -115,10 +135,7 @@ do { \ #ifdef CONFIG_COMPAT #define SET_PERSONALITY(ex) \ -do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ - set_thread_flag(TIF_32BIT); \ - else \ - clear_thread_flag(TIF_32BIT); \ +do { set_compat_task((ex).e_ident[EI_CLASS] == ELFCLASS32); \ if (personality(current->personality) != PER_LINUX32) \ set_personality(PER_LINUX | \ (current->personality & (~PER_MASK))); \ diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h new file mode 100644 index 000000000000..b28ccc6cdeea --- /dev/null +++ b/arch/riscv/include/asm/entry-common.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_ENTRY_COMMON_H +#define _ASM_RISCV_ENTRY_COMMON_H + +#include <asm/stacktrace.h> +#include <asm/thread_info.h> +#include <asm/vector.h> + +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ + if (ti_work & _TIF_RISCV_V_DEFER_RESTORE) { + clear_thread_flag(TIF_RISCV_V_DEFER_RESTORE); + /* + * We are already called with irq disabled, so go without + * keeping track of riscv_v_flags. + */ + riscv_v_vstate_restore(¤t->thread.vstate, regs); + } +} + +#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare + +void handle_page_fault(struct pt_regs *regs); +void handle_break(struct pt_regs *regs); + +#ifdef CONFIG_RISCV_MISALIGNED +int handle_misaligned_load(struct pt_regs *regs); +int handle_misaligned_store(struct pt_regs *regs); +#else +static inline int handle_misaligned_load(struct pt_regs *regs) +{ + return -1; +} + +static inline int handle_misaligned_store(struct pt_regs *regs) +{ + return -1; +} +#endif + +#endif /* _ASM_RISCV_ENTRY_COMMON_H */ diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index 416ead0f9a65..6e426ed7919a 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -6,8 +6,16 @@ #define ASM_ERRATA_LIST_H #include <asm/alternative.h> +#include <asm/csr.h> +#include <asm/insn-def.h> +#include <asm/hwcap.h> #include <asm/vendorid_list.h> +#ifdef CONFIG_ERRATA_ANDES +#define ERRATA_ANDES_NO_IOCP 0 +#define ERRATA_ANDES_NUMBER 1 +#endif + #ifdef CONFIG_ERRATA_SIFIVE #define ERRATA_SIFIVE_CIP_453 0 #define ERRATA_SIFIVE_CIP_1200 1 @@ -15,13 +23,12 @@ #endif #ifdef CONFIG_ERRATA_THEAD -#define ERRATA_THEAD_PBMT 0 -#define ERRATA_THEAD_NUMBER 1 +#define ERRATA_THEAD_MAE 0 +#define ERRATA_THEAD_PMU 1 +#define ERRATA_THEAD_GHOSTWRITE 2 +#define ERRATA_THEAD_NUMBER 3 #endif -#define CPUFEATURE_SVPBMT 0 -#define CPUFEATURE_NUMBER 1 - #ifdef __ASSEMBLY__ #define ALT_INSN_FAULT(x) \ @@ -37,30 +44,40 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \ CONFIG_ERRATA_SIFIVE_CIP_453) #else /* !__ASSEMBLY__ */ -#define ALT_FLUSH_TLB_PAGE(x) \ +#define ALT_SFENCE_VMA_ASID(asid) \ +asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (asid) : "memory") + +#define ALT_SFENCE_VMA_ADDR(addr) \ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ : : "r" 
(addr) : "memory") +#define ALT_SFENCE_VMA_ADDR_ASID(addr, asid) \ +asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (addr), "r" (asid) : "memory") + /* * _val is marked as "will be overwritten", so need to set it to 0 * in the default case. */ #define ALT_SVPBMT_SHIFT 61 -#define ALT_THEAD_PBMT_SHIFT 59 +#define ALT_THEAD_MAE_SHIFT 59 #define ALT_SVPBMT(_val, prot) \ asm(ALTERNATIVE_2("li %0, 0\t\nnop", \ "li %0, %1\t\nslli %0,%0,%3", 0, \ - CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \ + RISCV_ISA_EXT_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \ "li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \ - ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \ + ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \ : "=r"(_val) \ : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \ - "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \ + "I"(prot##_THEAD >> ALT_THEAD_MAE_SHIFT), \ "I"(ALT_SVPBMT_SHIFT), \ - "I"(ALT_THEAD_PBMT_SHIFT)) + "I"(ALT_THEAD_MAE_SHIFT)) -#ifdef CONFIG_ERRATA_THEAD_PBMT +#ifdef CONFIG_ERRATA_THEAD_MAE /* * IO/NOCACHE memory types are handled together with svpbmt, * so on T-Head chips, check if no other memory type is set, @@ -68,13 +85,7 @@ asm(ALTERNATIVE_2("li %0, 0\t\nnop", \ */ #define ALT_THEAD_PMA(_val) \ asm volatile(ALTERNATIVE( \ - "nop\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - "nop", \ + __nops(7), \ "li t3, %1\n\t" \ "slli t3, t3, %3\n\t" \ "and t3, %0, t3\n\t" \ @@ -83,16 +94,35 @@ asm volatile(ALTERNATIVE( \ "slli t3, t3, %3\n\t" \ "or %0, %0, t3\n\t" \ "2:", THEAD_VENDOR_ID, \ - ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \ + ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \ : "+r"(_val) \ - : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \ - "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \ - "I"(ALT_THEAD_PBMT_SHIFT) \ + : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_MAE_SHIFT), \ + "I"(_PAGE_PMA_THEAD >> ALT_THEAD_MAE_SHIFT), \ + "I"(ALT_THEAD_MAE_SHIFT) \ : "t3") #else #define ALT_THEAD_PMA(_val) #endif +#define ALT_CMO_OP(_op, _start, _size, _cachesize) \ +asm volatile(ALTERNATIVE( \ + __nops(5), \ + "mv a0, %1\n\t" \ + "j 2f\n\t" \ + "3:\n\t" \ + CBO_##_op(a0) \ + "add a0, a0, %0\n\t" \ + "2:\n\t" \ + "bltu a0, %2, 3b\n\t", \ + 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM) \ + : : "r"(_cachesize), \ + "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \ + "r"((unsigned long)(_start) + (_size)) \ + : "a0") + +#define THEAD_C9XX_RV_IRQ_PMU 17 +#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5 + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/riscv/include/asm/exec.h b/arch/riscv/include/asm/exec.h new file mode 100644 index 000000000000..07d9942682e0 --- /dev/null +++ b/arch/riscv/include/asm/exec.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_EXEC_H +#define __ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* __ASM_EXEC_H */ diff --git a/arch/riscv/include/asm/extable.h b/arch/riscv/include/asm/extable.h index 512012d193dc..3eb5c1f7bf34 100644 --- a/arch/riscv/include/asm/extable.h +++ b/arch/riscv/include/asm/extable.h @@ -32,7 +32,11 @@ do { \ (b)->data = (tmp).data; \ } while (0) +#ifdef CONFIG_MMU bool fixup_exception(struct pt_regs *regs); +#else +static inline bool fixup_exception(struct pt_regs *regs) { return false; } +#endif #if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I) bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); diff --git a/arch/riscv/include/asm/fence.h 
b/arch/riscv/include/asm/fence.h index 2b443a3a487f..182db7930edc 100644 --- a/arch/riscv/include/asm/fence.h +++ b/arch/riscv/include/asm/fence.h @@ -1,12 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _ASM_RISCV_FENCE_H #define _ASM_RISCV_FENCE_H +#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n" +#define RISCV_FENCE(p, s) \ + ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); }) + #ifdef CONFIG_SMP -#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n" -#define RISCV_RELEASE_BARRIER "\tfence rw, w\n" +#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw) +#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w) +#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw) #else #define RISCV_ACQUIRE_BARRIER #define RISCV_RELEASE_BARRIER +#define RISCV_FULL_BARRIER #endif #endif /* _ASM_RISCV_FENCE_H */ diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 5c3e7b97fcc6..0a55099bb734 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h @@ -22,6 +22,14 @@ */ enum fixed_addresses { FIX_HOLE, + /* + * The fdt fixmap mapping must be PMD aligned and will be mapped + * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit. + */ + FIX_FDT_END, + FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1, + + /* Below fixmaps will be mapped using fixmap_pte */ FIX_PTE, FIX_PMD, FIX_PUD, diff --git a/arch/riscv/include/asm/fpu.h b/arch/riscv/include/asm/fpu.h new file mode 100644 index 000000000000..91c04c244e12 --- /dev/null +++ b/arch/riscv/include/asm/fpu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 SiFive + */ + +#ifndef _ASM_RISCV_FPU_H +#define _ASM_RISCV_FPU_H + +#include <asm/switch_to.h> + +#define kernel_fpu_available() has_fpu() + +void kernel_fpu_begin(void); +void kernel_fpu_end(void); + +#endif /* ! _ASM_RISCV_FPU_H */ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 04dad3380041..22ebea3c2b26 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -11,24 +11,38 @@ #if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER) #define HAVE_FUNCTION_GRAPH_FP_TEST #endif -#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#ifndef __ASSEMBLY__ + +extern void *return_address(unsigned int level); + +#define ftrace_return_address(n) return_address(n) + +void _mcount(void); +unsigned long ftrace_call_adjust(unsigned long addr); +unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip); +#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip) /* - * Clang prior to 13 had "mcount" instead of "_mcount": - * https://reviews.llvm.org/D98881 + * Let's do like x86/arm64 and ignore the compat syscalls. */ -#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000 -#define MCOUNT_NAME _mcount -#else -#define MCOUNT_NAME mcount -#endif +#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS +static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) +{ + return is_compat_task(); +} -#define ARCH_SUPPORTS_FTRACE_OPS 1 -#ifndef __ASSEMBLY__ -void MCOUNT_NAME(void); -static inline unsigned long ftrace_call_adjust(unsigned long addr) +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) { - return addr; + /* + * Since all syscall functions have __riscv_ prefix, we must skip it. 
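	 * For instance, the symbol "__riscv_sys_openat" matches the name
	 * "sys_openat": "__riscv_" is 8 characters long, hence the
	 * sym + 8 below.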
+ * However, as we described above, we decided to ignore compat + * syscalls, so we don't care about __riscv_compat_ prefix here. + */ + return !strcmp(sym + 8, name); } struct dyn_arch_ftrace { @@ -42,47 +56,187 @@ struct dyn_arch_ftrace { * 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to * return address (original pc + 4) * + * The first 2 instructions for each tracable function is compiled to 2 nop + * instructions. Then, the kernel initializes the first instruction to auipc at + * boot time (<ftrace disable>). The second instruction is patched to jalr to + * start the trace. + * + *<Image>: + * 0: nop + * 4: nop + * + *<ftrace enable>: + * 0: auipc t0, 0x? + * 4: jalr t0, ?(t0) + * + *<ftrace disable>: + * 0: auipc t0, 0x? + * 4: nop + * * Dynamic ftrace generates probes to call sites, so we must deal with * both auipc and jalr at the same time. */ -#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME) +#define MCOUNT_ADDR ((unsigned long)_mcount) #define JALR_SIGN_MASK (0x00000800) #define JALR_OFFSET_MASK (0x00000fff) #define AUIPC_OFFSET_MASK (0xfffff000) #define AUIPC_PAD (0x00001000) #define JALR_SHIFT 20 -#define JALR_BASIC (0x000080e7) -#define AUIPC_BASIC (0x00000097) -#define NOP4 (0x00000013) - -#define make_call(caller, callee, call) \ -do { \ - call[0] = to_auipc_insn((unsigned int)((unsigned long)callee - \ - (unsigned long)caller)); \ - call[1] = to_jalr_insn((unsigned int)((unsigned long)callee - \ - (unsigned long)caller)); \ -} while (0) +#define JALR_T0 (0x000282e7) +#define AUIPC_T0 (0x00000297) +#define JALR_RANGE (JALR_SIGN_MASK - 1) -#define to_jalr_insn(offset) \ - (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC) +#define to_jalr_t0(offset) \ + (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0) -#define to_auipc_insn(offset) \ +#define to_auipc_t0(offset) \ ((offset & JALR_SIGN_MASK) ? \ - (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \ - ((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC)) + (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) : \ + ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0)) + +#define make_call_t0(caller, callee, call) \ +do { \ + unsigned int offset = \ + (unsigned long) (callee) - (unsigned long) (caller); \ + call[0] = to_auipc_t0(offset); \ + call[1] = to_jalr_t0(offset); \ +} while (0) /* - * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here. + * Only the jalr insn in the auipc+jalr is patched, so we make it 4 + * bytes here. 
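 *
 * A worked example with illustrative addresses (not from the original
 * comment): for a call site at 0x80002000 and a tracer at 0x80001804,
 * make_call_t0() computes offset = -0x7fc (0xfffff804 as a u32). Bit 11 of
 * the offset is set, so to_auipc_t0() rounds the upper part up and yields
 * "auipc t0, 0x0" (t0 = pc), while to_jalr_t0() keeps the low 12 bits and
 * yields "jalr t0, -2044(t0)"; together they land exactly on 0x80001804.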
*/ -#define MCOUNT_INSN_SIZE 8 +#define MCOUNT_INSN_SIZE 4 +#define MCOUNT_AUIPC_SIZE 4 +#define MCOUNT_JALR_SIZE 4 +#define MCOUNT_NOP4_SIZE 4 #ifndef __ASSEMBLY__ struct dyn_ftrace; int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop -#endif +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +#define arch_ftrace_get_regs(regs) NULL +#define HAVE_ARCH_FTRACE_REGS +struct ftrace_ops; +struct ftrace_regs; +#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs)) + +struct __arch_ftrace_regs { + unsigned long epc; + unsigned long ra; + unsigned long sp; + unsigned long s0; + unsigned long t1; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + unsigned long direct_tramp; +#endif + union { + unsigned long args[8]; + struct { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; +#ifdef CONFIG_CC_IS_CLANG + unsigned long t2; + unsigned long t3; + unsigned long t4; + unsigned long t5; + unsigned long t6; #endif + }; + }; +}; + +static __always_inline unsigned long ftrace_regs_get_instruction_pointer(const struct ftrace_regs + *fregs) +{ + return arch_ftrace_regs(fregs)->epc; +} + +static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, + unsigned long pc) +{ + arch_ftrace_regs(fregs)->epc = pc; +} + +static __always_inline unsigned long ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs) +{ + return arch_ftrace_regs(fregs)->sp; +} + +static __always_inline unsigned long ftrace_regs_get_frame_pointer(const struct ftrace_regs *fregs) +{ + return arch_ftrace_regs(fregs)->s0; +} + +static __always_inline unsigned long ftrace_regs_get_argument(struct ftrace_regs *fregs, + unsigned int n) +{ + if (n < 8) + return arch_ftrace_regs(fregs)->args[n]; + return 0; +} + +static __always_inline unsigned long ftrace_regs_get_return_value(const struct ftrace_regs *fregs) +{ + return arch_ftrace_regs(fregs)->a0; +} + +static __always_inline unsigned long ftrace_regs_get_return_address(const struct ftrace_regs *fregs) +{ + return arch_ftrace_regs(fregs)->ra; +} + +static __always_inline void ftrace_regs_set_return_value(struct ftrace_regs *fregs, + unsigned long ret) +{ + arch_ftrace_regs(fregs)->a0 = ret; +} + +static __always_inline void ftrace_override_function_with_return(struct ftrace_regs *fregs) +{ + arch_ftrace_regs(fregs)->epc = arch_ftrace_regs(fregs)->ra; +} + +static __always_inline struct pt_regs * +ftrace_partial_regs(const struct ftrace_regs *fregs, struct pt_regs *regs) +{ + struct __arch_ftrace_regs *afregs = arch_ftrace_regs(fregs); + + memcpy(®s->a_regs, afregs->args, sizeof(afregs->args)); + regs->epc = afregs->epc; + regs->ra = afregs->ra; + regs->sp = afregs->sp; + regs->s0 = afregs->s0; + regs->t1 = afregs->t1; + return regs; +} + +int ftrace_regs_query_register_offset(const char *name); + +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); +#define ftrace_graph_func ftrace_graph_func + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr) +{ + arch_ftrace_regs(fregs)->t1 = addr; +} +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ + +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* _ASM_RISCV_FTRACE_H */ diff --git a/arch/riscv/include/asm/futex.h 
b/arch/riscv/include/asm/futex.h index fc8130f995c1..90c86b115e00 100644 --- a/arch/riscv/include/asm/futex.h +++ b/arch/riscv/include/asm/futex.h @@ -85,7 +85,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __enable_user_access(); __asm__ __volatile__ ( - "1: lr.w.aqrl %[v],%[u] \n" + "1: lr.w %[v],%[u] \n" " bne %[v],%z[ov],3f \n" "2: sc.w.aqrl %[t],%z[nv],%[u] \n" " bnez %[t],1b \n" @@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp) - : [ov] "Jr" (oldval), [nv] "Jr" (newval) + : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval) : "memory"); __disable_user_access(); diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h index dfee2829fc7c..efeb5edf8a3a 100644 --- a/arch/riscv/include/asm/gpr-num.h +++ b/arch/riscv/include/asm/gpr-num.h @@ -3,6 +3,11 @@ #define __ASM_GPR_NUM_H #ifdef __ASSEMBLY__ + + .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + .equ .L__gpr_num_x\num, \num + .endr + .equ .L__gpr_num_zero, 0 .equ .L__gpr_num_ra, 1 .equ .L__gpr_num_sp, 2 @@ -39,6 +44,9 @@ #else /* __ASSEMBLY__ */ #define __DEFINE_ASM_GPR_NUMS \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \ +" .equ .L__gpr_num_x\\num, \\num\n" \ +" .endr\n" \ " .equ .L__gpr_num_zero, 0\n" \ " .equ .L__gpr_num_ra, 1\n" \ " .equ .L__gpr_num_sp, 2\n" \ diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index a5c2ca1d1cd8..446126497768 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -2,7 +2,56 @@ #ifndef _ASM_RISCV_HUGETLB_H #define _ASM_RISCV_HUGETLB_H -#include <asm-generic/hugetlb.h> +#include <asm/cacheflush.h> #include <asm/page.h> +static inline void arch_clear_hugetlb_flags(struct folio *folio) +{ + clear_bit(PG_dcache_clean, &folio->flags); +} +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags + +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION +bool arch_hugetlb_migration_supported(struct hstate *h); +#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported +#endif + +#ifdef CONFIG_RISCV_ISA_SVNAPOT +#define __HAVE_ARCH_HUGE_PTE_CLEAR +void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz); + +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +void set_huge_pte_at(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte, + unsigned long sz); + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long sz); + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty); + +#define __HAVE_ARCH_HUGE_PTEP_GET +pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + +pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); +#define arch_make_huge_pte arch_make_huge_pte + +#endif /*CONFIG_RISCV_ISA_SVNAPOT*/ + +#include <asm-generic/hugetlb.h> + #endif /* 
_ASM_RISCV_HUGETLB_H */ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 4e2486881840..affd63e11b0a 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -8,22 +8,8 @@ #ifndef _ASM_RISCV_HWCAP_H #define _ASM_RISCV_HWCAP_H -#include <linux/bits.h> #include <uapi/asm/hwcap.h> -#ifndef __ASSEMBLY__ -/* - * This yields a mask that user programs can use to figure out what - * instruction set this cpu supports. - */ -#define ELF_HWCAP (elf_hwcap) - -enum { - CAP_HWCAP = 1, -}; - -extern unsigned long elf_hwcap; - #define RISCV_ISA_EXT_a ('a' - 'a') #define RISCV_ISA_EXT_c ('c' - 'a') #define RISCV_ISA_EXT_d ('d' - 'a') @@ -31,46 +17,107 @@ extern unsigned long elf_hwcap; #define RISCV_ISA_EXT_h ('h' - 'a') #define RISCV_ISA_EXT_i ('i' - 'a') #define RISCV_ISA_EXT_m ('m' - 'a') -#define RISCV_ISA_EXT_s ('s' - 'a') -#define RISCV_ISA_EXT_u ('u' - 'a') +#define RISCV_ISA_EXT_q ('q' - 'a') +#define RISCV_ISA_EXT_v ('v' - 'a') /* - * Increse this to higher value as kernel support more ISA extensions. - */ -#define RISCV_ISA_EXT_MAX 64 -#define RISCV_ISA_EXT_NAME_LEN_MAX 32 - -/* The base ID for multi-letter ISA extensions */ -#define RISCV_ISA_EXT_BASE 26 - -/* - * This enum represent the logical ID for each multi-letter RISC-V ISA extension. - * The logical ID should start from RISCV_ISA_EXT_BASE and must not exceed - * RISCV_ISA_EXT_MAX. 0-25 range is reserved for single letter - * extensions while all the multi-letter extensions should define the next - * available logical extension id. + * These macros represent the logical IDs of each multi-letter RISC-V ISA + * extension and are used in the ISA bitmap. The logical IDs start from + * RISCV_ISA_EXT_BASE, which allows the 0-25 range to be reserved for single + * letter extensions. The maximum, RISCV_ISA_EXT_MAX, is defined in order + * to allocate the bitmap and may be increased when necessary. + * + * New extensions should just be added to the bottom, rather than added + * alphabetically, in order to avoid unnecessary shuffling. 
*/ -enum riscv_isa_ext_id { - RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE, - RISCV_ISA_EXT_SVPBMT, - RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, -}; - -struct riscv_isa_ext_data { - /* Name of the extension displayed to userspace via /proc/cpuinfo */ - char uprop[RISCV_ISA_EXT_NAME_LEN_MAX]; - /* The logical ISA extension ID */ - unsigned int isa_ext_id; -}; +#define RISCV_ISA_EXT_BASE 26 -unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); +#define RISCV_ISA_EXT_SSCOFPMF 26 +#define RISCV_ISA_EXT_SSTC 27 +#define RISCV_ISA_EXT_SVINVAL 28 +#define RISCV_ISA_EXT_SVPBMT 29 +#define RISCV_ISA_EXT_ZBB 30 +#define RISCV_ISA_EXT_ZICBOM 31 +#define RISCV_ISA_EXT_ZIHINTPAUSE 32 +#define RISCV_ISA_EXT_SVNAPOT 33 +#define RISCV_ISA_EXT_ZICBOZ 34 +#define RISCV_ISA_EXT_SMAIA 35 +#define RISCV_ISA_EXT_SSAIA 36 +#define RISCV_ISA_EXT_ZBA 37 +#define RISCV_ISA_EXT_ZBS 38 +#define RISCV_ISA_EXT_ZICNTR 39 +#define RISCV_ISA_EXT_ZICSR 40 +#define RISCV_ISA_EXT_ZIFENCEI 41 +#define RISCV_ISA_EXT_ZIHPM 42 +#define RISCV_ISA_EXT_SMSTATEEN 43 +#define RISCV_ISA_EXT_ZICOND 44 +#define RISCV_ISA_EXT_ZBC 45 +#define RISCV_ISA_EXT_ZBKB 46 +#define RISCV_ISA_EXT_ZBKC 47 +#define RISCV_ISA_EXT_ZBKX 48 +#define RISCV_ISA_EXT_ZKND 49 +#define RISCV_ISA_EXT_ZKNE 50 +#define RISCV_ISA_EXT_ZKNH 51 +#define RISCV_ISA_EXT_ZKR 52 +#define RISCV_ISA_EXT_ZKSED 53 +#define RISCV_ISA_EXT_ZKSH 54 +#define RISCV_ISA_EXT_ZKT 55 +#define RISCV_ISA_EXT_ZVBB 56 +#define RISCV_ISA_EXT_ZVBC 57 +#define RISCV_ISA_EXT_ZVKB 58 +#define RISCV_ISA_EXT_ZVKG 59 +#define RISCV_ISA_EXT_ZVKNED 60 +#define RISCV_ISA_EXT_ZVKNHA 61 +#define RISCV_ISA_EXT_ZVKNHB 62 +#define RISCV_ISA_EXT_ZVKSED 63 +#define RISCV_ISA_EXT_ZVKSH 64 +#define RISCV_ISA_EXT_ZVKT 65 +#define RISCV_ISA_EXT_ZFH 66 +#define RISCV_ISA_EXT_ZFHMIN 67 +#define RISCV_ISA_EXT_ZIHINTNTL 68 +#define RISCV_ISA_EXT_ZVFH 69 +#define RISCV_ISA_EXT_ZVFHMIN 70 +#define RISCV_ISA_EXT_ZFA 71 +#define RISCV_ISA_EXT_ZTSO 72 +#define RISCV_ISA_EXT_ZACAS 73 +#define RISCV_ISA_EXT_ZVE32X 74 +#define RISCV_ISA_EXT_ZVE32F 75 +#define RISCV_ISA_EXT_ZVE64X 76 +#define RISCV_ISA_EXT_ZVE64F 77 +#define RISCV_ISA_EXT_ZVE64D 78 +#define RISCV_ISA_EXT_ZIMOP 79 +#define RISCV_ISA_EXT_ZCA 80 +#define RISCV_ISA_EXT_ZCB 81 +#define RISCV_ISA_EXT_ZCD 82 +#define RISCV_ISA_EXT_ZCF 83 +#define RISCV_ISA_EXT_ZCMOP 84 +#define RISCV_ISA_EXT_ZAWRS 85 +#define RISCV_ISA_EXT_SVVPTC 86 +#define RISCV_ISA_EXT_SMMPM 87 +#define RISCV_ISA_EXT_SMNPM 88 +#define RISCV_ISA_EXT_SSNPM 89 +#define RISCV_ISA_EXT_ZABHA 90 +#define RISCV_ISA_EXT_ZICCRSE 91 +#define RISCV_ISA_EXT_SVADE 92 +#define RISCV_ISA_EXT_SVADU 93 +#define RISCV_ISA_EXT_ZFBFMIN 94 +#define RISCV_ISA_EXT_ZVFBFMIN 95 +#define RISCV_ISA_EXT_ZVFBFWMA 96 +#define RISCV_ISA_EXT_ZAAMO 97 +#define RISCV_ISA_EXT_ZALRSC 98 +#define RISCV_ISA_EXT_ZICBOP 99 -#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) +#define RISCV_ISA_EXT_XLINUXENVCFG 127 -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); -#define riscv_isa_extension_available(isa_bitmap, ext) \ - __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) +#define RISCV_ISA_EXT_MAX 128 +#define RISCV_ISA_EXT_INVALID U32_MAX +#ifdef CONFIG_RISCV_M_MODE +#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM +#else +#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM #endif #endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/asm/hwprobe.h 
b/arch/riscv/include/asm/hwprobe.h new file mode 100644 index 000000000000..7fe0a379474a --- /dev/null +++ b/arch/riscv/include/asm/hwprobe.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright 2023-2024 Rivos, Inc + */ + +#ifndef _ASM_HWPROBE_H +#define _ASM_HWPROBE_H + +#include <uapi/asm/hwprobe.h> + +#define RISCV_HWPROBE_MAX_KEY 13 + +static inline bool riscv_hwprobe_key_is_valid(__s64 key) +{ + return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY; +} + +static inline bool hwprobe_key_is_bitmask(__s64 key) +{ + switch (key) { + case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: + case RISCV_HWPROBE_KEY_IMA_EXT_0: + case RISCV_HWPROBE_KEY_CPUPERF_0: + case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0: + case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0: + return true; + } + + return false; +} + +static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair, + struct riscv_hwprobe *other_pair) +{ + if (pair->key != other_pair->key) + return false; + + if (hwprobe_key_is_bitmask(pair->key)) + return (pair->value & other_pair->value) == other_pair->value; + + return pair->value == other_pair->value; +} + +#endif diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h index e0b319af3681..8927a6ea1127 100644 --- a/arch/riscv/include/asm/image.h +++ b/arch/riscv/include/asm/image.h @@ -30,6 +30,8 @@ RISCV_HEADER_VERSION_MINOR) #ifndef __ASSEMBLY__ +#define riscv_image_flag_field(flags, field)\ + (((flags) >> field##_SHIFT) & field##_MASK) /** * struct riscv_image_header - riscv kernel image header * @code0: Executable code diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h new file mode 100644 index 000000000000..d5adbaec1d01 --- /dev/null +++ b/arch/riscv/include/asm/insn-def.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_INSN_DEF_H +#define __ASM_INSN_DEF_H + +#include <asm/asm.h> + +#define INSN_R_FUNC7_SHIFT 25 +#define INSN_R_RS2_SHIFT 20 +#define INSN_R_RS1_SHIFT 15 +#define INSN_R_FUNC3_SHIFT 12 +#define INSN_R_RD_SHIFT 7 +#define INSN_R_OPCODE_SHIFT 0 + +#define INSN_I_SIMM12_SHIFT 20 +#define INSN_I_RS1_SHIFT 15 +#define INSN_I_FUNC3_SHIFT 12 +#define INSN_I_RD_SHIFT 7 +#define INSN_I_OPCODE_SHIFT 0 + +#define INSN_S_SIMM7_SHIFT 25 +#define INSN_S_RS2_SHIFT 20 +#define INSN_S_RS1_SHIFT 15 +#define INSN_S_FUNC3_SHIFT 12 +#define INSN_S_SIMM5_SHIFT 7 +#define INSN_S_OPCODE_SHIFT 0 + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_AS_HAS_INSN + + .macro insn_r, opcode, func3, func7, rd, rs1, rs2 + .insn r \opcode, \func3, \func7, \rd, \rs1, \rs2 + .endm + + .macro insn_i, opcode, func3, rd, rs1, simm12 + .insn i \opcode, \func3, \rd, \rs1, \simm12 + .endm + + .macro insn_s, opcode, func3, rs2, simm12, rs1 + .insn s \opcode, \func3, \rs2, \simm12(\rs1) + .endm + +#else + +#include <asm/gpr-num.h> + + .macro insn_r, opcode, func3, func7, rd, rs1, rs2 + .4byte ((\opcode << INSN_R_OPCODE_SHIFT) | \ + (\func3 << INSN_R_FUNC3_SHIFT) | \ + (\func7 << INSN_R_FUNC7_SHIFT) | \ + (.L__gpr_num_\rd << INSN_R_RD_SHIFT) | \ + (.L__gpr_num_\rs1 << INSN_R_RS1_SHIFT) | \ + (.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT)) + .endm + + .macro insn_i, opcode, func3, rd, rs1, simm12 + .4byte ((\opcode << INSN_I_OPCODE_SHIFT) | \ + (\func3 << INSN_I_FUNC3_SHIFT) | \ + (.L__gpr_num_\rd << INSN_I_RD_SHIFT) | \ + (.L__gpr_num_\rs1 << INSN_I_RS1_SHIFT) | \ + (\simm12 << INSN_I_SIMM12_SHIFT)) + .endm + + .macro insn_s, opcode, func3, rs2, simm12, rs1 + .4byte ((\opcode << INSN_S_OPCODE_SHIFT) | \ + (\func3 << 
INSN_S_FUNC3_SHIFT) | \ + (.L__gpr_num_\rs2 << INSN_S_RS2_SHIFT) | \ + (.L__gpr_num_\rs1 << INSN_S_RS1_SHIFT) | \ + ((\simm12 & 0x1f) << INSN_S_SIMM5_SHIFT) | \ + (((\simm12 >> 5) & 0x7f) << INSN_S_SIMM7_SHIFT)) + .endm + +#endif + +#define __INSN_R(...) insn_r __VA_ARGS__ +#define __INSN_I(...) insn_i __VA_ARGS__ +#define __INSN_S(...) insn_s __VA_ARGS__ + +#else /* ! __ASSEMBLY__ */ + +#ifdef CONFIG_AS_HAS_INSN + +#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + ".insn r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" + +#define __INSN_I(opcode, func3, rd, rs1, simm12) \ + ".insn i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" + +#define __INSN_S(opcode, func3, rs2, simm12, rs1) \ + ".insn s " opcode ", " func3 ", " rs2 ", " simm12 "(" rs1 ")\n" + +#else + +#include <linux/stringify.h> +#include <asm/gpr-num.h> + +#define DEFINE_INSN_R \ + __DEFINE_ASM_GPR_NUMS \ +" .macro insn_r, opcode, func3, func7, rd, rs1, rs2\n" \ +" .4byte ((\\opcode << " __stringify(INSN_R_OPCODE_SHIFT) ") |" \ +" (\\func3 << " __stringify(INSN_R_FUNC3_SHIFT) ") |" \ +" (\\func7 << " __stringify(INSN_R_FUNC7_SHIFT) ") |" \ +" (.L__gpr_num_\\rd << " __stringify(INSN_R_RD_SHIFT) ") |" \ +" (.L__gpr_num_\\rs1 << " __stringify(INSN_R_RS1_SHIFT) ") |" \ +" (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n" \ +" .endm\n" + +#define DEFINE_INSN_I \ + __DEFINE_ASM_GPR_NUMS \ +" .macro insn_i, opcode, func3, rd, rs1, simm12\n" \ +" .4byte ((\\opcode << " __stringify(INSN_I_OPCODE_SHIFT) ") |" \ +" (\\func3 << " __stringify(INSN_I_FUNC3_SHIFT) ") |" \ +" (.L__gpr_num_\\rd << " __stringify(INSN_I_RD_SHIFT) ") |" \ +" (.L__gpr_num_\\rs1 << " __stringify(INSN_I_RS1_SHIFT) ") |" \ +" (\\simm12 << " __stringify(INSN_I_SIMM12_SHIFT) "))\n" \ +" .endm\n" + +#define DEFINE_INSN_S \ + __DEFINE_ASM_GPR_NUMS \ +" .macro insn_s, opcode, func3, rs2, simm12, rs1\n" \ +" .4byte ((\\opcode << " __stringify(INSN_S_OPCODE_SHIFT) ") |" \ +" (\\func3 << " __stringify(INSN_S_FUNC3_SHIFT) ") |" \ +" (.L__gpr_num_\\rs2 << " __stringify(INSN_S_RS2_SHIFT) ") |" \ +" (.L__gpr_num_\\rs1 << " __stringify(INSN_S_RS1_SHIFT) ") |" \ +" ((\\simm12 & 0x1f) << " __stringify(INSN_S_SIMM5_SHIFT) ") |" \ +" (((\\simm12 >> 5) & 0x7f) << " __stringify(INSN_S_SIMM7_SHIFT) "))\n" \ +" .endm\n" + +#define UNDEFINE_INSN_R \ +" .purgem insn_r\n" + +#define UNDEFINE_INSN_I \ +" .purgem insn_i\n" + +#define UNDEFINE_INSN_S \ +" .purgem insn_s\n" + +#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + DEFINE_INSN_R \ + "insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \ + UNDEFINE_INSN_R + +#define __INSN_I(opcode, func3, rd, rs1, simm12) \ + DEFINE_INSN_I \ + "insn_i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" \ + UNDEFINE_INSN_I + +#define __INSN_S(opcode, func3, rs2, simm12, rs1) \ + DEFINE_INSN_S \ + "insn_s " opcode ", " func3 ", " rs2 ", " simm12 ", " rs1 "\n" \ + UNDEFINE_INSN_S + +#endif + +#endif /* ! 
__ASSEMBLY__ */ + +#define INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + __INSN_R(RV_##opcode, RV_##func3, RV_##func7, \ + RV_##rd, RV_##rs1, RV_##rs2) + +#define INSN_I(opcode, func3, rd, rs1, simm12) \ + __INSN_I(RV_##opcode, RV_##func3, RV_##rd, \ + RV_##rs1, RV_##simm12) + +#define INSN_S(opcode, func3, rs2, simm12, rs1) \ + __INSN_S(RV_##opcode, RV_##func3, RV_##rs2, \ + RV_##simm12, RV_##rs1) + +#define RV_OPCODE(v) __ASM_STR(v) +#define RV_FUNC3(v) __ASM_STR(v) +#define RV_FUNC7(v) __ASM_STR(v) +#define RV_SIMM12(v) __ASM_STR(v) +#define RV_RD(v) __ASM_STR(v) +#define RV_RS1(v) __ASM_STR(v) +#define RV_RS2(v) __ASM_STR(v) +#define __RV_REG(v) __ASM_STR(x ## v) +#define RV___RD(v) __RV_REG(v) +#define RV___RS1(v) __RV_REG(v) +#define RV___RS2(v) __RV_REG(v) + +#define RV_OPCODE_MISC_MEM RV_OPCODE(15) +#define RV_OPCODE_OP_IMM RV_OPCODE(19) +#define RV_OPCODE_SYSTEM RV_OPCODE(115) + +#define HFENCE_VVMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(17), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define HFENCE_GVMA(gaddr, vmid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49), \ + __RD(0), RS1(gaddr), RS2(vmid)) + +#define HLVX_HU(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(50), \ + RD(dest), RS1(addr), __RS2(3)) + +#define HLV_W(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(52), \ + RD(dest), RS1(addr), __RS2(0)) + +#ifdef CONFIG_64BIT +#define HLV_D(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(54), \ + RD(dest), RS1(addr), __RS2(0)) +#else +#define HLV_D(dest, addr) \ + __ASM_STR(.error "hlv.d requires 64-bit support") +#endif + +#define SINVAL_VMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(11), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define SFENCE_W_INVAL() \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \ + __RD(0), __RS1(0), __RS2(0)) + +#define SFENCE_INVAL_IR() \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \ + __RD(0), __RS1(0), __RS2(1)) + +#define HINVAL_VVMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(19), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define HINVAL_GVMA(gaddr, vmid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \ + __RD(0), RS1(gaddr), RS2(vmid)) + +#define CBO_INVAL(base) \ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(0)) + +#define CBO_CLEAN(base) \ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(1)) + +#define CBO_FLUSH(base) \ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(2)) + +#define CBO_ZERO(base) \ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(4)) + +#define PREFETCH_I(base, offset) \ + INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(0), \ + SIMM12((offset) & 0xfe0), RS1(base)) + +#define PREFETCH_R(base, offset) \ + INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(1), \ + SIMM12((offset) & 0xfe0), RS1(base)) + +#define PREFETCH_W(base, offset) \ + INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(3), \ + SIMM12((offset) & 0xfe0), RS1(base)) + +#define RISCV_PAUSE ".4byte 0x100000f" +#define ZAWRS_WRS_NTO ".4byte 0x00d00073" +#define ZAWRS_WRS_STO ".4byte 0x01d00073" +#define RISCV_NOP4 ".4byte 0x00000013" + +#define RISCV_INSN_NOP4 _AC(0x00000013, U) + +#ifndef __ASSEMBLY__ +#define nop() __asm__ __volatile__ ("nop") +#define __nops(n) ".rept " #n "\nnop\n.endr\n" +#define nops(n) __asm__ __volatile__ (__nops(n)) +#endif + +#endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h new file mode 100644 index 000000000000..09fde95a5e8f --- /dev/null +++ b/arch/riscv/include/asm/insn.h @@ -0,0 +1,431 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 SiFive + */ + +#ifndef _ASM_RISCV_INSN_H +#define _ASM_RISCV_INSN_H + +#include <linux/bits.h> + +#define RV_INSN_FUNCT3_MASK GENMASK(14, 12) +#define RV_INSN_FUNCT3_OPOFF 12 +#define RV_INSN_OPCODE_MASK GENMASK(6, 0) +#define RV_INSN_OPCODE_OPOFF 0 +#define RV_INSN_FUNCT12_OPOFF 20 + +#define RV_ENCODE_FUNCT3(f_) (RVG_FUNCT3_##f_ << RV_INSN_FUNCT3_OPOFF) +#define RV_ENCODE_FUNCT12(f_) (RVG_FUNCT12_##f_ << RV_INSN_FUNCT12_OPOFF) + +/* The bit field of immediate value in I-type instruction */ +#define RV_I_IMM_SIGN_OPOFF 31 +#define RV_I_IMM_11_0_OPOFF 20 +#define RV_I_IMM_SIGN_OFF 12 +#define RV_I_IMM_11_0_OFF 0 +#define RV_I_IMM_11_0_MASK GENMASK(11, 0) + +/* The bit field of immediate value in J-type instruction */ +#define RV_J_IMM_SIGN_OPOFF 31 +#define RV_J_IMM_10_1_OPOFF 21 +#define RV_J_IMM_11_OPOFF 20 +#define RV_J_IMM_19_12_OPOFF 12 +#define RV_J_IMM_SIGN_OFF 20 +#define RV_J_IMM_10_1_OFF 1 +#define RV_J_IMM_11_OFF 11 +#define RV_J_IMM_19_12_OFF 12 +#define RV_J_IMM_10_1_MASK GENMASK(9, 0) +#define RV_J_IMM_11_MASK GENMASK(0, 0) +#define RV_J_IMM_19_12_MASK GENMASK(7, 0) + +/* + * U-type IMMs contain the upper 20bits [31:20] of an immediate with + * the rest filled in by zeros, so no shifting required. Similarly, + * bit31 contains the signed state, so no sign extension necessary. + */ +#define RV_U_IMM_SIGN_OPOFF 31 +#define RV_U_IMM_31_12_OPOFF 0 +#define RV_U_IMM_31_12_MASK GENMASK(31, 12) + +/* The bit field of immediate value in B-type instruction */ +#define RV_B_IMM_SIGN_OPOFF 31 +#define RV_B_IMM_10_5_OPOFF 25 +#define RV_B_IMM_4_1_OPOFF 8 +#define RV_B_IMM_11_OPOFF 7 +#define RV_B_IMM_SIGN_OFF 12 +#define RV_B_IMM_10_5_OFF 5 +#define RV_B_IMM_4_1_OFF 1 +#define RV_B_IMM_11_OFF 11 +#define RV_B_IMM_10_5_MASK GENMASK(5, 0) +#define RV_B_IMM_4_1_MASK GENMASK(3, 0) +#define RV_B_IMM_11_MASK GENMASK(0, 0) + +/* The register offset in RVG instruction */ +#define RVG_RS1_OPOFF 15 +#define RVG_RS2_OPOFF 20 +#define RVG_RD_OPOFF 7 +#define RVG_RS1_MASK GENMASK(4, 0) +#define RVG_RD_MASK GENMASK(4, 0) + +/* The bit field of immediate value in RVC J instruction */ +#define RVC_J_IMM_SIGN_OPOFF 12 +#define RVC_J_IMM_4_OPOFF 11 +#define RVC_J_IMM_9_8_OPOFF 9 +#define RVC_J_IMM_10_OPOFF 8 +#define RVC_J_IMM_6_OPOFF 7 +#define RVC_J_IMM_7_OPOFF 6 +#define RVC_J_IMM_3_1_OPOFF 3 +#define RVC_J_IMM_5_OPOFF 2 +#define RVC_J_IMM_SIGN_OFF 11 +#define RVC_J_IMM_4_OFF 4 +#define RVC_J_IMM_9_8_OFF 8 +#define RVC_J_IMM_10_OFF 10 +#define RVC_J_IMM_6_OFF 6 +#define RVC_J_IMM_7_OFF 7 +#define RVC_J_IMM_3_1_OFF 1 +#define RVC_J_IMM_5_OFF 5 +#define RVC_J_IMM_4_MASK GENMASK(0, 0) +#define RVC_J_IMM_9_8_MASK GENMASK(1, 0) +#define RVC_J_IMM_10_MASK GENMASK(0, 0) +#define RVC_J_IMM_6_MASK GENMASK(0, 0) +#define RVC_J_IMM_7_MASK GENMASK(0, 0) +#define RVC_J_IMM_3_1_MASK GENMASK(2, 0) +#define RVC_J_IMM_5_MASK GENMASK(0, 0) + +/* The bit field of immediate value in RVC B instruction */ +#define RVC_B_IMM_SIGN_OPOFF 12 +#define RVC_B_IMM_4_3_OPOFF 10 +#define RVC_B_IMM_7_6_OPOFF 5 +#define RVC_B_IMM_2_1_OPOFF 3 +#define RVC_B_IMM_5_OPOFF 2 +#define RVC_B_IMM_SIGN_OFF 8 +#define RVC_B_IMM_4_3_OFF 3 +#define RVC_B_IMM_7_6_OFF 6 +#define RVC_B_IMM_2_1_OFF 1 +#define RVC_B_IMM_5_OFF 5 +#define RVC_B_IMM_4_3_MASK GENMASK(1, 0) +#define RVC_B_IMM_7_6_MASK GENMASK(1, 0) +#define RVC_B_IMM_2_1_MASK GENMASK(1, 0) +#define RVC_B_IMM_5_MASK GENMASK(0, 0) + +#define RVC_INSN_FUNCT4_MASK GENMASK(15, 12) +#define RVC_INSN_FUNCT4_OPOFF 12 +#define 
RVC_INSN_FUNCT3_MASK GENMASK(15, 13) +#define RVC_INSN_FUNCT3_OPOFF 13 +#define RVC_INSN_J_RS1_MASK GENMASK(11, 7) +#define RVC_INSN_J_RS2_MASK GENMASK(6, 2) +#define RVC_INSN_OPCODE_MASK GENMASK(1, 0) +#define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF) +#define RVC_ENCODE_FUNCT4(f_) (RVC_FUNCT4_##f_ << RVC_INSN_FUNCT4_OPOFF) + +/* The register offset in RVC op=C0 instruction */ +#define RVC_C0_RS1_OPOFF 7 +#define RVC_C0_RS2_OPOFF 2 +#define RVC_C0_RD_OPOFF 2 + +/* The register offset in RVC op=C1 instruction */ +#define RVC_C1_RS1_OPOFF 7 +#define RVC_C1_RS2_OPOFF 2 +#define RVC_C1_RD_OPOFF 7 + +/* The register offset in RVC op=C2 instruction */ +#define RVC_C2_RS1_OPOFF 7 +#define RVC_C2_RS2_OPOFF 2 +#define RVC_C2_RD_OPOFF 7 +#define RVC_C2_RS1_MASK GENMASK(4, 0) + +/* parts of opcode for RVG*/ +#define RVG_OPCODE_FENCE 0x0f +#define RVG_OPCODE_AUIPC 0x17 +#define RVG_OPCODE_BRANCH 0x63 +#define RVG_OPCODE_JALR 0x67 +#define RVG_OPCODE_JAL 0x6f +#define RVG_OPCODE_SYSTEM 0x73 +#define RVG_SYSTEM_CSR_OFF 20 +#define RVG_SYSTEM_CSR_MASK GENMASK(12, 0) + +/* parts of opcode for RVF, RVD and RVQ */ +#define RVFDQ_FL_FS_WIDTH_OFF 12 +#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(2, 0) +#define RVFDQ_FL_FS_WIDTH_W 2 +#define RVFDQ_FL_FS_WIDTH_D 3 +#define RVFDQ_LS_FS_WIDTH_Q 4 +#define RVFDQ_OPCODE_FL 0x07 +#define RVFDQ_OPCODE_FS 0x27 + +/* parts of opcode for RVV */ +#define RVV_OPCODE_VECTOR 0x57 +#define RVV_VL_VS_WIDTH_8 0 +#define RVV_VL_VS_WIDTH_16 5 +#define RVV_VL_VS_WIDTH_32 6 +#define RVV_VL_VS_WIDTH_64 7 +#define RVV_OPCODE_VL RVFDQ_OPCODE_FL +#define RVV_OPCODE_VS RVFDQ_OPCODE_FS + +/* parts of opcode for RVC*/ +#define RVC_OPCODE_C0 0x0 +#define RVC_OPCODE_C1 0x1 +#define RVC_OPCODE_C2 0x2 + +/* parts of funct3 code for I, M, A extension*/ +#define RVG_FUNCT3_JALR 0x0 +#define RVG_FUNCT3_BEQ 0x0 +#define RVG_FUNCT3_BNE 0x1 +#define RVG_FUNCT3_BLT 0x4 +#define RVG_FUNCT3_BGE 0x5 +#define RVG_FUNCT3_BLTU 0x6 +#define RVG_FUNCT3_BGEU 0x7 + +/* parts of funct3 code for C extension*/ +#define RVC_FUNCT3_C_BEQZ 0x6 +#define RVC_FUNCT3_C_BNEZ 0x7 +#define RVC_FUNCT3_C_J 0x5 +#define RVC_FUNCT3_C_JAL 0x1 +#define RVC_FUNCT4_C_JR 0x8 +#define RVC_FUNCT4_C_JALR 0x9 +#define RVC_FUNCT4_C_EBREAK 0x9 + +#define RVG_FUNCT12_EBREAK 0x1 +#define RVG_FUNCT12_SRET 0x102 + +#define RVG_MATCH_AUIPC (RVG_OPCODE_AUIPC) +#define RVG_MATCH_JALR (RV_ENCODE_FUNCT3(JALR) | RVG_OPCODE_JALR) +#define RVG_MATCH_JAL (RVG_OPCODE_JAL) +#define RVG_MATCH_FENCE (RVG_OPCODE_FENCE) +#define RVG_MATCH_BEQ (RV_ENCODE_FUNCT3(BEQ) | RVG_OPCODE_BRANCH) +#define RVG_MATCH_BNE (RV_ENCODE_FUNCT3(BNE) | RVG_OPCODE_BRANCH) +#define RVG_MATCH_BLT (RV_ENCODE_FUNCT3(BLT) | RVG_OPCODE_BRANCH) +#define RVG_MATCH_BGE (RV_ENCODE_FUNCT3(BGE) | RVG_OPCODE_BRANCH) +#define RVG_MATCH_BLTU (RV_ENCODE_FUNCT3(BLTU) | RVG_OPCODE_BRANCH) +#define RVG_MATCH_BGEU (RV_ENCODE_FUNCT3(BGEU) | RVG_OPCODE_BRANCH) +#define RVG_MATCH_EBREAK (RV_ENCODE_FUNCT12(EBREAK) | RVG_OPCODE_SYSTEM) +#define RVG_MATCH_SRET (RV_ENCODE_FUNCT12(SRET) | RVG_OPCODE_SYSTEM) +#define RVC_MATCH_C_BEQZ (RVC_ENCODE_FUNCT3(C_BEQZ) | RVC_OPCODE_C1) +#define RVC_MATCH_C_BNEZ (RVC_ENCODE_FUNCT3(C_BNEZ) | RVC_OPCODE_C1) +#define RVC_MATCH_C_J (RVC_ENCODE_FUNCT3(C_J) | RVC_OPCODE_C1) +#define RVC_MATCH_C_JAL (RVC_ENCODE_FUNCT3(C_JAL) | RVC_OPCODE_C1) +#define RVC_MATCH_C_JR (RVC_ENCODE_FUNCT4(C_JR) | RVC_OPCODE_C2) +#define RVC_MATCH_C_JALR (RVC_ENCODE_FUNCT4(C_JALR) | RVC_OPCODE_C2) +#define RVC_MATCH_C_EBREAK (RVC_ENCODE_FUNCT4(C_EBREAK) | RVC_OPCODE_C2) + 
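The RVG_MATCH_*/RVC_MATCH_* values above pair with the RVG_MASK_*/RVC_MASK_* values that follow: an instruction word is classified with a plain (insn & mask) == match test, which is exactly what the __RISCV_INSN_FUNCS() generator defined a little further down expands to. A minimal standalone sketch of that idiom, with the two encodings open-coded here rather than pulled from the header (0x6f is the JAL major opcode; 0x00100073 is the fixed EBREAK encoding, i.e. funct12 = 1 with the SYSTEM opcode):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the (insn & mask) == match idiom used by __RISCV_INSN_FUNCS(). */
#define SKETCH_OPCODE_MASK	0x0000007fu	/* low 7 bits select the major opcode */
#define SKETCH_MATCH_JAL	0x0000006fu	/* same value as RVG_OPCODE_JAL */
#define SKETCH_MASK_EBREAK	0xffffffffu	/* ebreak has no variable fields */
#define SKETCH_MATCH_EBREAK	0x00100073u	/* funct12 = 1, opcode = SYSTEM */

static bool sketch_insn_is_jal(uint32_t insn)
{
	return (insn & SKETCH_OPCODE_MASK) == SKETCH_MATCH_JAL;
}

static bool sketch_insn_is_ebreak(uint32_t insn)
{
	return (insn & SKETCH_MASK_EBREAK) == SKETCH_MATCH_EBREAK;
}

The riscv_insn_is_*() helpers generated below behave the same way, with an added BUILD_BUG_ON() check that the match value never sets a bit outside its mask.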
+#define RVG_MASK_AUIPC (RV_INSN_OPCODE_MASK) +#define RVG_MASK_JALR (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) +#define RVG_MASK_JAL (RV_INSN_OPCODE_MASK) +#define RVG_MASK_FENCE (RV_INSN_OPCODE_MASK) +#define RVC_MASK_C_JALR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK) +#define RVC_MASK_C_JR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK) +#define RVC_MASK_C_JAL (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) +#define RVC_MASK_C_J (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) +#define RVG_MASK_BEQ (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) +#define RVG_MASK_BNE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) +#define RVG_MASK_BLT (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) +#define RVG_MASK_BGE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) +#define RVG_MASK_BLTU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) +#define RVG_MASK_BGEU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) +#define RVC_MASK_C_BEQZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) +#define RVC_MASK_C_BNEZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) +#define RVC_MASK_C_EBREAK 0xffff +#define RVG_MASK_EBREAK 0xffffffff +#define RVG_MASK_SRET 0xffffffff + +#define __INSN_LENGTH_MASK _UL(0x3) +#define __INSN_LENGTH_GE_32 _UL(0x3) +#define __INSN_OPCODE_MASK _UL(0x7F) +#define __INSN_BRANCH_OPCODE _UL(RVG_OPCODE_BRANCH) + +#define __RISCV_INSN_FUNCS(name, mask, val) \ +static __always_inline bool riscv_insn_is_##name(u32 code) \ +{ \ + BUILD_BUG_ON(~(mask) & (val)); \ + return (code & (mask)) == (val); \ +} \ + +#if __riscv_xlen == 32 +/* C.JAL is an RV32C-only instruction */ +__RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL) +#else +#define riscv_insn_is_c_jal(opcode) 0 +#endif +__RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC) +__RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR) +__RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL) +__RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J) +__RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ) +__RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE) +__RISCV_INSN_FUNCS(blt, RVG_MASK_BLT, RVG_MATCH_BLT) +__RISCV_INSN_FUNCS(bge, RVG_MASK_BGE, RVG_MATCH_BGE) +__RISCV_INSN_FUNCS(bltu, RVG_MASK_BLTU, RVG_MATCH_BLTU) +__RISCV_INSN_FUNCS(bgeu, RVG_MASK_BGEU, RVG_MATCH_BGEU) +__RISCV_INSN_FUNCS(c_beqz, RVC_MASK_C_BEQZ, RVC_MATCH_C_BEQZ) +__RISCV_INSN_FUNCS(c_bnez, RVC_MASK_C_BNEZ, RVC_MATCH_C_BNEZ) +__RISCV_INSN_FUNCS(c_ebreak, RVC_MASK_C_EBREAK, RVC_MATCH_C_EBREAK) +__RISCV_INSN_FUNCS(ebreak, RVG_MASK_EBREAK, RVG_MATCH_EBREAK) +__RISCV_INSN_FUNCS(sret, RVG_MASK_SRET, RVG_MATCH_SRET) +__RISCV_INSN_FUNCS(fence, RVG_MASK_FENCE, RVG_MATCH_FENCE); + +/* special case to catch _any_ system instruction */ +static __always_inline bool riscv_insn_is_system(u32 code) +{ + return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_SYSTEM; +} + +/* special case to catch _any_ branch instruction */ +static __always_inline bool riscv_insn_is_branch(u32 code) +{ + return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH; +} + +static __always_inline bool riscv_insn_is_c_jr(u32 code) +{ + return (code & RVC_MASK_C_JR) == RVC_MATCH_C_JR && + (code & RVC_INSN_J_RS1_MASK) != 0; +} + +static __always_inline bool riscv_insn_is_c_jalr(u32 code) +{ + return (code & RVC_MASK_C_JALR) == RVC_MATCH_C_JALR && + (code & RVC_INSN_J_RS1_MASK) != 0; +} + +#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) +#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) +#define RV_X(X, s, mask) (((X) >> (s)) & (mask)) +#define RVC_X(X, s, mask) RV_X(X, s, mask) + +#define RV_EXTRACT_RS1_REG(x) \ + 
({typeof(x) x_ = (x); \ + (RV_X(x_, RVG_RS1_OPOFF, RVG_RS1_MASK)); }) + +#define RV_EXTRACT_RD_REG(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); }) + +#define RV_EXTRACT_UTYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, RV_U_IMM_31_12_OPOFF, RV_U_IMM_31_12_MASK)); }) + +#define RV_EXTRACT_JTYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, RV_J_IMM_10_1_OPOFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OFF) | \ + (RV_X(x_, RV_J_IMM_11_OPOFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OFF) | \ + (RV_X(x_, RV_J_IMM_19_12_OPOFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OFF) | \ + (RV_IMM_SIGN(x_) << RV_J_IMM_SIGN_OFF); }) + +#define RV_EXTRACT_ITYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, RV_I_IMM_11_0_OPOFF, RV_I_IMM_11_0_MASK)) | \ + (RV_IMM_SIGN(x_) << RV_I_IMM_SIGN_OFF); }) + +#define RV_EXTRACT_BTYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, RV_B_IMM_4_1_OPOFF, RV_B_IMM_4_1_MASK) << RV_B_IMM_4_1_OFF) | \ + (RV_X(x_, RV_B_IMM_10_5_OPOFF, RV_B_IMM_10_5_MASK) << RV_B_IMM_10_5_OFF) | \ + (RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \ + (RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); }) + +#define RVC_EXTRACT_C2_RS1_REG(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, RVC_C2_RS1_OPOFF, RVC_C2_RS1_MASK)); }) + +#define RVC_EXTRACT_JTYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \ + (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \ + (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \ + (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \ + (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \ + (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \ + (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \ + (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); }) + +#define RVC_EXTRACT_BTYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \ + (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \ + (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \ + (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \ + (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) + +#define RVG_EXTRACT_SYSTEM_CSR(x) \ + ({typeof(x) x_ = (x); RV_X(x_, RVG_SYSTEM_CSR_OFF, RVG_SYSTEM_CSR_MASK); }) + +#define RVFDQ_EXTRACT_FL_FS_WIDTH(x) \ + ({typeof(x) x_ = (x); RV_X(x_, RVFDQ_FL_FS_WIDTH_OFF, \ + RVFDQ_FL_FS_WIDTH_MASK); }) + +#define RVV_EXRACT_VL_VS_WIDTH(x) RVFDQ_EXTRACT_FL_FS_WIDTH(x) + +/* + * Get the immediate from a J-type instruction. + * + * @insn: instruction to process + * Return: immediate + */ +static inline s32 riscv_insn_extract_jtype_imm(u32 insn) +{ + return RV_EXTRACT_JTYPE_IMM(insn); +} + +/* + * Update a J-type instruction with an immediate value. 
+ * + * @insn: pointer to the jtype instruction + * @imm: the immediate to insert into the instruction + */ +static inline void riscv_insn_insert_jtype_imm(u32 *insn, s32 imm) +{ + /* drop the old IMMs, all jal IMM bits sit at 31:12 */ + *insn &= ~GENMASK(31, 12); + *insn |= (RV_X(imm, RV_J_IMM_10_1_OFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OPOFF) | + (RV_X(imm, RV_J_IMM_11_OFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OPOFF) | + (RV_X(imm, RV_J_IMM_19_12_OFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OPOFF) | + (RV_X(imm, RV_J_IMM_SIGN_OFF, 1) << RV_J_IMM_SIGN_OPOFF); +} + +/* + * Put together one immediate from a U-type and I-type instruction pair. + * + * The U-type contains an upper immediate, meaning bits[31:12] with [11:0] + * being zero, while the I-type contains a 12bit immediate. + * Combined these can encode larger 32bit values and are used for example + * in auipc + jalr pairs to allow larger jumps. + * + * @utype_insn: instruction containing the upper immediate + * @itype_insn: instruction + * Return: combined immediate + */ +static inline s32 riscv_insn_extract_utype_itype_imm(u32 utype_insn, u32 itype_insn) +{ + s32 imm; + + imm = RV_EXTRACT_UTYPE_IMM(utype_insn); + imm += RV_EXTRACT_ITYPE_IMM(itype_insn); + + return imm; +} + +/* + * Update a set of two instructions (U-type + I-type) with an immediate value. + * + * Used for example in auipc+jalrs pairs the U-type instructions contains + * a 20bit upper immediate representing bits[31:12], while the I-type + * instruction contains a 12bit immediate representing bits[11:0]. + * + * This also takes into account that both separate immediates are + * considered as signed values, so if the I-type immediate becomes + * negative (BIT(11) set) the U-type part gets adjusted. + * + * @utype_insn: pointer to the utype instruction of the pair + * @itype_insn: pointer to the itype instruction of the pair + * @imm: the immediate to insert into the two instructions + */ +static inline void riscv_insn_insert_utype_itype_imm(u32 *utype_insn, u32 *itype_insn, s32 imm) +{ + /* drop possible old IMM values */ + *utype_insn &= ~(RV_U_IMM_31_12_MASK); + *itype_insn &= ~(RV_I_IMM_11_0_MASK << RV_I_IMM_11_0_OPOFF); + + /* add the adapted IMMs */ + *utype_insn |= (imm & RV_U_IMM_31_12_MASK) + ((imm & BIT(11)) << 1); + *itype_insn |= ((imm & RV_I_IMM_11_0_MASK) << RV_I_IMM_11_0_OPOFF); +} +#endif /* _ASM_RISCV_INSN_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 69605a474270..a0e51840b9db 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -47,10 +47,10 @@ * sufficient to ensure this works sanely on controllers that support I/O * writes. */ -#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory"); -#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory"); -#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); -#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); +#define __io_pbr() RISCV_FENCE(io, i) +#define __io_par(v) RISCV_FENCE(i, ior) +#define __io_pbw() RISCV_FENCE(iow, o) +#define __io_paw() RISCV_FENCE(o, io) /* * Accesses from a single hart to a single I/O address must be ordered. 
This @@ -101,9 +101,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr)) __io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr)) __io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr)) __io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr)) -#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count) -#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count) -#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count) +#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count) +#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count) +#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count) __io_writes_outs(writes, u8, b, __io_bw(), __io_aw()) __io_writes_outs(writes, u16, w, __io_bw(), __io_aw()) @@ -115,24 +115,29 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw()) __io_writes_outs(outs, u8, b, __io_pbw(), __io_paw()) __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw()) __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) -#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count) -#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count) -#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count) +#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count) +#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count) +#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count) #ifdef CONFIG_64BIT __io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr)) #define readsq(addr, buffer, count) __readsq(addr, buffer, count) __io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr)) -#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count) +#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count) __io_writes_outs(writes, u64, q, __io_bw(), __io_aw()) #define writesq(addr, buffer, count) __writesq(addr, buffer, count) __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) -#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count) +#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count) #endif #include <asm-generic/io.h> +#ifdef CONFIG_MMU +#define arch_memremap_wb(addr, size, flags) \ + ((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL))) +#endif + #endif /* _ASM_RISCV_IO_H */ diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h index e4c435509983..7b038f3b7cb0 100644 --- a/arch/riscv/include/asm/irq.h +++ b/arch/riscv/include/asm/irq.h @@ -12,6 +12,68 @@ #include <asm-generic/irq.h> -extern void __init init_IRQ(void); +#define INVALID_CONTEXT UINT_MAX + +#ifdef CONFIG_SMP +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace +#endif + +void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)); + +struct fwnode_handle *riscv_get_intc_hwnode(void); + +#ifdef CONFIG_ACPI + +enum riscv_irqchip_type { + ACPI_RISCV_IRQCHIP_INTC = 0x00, + ACPI_RISCV_IRQCHIP_IMSIC = 0x01, + ACPI_RISCV_IRQCHIP_PLIC = 0x02, + ACPI_RISCV_IRQCHIP_APLIC = 0x03, +}; + +int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs); +struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi); +unsigned long acpi_rintc_index_to_hartid(u32 index); +unsigned long 
acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx); +unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id); +unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx); +int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res); + +#else +static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs) +{ + return 0; +} + +static inline unsigned long acpi_rintc_index_to_hartid(u32 index) +{ + return INVALID_HARTID; +} + +static inline unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, + unsigned int ctxt_idx) +{ + return INVALID_HARTID; +} + +static inline unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) +{ + return INVALID_CONTEXT; +} + +static inline unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) +{ + return INVALID_CONTEXT; +} + +static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) +{ + return 0; +} + +#endif /* CONFIG_ACPI */ #endif /* _ASM_RISCV_IRQ_H */ diff --git a/arch/riscv/include/asm/irq_stack.h b/arch/riscv/include/asm/irq_stack.h new file mode 100644 index 000000000000..6441ded3b0cf --- /dev/null +++ b/arch/riscv/include/asm/irq_stack.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_IRQ_STACK_H +#define _ASM_RISCV_IRQ_STACK_H + +#include <linux/bug.h> +#include <linux/gfp.h> +#include <linux/kconfig.h> +#include <linux/vmalloc.h> +#include <linux/pgtable.h> +#include <asm/thread_info.h> + +DECLARE_PER_CPU(ulong *, irq_stack_ptr); + +asmlinkage void call_on_irq_stack(struct pt_regs *regs, + void (*func)(struct pt_regs *)); + +#ifdef CONFIG_VMAP_STACK +/* + * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd + * stacks need to have the same alignment. + */ +static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node) +{ + void *p; + + p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node, + __builtin_return_address(0)); + return kasan_reset_tag(p); +} +#endif /* CONFIG_VMAP_STACK */ + +#endif /* _ASM_RISCV_IRQ_STACK_H */ diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h index b53891964ae0..b27a4d64fc6a 100644 --- a/arch/riscv/include/asm/irq_work.h +++ b/arch/riscv/include/asm/irq_work.h @@ -6,5 +6,5 @@ static inline bool arch_irq_work_has_interrupt(void) { return IS_ENABLED(CONFIG_SMP); } -extern void arch_irq_work_raise(void); + #endif /* _ASM_RISCV_IRQ_WORK_H */ diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h index 08d4d6a5b7e9..6fd8cbfcfcc7 100644 --- a/arch/riscv/include/asm/irqflags.h +++ b/arch/riscv/include/asm/irqflags.h @@ -7,7 +7,6 @@ #ifndef _ASM_RISCV_IRQFLAGS_H #define _ASM_RISCV_IRQFLAGS_H -#include <asm/processor.h> #include <asm/csr.h> /* read interrupt enabled status */ diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h index 38af2ec7b9bf..87a71cc6d146 100644 --- a/arch/riscv/include/asm/jump_label.h +++ b/arch/riscv/include/asm/jump_label.h @@ -12,22 +12,32 @@ #include <linux/types.h> #include <asm/asm.h> +#define HAVE_JUMP_LABEL_BATCH + #define JUMP_LABEL_NOP_SIZE 4 -static __always_inline bool arch_static_branch(struct static_key *key, - bool branch) +#define JUMP_TABLE_ENTRY(key, label) \ + ".pushsection __jump_table, \"aw\" \n\t" \ + ".align " RISCV_LGPTR " \n\t" \ + ".long 1b - ., " label " - . 
\n\t" \ + "" RISCV_PTR " " key " - . \n\t" \ + ".popsection \n\t" + +/* This macro is also expanded on the Rust side. */ +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + " .align 2 \n\t" \ + " .option push \n\t" \ + " .option norelax \n\t" \ + " .option norvc \n\t" \ + "1: nop \n\t" \ + " .option pop \n\t" \ + JUMP_TABLE_ENTRY(key, label) + +static __always_inline bool arch_static_branch(struct static_key * const key, + const bool branch) { - asm_volatile_goto( - " .option push \n\t" - " .option norelax \n\t" - " .option norvc \n\t" - "1: nop \n\t" - " .option pop \n\t" - " .pushsection __jump_table, \"aw\" \n\t" - " .align " RISCV_LGPTR " \n\t" - " .long 1b - ., %l[label] - . \n\t" - " " RISCV_PTR " %0 - . \n\t" - " .popsection \n\t" + asm goto( + ARCH_STATIC_BRANCH_ASM("%0", "%l[label]") : : "i"(&((char *)key)[branch]) : : label); return false; @@ -35,20 +45,20 @@ label: return true; } -static __always_inline bool arch_static_branch_jump(struct static_key *key, - bool branch) +#define ARCH_STATIC_BRANCH_JUMP_ASM(key, label) \ + " .align 2 \n\t" \ + " .option push \n\t" \ + " .option norelax \n\t" \ + " .option norvc \n\t" \ + "1: j " label " \n\t" \ + " .option pop \n\t" \ + JUMP_TABLE_ENTRY(key, label) + +static __always_inline bool arch_static_branch_jump(struct static_key * const key, + const bool branch) { - asm_volatile_goto( - " .option push \n\t" - " .option norelax \n\t" - " .option norvc \n\t" - "1: jal zero, %l[label] \n\t" - " .option pop \n\t" - " .pushsection __jump_table, \"aw\" \n\t" - " .align " RISCV_LGPTR " \n\t" - " .long 1b - ., %l[label] - . \n\t" - " " RISCV_PTR " %0 - . \n\t" - " .popsection \n\t" + asm goto( + ARCH_STATIC_BRANCH_JUMP_ASM("%0", "%l[label]") : : "i"(&((char *)key)[branch]) : : label); return false; diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h index 0b85e363e778..e6a0071bdb56 100644 --- a/arch/riscv/include/asm/kasan.h +++ b/arch/riscv/include/asm/kasan.h @@ -6,8 +6,6 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_KASAN - /* * The following comment was copied from arm64: * KASAN_SHADOW_START: beginning of the kernel virtual addresses. 
@@ -34,6 +32,8 @@ */ #define KASAN_SHADOW_START ((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK) #define KASAN_SHADOW_END MODULES_LOWEST_VADDR + +#ifdef CONFIG_KASAN #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) void kasan_init(void); diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h index eee260e8ab30..b9ee8346cc8c 100644 --- a/arch/riscv/include/asm/kexec.h +++ b/arch/riscv/include/asm/kexec.h @@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs, #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { + void *fdt; /* For CONFIG_KEXEC_FILE */ unsigned long fdt_addr; }; @@ -55,6 +56,7 @@ extern riscv_kexec_method riscv_kexec_norelocate; #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops elf_kexec_ops; +extern const struct kexec_file_ops image_kexec_ops; struct purgatory_info; int arch_kexec_apply_relocations_add(struct purgatory_info *pi, @@ -62,6 +64,15 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +struct kimage; +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup + +int load_extra_segments(struct kimage *image, unsigned long kernel_start, + unsigned long kernel_len, char *initrd, + unsigned long initrd_len, char *cmdline, + unsigned long cmdline_len); #endif #endif diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h index d887a54042aa..d08bf7fb3aee 100644 --- a/arch/riscv/include/asm/kfence.h +++ b/arch/riscv/include/asm/kfence.h @@ -8,41 +8,8 @@ #include <asm-generic/pgalloc.h> #include <asm/pgtable.h> -static inline int split_pmd_page(unsigned long addr) -{ - int i; - unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK))); - pmd_t *pmd = pmd_off_k(addr); - pte_t *pte = pte_alloc_one_kernel(&init_mm); - - if (!pte) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL)); - set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE)); - - flush_tlb_kernel_range(addr, addr + PMD_SIZE); - return 0; -} - static inline bool arch_kfence_init_pool(void) { - int ret; - unsigned long addr; - pmd_t *pmd; - - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); - addr += PAGE_SIZE) { - pmd = pmd_off_k(addr); - - if (pmd_leaf(*pmd)) { - ret = split_pmd_page(addr); - if (ret) - return false; - } - } - return true; } @@ -51,11 +18,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) pte_t *pte = virt_to_kpte(addr); if (protect) - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT)); else - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT)); - flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + preempt_disable(); + local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + preempt_enable(); return true; } diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h index 46677daf708b..cc11c4544cff 100644 --- a/arch/riscv/include/asm/kgdb.h +++ b/arch/riscv/include/asm/kgdb.h @@ -19,16 +19,9 @@ #ifndef __ASSEMBLY__ +void arch_kgdb_breakpoint(void); extern unsigned long kgdb_compiled_break; -static inline void arch_kgdb_breakpoint(void) -{ - asm(".global kgdb_compiled_break\n" - ".option norvc\n" - "kgdb_compiled_break: ebreak\n" - ".option rvc\n"); -} - #endif /* 
!__ASSEMBLY__ */ #define DBG_REG_ZERO "zero" diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h index 217ef89f22b9..78ea44f76718 100644 --- a/arch/riscv/include/asm/kprobes.h +++ b/arch/riscv/include/asm/kprobes.h @@ -40,8 +40,15 @@ void arch_remove_kprobe(struct kprobe *p); int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); bool kprobe_breakpoint_handler(struct pt_regs *regs); bool kprobe_single_step_handler(struct pt_regs *regs); -void __kretprobe_trampoline(void); -void __kprobes *trampoline_probe_handler(struct pt_regs *regs); - +#else +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) +{ + return false; +} + +static inline bool kprobe_single_step_handler(struct pt_regs *regs) +{ + return false; +} #endif /* CONFIG_KPROBES */ #endif /* _ASM_RISCV_KPROBES_H */ diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h new file mode 100644 index 000000000000..3b643b9efc07 --- /dev/null +++ b/arch/riscv/include/asm/kvm_aia.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel <apatel@ventanamicro.com> + */ + +#ifndef __KVM_RISCV_AIA_H +#define __KVM_RISCV_AIA_H + +#include <linux/jump_label.h> +#include <linux/kvm_types.h> +#include <asm/csr.h> + +struct kvm_aia { + /* In-kernel irqchip created */ + bool in_kernel; + + /* In-kernel irqchip initialized */ + bool initialized; + + /* Virtualization mode (Emulation, HW Accelerated, or Auto) */ + u32 mode; + + /* Number of MSIs */ + u32 nr_ids; + + /* Number of wired IRQs */ + u32 nr_sources; + + /* Number of group bits in IMSIC address */ + u32 nr_group_bits; + + /* Position of group bits in IMSIC address */ + u32 nr_group_shift; + + /* Number of hart bits in IMSIC address */ + u32 nr_hart_bits; + + /* Number of guest bits in IMSIC address */ + u32 nr_guest_bits; + + /* Guest physical address of APLIC */ + gpa_t aplic_addr; + + /* Internal state of APLIC */ + void *aplic_state; +}; + +struct kvm_vcpu_aia_csr { + unsigned long vsiselect; + unsigned long hviprio1; + unsigned long hviprio2; + unsigned long vsieh; + unsigned long hviph; + unsigned long hviprio1h; + unsigned long hviprio2h; +}; + +struct kvm_vcpu_aia { + /* CPU AIA CSR context of Guest VCPU */ + struct kvm_vcpu_aia_csr guest_csr; + + /* Guest physical address of IMSIC for this VCPU */ + gpa_t imsic_addr; + + /* HART index of IMSIC extacted from guest physical address */ + u32 hart_index; + + /* Internal state of IMSIC for this VCPU */ + void *imsic_state; +}; + +#define KVM_RISCV_AIA_UNDEF_ADDR (-1) + +#define kvm_riscv_aia_initialized(k) ((k)->arch.aia.initialized) + +#define irqchip_in_kernel(k) ((k)->arch.aia.in_kernel) + +extern unsigned int kvm_riscv_aia_nr_hgei; +extern unsigned int kvm_riscv_aia_max_ids; +DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available); +#define kvm_riscv_aia_available() \ + static_branch_unlikely(&kvm_riscv_aia_available) + +extern struct kvm_device_ops kvm_riscv_aia_device_ops; + +void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu); + +#define KVM_RISCV_AIA_IMSIC_TOPEI (ISELECT_MASK + 1) +int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask); +int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type, + bool write, unsigned long *val); 
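One note on the kvm_riscv_aia_available() helper defined earlier in this header: it is only a static_branch_unlikely() test on the kvm_riscv_aia_available static key, so AIA-only paths can be guarded by a jump label rather than a memory load, costing a single nop on hardware without AIA. A minimal illustrative sketch; the function name and body here are hypothetical and not taken from the KVM sources:

/* Hypothetical caller: skip AIA-specific work when the static key is disabled. */
static void sketch_aia_only_path(struct kvm_vcpu *vcpu)
{
	if (!kvm_riscv_aia_available())
		return;	/* no AIA on this platform, nothing to do */

	/* ... AIA-specific bookkeeping for this vCPU would go here ... */
}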
+int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type); +void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu, + u32 guest_index, u32 offset, u32 iid); +int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu); + +int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v); +int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v); +int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type); +int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level); +int kvm_riscv_aia_aplic_init(struct kvm *kvm); +void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm); + +#ifdef CONFIG_32BIT +void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu); +#else +static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu) +{ +} +static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu) +{ +} +#endif +bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask); + +void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu); +void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long *out_val); +int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long val); + +int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu, + unsigned int csr_num, + unsigned long *val, + unsigned long new_val, + unsigned long wr_mask); +int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask); +#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \ +{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \ +{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei }, + +int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu); + +int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index, + u32 guest_index, u32 iid); +int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi); +int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level); + +void kvm_riscv_aia_init_vm(struct kvm *kvm); +void kvm_riscv_aia_destroy_vm(struct kvm *kvm); + +int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner, + void __iomem **hgei_va, phys_addr_t *hgei_pa); +void kvm_riscv_aia_free_hgei(int cpu, int hgei); +void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable); + +void kvm_riscv_aia_enable(void); +void kvm_riscv_aia_disable(void); +int kvm_riscv_aia_init(void); +void kvm_riscv_aia_exit(void); + +#endif diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 319c8aeb42af..85cfebc32e4c 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -13,9 +13,14 @@ #include <linux/kvm.h> #include <linux/kvm_types.h> #include <linux/spinlock.h> -#include <asm/csr.h> +#include <asm/hwcap.h> +#include <asm/kvm_aia.h> +#include <asm/ptrace.h> #include <asm/kvm_vcpu_fp.h> +#include <asm/kvm_vcpu_insn.h> +#include <asm/kvm_vcpu_sbi.h> #include <asm/kvm_vcpu_timer.h> +#include <asm/kvm_vcpu_pmu.h> 
#define KVM_MAX_VCPUS 1024 @@ -23,6 +28,8 @@ #define KVM_VCPU_MAX_FEATURES 0 +#define KVM_IRQCHIP_NUM_PINS 1024 + #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1) @@ -34,6 +41,18 @@ KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_HFENCE \ KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6) + +#define KVM_HEDELEG_DEFAULT (BIT(EXC_INST_MISALIGNED) | \ + BIT(EXC_BREAKPOINT) | \ + BIT(EXC_SYSCALL) | \ + BIT(EXC_INST_PAGE_FAULT) | \ + BIT(EXC_LOAD_PAGE_FAULT) | \ + BIT(EXC_STORE_PAGE_FAULT)) + +#define KVM_HIDELEG_DEFAULT (BIT(IRQ_VS_SOFT) | \ + BIT(IRQ_VS_TIMER) | \ + BIT(IRQ_VS_EXT)) enum kvm_riscv_hfence_type { KVM_RISCV_HFENCE_UNKNOWN = 0, @@ -61,9 +80,18 @@ struct kvm_vcpu_stat { struct kvm_vcpu_stat_generic generic; u64 ecall_exit_stat; u64 wfi_exit_stat; + u64 wrs_exit_stat; u64 mmio_exit_user; u64 mmio_exit_kernel; + u64 csr_exit_user; + u64 csr_exit_kernel; + u64 signal_exits; u64 exits; + u64 instr_illegal_exits; + u64 load_misaligned_exits; + u64 store_misaligned_exits; + u64 load_access_exits; + u64 store_access_exits; }; struct kvm_arch_memory_slot { @@ -88,18 +116,12 @@ struct kvm_arch { /* Guest Timer */ struct kvm_guest_timer timer; -}; -struct kvm_mmio_decode { - unsigned long insn; - int insn_len; - int len; - int shift; - int return_handled; -}; + /* AIA Guest/VM context */ + struct kvm_aia aia; -struct kvm_sbi_context { - int return_handled; + /* KVM_CAP_RISCV_MP_STATE_RESET */ + bool mp_state_reset; }; struct kvm_cpu_trap { @@ -147,6 +169,7 @@ struct kvm_cpu_context { unsigned long sstatus; unsigned long hstatus; union __riscv_fp_state fp; + struct __riscv_v_ext_state vector; }; struct kvm_vcpu_csr { @@ -160,6 +183,23 @@ struct kvm_vcpu_csr { unsigned long hvip; unsigned long vsatp; unsigned long scounteren; + unsigned long senvcfg; +}; + +struct kvm_vcpu_config { + u64 henvcfg; + u64 hstateen0; + unsigned long hedeleg; +}; + +struct kvm_vcpu_smstateen_csr { + unsigned long sstateen0; +}; + +struct kvm_vcpu_reset_state { + spinlock_t lock; + unsigned long pc; + unsigned long a1; }; struct kvm_vcpu_arch { @@ -170,12 +210,19 @@ struct kvm_vcpu_arch { int last_exit_cpu; /* ISA feature bits (similar to MISA) */ - unsigned long isa; + DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX); + + /* Vendor, Arch, and Implementation details */ + unsigned long mvendorid; + unsigned long marchid; + unsigned long mimpid; /* SSCRATCH, STVEC, and SCOUNTEREN of Host */ unsigned long host_sscratch; unsigned long host_stvec; unsigned long host_scounteren; + unsigned long host_senvcfg; + unsigned long host_sstateen0; /* CPU context of Host */ struct kvm_cpu_context host_context; @@ -186,11 +233,11 @@ struct kvm_vcpu_arch { /* CPU CSR context of Guest VCPU */ struct kvm_vcpu_csr guest_csr; - /* CPU context upon Guest VCPU reset */ - struct kvm_cpu_context guest_reset_context; + /* CPU Smstateen CSR context of Guest VCPU */ + struct kvm_vcpu_smstateen_csr smstateen_csr; - /* CPU CSR context upon Guest VCPU reset */ - struct kvm_vcpu_csr guest_reset_csr; + /* CPU reset state of Guest VCPU */ + struct kvm_vcpu_reset_state reset_state; /* * VCPU interrupts @@ -201,8 +248,9 @@ struct kvm_vcpu_arch { * in irqs_pending. Our approach is modeled around multiple producer * and single consumer problem where the consumer is the VCPU itself. 
*/ - unsigned long irqs_pending; - unsigned long irqs_pending_mask; +#define KVM_RISCV_VCPU_NR_IRQS 64 + DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS); + DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS); /* VCPU Timer */ struct kvm_vcpu_timer timer; @@ -216,24 +264,47 @@ struct kvm_vcpu_arch { /* MMIO instruction details */ struct kvm_mmio_decode mmio_decode; + /* CSR instruction details */ + struct kvm_csr_decode csr_decode; + /* SBI context */ - struct kvm_sbi_context sbi_context; + struct kvm_vcpu_sbi_context sbi_context; + + /* AIA VCPU context */ + struct kvm_vcpu_aia aia_context; /* Cache pages needed to program page tables with spinlock held */ struct kvm_mmu_memory_cache mmu_page_cache; - /* VCPU power-off state */ - bool power_off; + /* VCPU power state */ + struct kvm_mp_state mp_state; + spinlock_t mp_state_lock; /* Don't run the VCPU (blocked) */ bool pause; -}; -static inline void kvm_arch_hardware_unsetup(void) {} -static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} + /* Performance monitoring context */ + struct kvm_pmu pmu_context; + + /* 'static' configurations which are set only once */ + struct kvm_vcpu_config cfg; -#define KVM_ARCH_WANT_MMU_NOTIFIER + /* SBI steal-time accounting */ + struct { + gpa_t shmem; + u64 last_steal; + } sta; +}; + +/* + * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event, + * arrived in guest context. For riscv, any event that arrives while a vCPU is + * loaded is considered to be "in guest". + */ +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; +} #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 @@ -285,46 +356,63 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm, void kvm_riscv_hfence_vvma_all(struct kvm *kvm, unsigned long hbase, unsigned long hmask); +int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa, + phys_addr_t hpa, unsigned long size, + bool writable, bool in_atomic); +void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, + unsigned long size); int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write); int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm); void kvm_riscv_gstage_free_pgd(struct kvm *kvm); void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu); -void kvm_riscv_gstage_mode_detect(void); -unsigned long kvm_riscv_gstage_mode(void); +void __init kvm_riscv_gstage_mode_detect(void); +unsigned long __init kvm_riscv_gstage_mode(void); int kvm_riscv_gstage_gpa_bits(void); -void kvm_riscv_gstage_vmid_detect(void); +void __init kvm_riscv_gstage_vmid_detect(void); unsigned long kvm_riscv_gstage_vmid_bits(void); int kvm_riscv_gstage_vmid_init(struct kvm *kvm); bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid); void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu); +int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines); + void __kvm_riscv_unpriv_trap(void); -void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu); unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, bool read_insn, unsigned long guest_addr, struct kvm_cpu_trap *trap); void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap); -int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_trap *trap); void __kvm_riscv_switch_to(struct kvm_vcpu_arch 
*vcpu_arch); +void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu); +unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu, + u64 __user *uindices); +int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); + int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu); -bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask); +bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask); +void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); +void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); +bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu); -int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); +void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu); #endif /* __RISCV_KVM_HOST_H__ */ diff --git a/arch/riscv/include/asm/kvm_nacl.h b/arch/riscv/include/asm/kvm_nacl.h new file mode 100644 index 000000000000..4124d5e06a0f --- /dev/null +++ b/arch/riscv/include/asm/kvm_nacl.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Ventana Micro Systems Inc. + */ + +#ifndef __KVM_NACL_H +#define __KVM_NACL_H + +#include <linux/jump_label.h> +#include <linux/percpu.h> +#include <asm/byteorder.h> +#include <asm/csr.h> +#include <asm/sbi.h> + +struct kvm_vcpu_arch; + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available); +#define kvm_riscv_nacl_available() \ + static_branch_unlikely(&kvm_riscv_nacl_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available); +#define kvm_riscv_nacl_sync_csr_available() \ + static_branch_unlikely(&kvm_riscv_nacl_sync_csr_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available); +#define kvm_riscv_nacl_sync_hfence_available() \ + static_branch_unlikely(&kvm_riscv_nacl_sync_hfence_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available); +#define kvm_riscv_nacl_sync_sret_available() \ + static_branch_unlikely(&kvm_riscv_nacl_sync_sret_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available); +#define kvm_riscv_nacl_autoswap_csr_available() \ + static_branch_unlikely(&kvm_riscv_nacl_autoswap_csr_available) + +struct kvm_riscv_nacl { + void *shmem; + phys_addr_t shmem_phys; +}; +DECLARE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl); + +void __kvm_riscv_nacl_hfence(void *shmem, + unsigned long control, + unsigned long page_num, + unsigned long page_count); + +void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch, + unsigned long sbi_ext_id, + unsigned long sbi_func_id); + +int kvm_riscv_nacl_enable(void); + +void kvm_riscv_nacl_disable(void); + +void kvm_riscv_nacl_exit(void); + +int kvm_riscv_nacl_init(void); + +#ifdef CONFIG_32BIT +#define lelong_to_cpu(__x) le32_to_cpu(__x) +#define cpu_to_lelong(__x) cpu_to_le32(__x) +#else +#define lelong_to_cpu(__x) le64_to_cpu(__x) +#define cpu_to_lelong(__x) cpu_to_le64(__x) +#endif + +#define nacl_shmem() \ + 
this_cpu_ptr(&kvm_riscv_nacl)->shmem + +#define nacl_scratch_read_long(__shmem, __offset) \ +({ \ + unsigned long *__p = (__shmem) + \ + SBI_NACL_SHMEM_SCRATCH_OFFSET + \ + (__offset); \ + lelong_to_cpu(*__p); \ +}) + +#define nacl_scratch_write_long(__shmem, __offset, __val) \ +do { \ + unsigned long *__p = (__shmem) + \ + SBI_NACL_SHMEM_SCRATCH_OFFSET + \ + (__offset); \ + *__p = cpu_to_lelong(__val); \ +} while (0) + +#define nacl_scratch_write_longs(__shmem, __offset, __array, __count) \ +do { \ + unsigned int __i; \ + unsigned long *__p = (__shmem) + \ + SBI_NACL_SHMEM_SCRATCH_OFFSET + \ + (__offset); \ + for (__i = 0; __i < (__count); __i++) \ + __p[__i] = cpu_to_lelong((__array)[__i]); \ +} while (0) + +#define nacl_sync_hfence(__e) \ + sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_HFENCE, \ + (__e), 0, 0, 0, 0, 0) + +#define nacl_hfence_mkconfig(__type, __order, __vmid, __asid) \ +({ \ + unsigned long __c = SBI_NACL_SHMEM_HFENCE_CONFIG_PEND; \ + __c |= ((__type) & SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK) \ + << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT; \ + __c |= (((__order) - SBI_NACL_SHMEM_HFENCE_ORDER_BASE) & \ + SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK) \ + << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT; \ + __c |= ((__vmid) & SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK) \ + << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT; \ + __c |= ((__asid) & SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK); \ + __c; \ +}) + +#define nacl_hfence_mkpnum(__order, __addr) \ + ((__addr) >> (__order)) + +#define nacl_hfence_mkpcount(__order, __size) \ + ((__size) >> (__order)) + +#define nacl_hfence_gvma(__shmem, __gpa, __gpsz, __order) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA, \ + __order, 0, 0), \ + nacl_hfence_mkpnum(__order, __gpa), \ + nacl_hfence_mkpcount(__order, __gpsz)) + +#define nacl_hfence_gvma_all(__shmem) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL, \ + 0, 0, 0), 0, 0) + +#define nacl_hfence_gvma_vmid(__shmem, __vmid, __gpa, __gpsz, __order) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID, \ + __order, __vmid, 0), \ + nacl_hfence_mkpnum(__order, __gpa), \ + nacl_hfence_mkpcount(__order, __gpsz)) + +#define nacl_hfence_gvma_vmid_all(__shmem, __vmid) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL, \ + 0, __vmid, 0), 0, 0) + +#define nacl_hfence_vvma(__shmem, __vmid, __gva, __gvsz, __order) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA, \ + __order, __vmid, 0), \ + nacl_hfence_mkpnum(__order, __gva), \ + nacl_hfence_mkpcount(__order, __gvsz)) + +#define nacl_hfence_vvma_all(__shmem, __vmid) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL, \ + 0, __vmid, 0), 0, 0) + +#define nacl_hfence_vvma_asid(__shmem, __vmid, __asid, __gva, __gvsz, __order)\ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID, \ + __order, __vmid, __asid), \ + nacl_hfence_mkpnum(__order, __gva), \ + nacl_hfence_mkpcount(__order, __gvsz)) + +#define nacl_hfence_vvma_asid_all(__shmem, __vmid, __asid) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL, \ + 0, __vmid, __asid), 0, 0) + +#define nacl_csr_read(__shmem, __csr) \ +({ \ + unsigned long *__a = (__shmem) + SBI_NACL_SHMEM_CSR_OFFSET; \ + lelong_to_cpu(__a[SBI_NACL_SHMEM_CSR_INDEX(__csr)]); \ +}) + 
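Side note (not part of the diff): a rough sketch of how these NACL shared-memory accessors pair with a plain-CSR fallback is given below. It uses kvm_riscv_nacl_sync_csr_available() and nacl_shmem() from this header, nacl_csr_write() as defined just below, and csr_write() plus the CSR_VS* numbers from asm/csr.h; the choice of CSRs and the function name are assumptions for illustration only.

/*
 * Hypothetical illustration: restore a few guest VS-level CSRs. When the
 * SBI implementation supports synchronized CSR access, write them into
 * the NACL shared memory (nacl_csr_write() also marks them dirty in the
 * bitmap); otherwise fall back to direct CSR writes.
 */
static void demo_restore_guest_csrs(unsigned long vsstatus,
				    unsigned long vstvec,
				    unsigned long vsatp)
{
	if (kvm_riscv_nacl_sync_csr_available()) {
		void *shmem = nacl_shmem();	/* fetch the per-CPU pointer once */

		nacl_csr_write(shmem, CSR_VSSTATUS, vsstatus);
		nacl_csr_write(shmem, CSR_VSTVEC, vstvec);
		nacl_csr_write(shmem, CSR_VSATP, vsatp);
	} else {
		csr_write(CSR_VSSTATUS, vsstatus);
		csr_write(CSR_VSTVEC, vstvec);
		csr_write(CSR_VSATP, vsatp);
	}
}

Fetching the shared-memory pointer once and using the nacl_csr_write() helper directly avoids the per-use static branches of the ncsr_*() wrappers, which is in line with the usage note in the header further down.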
+#define nacl_csr_write(__shmem, __csr, __val) \ +do { \ + void *__s = (__shmem); \ + unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr); \ + unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET; \ + u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET; \ + __a[__i] = cpu_to_lelong(__val); \ + __b[__i >> 3] |= 1U << (__i & 0x7); \ +} while (0) + +#define nacl_csr_swap(__shmem, __csr, __val) \ +({ \ + void *__s = (__shmem); \ + unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr); \ + unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET; \ + u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET; \ + unsigned long __r = lelong_to_cpu(__a[__i]); \ + __a[__i] = cpu_to_lelong(__val); \ + __b[__i >> 3] |= 1U << (__i & 0x7); \ + __r; \ +}) + +#define nacl_sync_csr(__csr) \ + sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_CSR, \ + (__csr), 0, 0, 0, 0, 0) + +/* + * Each ncsr_xyz() macro defined below has it's own static-branch so every + * use of ncsr_xyz() macro emits a patchable direct jump. This means multiple + * back-to-back ncsr_xyz() macro usage will emit multiple patchable direct + * jumps which is sub-optimal. + * + * Based on the above, it is recommended to avoid multiple back-to-back + * ncsr_xyz() macro usage. + */ + +#define ncsr_read(__csr) \ +({ \ + unsigned long __r; \ + if (kvm_riscv_nacl_available()) \ + __r = nacl_csr_read(nacl_shmem(), __csr); \ + else \ + __r = csr_read(__csr); \ + __r; \ +}) + +#define ncsr_write(__csr, __val) \ +do { \ + if (kvm_riscv_nacl_sync_csr_available()) \ + nacl_csr_write(nacl_shmem(), __csr, __val); \ + else \ + csr_write(__csr, __val); \ +} while (0) + +#define ncsr_swap(__csr, __val) \ +({ \ + unsigned long __r; \ + if (kvm_riscv_nacl_sync_csr_available()) \ + __r = nacl_csr_swap(nacl_shmem(), __csr, __val); \ + else \ + __r = csr_swap(__csr, __val); \ + __r; \ +}) + +#define nsync_csr(__csr) \ +do { \ + if (kvm_riscv_nacl_sync_csr_available()) \ + nacl_sync_csr(__csr); \ +} while (0) + +#endif diff --git a/arch/riscv/include/asm/kvm_vcpu_fp.h b/arch/riscv/include/asm/kvm_vcpu_fp.h index 4da9b8e0f050..b5540147409f 100644 --- a/arch/riscv/include/asm/kvm_vcpu_fp.h +++ b/arch/riscv/include/asm/kvm_vcpu_fp.h @@ -22,9 +22,9 @@ void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context); void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, - unsigned long isa); + const unsigned long *isa); void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, - unsigned long isa); + const unsigned long *isa); void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx); void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx); #else @@ -32,12 +32,12 @@ static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) { } static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, - unsigned long isa) + const unsigned long *isa) { } static inline void kvm_riscv_vcpu_guest_fp_restore( struct kvm_cpu_context *cntx, - unsigned long isa) + const unsigned long *isa) { } static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) diff --git a/arch/riscv/include/asm/kvm_vcpu_insn.h b/arch/riscv/include/asm/kvm_vcpu_insn.h new file mode 100644 index 000000000000..350011c83581 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_insn.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Ventana Micro Systems Inc. 
+ */ + +#ifndef __KVM_VCPU_RISCV_INSN_H +#define __KVM_VCPU_RISCV_INSN_H + +struct kvm_vcpu; +struct kvm_run; +struct kvm_cpu_trap; + +struct kvm_mmio_decode { + unsigned long insn; + int insn_len; + int len; + int shift; + int return_handled; +}; + +struct kvm_csr_decode { + unsigned long insn; + int return_handled; +}; + +/* Return values used by function emulating a particular instruction */ +enum kvm_insn_return { + KVM_INSN_EXIT_TO_USER_SPACE = 0, + KVM_INSN_CONTINUE_NEXT_SEPC, + KVM_INSN_CONTINUE_SAME_SEPC, + KVM_INSN_ILLEGAL_TRAP, + KVM_INSN_VIRTUAL_TRAP +}; + +void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_cpu_trap *trap); + +int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long fault_addr, + unsigned long htinst); +int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long fault_addr, + unsigned long htinst); +int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); + +#endif diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h new file mode 100644 index 000000000000..1d85b6617508 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Rivos Inc + * + * Authors: + * Atish Patra <atishp@rivosinc.com> + */ + +#ifndef __KVM_VCPU_RISCV_PMU_H +#define __KVM_VCPU_RISCV_PMU_H + +#include <linux/perf/riscv_pmu.h> +#include <asm/kvm_vcpu_insn.h> +#include <asm/sbi.h> + +#ifdef CONFIG_RISCV_PMU_SBI +#define RISCV_KVM_MAX_FW_CTRS 32 +#define RISCV_KVM_MAX_HW_CTRS 32 +#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS) +static_assert(RISCV_KVM_MAX_COUNTERS <= 64); + +struct kvm_fw_event { + /* Current value of the event */ + u64 value; + + /* Event monitoring status */ + bool started; +}; + +/* Per virtual pmu counter data */ +struct kvm_pmc { + u8 idx; + struct perf_event *perf_event; + u64 counter_val; + union sbi_pmu_ctr_info cinfo; + /* Event monitoring status */ + bool started; + /* Monitoring event ID */ + unsigned long event_idx; + struct kvm_vcpu *vcpu; +}; + +/* PMU data structure per vcpu */ +struct kvm_pmu { + struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS]; + struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS]; + /* Number of the virtual firmware counters available */ + int num_fw_ctrs; + /* Number of the virtual hardware counters available */ + int num_hw_ctrs; + /* A flag to indicate that pmu initialization is done */ + bool init_done; + /* Bit map of all the virtual counter used */ + DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS); + /* Bit map of all the virtual counter overflown */ + DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS); + /* The address of the counter snapshot area (guest physical address) */ + gpa_t snapshot_addr; + /* The actual data of the snapshot */ + struct riscv_pmu_snapshot_data *sdata; +}; + +#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context) +#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context)) + +#if defined(CONFIG_32BIT) +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, +#else +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{.base = CSR_CYCLE, .count = 32, .func = 
kvm_riscv_vcpu_pmu_read_hpm }, +#endif + +int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); +int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask); + +int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, u64 ival, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, + unsigned long eidx, u64 evtdata, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata); +void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low, + unsigned long saddr_high, unsigned long flags, + struct kvm_vcpu_sbi_return *retdata); +void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); + +#else +struct kvm_pmu { +}; + +static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { + *val = 0; + return KVM_INSN_CONTINUE_NEXT_SEPC; + } else { + return KVM_INSN_ILLEGAL_TRAP; + } +} + +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy }, + +static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {} +static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) +{ + return 0; +} + +static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {} +static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {} +#endif /* CONFIG_RISCV_PMU_SBI */ +#endif /* !__KVM_VCPU_RISCV_PMU_H */ diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index 83d6d4d2b1df..439ab2b3534f 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -11,26 +11,89 @@ #define KVM_SBI_IMPID 3 -#define KVM_SBI_VERSION_MAJOR 0 -#define KVM_SBI_VERSION_MINOR 3 +#define KVM_SBI_VERSION_MAJOR 2 +#define KVM_SBI_VERSION_MINOR 0 + +enum kvm_riscv_sbi_ext_status { + KVM_RISCV_SBI_EXT_STATUS_UNINITIALIZED, + KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE, + KVM_RISCV_SBI_EXT_STATUS_ENABLED, + KVM_RISCV_SBI_EXT_STATUS_DISABLED, +}; + +struct kvm_vcpu_sbi_context { + int return_handled; + enum kvm_riscv_sbi_ext_status ext_status[KVM_RISCV_SBI_EXT_MAX]; +}; + +struct kvm_vcpu_sbi_return { + unsigned long out_val; + unsigned long err_val; + struct kvm_cpu_trap *utrap; + bool uexit; +}; struct kvm_vcpu_sbi_extension { unsigned long extid_start; unsigned long extid_end; + + bool default_disabled; + /** * SBI extension handler. It can be defined for a given extension or group of * extension. 
But it should always return linux error codes rather than SBI * specific error codes. */ int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, struct kvm_cpu_trap *utrap, - bool *exit); + struct kvm_vcpu_sbi_return *retdata); + + /* Extension specific probe function */ + unsigned long (*probe)(struct kvm_vcpu *vcpu); }; void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run); void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, struct kvm_run *run, u32 type, u64 flags); -const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid); +void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu, + unsigned long pc, unsigned long a1); +void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext( + struct kvm_vcpu *vcpu, unsigned long extid); +bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx); +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); +void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu); + +int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long *reg_val); +int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long reg_val); + +#ifdef CONFIG_RISCV_SBI_V01 +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01; +#endif +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor; +#ifdef CONFIG_RISCV_PMU_SBI +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu; +#endif #endif /* __RISCV_KVM_VCPU_SBI_H__ */ diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h index 375281eb49e0..82f7260301da 100644 --- a/arch/riscv/include/asm/kvm_vcpu_timer.h +++ b/arch/riscv/include/asm/kvm_vcpu_timer.h @@ -28,6 +28,11 @@ struct kvm_vcpu_timer { u64 next_cycles; /* Underlying hrtimer instance */ struct hrtimer hrt; + + /* Flag to check if sstc is enabled or not */ + bool sstc_enabled; + /* A function pointer to switch between stimecmp or hrtimer at runtime */ + int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles); }; int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles); @@ -39,6 +44,9 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu); int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu); int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu); 
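Brief aside (not part of the diff): the reworked struct kvm_vcpu_sbi_extension above now returns results through struct kvm_vcpu_sbi_return and gains an optional probe() callback. A minimal sketch of an extension built on that shape follows; the names, the extension ID range, and the include choices are assumptions patterned on the vcpu_sbi_ext_* instances declared in the same header, not code from this diff.

#include <linux/kvm_host.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/sbi.h>

/*
 * Hypothetical SBI extension using the reworked interface: the handler
 * fills retdata instead of separate out/err pointers, and probe() lets
 * the SBI base extension report whether it is available to the guest.
 */
static int demo_sbi_ext_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_vcpu_sbi_return *retdata)
{
	retdata->out_val = 0;		/* value handed back to the guest in a1 */
	retdata->err_val = SBI_SUCCESS;	/* SBI error code handed back in a0 */
	return 0;			/* no error from the handler itself */
}

static unsigned long demo_sbi_ext_probe(struct kvm_vcpu *vcpu)
{
	return 1;			/* report the extension as present */
}

static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_demo = {
	.extid_start = 0x0A000000,	/* hypothetical extension ID range */
	.extid_end   = 0x0A000000,
	.handler     = demo_sbi_ext_handler,
	.probe       = demo_sbi_ext_probe,
};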
-int kvm_riscv_guest_timer_init(struct kvm *kvm); +void kvm_riscv_guest_timer_init(struct kvm *kvm); +void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu); +bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu); #endif diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h new file mode 100644 index 000000000000..57a798a4cb0d --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_vector.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 SiFive + * + * Authors: + * Vincent Chen <vincent.chen@sifive.com> + * Greentime Hu <greentime.hu@sifive.com> + */ + +#ifndef __KVM_VCPU_RISCV_VECTOR_H +#define __KVM_VCPU_RISCV_VECTOR_H + +#include <linux/types.h> + +#ifdef CONFIG_RISCV_ISA_V +#include <asm/vector.h> +#include <asm/kvm_host.h> + +static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context) +{ + __riscv_v_vstate_save(&context->vector, context->vector.datap); +} + +static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context) +{ + __riscv_v_vstate_restore(&context->vector, context->vector.datap); +} + +void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, + unsigned long *isa); +void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, + unsigned long *isa); +void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx); +void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx); +int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu); +#else + +struct kvm_cpu_context; + +static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu) +{ +} + +static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, + unsigned long *isa) +{ +} + +static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, + unsigned long *isa) +{ +} + +static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx) +{ +} + +static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx) +{ +} + +static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu) +{ + return 0; +} + +static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu) +{ +} +#endif + +int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +#endif diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h new file mode 100644 index 000000000000..47b240d0d596 --- /dev/null +++ b/arch/riscv/include/asm/membarrier.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_RISCV_MEMBARRIER_H +#define _ASM_RISCV_MEMBARRIER_H + +static inline void membarrier_arch_switch_mm(struct mm_struct *prev, + struct mm_struct *next, + struct task_struct *tsk) +{ + /* + * Only need the full barrier when switching between processes. + * Barrier when switching from kernel to userspace is not + * required here, given that it is implied by mmdrop(). Barrier + * when switching from userspace to kernel is not needed after + * store to rq->curr. 
+ */ + if (IS_ENABLED(CONFIG_SMP) && + likely(!(atomic_read(&next->membarrier_state) & + (MEMBARRIER_STATE_PRIVATE_EXPEDITED | + MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev)) + return; + + /* + * The membarrier system call requires a full memory barrier + * after storing to rq->curr, before going back to user-space. + * + * This barrier is also needed for the SYNC_CORE command when + * switching between processes; in particular, on a transition + * from a thread belonging to another mm to a thread belonging + * to the mm for which a membarrier SYNC_CORE is done on CPU0: + * + * - [CPU0] sets all bits in the mm icache_stale_mask (in + * prepare_sync_core_cmd()); + * + * - [CPU1] stores to rq->curr (by the scheduler); + * + * - [CPU0] loads rq->curr within membarrier and observes + * cpu_rq(1)->curr->mm != mm, so the IPI is skipped on + * CPU1; this means membarrier relies on switch_mm() to + * issue the sync-core; + * + * - [CPU1] switch_mm() loads icache_stale_mask; if the bit + * is zero, switch_mm() may incorrectly skip the sync-core. + * + * Matches a full barrier in the proximity of the membarrier + * system call entry. + */ + smp_mb(); +} + +#endif /* _ASM_RISCV_MEMBARRIER_H */ diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h index aff6c33ab0c0..06cadfd7a237 100644 --- a/arch/riscv/include/asm/mmio.h +++ b/arch/riscv/include/asm/mmio.h @@ -12,6 +12,7 @@ #define _ASM_RISCV_MMIO_H #include <linux/types.h> +#include <asm/fence.h> #include <asm/mmiowb.h> /* Generic IO read/write. These perform native-endian accesses. */ @@ -101,9 +102,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * Relaxed I/O memory access primitives. These follow the Device memory * ordering rules but do not guarantee any ordering relative to Normal memory * accesses. These are defined to order the indicated access (either a read or - * write) with all other I/O memory accesses. Since the platform specification - * defines that all I/O regions are strongly ordered on channel 2, no explicit - * fences are required to enforce this ordering. + * write) with all other I/O memory accesses to the same peripheral. Since the + * platform specification defines that all I/O regions are strongly ordered on + * channel 0, no explicit fences are required to enforce this ordering. */ /* FIXME: These are now the same as asm-generic */ #define __io_rbr() do {} while (0) @@ -125,14 +126,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #endif /* - * I/O memory access primitives. Reads are ordered relative to any - * following Normal memory access. Writes are ordered relative to any prior - * Normal memory access. The memory barriers here are necessary as RISC-V + * I/O memory access primitives. Reads are ordered relative to any following + * Normal memory read and delay() loop. Writes are ordered relative to any + * prior Normal memory write. The memory barriers here are necessary as RISC-V * doesn't define any ordering between the memory space and the I/O space. 
*/ #define __io_br() do {} while (0) -#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory") -#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory") +#define __io_ar(v) RISCV_FENCE(i, ir) +#define __io_bw() RISCV_FENCE(w, o) #define __io_aw() mmiowb_set_pending() #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h index 0b2333e71fdc..52ce4a399d9b 100644 --- a/arch/riscv/include/asm/mmiowb.h +++ b/arch/riscv/include/asm/mmiowb.h @@ -7,7 +7,7 @@ * "o,w" is sufficient to ensure that all writes to the device have completed * before the write to the spinlock is allowed to commit. */ -#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory"); +#define mmiowb() RISCV_FENCE(o, w) #include <linux/smp.h> #include <asm-generic/mmiowb.h> diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index cedcf8ea3c76..1cc90465d75b 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -16,15 +16,30 @@ typedef struct { atomic_long_t id; #endif void *vdso; - void *vdso_info; #ifdef CONFIG_SMP /* A local icache flush is needed before user execution can resume. */ cpumask_t icache_stale_mask; + /* Force local icache flush on all migrations. */ + bool force_icache_flush; +#endif +#ifdef CONFIG_BINFMT_ELF_FDPIC + unsigned long exec_fdpic_loadmap; + unsigned long interp_fdpic_loadmap; +#endif + unsigned long flags; +#ifdef CONFIG_RISCV_ISA_SUPM + u8 pmlen; #endif } mm_context_t; -void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, - phys_addr_t sz, pgprot_t prot); +/* Lock the pointer masking mode because this mm is multithreaded */ +#define MM_CONTEXT_LOCK_PMLEN 0 + +#define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK) +#define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK) + +void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz, + pgprot_t prot); #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_MMU_H */ diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 7030837adc1a..8c4bc49a3a0f 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -20,6 +20,9 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { +#ifdef CONFIG_RISCV_ISA_SUPM + next->context.pmlen = 0; +#endif switch_mm(prev, next, NULL); } @@ -30,11 +33,21 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_MMU atomic_long_set(&mm->context.id, 0); #endif + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) + clear_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags); return 0; } DECLARE_STATIC_KEY_FALSE(use_asid_allocator); +#ifdef CONFIG_RISCV_ISA_SUPM +#define mm_untag_mask mm_untag_mask +static inline unsigned long mm_untag_mask(struct mm_struct *mm) +{ + return -1UL >> mm->context.pmlen; +} +#endif + #include <asm-generic/mmu_context.h> #endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h deleted file mode 100644 index fa17e01d9ab2..000000000000 --- a/arch/riscv/include/asm/mmzone.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_MMZONE_H -#define __ASM_MMZONE_H - -#ifdef CONFIG_NUMA - -#include <asm/numa.h> - -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[(nid)]) - -#endif /* CONFIG_NUMA */ -#endif /* 
__ASM_MMZONE_H */ diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h index 76aa96a9fc08..0f3baaa6a9a8 100644 --- a/arch/riscv/include/asm/module.h +++ b/arch/riscv/include/asm/module.h @@ -5,6 +5,7 @@ #define _ASM_RISCV_MODULE_H #include <asm-generic/module.h> +#include <linux/elf.h> struct module; unsigned long module_emit_got_entry(struct module *mod, unsigned long val); @@ -111,4 +112,19 @@ static inline struct plt_entry *get_plt_entry(unsigned long val, #endif /* CONFIG_MODULE_SECTIONS */ +static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + } + + return NULL; +} + #endif /* _ASM_RISCV_MODULE_H */ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 1526e410e802..572a141ddecd 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -12,15 +12,8 @@ #include <linux/pfn.h> #include <linux/const.h> -#define PAGE_SHIFT (12) -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE - 1)) +#include <vdso/page.h> -#ifdef CONFIG_64BIT -#define HUGE_MAX_HSTATE 2 -#else -#define HUGE_MAX_HSTATE 1 -#endif #define HPAGE_SHIFT PMD_SHIFT #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) @@ -31,28 +24,33 @@ * When not using MMU this corresponds to the first free page in * physical memory (aligned on a page boundary). */ -#ifdef CONFIG_64BIT #ifdef CONFIG_MMU -#define PAGE_OFFSET kernel_map.page_offset -#else -#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) -#endif -/* - * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so - * define the PAGE_OFFSET value for SV39. - */ +#ifdef CONFIG_64BIT +#define PAGE_OFFSET_L5 _AC(0xff60000000000000, UL) #define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL) -#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL) +#define PAGE_OFFSET_L3 _AC(0xffffffd600000000, UL) +#ifdef CONFIG_XIP_KERNEL +#define PAGE_OFFSET PAGE_OFFSET_L3 #else -#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) +#define PAGE_OFFSET kernel_map.page_offset +#endif /* CONFIG_XIP_KERNEL */ +#else +#define PAGE_OFFSET _AC(0xc0000000, UL) #endif /* CONFIG_64BIT */ +#else +#define PAGE_OFFSET ((unsigned long)phys_ram_base) +#endif /* CONFIG_MMU */ #ifndef __ASSEMBLY__ +#ifdef CONFIG_RISCV_ISA_ZICBOZ +void clear_page(void *page); +#else #define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) +#endif #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) -#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) +#define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr) #define copy_user_page(vto, vfrom, vaddr, topg) \ memcpy((vto), (vfrom), PAGE_SIZE) @@ -90,31 +88,38 @@ typedef struct page *pgtable_t; #define PTE_FMT "%08lx" #endif -#ifdef CONFIG_MMU -extern unsigned long riscv_pfn_base; -#define ARCH_PFN_OFFSET (riscv_pfn_base) -#else -#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) -#endif /* CONFIG_MMU */ +#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) +/* + * We override this value as its generic definition uses __pa too early in + * the boot process (before kernel_map.va_pa_offset is set). 
+ */ +#define MIN_MEMBLOCK_ADDR 0 +#endif + +#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base)) struct kernel_mapping { - unsigned long page_offset; unsigned long virt_addr; + unsigned long virt_offset; uintptr_t phys_addr; uintptr_t size; /* Offset between linear mapping virtual address and kernel load address */ unsigned long va_pa_offset; /* Offset between kernel mapping virtual address and kernel load address */ - unsigned long va_kernel_pa_offset; - unsigned long va_kernel_xip_pa_offset; #ifdef CONFIG_XIP_KERNEL + unsigned long va_kernel_xip_text_pa_offset; + unsigned long va_kernel_xip_data_pa_offset; uintptr_t xiprom; uintptr_t xiprom_sz; +#else + unsigned long page_offset; + unsigned long va_kernel_pa_offset; #endif }; extern struct kernel_mapping kernel_map; extern phys_addr_t phys_ram_base; +extern unsigned long vmemmap_start_pfn; #define is_kernel_mapping(x) \ ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size)) @@ -122,22 +127,41 @@ extern phys_addr_t phys_ram_base; #define is_linear_mapping(x) \ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE)) +#ifndef CONFIG_DEBUG_VIRTUAL #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset)) -#define kernel_mapping_pa_to_va(y) ({ \ - unsigned long _y = y; \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \ - (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \ - (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ +#else +void *linear_mapping_pa_to_va(unsigned long x); +#endif + +#ifdef CONFIG_XIP_KERNEL +#define kernel_mapping_pa_to_va(y) ({ \ + unsigned long _y = (unsigned long)(y); \ + (_y < phys_ram_base) ? \ + (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \ + (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \ }) +#else +#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset)) +#endif + #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) +#ifndef CONFIG_DEBUG_VIRTUAL #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset) +#else +phys_addr_t linear_mapping_va_to_pa(unsigned long x); +#endif + +#ifdef CONFIG_XIP_KERNEL #define kernel_mapping_va_to_pa(y) ({ \ - unsigned long _y = y; \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \ - ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \ - ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ + unsigned long _y = (unsigned long)(y); \ + (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? 
\ + (_y - kernel_map.va_kernel_xip_text_pa_offset) : \ + (_y - kernel_map.va_kernel_xip_data_pa_offset); \ }) +#else +#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset) +#endif #define __va_to_pa_nodebug(x) ({ \ unsigned long _x = x; \ @@ -166,16 +190,14 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) #define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) -#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) -#define page_to_bus(page) (page_to_phys(page)) -#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) - #define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) -#ifdef CONFIG_FLATMEM -#define pfn_valid(pfn) \ - (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)) -#endif +unsigned long kaslr_offset(void); + +static __always_inline void *pfn_to_kaddr(unsigned long pfn) +{ + return __va(pfn << PAGE_SHIFT); +} #endif /* __ASSEMBLY__ */ diff --git a/arch/riscv/include/asm/paravirt.h b/arch/riscv/include/asm/paravirt.h new file mode 100644 index 000000000000..c0abde70fc2c --- /dev/null +++ b/arch/riscv/include/asm/paravirt.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_PARAVIRT_H +#define _ASM_RISCV_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT +#include <linux/static_call_types.h> + +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 dummy_steal_clock(int cpu); + +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return static_call(pv_steal_clock)(cpu); +} + +int __init pv_time_init(void); + +#else + +#define pv_time_init() do {} while (0) + +#endif /* CONFIG_PARAVIRT */ +#endif /* _ASM_RISCV_PARAVIRT_H */ diff --git a/arch/riscv/include/asm/paravirt_api_clock.h b/arch/riscv/include/asm/paravirt_api_clock.h new file mode 100644 index 000000000000..65ac7cee0dad --- /dev/null +++ b/arch/riscv/include/asm/paravirt_api_clock.h @@ -0,0 +1 @@ +#include <asm/paravirt.h> diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h deleted file mode 100644 index f36368de839f..000000000000 --- a/arch/riscv/include/asm/parse_asm.h +++ /dev/null @@ -1,219 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2020 SiFive - */ - -#include <linux/bits.h> - -/* The bit field of immediate value in I-type instruction */ -#define I_IMM_SIGN_OPOFF 31 -#define I_IMM_11_0_OPOFF 20 -#define I_IMM_SIGN_OFF 12 -#define I_IMM_11_0_OFF 0 -#define I_IMM_11_0_MASK GENMASK(11, 0) - -/* The bit field of immediate value in J-type instruction */ -#define J_IMM_SIGN_OPOFF 31 -#define J_IMM_10_1_OPOFF 21 -#define J_IMM_11_OPOFF 20 -#define J_IMM_19_12_OPOFF 12 -#define J_IMM_SIGN_OFF 20 -#define J_IMM_10_1_OFF 1 -#define J_IMM_11_OFF 11 -#define J_IMM_19_12_OFF 12 -#define J_IMM_10_1_MASK GENMASK(9, 0) -#define J_IMM_11_MASK GENMASK(0, 0) -#define J_IMM_19_12_MASK GENMASK(7, 0) - -/* The bit field of immediate value in B-type instruction */ -#define B_IMM_SIGN_OPOFF 31 -#define B_IMM_10_5_OPOFF 25 -#define B_IMM_4_1_OPOFF 8 -#define B_IMM_11_OPOFF 7 -#define B_IMM_SIGN_OFF 12 -#define B_IMM_10_5_OFF 5 -#define B_IMM_4_1_OFF 1 -#define B_IMM_11_OFF 11 -#define B_IMM_10_5_MASK GENMASK(5, 0) -#define B_IMM_4_1_MASK GENMASK(3, 0) -#define B_IMM_11_MASK GENMASK(0, 0) - -/* The register offset in RVG instruction */ -#define RVG_RS1_OPOFF 15 -#define RVG_RS2_OPOFF 20 -#define 
RVG_RD_OPOFF 7 - -/* The bit field of immediate value in RVC J instruction */ -#define RVC_J_IMM_SIGN_OPOFF 12 -#define RVC_J_IMM_4_OPOFF 11 -#define RVC_J_IMM_9_8_OPOFF 9 -#define RVC_J_IMM_10_OPOFF 8 -#define RVC_J_IMM_6_OPOFF 7 -#define RVC_J_IMM_7_OPOFF 6 -#define RVC_J_IMM_3_1_OPOFF 3 -#define RVC_J_IMM_5_OPOFF 2 -#define RVC_J_IMM_SIGN_OFF 11 -#define RVC_J_IMM_4_OFF 4 -#define RVC_J_IMM_9_8_OFF 8 -#define RVC_J_IMM_10_OFF 10 -#define RVC_J_IMM_6_OFF 6 -#define RVC_J_IMM_7_OFF 7 -#define RVC_J_IMM_3_1_OFF 1 -#define RVC_J_IMM_5_OFF 5 -#define RVC_J_IMM_4_MASK GENMASK(0, 0) -#define RVC_J_IMM_9_8_MASK GENMASK(1, 0) -#define RVC_J_IMM_10_MASK GENMASK(0, 0) -#define RVC_J_IMM_6_MASK GENMASK(0, 0) -#define RVC_J_IMM_7_MASK GENMASK(0, 0) -#define RVC_J_IMM_3_1_MASK GENMASK(2, 0) -#define RVC_J_IMM_5_MASK GENMASK(0, 0) - -/* The bit field of immediate value in RVC B instruction */ -#define RVC_B_IMM_SIGN_OPOFF 12 -#define RVC_B_IMM_4_3_OPOFF 10 -#define RVC_B_IMM_7_6_OPOFF 5 -#define RVC_B_IMM_2_1_OPOFF 3 -#define RVC_B_IMM_5_OPOFF 2 -#define RVC_B_IMM_SIGN_OFF 8 -#define RVC_B_IMM_4_3_OFF 3 -#define RVC_B_IMM_7_6_OFF 6 -#define RVC_B_IMM_2_1_OFF 1 -#define RVC_B_IMM_5_OFF 5 -#define RVC_B_IMM_4_3_MASK GENMASK(1, 0) -#define RVC_B_IMM_7_6_MASK GENMASK(1, 0) -#define RVC_B_IMM_2_1_MASK GENMASK(1, 0) -#define RVC_B_IMM_5_MASK GENMASK(0, 0) - -/* The register offset in RVC op=C0 instruction */ -#define RVC_C0_RS1_OPOFF 7 -#define RVC_C0_RS2_OPOFF 2 -#define RVC_C0_RD_OPOFF 2 - -/* The register offset in RVC op=C1 instruction */ -#define RVC_C1_RS1_OPOFF 7 -#define RVC_C1_RS2_OPOFF 2 -#define RVC_C1_RD_OPOFF 7 - -/* The register offset in RVC op=C2 instruction */ -#define RVC_C2_RS1_OPOFF 7 -#define RVC_C2_RS2_OPOFF 2 -#define RVC_C2_RD_OPOFF 7 - -/* parts of opcode for RVG*/ -#define OPCODE_BRANCH 0x63 -#define OPCODE_JALR 0x67 -#define OPCODE_JAL 0x6f -#define OPCODE_SYSTEM 0x73 - -/* parts of opcode for RVC*/ -#define OPCODE_C_0 0x0 -#define OPCODE_C_1 0x1 -#define OPCODE_C_2 0x2 - -/* parts of funct3 code for I, M, A extension*/ -#define FUNCT3_JALR 0x0 -#define FUNCT3_BEQ 0x0 -#define FUNCT3_BNE 0x1000 -#define FUNCT3_BLT 0x4000 -#define FUNCT3_BGE 0x5000 -#define FUNCT3_BLTU 0x6000 -#define FUNCT3_BGEU 0x7000 - -/* parts of funct3 code for C extension*/ -#define FUNCT3_C_BEQZ 0xc000 -#define FUNCT3_C_BNEZ 0xe000 -#define FUNCT3_C_J 0xa000 -#define FUNCT3_C_JAL 0x2000 -#define FUNCT4_C_JR 0x8000 -#define FUNCT4_C_JALR 0xf000 - -#define FUNCT12_SRET 0x10200000 - -#define MATCH_JALR (FUNCT3_JALR | OPCODE_JALR) -#define MATCH_JAL (OPCODE_JAL) -#define MATCH_BEQ (FUNCT3_BEQ | OPCODE_BRANCH) -#define MATCH_BNE (FUNCT3_BNE | OPCODE_BRANCH) -#define MATCH_BLT (FUNCT3_BLT | OPCODE_BRANCH) -#define MATCH_BGE (FUNCT3_BGE | OPCODE_BRANCH) -#define MATCH_BLTU (FUNCT3_BLTU | OPCODE_BRANCH) -#define MATCH_BGEU (FUNCT3_BGEU | OPCODE_BRANCH) -#define MATCH_SRET (FUNCT12_SRET | OPCODE_SYSTEM) -#define MATCH_C_BEQZ (FUNCT3_C_BEQZ | OPCODE_C_1) -#define MATCH_C_BNEZ (FUNCT3_C_BNEZ | OPCODE_C_1) -#define MATCH_C_J (FUNCT3_C_J | OPCODE_C_1) -#define MATCH_C_JAL (FUNCT3_C_JAL | OPCODE_C_1) -#define MATCH_C_JR (FUNCT4_C_JR | OPCODE_C_2) -#define MATCH_C_JALR (FUNCT4_C_JALR | OPCODE_C_2) - -#define MASK_JALR 0x707f -#define MASK_JAL 0x7f -#define MASK_C_JALR 0xf07f -#define MASK_C_JR 0xf07f -#define MASK_C_JAL 0xe003 -#define MASK_C_J 0xe003 -#define MASK_BEQ 0x707f -#define MASK_BNE 0x707f -#define MASK_BLT 0x707f -#define MASK_BGE 0x707f -#define MASK_BLTU 0x707f -#define MASK_BGEU 0x707f -#define MASK_C_BEQZ 
0xe003 -#define MASK_C_BNEZ 0xe003 -#define MASK_SRET 0xffffffff - -#define __INSN_LENGTH_MASK _UL(0x3) -#define __INSN_LENGTH_GE_32 _UL(0x3) -#define __INSN_OPCODE_MASK _UL(0x7F) -#define __INSN_BRANCH_OPCODE _UL(OPCODE_BRANCH) - -/* Define a series of is_XXX_insn functions to check if the value INSN - * is an instance of instruction XXX. - */ -#define DECLARE_INSN(INSN_NAME, INSN_MATCH, INSN_MASK) \ -static inline bool is_ ## INSN_NAME ## _insn(long insn) \ -{ \ - return (insn & (INSN_MASK)) == (INSN_MATCH); \ -} - -#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) -#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) -#define RV_X(X, s, mask) (((X) >> (s)) & (mask)) -#define RVC_X(X, s, mask) RV_X(X, s, mask) - -#define EXTRACT_JTYPE_IMM(x) \ - ({typeof(x) x_ = (x); \ - (RV_X(x_, J_IMM_10_1_OPOFF, J_IMM_10_1_MASK) << J_IMM_10_1_OFF) | \ - (RV_X(x_, J_IMM_11_OPOFF, J_IMM_11_MASK) << J_IMM_11_OFF) | \ - (RV_X(x_, J_IMM_19_12_OPOFF, J_IMM_19_12_MASK) << J_IMM_19_12_OFF) | \ - (RV_IMM_SIGN(x_) << J_IMM_SIGN_OFF); }) - -#define EXTRACT_ITYPE_IMM(x) \ - ({typeof(x) x_ = (x); \ - (RV_X(x_, I_IMM_11_0_OPOFF, I_IMM_11_0_MASK)) | \ - (RV_IMM_SIGN(x_) << I_IMM_SIGN_OFF); }) - -#define EXTRACT_BTYPE_IMM(x) \ - ({typeof(x) x_ = (x); \ - (RV_X(x_, B_IMM_4_1_OPOFF, B_IMM_4_1_MASK) << B_IMM_4_1_OFF) | \ - (RV_X(x_, B_IMM_10_5_OPOFF, B_IMM_10_5_MASK) << B_IMM_10_5_OFF) | \ - (RV_X(x_, B_IMM_11_OPOFF, B_IMM_11_MASK) << B_IMM_11_OFF) | \ - (RV_IMM_SIGN(x_) << B_IMM_SIGN_OFF); }) - -#define EXTRACT_RVC_J_IMM(x) \ - ({typeof(x) x_ = (x); \ - (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \ - (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \ - (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \ - (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \ - (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \ - (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \ - (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \ - (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); }) - -#define EXTRACT_RVC_B_IMM(x) \ - ({typeof(x) x_ = (x); \ - (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \ - (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \ - (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \ - (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \ - (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h index 7fd52a30e605..cc2a184cfc2e 100644 --- a/arch/riscv/include/asm/pci.h +++ b/arch/riscv/include/asm/pci.h @@ -12,31 +12,10 @@ #include <asm/io.h> -#define PCIBIOS_MIN_IO 0 -#define PCIBIOS_MIN_MEM 0 - -/* RISC-V shim does not initialize PCI bus */ -#define pcibios_assign_all_busses() 1 - -#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 - -extern int isa_dma_bridge_buggy; - -#ifdef CONFIG_PCI -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - /* no legacy IRQ on risc-v */ - return -ENODEV; -} - -static inline int pci_proc_domain(struct pci_bus *bus) -{ - /* always show the domain in /proc */ - return 1; -} - -#ifdef CONFIG_NUMA +#define PCIBIOS_MIN_IO 4 +#define PCIBIOS_MIN_MEM 16 +#if defined(CONFIG_PCI) && defined(CONFIG_NUMA) static inline int pcibus_to_node(struct pci_bus *bus) { return dev_to_node(&bus->dev); @@ -46,8 +25,9 @@ static inline int pcibus_to_node(struct pci_bus *bus) cpu_all_mask : \ 
cpumask_of_node(pcibus_to_node(bus))) #endif -#endif /* CONFIG_NUMA */ +#endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */ -#endif /* CONFIG_PCI */ +/* Generic PCI */ +#include <asm-generic/pci.h> #endif /* _ASM_RISCV_PCI_H */ diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h index d42c901f9a97..bcc928fd3785 100644 --- a/arch/riscv/include/asm/perf_event.h +++ b/arch/riscv/include/asm/perf_event.h @@ -8,6 +8,16 @@ #ifndef _ASM_RISCV_PERF_EVENT_H #define _ASM_RISCV_PERF_EVENT_H +#ifdef CONFIG_PERF_EVENTS #include <linux/perf_event.h> #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs + +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->epc = (__ip); \ + (regs)->s0 = (unsigned long) __builtin_frame_address(0); \ + (regs)->sp = current_stack_pointer; \ + (regs)->status = SR_PP; \ +} +#endif + #endif /* _ASM_RISCV_PERF_EVENT_H */ diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index 947f23d7b6af..770ce18a7328 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -8,10 +8,10 @@ #define _ASM_RISCV_PGALLOC_H #include <linux/mm.h> +#include <asm/sbi.h> #include <asm/tlb.h> #ifdef CONFIG_MMU -#define __HAVE_ARCH_PUD_ALLOC_ONE #define __HAVE_ARCH_PUD_FREE #include <asm-generic/pgalloc.h> @@ -79,15 +79,6 @@ static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, } } -#define pud_alloc_one pud_alloc_one -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - if (pgtable_l4_enabled) - return __pud_alloc_one(mm, addr); - - return NULL; -} - #define pud_free pud_free static inline void pud_free(struct mm_struct *mm, pud_t *pud) { @@ -95,64 +86,55 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) __pud_free(mm, pud); } -#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud) - -#define p4d_alloc_one p4d_alloc_one -static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, + unsigned long addr) { - if (pgtable_l5_enabled) { - gfp_t gfp = GFP_PGTABLE_USER; - - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - return (p4d_t *)get_zeroed_page(gfp); - } - - return NULL; + if (pgtable_l4_enabled) + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud)); } -static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) +static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, + unsigned long addr) { - BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); - free_page((unsigned long)p4d); + if (pgtable_l5_enabled) + tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); } +#endif /* __PAGETABLE_PMD_FOLDED */ -#define p4d_free p4d_free -static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +static inline void sync_kernel_mappings(pgd_t *pgd) { - if (pgtable_l5_enabled) - __p4d_free(mm, p4d); + memcpy(pgd + USER_PTRS_PER_PGD, + init_mm.pgd + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } -#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d) -#endif /* __PAGETABLE_PMD_FOLDED */ - static inline pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; - pgd = (pgd_t *)__get_free_page(GFP_KERNEL); + pgd = __pgd_alloc(mm, 0); if (likely(pgd != NULL)) { - memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); /* Copy kernel mappings */ - memcpy(pgd + USER_PTRS_PER_PGD, - init_mm.pgd + USER_PTRS_PER_PGD, - (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + sync_kernel_mappings(pgd); } return pgd; } 
#ifndef __PAGETABLE_PMD_FOLDED -#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) +static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long addr) +{ + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd)); +} #endif /* __PAGETABLE_PMD_FOLDED */ -#define __pte_free_tlb(tlb, pte, buf) \ -do { \ - pgtable_pte_page_dtor(pte); \ - tlb_remove_page((tlb), pte); \ -} while (0) +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long addr) +{ + tlb_remove_ptdesc(tlb, page_ptdesc(pte)); +} #endif /* CONFIG_MMU */ #endif /* _ASM_RISCV_PGALLOC_H */ diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h index 59ba1fbaf784..00f3369570a8 100644 --- a/arch/riscv/include/asm/pgtable-32.h +++ b/arch/riscv/include/asm/pgtable-32.h @@ -33,4 +33,7 @@ _PAGE_WRITE | _PAGE_EXEC | \ _PAGE_USER | _PAGE_GLOBAL)) +static const __maybe_unused int pgtable_l4_enabled; +static const __maybe_unused int pgtable_l5_enabled; + #endif /* _ASM_RISCV_PGTABLE_32_H */ diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index dc42375c2357..7de05db7d3bd 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -16,8 +16,6 @@ extern bool pgtable_l5_enabled; #define PGDIR_SHIFT_L3 30 #define PGDIR_SHIFT_L4 39 #define PGDIR_SHIFT_L5 48 -#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3) - #define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \ (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)) /* Size of region mapped by a page global directory */ @@ -25,7 +23,11 @@ extern bool pgtable_l5_enabled; #define PGDIR_MASK (~(PGDIR_SIZE - 1)) /* p4d is folded into pgd in case of 4-level page table */ -#define P4D_SHIFT 39 +#define P4D_SHIFT_L3 30 +#define P4D_SHIFT_L4 39 +#define P4D_SHIFT_L5 39 +#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \ + (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3)) #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE - 1)) @@ -75,6 +77,40 @@ typedef struct { #define _PAGE_PFN_MASK GENMASK(53, 10) /* + * [63] Svnapot definitions: + * 0 Svnapot disabled + * 1 Svnapot enabled + */ +#define _PAGE_NAPOT_SHIFT 63 +#define _PAGE_NAPOT BIT(_PAGE_NAPOT_SHIFT) +/* + * Only 64KB (order 4) napot ptes supported. 
+ */ +#define NAPOT_CONT_ORDER_BASE 4 +enum napot_cont_order { + NAPOT_CONT64KB_ORDER = NAPOT_CONT_ORDER_BASE, + NAPOT_ORDER_MAX, +}; + +#define for_each_napot_order(order) \ + for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++) +#define for_each_napot_order_rev(order) \ + for (order = NAPOT_ORDER_MAX - 1; \ + order >= NAPOT_CONT_ORDER_BASE; order--) +#define napot_cont_order(val) (__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1)) + +#define napot_cont_shift(order) ((order) + PAGE_SHIFT) +#define napot_cont_size(order) BIT(napot_cont_shift(order)) +#define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL)) +#define napot_pte_num(order) BIT(order) + +#ifdef CONFIG_RISCV_ISA_SVNAPOT +#define HUGE_MAX_HSTATE (2 + (NAPOT_ORDER_MAX - NAPOT_CONT_ORDER_BASE)) +#else +#define HUGE_MAX_HSTATE 2 +#endif + +/* * [62:61] Svpbmt Memory Type definitions: * * 00 - PMA Normal Cacheable, No change to implied PMA memory type @@ -88,14 +124,18 @@ typedef struct { /* * [63:59] T-Head Memory Type definitions: - * - * 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable + * bit[63] SO - Strong Order + * bit[62] C - Cacheable + * bit[61] B - Bufferable + * bit[60] SH - Shareable + * bit[59] Sec - Trustable + * 00110 - NC Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable * 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable - * 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable + * 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable */ #define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60)) -#define _PAGE_NOCACHE_THEAD 0UL -#define _PAGE_IO_THEAD (1UL << 63) +#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60)) +#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60)) #define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59)) static inline u64 riscv_page_mtmask(void) @@ -144,11 +184,11 @@ static inline int pud_none(pud_t pud) static inline int pud_bad(pud_t pud) { - return !pud_present(pud); + return !pud_present(pud) || (pud_val(pud) & _PAGE_LEAF); } #define pud_leaf pud_leaf -static inline int pud_leaf(pud_t pud) +static inline bool pud_leaf(pud_t pud) { return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF); } @@ -160,7 +200,7 @@ static inline int pud_user(pud_t pud) static inline void set_pud(pud_t *pudp, pud_t pud) { - *pudp = pud; + WRITE_ONCE(*pudp, pud); } static inline void pud_clear(pud_t *pudp) @@ -222,8 +262,6 @@ static inline unsigned long _pmd_pfn(pmd_t pmd) return __page_val_to_pfn(pmd_val(pmd)); } -#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot) - #define pmd_ERROR(e) \ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) @@ -236,7 +274,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd) static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) { if (pgtable_l4_enabled) - *p4dp = p4d; + WRITE_ONCE(*p4dp, p4d); else set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) }); } @@ -298,18 +336,12 @@ static inline struct page *p4d_page(p4d_t p4d) #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pud_offset pud_offset -static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) -{ - if (pgtable_l4_enabled) - return p4d_pgtable(*p4d) + pud_index(address); - - return (pud_t *)p4d; -} +pud_t *pud_offset(p4d_t *p4d, unsigned long address); static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) { if (pgtable_l5_enabled) - *pgdp = pgd; + WRITE_ONCE(*pgdp, pgd); else 
set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) }); } @@ -362,12 +394,27 @@ static inline struct page *pgd_page(pgd_t pgd) #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) #define p4d_offset p4d_offset -static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +p4d_t *p4d_offset(pgd_t *pgd, unsigned long address); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pte_devmap(pte_t pte); +static inline pte_t pmd_pte(pmd_t pmd); +static inline pte_t pud_pte(pud_t pud); + +static inline int pmd_devmap(pmd_t pmd) { - if (pgtable_l5_enabled) - return pgd_pgtable(*pgd) + p4d_index(address); + return pte_devmap(pmd_pte(pmd)); +} - return (p4d_t *)pgd; +static inline int pud_devmap(pud_t pud) +{ + return pte_devmap(pud_pte(pud)); +} + +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; } +#endif #endif /* _ASM_RISCV_PGTABLE_64_H */ diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index b9e13a8fe2b7..a8f5205cea54 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -16,9 +16,10 @@ #define _PAGE_GLOBAL (1 << 5) /* Global */ #define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */ #define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */ -#define _PAGE_SOFT (1 << 8) /* Reserved for software */ +#define _PAGE_SOFT (3 << 8) /* Reserved for software */ -#define _PAGE_SPECIAL _PAGE_SOFT +#define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */ +#define _PAGE_DEVMAP (1 << 9) /* RSW, devmap */ #define _PAGE_TABLE _PAGE_PRESENT /* @@ -27,6 +28,9 @@ */ #define _PAGE_PROT_NONE _PAGE_GLOBAL +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _PAGE_ACCESSED + #define _PAGE_PFN_SHIFT 10 /* diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 5dbd6610729b..438ce7df24c3 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -12,7 +12,11 @@ #include <asm/pgtable-bits.h> #ifndef CONFIG_MMU -#define KERNEL_LINK_ADDR PAGE_OFFSET +#ifdef CONFIG_RELOCATABLE +#define KERNEL_LINK_ADDR UL(0) +#else +#define KERNEL_LINK_ADDR _AC(CONFIG_PHYS_RAM_BASE, UL) +#endif #define KERN_VIRT_SIZE (UL(-1)) #else @@ -31,7 +35,7 @@ #define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) /* - * Half of the kernel address space (half of the entries of the page global + * Half of the kernel address space (1/4 of the entries of the page global * directory) is for the direct mapping. */ #define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2) @@ -55,6 +59,9 @@ #define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G) #define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G) #define MODULES_END (PFN_ALIGN((unsigned long)&_start)) +#else +#define MODULES_VADDR VMALLOC_START +#define MODULES_END VMALLOC_END #endif /* @@ -62,11 +69,16 @@ * struct pages to map half the virtual address space. Then * position vmemmap directly below the VMALLOC region. */ +#define VA_BITS_SV32 32 #ifdef CONFIG_64BIT +#define VA_BITS_SV39 39 +#define VA_BITS_SV48 48 +#define VA_BITS_SV57 57 + #define VA_BITS (pgtable_l5_enabled ? \ - 57 : (pgtable_l4_enabled ? 48 : 39)) + VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39)) #else -#define VA_BITS 32 +#define VA_BITS VA_BITS_SV32 #endif #define VMEMMAP_SHIFT \ @@ -79,7 +91,7 @@ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. 
*/ -#define vmemmap ((struct page *)VMEMMAP_START) +#define vmemmap ((struct page *)VMEMMAP_START - vmemmap_start_pfn) #define PCI_IO_SIZE SZ_16M #define PCI_IO_END VMEMMAP_START @@ -87,31 +99,39 @@ #define FIXADDR_TOP PCI_IO_START #ifdef CONFIG_64BIT -#define FIXADDR_SIZE PMD_SIZE +#define MAX_FDT_SIZE PMD_SIZE +#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M) +#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE) #else -#define FIXADDR_SIZE PGDIR_SIZE +#define MAX_FDT_SIZE PGDIR_SIZE +#define FIX_FDT_SIZE MAX_FDT_SIZE +#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE) #endif #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) #endif -#ifdef CONFIG_XIP_KERNEL -#define XIP_OFFSET SZ_32M -#define XIP_OFFSET_MASK (SZ_32M - 1) -#else -#define XIP_OFFSET 0 -#endif - #ifndef __ASSEMBLY__ #include <asm/page.h> #include <asm/tlbflush.h> #include <linux/mm_types.h> +#include <asm/compat.h> +#include <asm/cpufeature.h> #define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT) #ifdef CONFIG_64BIT #include <asm/pgtable-64.h> + +#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1)) +#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1)) +#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1)) + +#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS) +#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39) +#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64) +#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64) #else #include <asm/pgtable-32.h> #endif /* CONFIG_64BIT */ @@ -120,11 +140,14 @@ #ifdef CONFIG_XIP_KERNEL #define XIP_FIXUP(addr) ({ \ + extern char _sdata[], _start[], _end[]; \ + uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \ + + (uintptr_t)&_sdata - (uintptr_t)&_start; \ + uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \ + + (uintptr_t)&_end - (uintptr_t)&_start; \ uintptr_t __a = (uintptr_t)(addr); \ - (__a >= CONFIG_XIP_PHYS_ADDR && \ - __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \ - __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ - __a; \ + (__a >= __rom_start_data && __a < __rom_end_data) ? 
\ + __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \ }) #else #define XIP_FIXUP(addr) (addr) @@ -143,7 +166,7 @@ struct pt_alloc_ops { #endif }; -extern struct pt_alloc_ops pt_ops __initdata; +extern struct pt_alloc_ops pt_ops __meminitdata; #ifdef CONFIG_MMU /* Number of PGD entries that a user-mode program can use */ @@ -161,8 +184,7 @@ extern struct pt_alloc_ops pt_ops __initdata; _PAGE_EXEC | _PAGE_WRITE) #define PAGE_COPY PAGE_READ -#define PAGE_COPY_EXEC PAGE_EXEC -#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC +#define PAGE_COPY_EXEC PAGE_READ_EXEC #define PAGE_SHARED PAGE_WRITE #define PAGE_SHARED_EXEC PAGE_WRITE_EXEC @@ -185,26 +207,8 @@ extern struct pt_alloc_ops pt_ops __initdata; #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) extern pgd_t swapper_pg_dir[]; - -/* MAP_PRIVATE permissions: xwr (copy-on-write) */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READ -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_EXEC -#define __P101 PAGE_READ_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_READ_EXEC - -/* MAP_SHARED permissions: xwr */ -#define __S000 PAGE_NONE -#define __S001 PAGE_READ -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXEC -#define __S101 PAGE_READ_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC +extern pgd_t trampoline_pg_dir[]; +extern pgd_t early_pg_dir[]; #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_present(pmd_t pmd) @@ -235,14 +239,14 @@ static inline int pmd_bad(pmd_t pmd) } #define pmd_leaf pmd_leaf -static inline int pmd_leaf(pmd_t pmd) +static inline bool pmd_leaf(pmd_t pmd) { return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF); } static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); } static inline void pmd_clear(pmd_t *pmdp) @@ -284,10 +288,47 @@ static inline pte_t pud_pte(pud_t pud) return __pte(pud_val(pud)); } +#ifdef CONFIG_RISCV_ISA_SVNAPOT + +static __always_inline bool has_svnapot(void) +{ + return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT); +} + +static inline unsigned long pte_napot(pte_t pte) +{ + return pte_val(pte) & _PAGE_NAPOT; +} + +static inline pte_t pte_mknapot(pte_t pte, unsigned int order) +{ + int pos = order - 1 + _PAGE_PFN_SHIFT; + unsigned long napot_bit = BIT(pos); + unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT); + + return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT); +} + +#else + +static __always_inline bool has_svnapot(void) { return false; } + +static inline unsigned long pte_napot(pte_t pte) +{ + return 0; +} + +#endif /* CONFIG_RISCV_ISA_SVNAPOT */ + /* Yields the page frame number (PFN) of a page table entry */ static inline unsigned long pte_pfn(pte_t pte) { - return __page_val_to_pfn(pte_val(pte)); + unsigned long res = __page_val_to_pfn(pte_val(pte)); + + if (has_svnapot() && pte_napot(pte)) + res = res & (res - 1UL); + + return res; } #define pte_page(x) pfn_to_page(pte_pfn(x)) @@ -302,13 +343,32 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val); } -#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) +#define pte_pgprot pte_pgprot +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} static inline int pte_present(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } +#define pte_accessible pte_accessible +static inline unsigned long 
pte_accessible(struct mm_struct *mm, pte_t a) +{ + if (pte_val(a) & _PAGE_PRESENT) + return true; + + if ((pte_val(a) & _PAGE_PROT_NONE) && + atomic_read(&mm->tlb_flush_pending)) + return true; + + return false; +} + static inline int pte_none(pte_t pte) { return (pte_val(pte) == 0); @@ -349,6 +409,13 @@ static inline int pte_special(pte_t pte) return pte_val(pte) & _PAGE_SPECIAL; } +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pte_devmap(pte_t pte) +{ + return pte_val(pte) & _PAGE_DEVMAP; +} +#endif + /* static inline pte_t pte_rdprotect(pte_t pte) */ static inline pte_t pte_wrprotect(pte_t pte) @@ -358,7 +425,7 @@ static inline pte_t pte_wrprotect(pte_t pte) /* static inline pte_t pte_mkread(pte_t pte) */ -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkwrite_novma(pte_t pte) { return __pte(pte_val(pte) | _PAGE_WRITE); } @@ -390,11 +457,22 @@ static inline pte_t pte_mkspecial(pte_t pte) return __pte(pte_val(pte) | _PAGE_SPECIAL); } +static inline pte_t pte_mkdevmap(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DEVMAP); +} + static inline pte_t pte_mkhuge(pte_t pte) { return pte; } +#ifdef CONFIG_RISCV_ISA_SVNAPOT +#define pte_leaf_size(pte) (pte_napot(pte) ? \ + napot_cont_size(napot_cont_order(pte)) :\ + PAGE_SIZE) +#endif + #ifdef CONFIG_NUMA_BALANCING /* * See the comment in include/asm-generic/pgtable.h @@ -425,9 +503,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) /* Commit new configuration to MMU hardware */ -static inline void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) { + asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) + : : : : svvptc); + /* * The kernel assumes that TLBs don't cache invalid entries, but * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a @@ -435,8 +517,21 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, * Relying on flush_tlb_fix_spurious_fault would suffice, but * the extra traps reduce performance. So, eagerly SFENCE.VMA. */ - local_flush_tlb_page(address); + while (nr--) + local_flush_tlb_page(address + nr * PAGE_SIZE); + +svvptc:; + /* + * Svvptc guarantees that the new valid pte will be visible within + * a bounded timeframe, so when the uarch does not cache invalid + * entries, we don't have to do anything. 
+ */ } +#define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) + +#define update_mmu_tlb_range(vma, addr, ptep, nr) \ + update_mmu_cache_range(NULL, vma, addr, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) @@ -459,46 +554,48 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b) */ static inline void set_pte(pte_t *ptep, pte_t pteval) { - *ptep = pteval; + WRITE_ONCE(*ptep, pteval); } -void flush_icache_pte(pte_t pte); +void flush_icache_pte(struct mm_struct *mm, pte_t pte); -static inline void __set_pte_at(struct mm_struct *mm, - unsigned long addr, pte_t *ptep, pte_t pteval) +static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval) { if (pte_present(pteval) && pte_exec(pteval)) - flush_icache_pte(pteval); + flush_icache_pte(mm, pteval); set_pte(ptep, pteval); } -static inline void set_pte_at(struct mm_struct *mm, - unsigned long addr, pte_t *ptep, pte_t pteval) +#define PFN_PTE_SHIFT _PAGE_PFN_SHIFT + +static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval, unsigned int nr) { - page_table_check_pte_set(mm, addr, ptep, pteval); - __set_pte_at(mm, addr, ptep, pteval); + page_table_check_ptes_set(mm, ptep, pteval, nr); + + for (;;) { + __set_pte_at(mm, ptep, pteval); + if (--nr == 0) + break; + ptep++; + pte_val(pteval) += 1 << _PAGE_PFN_SHIFT; + } } +#define set_ptes set_ptes static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - __set_pte_at(mm, addr, ptep, __pte(0)); + __set_pte_at(mm, ptep, __pte(0)); } -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -static inline int ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep, - pte_t entry, int dirty) -{ - if (!pte_same(*ptep, entry)) - set_pte_at(vma->vm_mm, address, ptep, entry); - /* - * update_mmu_cache will unconditionally execute, handling both - * the case that the PTE changed and the spurious fault case. 
- */ - return true; -} +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */ +extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, pte_t entry, int dirty); +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */ +extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep); #define __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, @@ -506,21 +603,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, { pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0)); - page_table_check_pte_clear(mm, address, pte); + page_table_check_pte_clear(mm, pte); return pte; } -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pte_t *ptep) -{ - if (!pte_young(*ptep)) - return 0; - return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep)); -} - #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) @@ -550,6 +637,12 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, return ptep_test_and_clear_young(vma, address, ptep); } +#define pgprot_nx pgprot_nx +static inline pgprot_t pgprot_nx(pgprot_t _prot) +{ + return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC); +} + #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t _prot) { @@ -573,6 +666,17 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) } /* + * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By + * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in + * DT. 
+ */ +#define arch_has_hw_pte_young arch_has_hw_pte_young +static inline bool arch_has_hw_pte_young(void) +{ + return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU); +} + +/* * THP functions */ static inline pmd_t pte_pmd(pte_t pte) @@ -580,6 +684,11 @@ static inline pmd_t pte_pmd(pte_t pte) return __pmd(pte_val(pte)); } +static inline pud_t pte_pud(pte_t pte) +{ + return __pud(pte_val(pte)); +} + static inline pmd_t pmd_mkhuge(pmd_t pmd) { return pmd; @@ -599,11 +708,24 @@ static inline unsigned long pmd_pfn(pmd_t pmd) #define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT) +#define pud_pfn pud_pfn static inline unsigned long pud_pfn(pud_t pud) { return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT); } +#define pmd_pgprot pmd_pgprot +static inline pgprot_t pmd_pgprot(pmd_t pmd) +{ + return pte_pgprot(pmd_pte(pmd)); +} + +#define pud_pgprot pud_pgprot +static inline pgprot_t pud_pgprot(pud_t pud) +{ + return pte_pgprot(pud_pte(pud)); +} + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); @@ -615,11 +737,19 @@ static inline int pmd_write(pmd_t pmd) return pte_write(pmd_pte(pmd)); } +#define pud_write pud_write +static inline int pud_write(pud_t pud) +{ + return pte_write(pud_pte(pud)); +} + +#define pmd_dirty pmd_dirty static inline int pmd_dirty(pmd_t pmd) { return pte_dirty(pmd_pte(pmd)); } +#define pmd_young pmd_young static inline int pmd_young(pmd_t pmd) { return pte_young(pmd_pte(pmd)); @@ -640,9 +770,9 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd) return pte_pmd(pte_mkyoung(pmd_pte(pmd))); } -static inline pmd_t pmd_mkwrite(pmd_t pmd) +static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) { - return pte_pmd(pte_mkwrite(pmd_pte(pmd))); + return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))); } static inline pmd_t pmd_wrprotect(pmd_t pmd) @@ -660,18 +790,47 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd) return pte_pmd(pte_mkdirty(pmd_pte(pmd))); } +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + return pte_pmd(pte_mkdevmap(pmd_pte(pmd))); +} + +#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP +static inline bool pmd_special(pmd_t pmd) +{ + return pte_special(pmd_pte(pmd)); +} + +static inline pmd_t pmd_mkspecial(pmd_t pmd) +{ + return pte_pmd(pte_mkspecial(pmd_pte(pmd))); +} +#endif + +#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP +static inline bool pud_special(pud_t pud) +{ + return pte_special(pud_pte(pud)); +} + +static inline pud_t pud_mkspecial(pud_t pud) +{ + return pte_pud(pte_mkspecial(pud_pte(pud))); +} +#endif + static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { - page_table_check_pmd_set(mm, addr, pmdp, pmd); - return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)); + page_table_check_pmd_set(mm, pmdp, pmd); + return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd)); } static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp, pud_t pud) { - page_table_check_pud_set(mm, addr, pudp, pud); - return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud)); + page_table_check_pud_set(mm, pudp, pud); + return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud)); } #ifdef CONFIG_PAGE_TABLE_CHECK @@ -718,7 +877,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, { pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0)); - page_table_check_pmd_clear(mm, address, pmd); + page_table_check_pmd_clear(mm, pmd); return pmd; } @@ -734,22 +893,125 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, static inline pmd_t 
pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) { - page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd); + page_table_check_pmd_set(vma->vm_mm, pmdp, pmd); return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd))); } + +#define pmdp_collapse_flush pmdp_collapse_flush +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +static inline pud_t pud_wrprotect(pud_t pud) +{ + return pte_pud(pte_wrprotect(pud_pte(pud))); +} + +static inline int pud_trans_huge(pud_t pud) +{ + return pud_leaf(pud); +} + +static inline int pud_dirty(pud_t pud) +{ + return pte_dirty(pud_pte(pud)); +} + +static inline pud_t pud_mkyoung(pud_t pud) +{ + return pte_pud(pte_mkyoung(pud_pte(pud))); +} + +static inline pud_t pud_mkold(pud_t pud) +{ + return pte_pud(pte_mkold(pud_pte(pud))); +} + +static inline pud_t pud_mkdirty(pud_t pud) +{ + return pte_pud(pte_mkdirty(pud_pte(pud))); +} + +static inline pud_t pud_mkclean(pud_t pud) +{ + return pte_pud(pte_mkclean(pud_pte(pud))); +} + +static inline pud_t pud_mkwrite(pud_t pud) +{ + return pte_pud(pte_mkwrite_novma(pud_pte(pud))); +} + +static inline pud_t pud_mkhuge(pud_t pud) +{ + return pud; +} + +static inline pud_t pud_mkdevmap(pud_t pud) +{ + return pte_pud(pte_mkdevmap(pud_pte(pud))); +} + +static inline int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty) +{ + return ptep_set_access_flags(vma, address, (pte_t *)pudp, pud_pte(entry), dirty); +} + +static inline int pudp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp) +{ + return ptep_test_and_clear_young(vma, address, (pte_t *)pudp); +} + +static inline int pud_young(pud_t pud) +{ + return pte_young(pud_pte(pud)); +} + +static inline void update_mmu_cache_pud(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp) +{ + pte_t *ptep = (pte_t *)pudp; + + update_mmu_cache(vma, address, ptep); +} + +static inline pud_t pudp_establish(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, pud_t pud) +{ + page_table_check_pud_set(vma->vm_mm, pudp, pud); + return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud))); +} + +static inline pud_t pud_mkinvalid(pud_t pud) +{ + return __pud(pud_val(pud) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE)); +} + +extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, + pud_t *pudp); + +static inline pud_t pud_modify(pud_t pud, pgprot_t newprot) +{ + return pte_pud(pte_modify(pud_pte(pud), newprot)); +} + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * Encode and decode a swap entry + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). 
* * Format of swap PTE: * bit 0: _PAGE_PRESENT (zero) * bit 1 to 3: _PAGE_LEAF (zero) * bit 5: _PAGE_PROT_NONE (zero) - * bits 6 to 10: swap type - * bits 10 to XLEN-1: swap offset + * bit 6: exclusive marker + * bits 7 to 11: swap type + * bits 12 to XLEN-1: swap offset */ -#define __SWP_TYPE_SHIFT 6 +#define __SWP_TYPE_SHIFT 7 #define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) @@ -760,11 +1022,27 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) #define __swp_entry(type, offset) ((swp_entry_t) \ - { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \ + ((offset) << __SWP_OFFSET_SHIFT) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +static inline bool pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE); +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE); +} + #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) #define __swp_entry_to_pmd(swp) __pmd((swp).val) @@ -784,22 +1062,24 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32. * Note that PGDIR_SIZE must evenly divide TASK_SIZE. * Task size is: - * - 0x9fc00000 (~2.5GB) for RV32. - * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu - * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu + * - 0x9fc00000 (~2.5GB) for RV32. + * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu + * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu + * - 0x100000000000000 ( 64PB) for RV64 using SV57 mmu * * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V * Instruction Set Manual Volume II: Privileged Architecture" states that * "load and store effective addresses, which are 64bits, must have bits * 63–48 all equal to bit 47, or else a page-fault exception will occur." + * Similarly for SV57, bits 63–57 must be equal to bit 56. */ #ifdef CONFIG_64BIT #define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2) -#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2) +#define TASK_SIZE_MAX LONG_MAX #ifdef CONFIG_COMPAT #define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE) -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ +#define TASK_SIZE (is_compat_task() ? 
\ TASK_SIZE_32 : TASK_SIZE_64) #else #define TASK_SIZE TASK_SIZE_64 @@ -807,7 +1087,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, #else #define TASK_SIZE FIXADDR_START -#define TASK_SIZE_MIN TASK_SIZE #endif #else /* CONFIG_MMU */ @@ -815,14 +1094,12 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, #define PAGE_SHARED __pgprot(0) #define PAGE_KERNEL __pgprot(0) #define swapper_pg_dir NULL -#define TASK_SIZE 0xffffffffUL -#define VMALLOC_START 0 +#define TASK_SIZE _AC(-1, UL) +#define VMALLOC_START _AC(0, UL) #define VMALLOC_END TASK_SIZE #endif /* !CONFIG_MMU */ -#define kern_addr_valid(addr) (1) /* FIXME */ - extern char _start[]; extern void *_dtb_early_va; extern uintptr_t _dtb_early_pa; @@ -834,7 +1111,6 @@ extern uintptr_t _dtb_early_pa; #define dtb_early_pa _dtb_early_pa #endif /* CONFIG_XIP_KERNEL */ extern u64 satp_mode; -extern bool pgtable_l4_enabled; void paging_init(void); void misc_mem_init(void); @@ -846,6 +1122,25 @@ void misc_mem_init(void); extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) +/* + * Use set_p*_safe(), and elide TLB flushing, when confident that *no* + * TLB flush will be required as a result of the "set". For example, use + * in scenarios where it is known ahead of time that the routine is + * setting non-present entries, or re-setting an existing entry to the + * same value. Otherwise, use the typical "set" helpers and flush the + * TLB. + */ +#define set_p4d_safe(p4dp, p4d) \ +({ \ + WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ + set_p4d(p4dp, p4d); \ +}) + +#define set_pgd_safe(pgdp, pgd) \ +({ \ + WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ + set_pgd(pgdp, pgd); \ +}) #endif /* !__ASSEMBLY__ */ #endif /* _ASM_RISCV_PGTABLE_H */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 21c8072dce17..24d3af4d3807 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -7,30 +7,100 @@ #define _ASM_RISCV_PROCESSOR_H #include <linux/const.h> +#include <linux/cache.h> +#include <linux/prctl.h> #include <vdso/processor.h> #include <asm/ptrace.h> +#include <asm/insn-def.h> +#include <asm/alternative-macros.h> +#include <asm/hwcap.h> -/* - * This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) +#define arch_get_mmap_end(addr, len, flags) \ +({ \ + STACK_TOP_MAX; \ +}) + +#define arch_get_mmap_base(addr, base) \ +({ \ + base; \ +}) -#define STACK_TOP TASK_SIZE #ifdef CONFIG_64BIT +#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1)) #define STACK_TOP_MAX TASK_SIZE_64 #else +#define DEFAULT_MAP_WINDOW TASK_SIZE #define STACK_TOP_MAX TASK_SIZE #endif #define STACK_ALIGN 16 +#define STACK_TOP DEFAULT_MAP_WINDOW + +#ifdef CONFIG_MMU +#define user_max_virt_addr() arch_get_mmap_end(ULONG_MAX, 0, 0) +#else +#define user_max_virt_addr() 0 +#endif /* CONFIG_MMU */ + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#ifdef CONFIG_64BIT +#define TASK_UNMAPPED_BASE PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3) +#else +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) +#endif + #ifndef __ASSEMBLY__ struct task_struct; struct pt_regs; +/* + * We use a flag to track in-kernel Vector context. 
Currently the flag has the + * following meaning: + * + * - bit 0: indicates whether the in-kernel Vector context is active. The + * activation of this state disables the preemption. On a non-RT kernel, it + * also disable bh. + * - bits 8: is used for tracking preemptible kernel-mode Vector, when + * RISCV_ISA_V_PREEMPTIVE is enabled. Calling kernel_vector_begin() does not + * disable the preemption if the thread's kernel_vstate.datap is allocated. + * Instead, the kernel set this bit field. Then the trap entry/exit code + * knows if we are entering/exiting the context that owns preempt_v. + * - 0: the task is not using preempt_v + * - 1: the task is actively using preempt_v. But whether does the task own + * the preempt_v context is decided by bits in RISCV_V_CTX_DEPTH_MASK. + * - bit 16-23 are RISCV_V_CTX_DEPTH_MASK, used by context tracking routine + * when preempt_v starts: + * - 0: the task is actively using, and own preempt_v context. + * - non-zero: the task was using preempt_v, but then took a trap within. + * Thus, the task does not own preempt_v. Any use of Vector will have to + * save preempt_v, if dirty, and fallback to non-preemptible kernel-mode + * Vector. + * - bit 29: The thread voluntarily calls schedule() while holding an active + * preempt_v. All preempt_v context should be dropped in such case because + * V-regs are caller-saved. Only sstatus.VS=ON is persisted across a + * schedule() call. + * - bit 30: The in-kernel preempt_v context is saved, and requries to be + * restored when returning to the context that owns the preempt_v. + * - bit 31: The in-kernel preempt_v context is dirty, as signaled by the + * trap entry code. Any context switches out-of current task need to save + * it to the task's in-kernel V context. Also, any traps nesting on-top-of + * preempt_v requesting to use V needs a save. + */ +#define RISCV_V_CTX_DEPTH_MASK 0x00ff0000 + +#define RISCV_V_CTX_UNIT_DEPTH 0x00010000 +#define RISCV_KERNEL_MODE_V 0x00000001 +#define RISCV_PREEMPT_V 0x00000100 +#define RISCV_PREEMPT_V_DIRTY 0x80000000 +#define RISCV_PREEMPT_V_NEED_RESTORE 0x40000000 +#define RISCV_PREEMPT_V_IN_SCHEDULE 0x20000000 + /* CPU-specific state of a task */ struct thread_struct { /* Callee-saved registers */ @@ -39,6 +109,19 @@ struct thread_struct { unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; unsigned long bad_cause; + unsigned long envcfg; + unsigned long sum; + u32 riscv_v_flags; + u32 vstate_ctrl; + struct __riscv_v_ext_state vstate; + unsigned long align_ctl; + struct __riscv_v_ext_state kernel_vstate; +#ifdef CONFIG_SMP + /* Flush the icache on migration */ + bool force_icache_flush; + /* A forced icache flush is not needed if migrating to the previous cpu. */ + unsigned int prev_cpu; +#endif }; /* Whitelist the fstate from the task_struct for hardened usercopy */ @@ -51,6 +134,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, #define INIT_THREAD { \ .sp = sizeof(init_stack) + (long)&init_stack, \ + .align_ctl = PR_UNALIGN_NOPRINT, \ } #define task_pt_regs(tsk) \ @@ -60,16 +144,32 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, #define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) +#define PREFETCH_ASM(x) \ + ALTERNATIVE(__nops(1), PREFETCH_R(x, 0), 0, \ + RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP) -/* Do necessary setup to start up a newly executed thread. 
*/ -extern void start_thread(struct pt_regs *regs, - unsigned long pc, unsigned long sp); +#define PREFETCHW_ASM(x) \ + ALTERNATIVE(__nops(1), PREFETCH_W(x, 0), 0, \ + RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP) -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) +#ifdef CONFIG_RISCV_ISA_ZICBOP +#define ARCH_HAS_PREFETCH +static inline void prefetch(const void *x) { + __asm__ __volatile__(PREFETCH_ASM(%0) : : "r" (x) : "memory"); } +#define ARCH_HAS_PREFETCHW +static inline void prefetchw(const void *x) +{ + __asm__ __volatile__(PREFETCHW_ASM(%0) : : "r" (x) : "memory"); +} +#endif /* CONFIG_RISCV_ISA_ZICBOP */ + +/* Do necessary setup to start up a newly executed thread. */ +extern void start_thread(struct pt_regs *regs, + unsigned long pc, unsigned long sp); + extern unsigned long __get_wchan(struct task_struct *p); @@ -78,13 +178,43 @@ static inline void wait_for_interrupt(void) __asm__ __volatile__ ("wfi"); } +extern phys_addr_t dma32_phys_limit; + struct device_node; -int riscv_of_processor_hartid(struct device_node *node); -int riscv_of_parent_hartid(struct device_node *node); +int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid); +int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid); +int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid); extern void riscv_fill_hwcap(void); extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); +extern unsigned long signal_minsigstksz __ro_after_init; + +#ifdef CONFIG_RISCV_ISA_V +/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */ +#define RISCV_V_SET_CONTROL(arg) riscv_v_vstate_ctrl_set_current(arg) +#define RISCV_V_GET_CONTROL() riscv_v_vstate_ctrl_get_current() +extern long riscv_v_vstate_ctrl_set_current(unsigned long arg); +extern long riscv_v_vstate_ctrl_get_current(void); +#endif /* CONFIG_RISCV_ISA_V */ + +extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr); +extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); + +#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr)) +#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val)) + +#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2) +extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread); + +#ifdef CONFIG_RISCV_ISA_SUPM +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg); +long get_tagged_addr_ctrl(struct task_struct *task); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current) +#endif + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/ptdump.h b/arch/riscv/include/asm/ptdump.h deleted file mode 100644 index 3c9ea6dd5af7..000000000000 --- a/arch/riscv/include/asm/ptdump.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2019 SiFive - */ - -#ifndef _ASM_RISCV_PTDUMP_H -#define _ASM_RISCV_PTDUMP_H - -void ptdump_check_wx(void); - -#ifdef CONFIG_DEBUG_WX -static inline void debug_checkwx(void) -{ - ptdump_check_wx(); -} -#else -static inline void debug_checkwx(void) -{ -} -#endif - -#endif /* _ASM_RISCV_PTDUMP_H */ diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h index 6ecd461129d2..a7dc0e330757 100644 --- a/arch/riscv/include/asm/ptrace.h +++ 
b/arch/riscv/include/asm/ptrace.h @@ -23,14 +23,16 @@ struct pt_regs { unsigned long t2; unsigned long s0; unsigned long s1; - unsigned long a0; - unsigned long a1; - unsigned long a2; - unsigned long a3; - unsigned long a4; - unsigned long a5; - unsigned long a6; - unsigned long a7; + struct_group(a_regs, + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + ); unsigned long s2; unsigned long s3; unsigned long s4; @@ -53,6 +55,9 @@ struct pt_regs { unsigned long orig_a0; }; +#define PTRACE_SYSEMU 0x1f +#define PTRACE_SYSEMU_SINGLESTEP 0x20 + #ifdef CONFIG_64BIT #define REG_FMT "%016lx" #else @@ -121,8 +126,6 @@ extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer); -int do_syscall_trace_enter(struct pt_regs *regs); -void do_syscall_trace_exit(struct pt_regs *regs); /** * regs_get_register() - get register value from its offset @@ -172,6 +175,11 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, return 0; } +static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) +{ + return !(regs->status & SR_PIE); +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/asm/runtime-const.h b/arch/riscv/include/asm/runtime-const.h new file mode 100644 index 000000000000..451fd76b8811 --- /dev/null +++ b/arch/riscv/include/asm/runtime-const.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_RUNTIME_CONST_H +#define _ASM_RISCV_RUNTIME_CONST_H + +#include <asm/asm.h> +#include <asm/alternative.h> +#include <asm/cacheflush.h> +#include <asm/insn-def.h> +#include <linux/memory.h> +#include <asm/text-patching.h> + +#include <linux/uaccess.h> + +#ifdef CONFIG_32BIT +#define runtime_const_ptr(sym) \ +({ \ + typeof(sym) __ret; \ + asm_inline(".option push\n\t" \ + ".option norvc\n\t" \ + "1:\t" \ + "lui %[__ret],0x89abd\n\t" \ + "addi %[__ret],%[__ret],-0x211\n\t" \ + ".option pop\n\t" \ + ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \ + ".long 1b - .\n\t" \ + ".popsection" \ + : [__ret] "=r" (__ret)); \ + __ret; \ +}) +#else +/* + * Loading 64-bit constants into a register from immediates is a non-trivial + * task on riscv64. To get it somewhat performant, load 32 bits into two + * different registers and then combine the results. + * + * If the processor supports the Zbkb extension, we can combine the final + * "slli,slli,srli,add" into the single "pack" instruction. If the processor + * doesn't support Zbkb but does support the Zbb extension, we can + * combine the final "slli,srli,add" into one instruction "add.uw". 
+ */ +#define RISCV_RUNTIME_CONST_64_PREAMBLE \ + ".option push\n\t" \ + ".option norvc\n\t" \ + "1:\t" \ + "lui %[__ret],0x89abd\n\t" \ + "lui %[__tmp],0x1234\n\t" \ + "addiw %[__ret],%[__ret],-0x211\n\t" \ + "addiw %[__tmp],%[__tmp],0x567\n\t" \ + +#define RISCV_RUNTIME_CONST_64_BASE \ + "slli %[__tmp],%[__tmp],32\n\t" \ + "slli %[__ret],%[__ret],32\n\t" \ + "srli %[__ret],%[__ret],32\n\t" \ + "add %[__ret],%[__ret],%[__tmp]\n\t" \ + +#define RISCV_RUNTIME_CONST_64_ZBA \ + ".option push\n\t" \ + ".option arch,+zba\n\t" \ + ".option norvc\n\t" \ + "slli %[__tmp],%[__tmp],32\n\t" \ + "add.uw %[__ret],%[__ret],%[__tmp]\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".option pop\n\t" \ + +#define RISCV_RUNTIME_CONST_64_ZBKB \ + ".option push\n\t" \ + ".option arch,+zbkb\n\t" \ + ".option norvc\n\t" \ + "pack %[__ret],%[__ret],%[__tmp]\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".option pop\n\t" \ + +#define RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \ + ".option pop\n\t" \ + ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \ + ".long 1b - .\n\t" \ + ".popsection" \ + +#if defined(CONFIG_RISCV_ISA_ZBA) && defined(CONFIG_TOOLCHAIN_HAS_ZBA) \ + && defined(CONFIG_RISCV_ISA_ZBKB) +#define runtime_const_ptr(sym) \ +({ \ + typeof(sym) __ret, __tmp; \ + asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \ + ALTERNATIVE_2( \ + RISCV_RUNTIME_CONST_64_BASE, \ + RISCV_RUNTIME_CONST_64_ZBA, \ + 0, RISCV_ISA_EXT_ZBA, 1, \ + RISCV_RUNTIME_CONST_64_ZBKB, \ + 0, RISCV_ISA_EXT_ZBKB, 1 \ + ) \ + RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \ + : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \ + __ret; \ +}) +#elif defined(CONFIG_RISCV_ISA_ZBA) && defined(CONFIG_TOOLCHAIN_HAS_ZBA) +#define runtime_const_ptr(sym) \ +({ \ + typeof(sym) __ret, __tmp; \ + asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \ + ALTERNATIVE( \ + RISCV_RUNTIME_CONST_64_BASE, \ + RISCV_RUNTIME_CONST_64_ZBA, \ + 0, RISCV_ISA_EXT_ZBA, 1 \ + ) \ + RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \ + : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \ + __ret; \ +}) +#elif defined(CONFIG_RISCV_ISA_ZBKB) +#define runtime_const_ptr(sym) \ +({ \ + typeof(sym) __ret, __tmp; \ + asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \ + ALTERNATIVE( \ + RISCV_RUNTIME_CONST_64_BASE, \ + RISCV_RUNTIME_CONST_64_ZBKB, \ + 0, RISCV_ISA_EXT_ZBKB, 1 \ + ) \ + RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \ + : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \ + __ret; \ +}) +#else +#define runtime_const_ptr(sym) \ +({ \ + typeof(sym) __ret, __tmp; \ + asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \ + RISCV_RUNTIME_CONST_64_BASE \ + RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \ + : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \ + __ret; \ +}) +#endif +#endif + +#define runtime_const_shift_right_32(val, sym) \ +({ \ + u32 __ret; \ + asm_inline(".option push\n\t" \ + ".option norvc\n\t" \ + "1:\t" \ + SRLI " %[__ret],%[__val],12\n\t" \ + ".option pop\n\t" \ + ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \ + ".long 1b - .\n\t" \ + ".popsection" \ + : [__ret] "=r" (__ret) \ + : [__val] "r" (val)); \ + __ret; \ +}) + +#define runtime_const_init(type, sym) do { \ + extern s32 __start_runtime_##type##_##sym[]; \ + extern s32 __stop_runtime_##type##_##sym[]; \ + \ + runtime_const_fixup(__runtime_fixup_##type, \ + (unsigned long)(sym), \ + __start_runtime_##type##_##sym, \ + __stop_runtime_##type##_##sym); \ +} while (0) + +static inline void __runtime_fixup_caches(void *where, unsigned int insns) +{ + /* On riscv there are currently only cache-wide flushes so va is ignored. 
*/ + __always_unused uintptr_t va = (uintptr_t)where; + + flush_icache_range(va, va + 4 * insns); +} + +/* + * The 32-bit immediate is stored in a lui+addi pairing. + * lui holds the upper 20 bits of the immediate in the first 20 bits of the instruction. + * addi holds the lower 12 bits of the immediate in the first 12 bits of the instruction. + */ +static inline void __runtime_fixup_32(__le16 *lui_parcel, __le16 *addi_parcel, unsigned int val) +{ + unsigned int lower_immediate, upper_immediate; + u32 lui_insn, addi_insn, addi_insn_mask; + __le32 lui_res, addi_res; + + /* Mask out upper 12 bit of addi */ + addi_insn_mask = 0x000fffff; + + lui_insn = (u32)le16_to_cpu(lui_parcel[0]) | (u32)le16_to_cpu(lui_parcel[1]) << 16; + addi_insn = (u32)le16_to_cpu(addi_parcel[0]) | (u32)le16_to_cpu(addi_parcel[1]) << 16; + + lower_immediate = sign_extend32(val, 11); + upper_immediate = (val - lower_immediate); + + if (upper_immediate & 0xfffff000) { + /* replace upper 20 bits of lui with upper immediate */ + lui_insn &= 0x00000fff; + lui_insn |= upper_immediate & 0xfffff000; + } else { + /* replace lui with nop if immediate is small enough to fit in addi */ + lui_insn = RISCV_INSN_NOP4; + /* + * lui is being skipped, so do a load instead of an add. A load + * is performed by adding with the x0 register. Setting rs to + * zero with the following mask will accomplish this goal. + */ + addi_insn_mask &= 0x07fff; + } + + if (lower_immediate & 0x00000fff) { + /* replace upper 12 bits of addi with lower 12 bits of val */ + addi_insn &= addi_insn_mask; + addi_insn |= (lower_immediate & 0x00000fff) << 20; + } else { + /* replace addi with nop if lower_immediate is empty */ + addi_insn = RISCV_INSN_NOP4; + } + + addi_res = cpu_to_le32(addi_insn); + lui_res = cpu_to_le32(lui_insn); + mutex_lock(&text_mutex); + patch_insn_write(addi_parcel, &addi_res, sizeof(addi_res)); + patch_insn_write(lui_parcel, &lui_res, sizeof(lui_res)); + mutex_unlock(&text_mutex); +} + +static inline void __runtime_fixup_ptr(void *where, unsigned long val) +{ +#ifdef CONFIG_32BIT + __runtime_fixup_32(where, where + 4, val); + __runtime_fixup_caches(where, 2); +#else + __runtime_fixup_32(where, where + 8, val); + __runtime_fixup_32(where + 4, where + 12, val >> 32); + __runtime_fixup_caches(where, 4); +#endif +} + +/* + * Replace the least significant 5 bits of the srli/srliw immediate that is + * located at bits 20-24 + */ +static inline void __runtime_fixup_shift(void *where, unsigned long val) +{ + __le16 *parcel = where; + __le32 res; + u32 insn; + + insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16; + + insn &= 0xfe0fffff; + insn |= (val & 0b11111) << 20; + + res = cpu_to_le32(insn); + mutex_lock(&text_mutex); + patch_text_nosync(where, &res, sizeof(insn)); + mutex_unlock(&text_mutex); +} + +static inline void runtime_const_fixup(void (*fn)(void *, unsigned long), + unsigned long val, s32 *start, s32 *end) +{ + while (start < end) { + fn(*start + (void *)start, val); + start++; + } +} + +#endif /* _ASM_RISCV_RUNTIME_CONST_H */ diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 9e3c2cf1edaf..341e74238aa0 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/cpumask.h> +#include <linux/jump_label.h> #ifdef CONFIG_RISCV_SBI enum sbi_ext_id { @@ -29,7 +30,12 @@ enum sbi_ext_id { SBI_EXT_RFENCE = 0x52464E43, SBI_EXT_HSM = 0x48534D, SBI_EXT_SRST = 0x53525354, + SBI_EXT_SUSP = 0x53555350, SBI_EXT_PMU = 0x504D55, + 
SBI_EXT_DBCN = 0x4442434E, + SBI_EXT_STA = 0x535441, + SBI_EXT_NACL = 0x4E41434C, + SBI_EXT_FWFT = 0x46574654, /* Experimentals extensions must lie within this range */ SBI_EXT_EXPERIMENTAL_START = 0x08000000, @@ -113,6 +119,14 @@ enum sbi_srst_reset_reason { SBI_SRST_RESET_REASON_SYS_FAILURE, }; +enum sbi_ext_susp_fid { + SBI_EXT_SUSP_SYSTEM_SUSPEND = 0, +}; + +enum sbi_ext_susp_sleep_type { + SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0, +}; + enum sbi_ext_pmu_fid { SBI_EXT_PMU_NUM_COUNTERS = 0, SBI_EXT_PMU_COUNTER_GET_INFO, @@ -120,10 +134,35 @@ enum sbi_ext_pmu_fid { SBI_EXT_PMU_COUNTER_START, SBI_EXT_PMU_COUNTER_STOP, SBI_EXT_PMU_COUNTER_FW_READ, + SBI_EXT_PMU_COUNTER_FW_READ_HI, + SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, }; -#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(55, 0) +union sbi_pmu_ctr_info { + unsigned long value; + struct { + unsigned long csr:12; + unsigned long width:6; +#if __riscv_xlen == 32 + unsigned long reserved:13; +#else + unsigned long reserved:45; +#endif + unsigned long type:1; + }; +}; + +/* Data structure to contain the pmu snapshot data */ +struct riscv_pmu_snapshot_data { + u64 ctr_overflow_mask; + u64 ctr_values[64]; + u64 reserved[447]; +}; + +#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0) +#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0) #define RISCV_PMU_RAW_EVENT_IDX 0x20000 +#define RISCV_PLAT_FW_EVENT 0xFFFF /** General pmu event codes specified in SBI PMU extension */ enum sbi_pmu_hw_generic_events_t { @@ -155,9 +194,9 @@ enum sbi_pmu_fw_generic_events_t { SBI_PMU_FW_ILLEGAL_INSN = 4, SBI_PMU_FW_SET_TIMER = 5, SBI_PMU_FW_IPI_SENT = 6, - SBI_PMU_FW_IPI_RECVD = 7, + SBI_PMU_FW_IPI_RCVD = 7, SBI_PMU_FW_FENCE_I_SENT = 8, - SBI_PMU_FW_FENCE_I_RECVD = 9, + SBI_PMU_FW_FENCE_I_RCVD = 9, SBI_PMU_FW_SFENCE_VMA_SENT = 10, SBI_PMU_FW_SFENCE_VMA_RCVD = 11, SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12, @@ -189,22 +228,209 @@ enum sbi_pmu_ctr_type { SBI_PMU_CTR_TYPE_FW, }; +/* Helper macros to decode event idx */ +#define SBI_PMU_EVENT_IDX_OFFSET 20 +#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF +#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF +#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000 +#define SBI_PMU_EVENT_RAW_IDX 0x20000 +#define SBI_PMU_FIXED_CTR_MASK 0x07 + +#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8 +#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06 +#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01 + +#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3 +#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1 + +#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF + /* Flags defined for config matching function */ -#define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0) -#define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1) -#define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2) -#define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3) -#define SBI_PMU_CFG_FLAG_SET_VSNH (1 << 4) -#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5) -#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6) -#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7) +#define SBI_PMU_CFG_FLAG_SKIP_MATCH BIT(0) +#define SBI_PMU_CFG_FLAG_CLEAR_VALUE BIT(1) +#define SBI_PMU_CFG_FLAG_AUTO_START BIT(2) +#define SBI_PMU_CFG_FLAG_SET_VUINH BIT(3) +#define SBI_PMU_CFG_FLAG_SET_VSINH BIT(4) +#define SBI_PMU_CFG_FLAG_SET_UINH BIT(5) +#define SBI_PMU_CFG_FLAG_SET_SINH BIT(6) +#define SBI_PMU_CFG_FLAG_SET_MINH BIT(7) /* Flags defined for counter start function */ -#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0) +#define SBI_PMU_START_FLAG_SET_INIT_VALUE BIT(0) +#define SBI_PMU_START_FLAG_INIT_SNAPSHOT BIT(1) /* Flags defined for counter stop function */ -#define SBI_PMU_STOP_FLAG_RESET (1 << 0) +#define 
SBI_PMU_STOP_FLAG_RESET BIT(0) +#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT BIT(1) + +enum sbi_ext_dbcn_fid { + SBI_EXT_DBCN_CONSOLE_WRITE = 0, + SBI_EXT_DBCN_CONSOLE_READ = 1, + SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2, +}; + +/* SBI STA (steal-time accounting) extension */ +enum sbi_ext_sta_fid { + SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0, +}; + +struct sbi_sta_struct { + __le32 sequence; + __le32 flags; + __le64 steal; + u8 preempted; + u8 pad[47]; +} __packed; + +#define SBI_SHMEM_DISABLE -1 + +enum sbi_ext_nacl_fid { + SBI_EXT_NACL_PROBE_FEATURE = 0x0, + SBI_EXT_NACL_SET_SHMEM = 0x1, + SBI_EXT_NACL_SYNC_CSR = 0x2, + SBI_EXT_NACL_SYNC_HFENCE = 0x3, + SBI_EXT_NACL_SYNC_SRET = 0x4, +}; + +enum sbi_ext_nacl_feature { + SBI_NACL_FEAT_SYNC_CSR = 0x0, + SBI_NACL_FEAT_SYNC_HFENCE = 0x1, + SBI_NACL_FEAT_SYNC_SRET = 0x2, + SBI_NACL_FEAT_AUTOSWAP_CSR = 0x3, +}; +#define SBI_NACL_SHMEM_ADDR_SHIFT 12 +#define SBI_NACL_SHMEM_SCRATCH_OFFSET 0x0000 +#define SBI_NACL_SHMEM_SCRATCH_SIZE 0x1000 +#define SBI_NACL_SHMEM_SRET_OFFSET 0x0000 +#define SBI_NACL_SHMEM_SRET_SIZE 0x0200 +#define SBI_NACL_SHMEM_AUTOSWAP_OFFSET (SBI_NACL_SHMEM_SRET_OFFSET + \ + SBI_NACL_SHMEM_SRET_SIZE) +#define SBI_NACL_SHMEM_AUTOSWAP_SIZE 0x0080 +#define SBI_NACL_SHMEM_UNUSED_OFFSET (SBI_NACL_SHMEM_AUTOSWAP_OFFSET + \ + SBI_NACL_SHMEM_AUTOSWAP_SIZE) +#define SBI_NACL_SHMEM_UNUSED_SIZE 0x0580 +#define SBI_NACL_SHMEM_HFENCE_OFFSET (SBI_NACL_SHMEM_UNUSED_OFFSET + \ + SBI_NACL_SHMEM_UNUSED_SIZE) +#define SBI_NACL_SHMEM_HFENCE_SIZE 0x0780 +#define SBI_NACL_SHMEM_DBITMAP_OFFSET (SBI_NACL_SHMEM_HFENCE_OFFSET + \ + SBI_NACL_SHMEM_HFENCE_SIZE) +#define SBI_NACL_SHMEM_DBITMAP_SIZE 0x0080 +#define SBI_NACL_SHMEM_CSR_OFFSET (SBI_NACL_SHMEM_DBITMAP_OFFSET + \ + SBI_NACL_SHMEM_DBITMAP_SIZE) +#define SBI_NACL_SHMEM_CSR_SIZE ((__riscv_xlen / 8) * 1024) +#define SBI_NACL_SHMEM_SIZE (SBI_NACL_SHMEM_CSR_OFFSET + \ + SBI_NACL_SHMEM_CSR_SIZE) + +#define SBI_NACL_SHMEM_CSR_INDEX(__csr_num) \ + ((((__csr_num) & 0xc00) >> 2) | ((__csr_num) & 0xff)) + +#define SBI_NACL_SHMEM_HFENCE_ENTRY_SZ ((__riscv_xlen / 8) * 4) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_MAX \ + (SBI_NACL_SHMEM_HFENCE_SIZE / \ + SBI_NACL_SHMEM_HFENCE_ENTRY_SZ) +#define SBI_NACL_SHMEM_HFENCE_ENTRY(__num) \ + (SBI_NACL_SHMEM_HFENCE_OFFSET + \ + (__num) * SBI_NACL_SHMEM_HFENCE_ENTRY_SZ) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(__num) \ + SBI_NACL_SHMEM_HFENCE_ENTRY(__num) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(__num)\ + (SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + (__riscv_xlen / 8)) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(__num)\ + (SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + \ + ((__riscv_xlen / 8) * 3)) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS 1 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT \ + (__riscv_xlen - SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) - 1) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK << \ + SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS 3 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS 4 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) - 1) + +#define 
SBI_NACL_SHMEM_HFENCE_TYPE_GVMA 0x0 +#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL 0x1 +#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID 0x2 +#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL 0x3 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA 0x4 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL 0x5 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID 0x6 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL 0x7 + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS 1 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS 7 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) - 1) +#define SBI_NACL_SHMEM_HFENCE_ORDER_BASE 12 + +#if __riscv_xlen == 32 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 9 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 7 +#else +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 16 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 14 +#endif +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT \ + SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS) - 1) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS) - 1) + +#define SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS BIT(0) +#define SBI_NACL_SHMEM_AUTOSWAP_HSTATUS ((__riscv_xlen / 8) * 1) + +#define SBI_NACL_SHMEM_SRET_X(__i) ((__riscv_xlen / 8) * (__i)) +#define SBI_NACL_SHMEM_SRET_X_LAST 31 + +/* SBI function IDs for FW feature extension */ +#define SBI_EXT_FWFT_SET 0x0 +#define SBI_EXT_FWFT_GET 0x1 + +enum sbi_fwft_feature_t { + SBI_FWFT_MISALIGNED_EXC_DELEG = 0x0, + SBI_FWFT_LANDING_PAD = 0x1, + SBI_FWFT_SHADOW_STACK = 0x2, + SBI_FWFT_DOUBLE_TRAP = 0x3, + SBI_FWFT_PTE_AD_HW_UPDATING = 0x4, + SBI_FWFT_POINTER_MASKING_PMLEN = 0x5, + SBI_FWFT_LOCAL_RESERVED_START = 0x6, + SBI_FWFT_LOCAL_RESERVED_END = 0x3fffffff, + SBI_FWFT_LOCAL_PLATFORM_START = 0x40000000, + SBI_FWFT_LOCAL_PLATFORM_END = 0x7fffffff, + + SBI_FWFT_GLOBAL_RESERVED_START = 0x80000000, + SBI_FWFT_GLOBAL_RESERVED_END = 0xbfffffff, + SBI_FWFT_GLOBAL_PLATFORM_START = 0xc0000000, + SBI_FWFT_GLOBAL_PLATFORM_END = 0xffffffff, +}; + +#define SBI_FWFT_PLATFORM_FEATURE_BIT BIT(30) +#define SBI_FWFT_GLOBAL_FEATURE_BIT BIT(31) + +#define SBI_FWFT_SET_FLAG_LOCK BIT(0) + +/* SBI spec version fields */ #define SBI_SPEC_VERSION_DEFAULT 0x1 #define SBI_SPEC_VERSION_MAJOR_SHIFT 24 #define SBI_SPEC_VERSION_MAJOR_MASK 0x7f @@ -220,6 +446,12 @@ enum sbi_pmu_ctr_type { #define SBI_ERR_ALREADY_AVAILABLE -6 #define SBI_ERR_ALREADY_STARTED -7 #define SBI_ERR_ALREADY_STOPPED -8 +#define SBI_ERR_NO_SHMEM -9 +#define SBI_ERR_INVALID_STATE -10 +#define SBI_ERR_BAD_RANGE -11 +#define SBI_ERR_TIMEOUT -12 +#define SBI_ERR_IO -13 +#define SBI_ERR_DENIED_LOCKED -14 extern unsigned long sbi_spec_version; struct sbiret { @@ -228,24 +460,28 @@ struct sbiret { }; void sbi_init(void); -struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, - unsigned long arg1, unsigned long arg2, - unsigned long arg3, unsigned long arg4, - unsigned long arg5); +long __sbi_base_ecall(int fid); +struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + int fid, int ext); +#define sbi_ecall(e, f, 
a0, a1, a2, a3, a4, a5) \ + __sbi_ecall(a0, a1, a2, a3, a4, a5, f, e) +#ifdef CONFIG_RISCV_SBI_V01 void sbi_console_putchar(int ch); int sbi_console_getchar(void); +#else +static inline void sbi_console_putchar(int ch) { } +static inline int sbi_console_getchar(void) { return -ENOENT; } +#endif long sbi_get_mvendorid(void); long sbi_get_marchid(void); long sbi_get_mimpid(void); void sbi_set_timer(uint64_t stime_value); void sbi_shutdown(void); -void sbi_clear_ipi(void); -int sbi_send_ipi(const struct cpumask *cpu_mask); +void sbi_send_ipi(unsigned int cpu); int sbi_remote_fence_i(const struct cpumask *cpu_mask); -int sbi_remote_sfence_vma(const struct cpumask *cpu_mask, - unsigned long start, - unsigned long size); int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, unsigned long start, @@ -265,7 +501,24 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long asid); -int sbi_probe_extension(int ext); +long sbi_probe_extension(int ext); + +int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags); +int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature, + unsigned long value, unsigned long flags); +/** + * sbi_fwft_set_online_cpus() - Set a feature on all online cpus + * @feature: The feature to be set + * @value: The feature value to be set + * @flags: FWFT feature set flags + * + * Return: 0 on success, appropriate linux error code otherwise. + */ +static inline int sbi_fwft_set_online_cpus(u32 feature, unsigned long value, + unsigned long flags) +{ + return sbi_fwft_set_cpumask(cpu_online_mask, feature, value, flags); +} /* Check if current SBI specification version is 0.1 or not */ static inline int sbi_spec_is_0_1(void) @@ -290,13 +543,61 @@ static inline unsigned long sbi_minor_version(void) static inline unsigned long sbi_mk_version(unsigned long major, unsigned long minor) { - return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << - SBI_SPEC_VERSION_MAJOR_SHIFT) | minor; + return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << SBI_SPEC_VERSION_MAJOR_SHIFT) + | (minor & SBI_SPEC_VERSION_MINOR_MASK); +} + +static inline int sbi_err_map_linux_errno(int err) +{ + switch (err) { + case SBI_SUCCESS: + return 0; + case SBI_ERR_DENIED: + case SBI_ERR_DENIED_LOCKED: + return -EPERM; + case SBI_ERR_INVALID_PARAM: + case SBI_ERR_INVALID_STATE: + return -EINVAL; + case SBI_ERR_BAD_RANGE: + return -ERANGE; + case SBI_ERR_INVALID_ADDRESS: + return -EFAULT; + case SBI_ERR_NO_SHMEM: + return -ENOMEM; + case SBI_ERR_TIMEOUT: + return -ETIMEDOUT; + case SBI_ERR_IO: + return -EIO; + case SBI_ERR_NOT_SUPPORTED: + case SBI_ERR_FAILURE: + default: + return -ENOTSUPP; + }; } -int sbi_err_map_linux_errno(int err); +extern bool sbi_debug_console_available; +int sbi_debug_console_write(const char *bytes, unsigned int num_bytes); +int sbi_debug_console_read(char *bytes, unsigned int num_bytes); + #else /* CONFIG_RISCV_SBI */ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; } static inline void sbi_init(void) {} #endif /* CONFIG_RISCV_SBI */ + +unsigned long riscv_get_mvendorid(void); +unsigned long riscv_get_marchid(void); +unsigned long riscv_cached_mvendorid(unsigned int cpu_id); +unsigned long riscv_cached_marchid(unsigned int cpu_id); +unsigned long riscv_cached_mimpid(unsigned int cpu_id); + +#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI) +DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence); +#define riscv_use_sbi_for_rfence() \ + static_branch_unlikely(&riscv_sbi_for_rfence) +void 
sbi_ipi_init(void); +#else +static inline bool riscv_use_sbi_for_rfence(void) { return false; } +static inline void sbi_ipi_init(void) { } +#endif + #endif /* _ASM_RISCV_SBI_H */ diff --git a/arch/riscv/include/asm/scs.h b/arch/riscv/include/asm/scs.h new file mode 100644 index 000000000000..0e45db78b24b --- /dev/null +++ b/arch/riscv/include/asm/scs.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SCS_H +#define _ASM_SCS_H + +#ifdef __ASSEMBLY__ +#include <asm/asm-offsets.h> + +#ifdef CONFIG_SHADOW_CALL_STACK + +/* Load init_shadow_call_stack to gp. */ +.macro scs_load_init_stack + la gp, init_shadow_call_stack + XIP_FIXUP_OFFSET gp +.endm + +/* Load the per-CPU IRQ shadow call stack to gp. */ +.macro scs_load_irq_stack tmp + load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp +.endm + +/* Load task_scs_sp(current) to gp. */ +.macro scs_load_current + REG_L gp, TASK_TI_SCS_SP(tp) +.endm + +/* Load task_scs_sp(current) to gp, but only if tp has changed. */ +.macro scs_load_current_if_task_changed prev + beq \prev, tp, _skip_scs + scs_load_current +_skip_scs: +.endm + +/* Save gp to task_scs_sp(current). */ +.macro scs_save_current + REG_S gp, TASK_TI_SCS_SP(tp) +.endm + +#else /* CONFIG_SHADOW_CALL_STACK */ + +.macro scs_load_init_stack +.endm +.macro scs_load_irq_stack tmp +.endm +.macro scs_load_current +.endm +.macro scs_load_current_if_task_changed prev +.endm +.macro scs_save_current +.endm + +#endif /* CONFIG_SHADOW_CALL_STACK */ +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SCS_H */ diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h index 32336e8a17cb..a393d5035c54 100644 --- a/arch/riscv/include/asm/sections.h +++ b/arch/riscv/include/asm/sections.h @@ -13,6 +13,7 @@ extern char _start_kernel[]; extern char __init_data_begin[], __init_data_end[]; extern char __init_text_begin[], __init_text_end[]; extern char __alt_start[], __alt_end[]; +extern char __exittext_begin[], __exittext_end[]; static inline bool is_va_kernel_text(uintptr_t va) { diff --git a/arch/riscv/include/asm/semihost.h b/arch/riscv/include/asm/semihost.h new file mode 100644 index 000000000000..557a34938193 --- /dev/null +++ b/arch/riscv/include/asm/semihost.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 tinylab.org + * Author: Bin Meng <bmeng@tinylab.org> + */ + +#ifndef _RISCV_SEMIHOST_H_ +#define _RISCV_SEMIHOST_H_ + +struct uart_port; + +static inline void smh_putc(struct uart_port *port, unsigned char c) +{ + asm volatile("addi a1, %0, 0\n" + "addi a0, zero, 3\n" + ".balign 16\n" + ".option push\n" + ".option norvc\n" + "slli zero, zero, 0x1f\n" + "ebreak\n" + "srai zero, zero, 0x7\n" + ".option pop\n" + : : "r" (&c) : "a0", "a1", "memory"); +} + +#endif /* _RISCV_SEMIHOST_H_ */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index a2c14d4b3993..ea263d3683ef 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -42,11 +42,12 @@ static inline int set_kernel_memory(char *startp, char *endp, int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); +int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid); bool kernel_page_present(struct page *page); #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_STRICT_KERNEL_RWX +#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL) #ifdef CONFIG_64BIT #define SECTION_ALIGN (1 << 21) #else @@ -56,4 +57,7 @@ bool 
kernel_page_present(struct page *page); #define SECTION_ALIGN L1_CACHE_BYTES #endif /* CONFIG_STRICT_KERNEL_RWX */ +#define PECOFF_SECTION_ALIGNMENT 0x1000 +#define PECOFF_FILE_ALIGNMENT 0x200 + #endif /* _ASM_RISCV_SET_MEMORY_H */ diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h new file mode 100644 index 000000000000..adb50f3ec205 --- /dev/null +++ b/arch/riscv/include/asm/simd.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> + * Copyright (C) 2023 SiFive + */ + +#ifndef __ASM_SIMD_H +#define __ASM_SIMD_H + +#include <linux/compiler.h> +#include <linux/irqflags.h> +#include <linux/percpu.h> +#include <linux/preempt.h> +#include <linux/types.h> +#include <linux/thread_info.h> + +#include <asm/vector.h> + +#ifdef CONFIG_RISCV_ISA_V +/* + * may_use_simd - whether it is allowable at this time to issue vector + * instructions or access the vector register file + * + * Callers must not assume that the result remains true beyond the next + * preempt_enable() or return from softirq context. + */ +static __must_check inline bool may_use_simd(void) +{ + /* + * RISCV_KERNEL_MODE_V is only set while preemption is disabled, + * and is clear whenever preemption is enabled. + */ + if (in_hardirq() || in_nmi()) + return false; + + /* + * Nesting is achieved in preempt_v by spreading the control for + * preemptible and non-preemptible kernel-mode Vector into two fields. + * Always try to match with preempt_v if kernel V-context exists. Then, + * fallback to check non preempt_v if nesting happens, or if the config + * is not set. + */ + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) && current->thread.kernel_vstate.datap) { + if (!riscv_preempt_v_started(current)) + return true; + } + /* + * Non-preemptible kernel-mode Vector temporarily disables bh. So we + * must not return true on irq_disabled(). Otherwise we would fail the + * lockdep check calling local_bh_enable() + */ + return !irqs_disabled() && !(riscv_v_flags() & RISCV_KERNEL_MODE_V); +} + +#else /* ! CONFIG_RISCV_ISA_V */ + +static __must_check inline bool may_use_simd(void) +{ + return false; +} + +#endif /* ! CONFIG_RISCV_ISA_V */ + +#endif diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index 23170c933d73..7ac80e9f2288 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -15,12 +15,10 @@ struct seq_file; extern unsigned long boot_cpu_hartid; -struct riscv_ipi_ops { - void (*ipi_inject)(const struct cpumask *target); - void (*ipi_clear)(void); -}; - #ifdef CONFIG_SMP + +#include <linux/jump_label.h> + /* * Mapping between linux logical cpu index and hartid. */ @@ -33,22 +31,28 @@ void show_ipi_stats(struct seq_file *p, int prec); /* SMP initialization hook for setup_arch */ void __init setup_smp(void); -/* Called from C code, this handles an IPI. */ -void handle_IPI(struct pt_regs *regs); - /* Hook for the generic smp_call_function_many() routine. */ void arch_send_call_function_ipi_mask(struct cpumask *mask); /* Hook for the generic smp_call_function_single() routine. 
*/ void arch_send_call_function_single_ipi(int cpu); -int riscv_hartid_to_cpuid(int hartid); +int riscv_hartid_to_cpuid(unsigned long hartid); + +/* Enable IPI for CPU hotplug */ +void riscv_ipi_enable(void); + +/* Disable IPI for CPU hotplug */ +void riscv_ipi_disable(void); + +/* Check if IPI interrupt numbers are available */ +bool riscv_ipi_have_virq_range(void); -/* Set custom IPI operations */ -void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops); +/* Set the IPI interrupt numbers for arch (called by irqchip drivers) */ +void riscv_ipi_set_virq_range(int virq, int nr); -/* Clear IPI for current CPU */ -void riscv_clear_ipi(void); +/* Check other CPUs stop or not */ +bool smp_crash_stop_failed(void); /* Secondary hart entry */ asmlinkage void smp_callin(void); @@ -61,7 +65,7 @@ asmlinkage void smp_callin(void); #if defined CONFIG_HOTPLUG_CPU int __cpu_disable(void); -void __cpu_die(unsigned int cpu); +static inline void __cpu_die(unsigned int cpu) { } #endif /* CONFIG_HOTPLUG_CPU */ #else @@ -70,7 +74,7 @@ static inline void show_ipi_stats(struct seq_file *p, int prec) { } -static inline int riscv_hartid_to_cpuid(int hartid) +static inline int riscv_hartid_to_cpuid(unsigned long hartid) { if (hartid == boot_cpu_hartid) return 0; @@ -82,11 +86,20 @@ static inline unsigned long cpuid_to_hartid_map(int cpu) return boot_cpu_hartid; } -static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) +static inline void riscv_ipi_enable(void) { } -static inline void riscv_clear_ipi(void) +static inline void riscv_ipi_disable(void) +{ +} + +static inline bool riscv_ipi_have_virq_range(void) +{ + return false; +} + +static inline void riscv_ipi_set_virq_range(int virq, int nr) { } diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h index 63acaecc3374..2f901a410586 100644 --- a/arch/riscv/include/asm/sparsemem.h +++ b/arch/riscv/include/asm/sparsemem.h @@ -7,7 +7,7 @@ #ifdef CONFIG_64BIT #define MAX_PHYSMEM_BITS 56 #else -#define MAX_PHYSMEM_BITS 34 +#define MAX_PHYSMEM_BITS 32 #endif /* CONFIG_64BIT */ #define SECTION_SIZE_BITS 27 #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h new file mode 100644 index 000000000000..52f11bfd0079 --- /dev/null +++ b/arch/riscv/include/asm/spinlock.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_RISCV_SPINLOCK_H +#define __ASM_RISCV_SPINLOCK_H + +#ifdef CONFIG_QUEUED_SPINLOCKS +#define _Q_PENDING_LOOPS (1 << 9) +#endif + +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS + +#define __no_arch_spinlock_redefine +#include <asm/ticket_spinlock.h> +#include <asm/qspinlock.h> +#include <asm/jump_label.h> + +/* + * TODO: Use an alternative instead of a static key when we are able to parse + * the extensions string earlier in the boot process. 
+ */ +DECLARE_STATIC_KEY_TRUE(qspinlock_key); + +#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \ +static __always_inline type arch_spin_##op(type_lock lock) \ +{ \ + if (static_branch_unlikely(&qspinlock_key)) \ + return queued_spin_##op(lock); \ + return ticket_spin_##op(lock); \ +} + +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t) + +#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS) + +#include <asm/qspinlock.h> + +#else + +#include <asm/ticket_spinlock.h> + +#endif + +#include <asm/qrwlock.h> + +#endif /* __ASM_RISCV_SPINLOCK_H */ diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h index 09093af46565..43895b90fe3f 100644 --- a/arch/riscv/include/asm/stackprotector.h +++ b/arch/riscv/include/asm/stackprotector.h @@ -3,9 +3,6 @@ #ifndef _ASM_RISCV_STACKPROTECTOR_H #define _ASM_RISCV_STACKPROTECTOR_H -#include <linux/random.h> -#include <linux/version.h> - extern unsigned long __stack_chk_guard; /* @@ -16,12 +13,7 @@ extern unsigned long __stack_chk_guard; */ static __always_inline void boot_init_stack_canary(void) { - unsigned long canary; - - /* Try to get a semi random initial value. */ - get_random_bytes(&canary, sizeof(canary)); - canary ^= LINUX_VERSION_CODE; - canary &= CANARY_MASK; + unsigned long canary = get_random_canary(); current->stack_canary = canary; if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK)) diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h index 3450c1912afd..b1495a7e06ce 100644 --- a/arch/riscv/include/asm/stacktrace.h +++ b/arch/riscv/include/asm/stacktrace.h @@ -16,4 +16,14 @@ extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *re extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task, const char *loglvl); +static inline bool on_thread_stack(void) +{ + return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); +} + + +#ifdef CONFIG_VMAP_STACK +DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); +#endif /* CONFIG_VMAP_STACK */ + #endif /* _ASM_RISCV_STACKTRACE_H */ diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h index 909049366555..5ba77f60bf0b 100644 --- a/arch/riscv/include/asm/string.h +++ b/arch/riscv/include/asm/string.h @@ -18,6 +18,18 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t); #define __HAVE_ARCH_MEMMOVE extern asmlinkage void *memmove(void *, const void *, size_t); extern asmlinkage void *__memmove(void *, const void *, size_t); + +#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) +#define __HAVE_ARCH_STRCMP +extern asmlinkage int strcmp(const char *cs, const char *ct); + +#define __HAVE_ARCH_STRLEN +extern asmlinkage __kernel_size_t strlen(const char *); + +#define __HAVE_ARCH_STRNCMP +extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count); +#endif + /* For those files which don't want to check by kasan. 
*/ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) #define memcpy(dst, src, len) __memcpy(dst, src, len) diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h index 8be391c2aecb..dc5782b5fbad 100644 --- a/arch/riscv/include/asm/suspend.h +++ b/arch/riscv/include/asm/suspend.h @@ -13,14 +13,23 @@ struct suspend_context { /* Saved and restored by low-level functions */ struct pt_regs regs; /* Saved and restored by high-level functions */ - unsigned long scratch; + unsigned long envcfg; unsigned long tvec; unsigned long ie; #ifdef CONFIG_MMU unsigned long satp; + unsigned long stimecmp; +#if __riscv_xlen < 64 + unsigned long stimecmph; +#endif #endif }; +/* + * Used by hibernation core and cleared during resume sequence + */ +extern int in_suspend; + /* Low-level CPU suspend entry function */ int __cpu_suspend_enter(struct suspend_context *context); @@ -33,4 +42,24 @@ int cpu_suspend(unsigned long arg, /* Low-level CPU resume entry function */ int __cpu_resume_enter(unsigned long hartid, unsigned long context); +/* Used to save and restore the CSRs */ +void suspend_save_csrs(struct suspend_context *context); +void suspend_restore_csrs(struct suspend_context *context); + +/* Low-level API to support hibernation */ +int swsusp_arch_suspend(void); +int swsusp_arch_resume(void); +int arch_hibernation_header_save(void *addr, unsigned int max_size); +int arch_hibernation_header_restore(void *addr); +int __hibernate_cpu_resume(void); + +/* Used to resume on the CPU we hibernated on */ +int hibernate_resume_nonboot_cpu_disable(void); + +asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp, + unsigned long cpu_resume); +asmlinkage int hibernate_core_restore_code(void); +bool riscv_sbi_hsm_is_supported(void); +bool riscv_sbi_suspend_state_is_valid(u32 state); +int riscv_sbi_hart_suspend(u32 state); #endif diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 0a3f4f95c555..0e71eb82f920 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -8,6 +8,9 @@ #include <linux/jump_label.h> #include <linux/sched/task_stack.h> +#include <linux/mm_types.h> +#include <asm/vector.h> +#include <asm/cpufeature.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/csr.h> @@ -45,38 +48,80 @@ static inline void fstate_restore(struct task_struct *task, } } -static inline void __switch_to_aux(struct task_struct *prev, +static inline void __switch_to_fpu(struct task_struct *prev, struct task_struct *next) { struct pt_regs *regs; regs = task_pt_regs(prev); - if (unlikely(regs->status & SR_SD)) - fstate_save(prev, regs); + fstate_save(prev, regs); fstate_restore(next, task_pt_regs(next)); } -extern struct static_key_false cpu_hwcap_fpu; static __always_inline bool has_fpu(void) { - return static_branch_likely(&cpu_hwcap_fpu); + return riscv_has_extension_likely(RISCV_ISA_EXT_f) || + riscv_has_extension_likely(RISCV_ISA_EXT_d); } #else static __always_inline bool has_fpu(void) { return false; } #define fstate_save(task, regs) do { } while (0) #define fstate_restore(task, regs) do { } while (0) -#define __switch_to_aux(__prev, __next) do { } while (0) +#define __switch_to_fpu(__prev, __next) do { } while (0) #endif +static inline void envcfg_update_bits(struct task_struct *task, + unsigned long mask, unsigned long val) +{ + unsigned long envcfg; + + envcfg = (task->thread.envcfg & ~mask) | val; + task->thread.envcfg = envcfg; + if (task == current) + 
csr_write(CSR_ENVCFG, envcfg); +} + +static inline void __switch_to_envcfg(struct task_struct *next) +{ + asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", + 0, RISCV_ISA_EXT_XLINUXENVCFG, 1) + :: "r" (next->thread.envcfg) : "memory"); +} + extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); +static inline bool switch_to_should_flush_icache(struct task_struct *task) +{ +#ifdef CONFIG_SMP + bool stale_mm = task->mm && task->mm->context.force_icache_flush; + bool stale_thread = task->thread.force_icache_flush; + bool thread_migrated = smp_processor_id() != task->thread.prev_cpu; + + return thread_migrated && (stale_mm || stale_thread); +#else + return false; +#endif +} + +#ifdef CONFIG_SMP +#define __set_prev_cpu(thread) ((thread).prev_cpu = smp_processor_id()) +#else +#define __set_prev_cpu(thread) +#endif + #define switch_to(prev, next, last) \ do { \ struct task_struct *__prev = (prev); \ struct task_struct *__next = (next); \ + __set_prev_cpu(__prev->thread); \ if (has_fpu()) \ - __switch_to_aux(__prev, __next); \ + __switch_to_fpu(__prev, __next); \ + if (has_vector() || has_xtheadvector()) \ + __switch_to_vector(__prev, __next); \ + if (switch_to_should_flush_icache(__next)) \ + local_flush_icache_all(); \ + __switch_to_envcfg(__next); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git a/arch/riscv/include/asm/sync_core.h b/arch/riscv/include/asm/sync_core.h new file mode 100644 index 000000000000..9153016da8f1 --- /dev/null +++ b/arch/riscv/include/asm/sync_core.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_SYNC_CORE_H +#define _ASM_RISCV_SYNC_CORE_H + +/* + * RISC-V implements return to user-space through an xRET instruction, + * which is not core serializing. + */ +static inline void sync_core_before_usermode(void) +{ + asm volatile ("fence.i" ::: "memory"); +} + +#ifdef CONFIG_SMP +/* + * Ensure the next switch_mm() on every CPU issues a core serializing + * instruction for the given @mm. 
+ */ +static inline void prepare_sync_core_cmd(struct mm_struct *mm) +{ + cpumask_setall(&mm->context.icache_stale_mask); +} +#else +static inline void prepare_sync_core_cmd(struct mm_struct *mm) +{ +} +#endif /* CONFIG_SMP */ + +#endif /* _ASM_RISCV_SYNC_CORE_H */ diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 384a63b86420..34313387f977 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -10,6 +10,7 @@ #ifndef _ASM_RISCV_SYSCALL_H #define _ASM_RISCV_SYSCALL_H +#include <asm/hwprobe.h> #include <uapi/linux/audit.h> #include <linux/sched.h> #include <linux/err.h> @@ -29,6 +30,13 @@ static inline int syscall_get_nr(struct task_struct *task, return regs->a7; } +static inline void syscall_set_nr(struct task_struct *task, + struct pt_regs *regs, + int nr) +{ + regs->a7 = nr; +} + static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { @@ -61,8 +69,23 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { args[0] = regs->orig_a0; - args++; - memcpy(args, ®s->a1, 5 * sizeof(args[0])); + args[1] = regs->a1; + args[2] = regs->a2; + args[3] = regs->a3; + args[4] = regs->a4; + args[5] = regs->a5; +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + regs->orig_a0 = args[0]; + regs->a1 = args[1]; + regs->a2 = args[2]; + regs->a3 = args[3]; + regs->a4 = args[4]; + regs->a5 = args[5]; } static inline int syscall_get_arch(struct task_struct *task) @@ -74,5 +97,28 @@ static inline int syscall_get_arch(struct task_struct *task) #endif } +typedef long (*syscall_t)(const struct pt_regs *); +static inline void syscall_handler(struct pt_regs *regs, ulong syscall) +{ + syscall_t fn; + +#ifdef CONFIG_COMPAT + if ((regs->status & SR_UXL) == SR_UXL_32) + fn = compat_sys_call_table[syscall]; + else +#endif + fn = sys_call_table[syscall]; + + regs->a0 = fn(regs); +} + +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) +{ + return false; +} + asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t); + +asmlinkage long sys_riscv_hwprobe(struct riscv_hwprobe *, size_t, size_t, + unsigned long *, unsigned int); #endif /* _ASM_RISCV_SYSCALL_H */ diff --git a/arch/riscv/include/asm/syscall_table.h b/arch/riscv/include/asm/syscall_table.h new file mode 100644 index 000000000000..0c2d61782813 --- /dev/null +++ b/arch/riscv/include/asm/syscall_table.h @@ -0,0 +1,7 @@ +#include <asm/bitsperlong.h> + +#if __BITS_PER_LONG == 64 +#include <asm/syscall_table_64.h> +#else +#include <asm/syscall_table_32.h> +#endif diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h new file mode 100644 index 000000000000..ac80216549ff --- /dev/null +++ b/arch/riscv/include/asm/syscall_wrapper.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - riscv specific wrappers to syscall definitions + * + * Based on arch/arm64/include/syscall_wrapper.h + */ + +#ifndef __ASM_SYSCALL_WRAPPER_H +#define __ASM_SYSCALL_WRAPPER_H + +#include <asm/ptrace.h> + +asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *); + +#ifdef CONFIG_64BIT + +#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \ + static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)) + +#define SC_RISCV_REGS_TO_ARGS(x, ...) 
\ + __MAP(x,__SC_ARGS \ + ,,regs->orig_a0,,regs->a1,,regs->a2 \ + ,,regs->a3,,regs->a4,,regs->a5,,regs->a6) + +#else +/* + * Use type aliasing to ensure registers a0-a6 are correctly passed to the syscall + * implementation when >word-size arguments are used. + */ +#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments"); \ + static long __se_##prefix##name(ulong, ulong, ulong, ulong, ulong, ulong, \ + ulong) \ + __attribute__((alias(__stringify(___se_##prefix##name)))); \ + __diag_pop(); \ + static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + __used; \ + static long ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)) + +#define SC_RISCV_REGS_TO_ARGS(x, ...) \ + regs->orig_a0,regs->a1,regs->a2,regs->a3,regs->a4,regs->a5,regs->a6 + +#endif /* CONFIG_64BIT */ + +#ifdef CONFIG_COMPAT + +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + __SYSCALL_SE_DEFINEx(x, compat_sys, name, __VA_ARGS__) \ + { \ + return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \ + } \ + asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \ + { \ + return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#define COMPAT_SYSCALL_DEFINE0(sname) \ + asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__riscv_compat_sys_##sname, ERRNO); \ + asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused) + +#define COND_SYSCALL_COMPAT(name) \ + asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs); \ + asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } + +#endif /* CONFIG_COMPAT */ + +#define __SYSCALL_DEFINEx(x, name, ...) 
\ + asmlinkage long __riscv_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + __SYSCALL_SE_DEFINEx(x, sys, name, __VA_ARGS__) \ + { \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__riscv_sys_##sname, ERRNO); \ + asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused) + +#define COND_SYSCALL(name) \ + asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs); \ + asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } + +#endif /* __ASM_SYSCALL_WRAPPER_H */ diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/text-patching.h index 9a7d7346001e..7228e266b9a1 100644 --- a/arch/riscv/include/asm/patch.h +++ b/arch/riscv/include/asm/text-patching.h @@ -6,7 +6,11 @@ #ifndef _ASM_RISCV_PATCH_H #define _ASM_RISCV_PATCH_H +int patch_insn_write(void *addr, const void *insn, size_t len); int patch_text_nosync(void *addr, const void *insns, size_t len); -int patch_text(void *addr, u32 insn); +int patch_text_set_nosync(void *addr, u8 c, size_t len); +int patch_text(void *addr, u32 *insns, size_t len); + +extern int riscv_patch_in_stop_machine; #endif /* _ASM_RISCV_PATCH_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 78933ac04995..f5916a70879a 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -10,19 +10,15 @@ #include <asm/page.h> #include <linux/const.h> - -#ifdef CONFIG_KASAN -#define KASAN_STACK_ORDER 1 -#else -#define KASAN_STACK_ORDER 0 -#endif +#include <linux/sizes.h> /* thread information allocation */ -#ifdef CONFIG_64BIT -#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) +#ifdef CONFIG_KASAN +#define KASAN_STACK_ORDER 1 #else -#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER) +#define KASAN_STACK_ORDER 0 #endif +#define THREAD_SIZE_ORDER (CONFIG_THREAD_SIZE_ORDER + KASAN_STACK_ORDER) #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) /* @@ -38,7 +34,8 @@ #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) #define OVERFLOW_STACK_SIZE SZ_4K -#define SHADOW_OVERFLOW_STACK_SIZE (1024) + +#define IRQ_STACK_SIZE THREAD_SIZE #ifndef __ASSEMBLY__ @@ -64,8 +61,28 @@ struct thread_info { long kernel_sp; /* Kernel stack pointer */ long user_sp; /* User stack pointer */ int cpu; + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ +#ifdef CONFIG_SHADOW_CALL_STACK + void *scs_base; + void *scs_sp; +#endif +#ifdef CONFIG_64BIT + /* + * Used in handle_exception() to save a0, a1 and a2 before knowing if we + * can access the kernel stack. 
+ */ + unsigned long a0, a1, a2; +#endif }; +#ifdef CONFIG_SHADOW_CALL_STACK +#define INIT_SCS \ + .scs_base = init_shadow_call_stack, \ + .scs_sp = init_shadow_call_stack, +#else +#define INIT_SCS +#endif + /* * macros/functions for gaining access to the thread information structure * @@ -75,8 +92,12 @@ struct thread_info { { \ .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ + INIT_SCS \ } +void arch_release_task_struct(struct task_struct *tsk); +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); + #endif /* !__ASSEMBLY__ */ /* @@ -86,35 +107,23 @@ struct thread_info { * - pending work-to-be-done flags are in lowest half-word * - other flags in upper half-word(s) */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ -#define TIF_SIGPENDING 2 /* signal pending */ -#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_NEED_RESCHED 0 /* rescheduling necessary */ +#define TIF_NEED_RESCHED_LAZY 1 /* Lazy rescheduling needed */ +#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ +#define TIF_SIGPENDING 3 /* signal pending */ #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ -#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ -#define TIF_SECCOMP 8 /* syscall secure computing */ #define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */ #define TIF_32BIT 11 /* compat-mode 32bit process */ +#define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returing to user */ -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) -#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) -#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -#define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_UPROBE (1 << TIF_UPROBE) - -#define _TIF_WORK_MASK \ - (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) - -#define _TIF_SYSCALL_WORK \ - (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \ - _TIF_SECCOMP) +#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE) #endif /* _ASM_RISCV_THREAD_INFO_H */ diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h index d6a7428f6248..a06697846e69 100644 --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -88,6 +88,4 @@ static inline int read_current_timer(unsigned long *timer_val) return 0; } -extern void time_init(void); - #endif /* _ASM_RISCV_TIMEX_H */ diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index 120bcf2ed8a8..50b63b5c15bd 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb); static inline void tlb_flush(struct mmu_gather *tlb) { - flush_tlb_mm(tlb->mm); +#ifdef CONFIG_MMU + if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables) + flush_tlb_mm(tlb->mm); + else + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, + tlb_get_unmap_size(tlb)); +#endif } #endif /* 
_ASM_RISCV_TLB_H */ diff --git a/arch/riscv/include/asm/tlbbatch.h b/arch/riscv/include/asm/tlbbatch.h new file mode 100644 index 000000000000..46014f70b9da --- /dev/null +++ b/arch/riscv/include/asm/tlbbatch.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +#ifndef _ASM_RISCV_TLBBATCH_H +#define _ASM_RISCV_TLBBATCH_H + +#include <linux/cpumask.h> + +struct arch_tlbflush_unmap_batch { + struct cpumask cpumask; +}; + +#endif /* _ASM_RISCV_TLBBATCH_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 801019381dea..1a20dd746a49 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -11,52 +11,64 @@ #include <asm/smp.h> #include <asm/errata_list.h> +#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1) +#define FLUSH_TLB_NO_ASID ((unsigned long)-1) + #ifdef CONFIG_MMU static inline void local_flush_tlb_all(void) { __asm__ __volatile__ ("sfence.vma" : : : "memory"); } +static inline void local_flush_tlb_all_asid(unsigned long asid) +{ + if (asid != FLUSH_TLB_NO_ASID) + ALT_SFENCE_VMA_ASID(asid); + else + local_flush_tlb_all(); +} + /* Flush one page from local TLB */ static inline void local_flush_tlb_page(unsigned long addr) { - ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); + ALT_SFENCE_VMA_ADDR(addr); +} + +static inline void local_flush_tlb_page_asid(unsigned long addr, + unsigned long asid) +{ + if (asid != FLUSH_TLB_NO_ASID) + ALT_SFENCE_VMA_ADDR_ASID(addr, asid); + else + local_flush_tlb_page(addr); } -#else /* CONFIG_MMU */ -#define local_flush_tlb_all() do { } while (0) -#define local_flush_tlb_page(addr) do { } while (0) -#endif /* CONFIG_MMU */ -#if defined(CONFIG_SMP) && defined(CONFIG_MMU) void flush_tlb_all(void); void flush_tlb_mm(struct mm_struct *mm); +void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + unsigned long end, unsigned int page_size); void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +void flush_tlb_kernel_range(unsigned long start, unsigned long end); +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); #endif -#else /* CONFIG_SMP && CONFIG_MMU */ -#define flush_tlb_all() local_flush_tlb_all() -#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr) +bool arch_tlbbatch_should_defer(struct mm_struct *mm); +void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, + struct mm_struct *mm, unsigned long start, unsigned long end); +void arch_flush_tlb_batched_pending(struct mm_struct *mm); +void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - local_flush_tlb_all(); -} - -#define flush_tlb_mm(mm) flush_tlb_all() -#endif /* !CONFIG_SMP || !CONFIG_MMU */ - -/* Flush a range of kernel pages */ -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - flush_tlb_all(); -} +extern unsigned long tlb_flush_all_threshold; +#else /* CONFIG_MMU */ +#define local_flush_tlb_all() do { } while (0) +#endif /* CONFIG_MMU */ #endif /* 
_ASM_RISCV_TLBFLUSH_H */ diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h new file mode 100644 index 000000000000..fe1a8bf6902d --- /dev/null +++ b/arch/riscv/include/asm/topology.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_TOPOLOGY_H +#define _ASM_RISCV_TOPOLOGY_H + +#include <linux/arch_topology.h> + +#ifdef CONFIG_NUMA +#include <asm/numa.h> +#endif + +/* Replace task scheduler's default frequency-invariant accounting */ +#define arch_scale_freq_tick topology_scale_freq_tick +#define arch_set_freq_scale topology_set_freq_scale +#define arch_scale_freq_capacity topology_get_freq_scale +#define arch_scale_freq_invariant topology_scale_freq_invariant +#define arch_scale_freq_ref topology_get_freq_ref + +/* Replace task scheduler's default cpu-invariant accounting */ +#define arch_scale_cpu_capacity topology_get_cpu_scale + +/* Enable topology flag updates */ +#define arch_update_cpu_topology topology_update_cpu_topology + +#include <asm-generic/topology.h> + +#endif /* _ASM_RISCV_TOPOLOGY_H */ diff --git a/arch/riscv/include/asm/trace.h b/arch/riscv/include/asm/trace.h new file mode 100644 index 000000000000..6151cee5450c --- /dev/null +++ b/arch/riscv/include/asm/trace.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM riscv + +#if !defined(_TRACE_RISCV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RISCV_H + +#include <linux/tracepoint.h> + +TRACE_EVENT_CONDITION(sbi_call, + TP_PROTO(int ext, int fid), + TP_ARGS(ext, fid), + TP_CONDITION(ext != SBI_EXT_HSM), + + TP_STRUCT__entry( + __field(int, ext) + __field(int, fid) + ), + + TP_fast_assign( + __entry->ext = ext; + __entry->fid = fid; + ), + + TP_printk("ext=0x%x fid=%d", __entry->ext, __entry->fid) +); + +TRACE_EVENT_CONDITION(sbi_return, + TP_PROTO(int ext, long error, long value), + TP_ARGS(ext, error, value), + TP_CONDITION(ext != SBI_EXT_HSM), + + TP_STRUCT__entry( + __field(long, error) + __field(long, value) + ), + + TP_fast_assign( + __entry->error = error; + __entry->value = value; + ), + + TP_printk("error=%ld value=0x%lx", __entry->error, __entry->value) +); + +#endif /* _TRACE_RISCV_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm +#define TRACE_INCLUDE_FILE trace + +#include <trace/define_trace.h> diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 855450bed9f5..d472da4450e6 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -9,8 +9,41 @@ #define _ASM_RISCV_UACCESS_H #include <asm/asm-extable.h> +#include <asm/cpufeature.h> #include <asm/pgtable.h> /* for TASK_SIZE */ +#ifdef CONFIG_RISCV_ISA_SUPM +static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr) +{ + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) { + u8 pmlen = mm->context.pmlen; + + /* Virtual addresses are sign-extended; physical addresses are zero-extended. 
*/ + if (IS_ENABLED(CONFIG_MMU)) + return (long)(addr << pmlen) >> pmlen; + else + return (addr << pmlen) >> pmlen; + } + + return addr; +} + +#define untagged_addr(addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + (__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \ +}) + +#define untagged_addr_remote(mm, addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + mmap_assert_locked(mm); \ + (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \ +}) + +#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size)) +#else +#define untagged_addr(addr) (addr) +#endif + /* * User space memory access functions */ @@ -29,6 +62,19 @@ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory") /* + * This is the smallest unsigned integer type that can fit a value + * (up to 'long long') + */ +#define __inttype(x) __typeof__( \ + __typefits(x, char, \ + __typefits(x, short, \ + __typefits(x, int, \ + __typefits(x, long, 0ULL))))) + +#define __typefits(x, type, not) \ + __builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not) + +/* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is * the address at which the program should continue. No registers are @@ -50,27 +96,58 @@ * call. */ -#define __get_user_asm(insn, x, ptr, err) \ +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT +#define __get_user_asm(insn, x, ptr, label) \ + asm_goto_output( \ + "1:\n" \ + " " insn " %0, %1\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, %l2, %0) \ + : "=&r" (x) \ + : "m" (*(ptr)) : : label) +#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ +#define __get_user_asm(insn, x, ptr, label) \ do { \ - __typeof__(x) __x; \ + long __gua_err = 0; \ __asm__ __volatile__ ( \ "1:\n" \ " " insn " %1, %2\n" \ "2:\n" \ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \ - : "+r" (err), "=&r" (__x) \ + : "+r" (__gua_err), "=&r" (x) \ : "m" (*(ptr))); \ - (x) = __x; \ + if (__gua_err) \ + goto label; \ } while (0) +#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ #ifdef CONFIG_64BIT -#define __get_user_8(x, ptr, err) \ - __get_user_asm("ld", x, ptr, err) +#define __get_user_8(x, ptr, label) \ + __get_user_asm("ld", x, ptr, label) #else /* !CONFIG_64BIT */ -#define __get_user_8(x, ptr, err) \ + +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT +#define __get_user_8(x, ptr, label) \ + u32 __user *__ptr = (u32 __user *)(ptr); \ + u32 __lo, __hi; \ + asm_goto_output( \ + "1:\n" \ + " lw %0, %2\n" \ + "2:\n" \ + " lw %1, %3\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, %l4, %0) \ + _ASM_EXTABLE_UACCESS_ERR(2b, %l4, %0) \ + : "=&r" (__lo), "=r" (__hi) \ + : "m" (__ptr[__LSW]), "m" (__ptr[__MSW]) \ + : : label); \ + (x) = (__typeof__(x))((__typeof__((x) - (x)))( \ + (((u64)__hi << 32) | __lo))); \ + +#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ +#define __get_user_8(x, ptr, label) \ do { \ u32 __user *__ptr = (u32 __user *)(ptr); \ u32 __lo, __hi; \ + long __gu8_err = 0; \ __asm__ __volatile__ ( \ "1:\n" \ " lw %1, %3\n" \ @@ -79,35 +156,62 @@ do { \ "3:\n" \ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \ - : "+r" (err), "=&r" (__lo), "=r" (__hi) \ + : "+r" (__gu8_err), "=&r" (__lo), "=r" (__hi) \ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \ - if (err) \ + if (__gu8_err) { \ __hi = 0; \ - (x) = (__typeof__(x))((__typeof__((x)-(x)))( \ + goto label; \ + } \ + (x) = (__typeof__(x))((__typeof__((x) - (x)))( \ (((u64)__hi << 32) | __lo))); \ } while (0) +#endif /* 
CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ + #endif /* CONFIG_64BIT */ -#define __get_user_nocheck(x, __gu_ptr, __gu_err) \ +unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to, + const void *from, unsigned long n); +unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to, + const void __user *from, unsigned long n); + +#define __get_user_nocheck(x, __gu_ptr, label) \ do { \ + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \ + !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \ + if (__asm_copy_from_user_sum_enabled(&(x), __gu_ptr, sizeof(*__gu_ptr))) \ + goto label; \ + break; \ + } \ switch (sizeof(*__gu_ptr)) { \ case 1: \ - __get_user_asm("lb", (x), __gu_ptr, __gu_err); \ + __get_user_asm("lb", (x), __gu_ptr, label); \ break; \ case 2: \ - __get_user_asm("lh", (x), __gu_ptr, __gu_err); \ + __get_user_asm("lh", (x), __gu_ptr, label); \ break; \ case 4: \ - __get_user_asm("lw", (x), __gu_ptr, __gu_err); \ + __get_user_asm("lw", (x), __gu_ptr, label); \ break; \ case 8: \ - __get_user_8((x), __gu_ptr, __gu_err); \ + __get_user_8((x), __gu_ptr, label); \ break; \ default: \ BUILD_BUG(); \ } \ } while (0) +#define __get_user_error(x, ptr, err) \ +do { \ + __label__ __gu_failed; \ + \ + __get_user_nocheck(x, ptr, __gu_failed); \ + err = 0; \ + break; \ +__gu_failed: \ + x = 0; \ + err = -EFAULT; \ +} while (0) + /** * __get_user: - Get a simple variable from user space, with less checking. * @x: Variable to store result. @@ -130,15 +234,18 @@ do { \ */ #define __get_user(x, ptr) \ ({ \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ long __gu_err = 0; \ + __typeof__(x) __gu_val; \ \ __chk_user_ptr(__gu_ptr); \ \ __enable_user_access(); \ - __get_user_nocheck(x, __gu_ptr, __gu_err); \ + __get_user_error(__gu_val, __gu_ptr, __gu_err); \ __disable_user_access(); \ \ + (x) = __gu_val; \ + \ __gu_err; \ }) @@ -165,64 +272,76 @@ do { \ might_fault(); \ access_ok(__p, sizeof(*__p)) ? 
\ __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + ((x) = (__force __typeof__(x))0, -EFAULT); \ }) -#define __put_user_asm(insn, x, ptr, err) \ +#define __put_user_asm(insn, x, ptr, label) \ do { \ __typeof__(*(ptr)) __x = x; \ - __asm__ __volatile__ ( \ + asm goto( \ "1:\n" \ - " " insn " %z2, %1\n" \ - "2:\n" \ - _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \ - : "+r" (err), "=m" (*(ptr)) \ - : "rJ" (__x)); \ + " " insn " %z0, %1\n" \ + _ASM_EXTABLE(1b, %l2) \ + : : "rJ" (__x), "m"(*(ptr)) : : label); \ } while (0) #ifdef CONFIG_64BIT -#define __put_user_8(x, ptr, err) \ - __put_user_asm("sd", x, ptr, err) +#define __put_user_8(x, ptr, label) \ + __put_user_asm("sd", x, ptr, label) #else /* !CONFIG_64BIT */ -#define __put_user_8(x, ptr, err) \ +#define __put_user_8(x, ptr, label) \ do { \ u32 __user *__ptr = (u32 __user *)(ptr); \ u64 __x = (__typeof__((x)-(x)))(x); \ - __asm__ __volatile__ ( \ + asm goto( \ "1:\n" \ - " sw %z3, %1\n" \ + " sw %z0, %2\n" \ "2:\n" \ - " sw %z4, %2\n" \ - "3:\n" \ - _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \ - _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \ - : "+r" (err), \ - "=m" (__ptr[__LSW]), \ - "=m" (__ptr[__MSW]) \ - : "rJ" (__x), "rJ" (__x >> 32)); \ + " sw %z1, %3\n" \ + _ASM_EXTABLE(1b, %l4) \ + _ASM_EXTABLE(2b, %l4) \ + : : "rJ" (__x), "rJ" (__x >> 32), \ + "m" (__ptr[__LSW]), \ + "m" (__ptr[__MSW]) : : label); \ } while (0) #endif /* CONFIG_64BIT */ -#define __put_user_nocheck(x, __gu_ptr, __pu_err) \ +#define __put_user_nocheck(x, __gu_ptr, label) \ do { \ + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \ + !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \ + __inttype(x) val = (__inttype(x))x; \ + if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \ + goto label; \ + break; \ + } \ switch (sizeof(*__gu_ptr)) { \ case 1: \ - __put_user_asm("sb", (x), __gu_ptr, __pu_err); \ + __put_user_asm("sb", (x), __gu_ptr, label); \ break; \ case 2: \ - __put_user_asm("sh", (x), __gu_ptr, __pu_err); \ + __put_user_asm("sh", (x), __gu_ptr, label); \ break; \ case 4: \ - __put_user_asm("sw", (x), __gu_ptr, __pu_err); \ + __put_user_asm("sw", (x), __gu_ptr, label); \ break; \ case 8: \ - __put_user_8((x), __gu_ptr, __pu_err); \ + __put_user_8((x), __gu_ptr, label); \ break; \ default: \ BUILD_BUG(); \ } \ } while (0) +#define __put_user_error(x, ptr, err) \ +do { \ + __label__ err_label; \ + __put_user_nocheck(x, ptr, err_label); \ + break; \ +err_label: \ + (err) = -EFAULT; \ +} while (0) + /** * __put_user: - Write a simple value into user space, with less checking. * @x: Value to copy to user space. 
@@ -246,14 +365,14 @@ do { \ */ #define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ __typeof__(*__gu_ptr) __val = (x); \ long __pu_err = 0; \ \ __chk_user_ptr(__gu_ptr); \ \ __enable_user_access(); \ - __put_user_nocheck(__val, __gu_ptr, __pu_err); \ + __put_user_error(__val, __gu_ptr, __pu_err); \ __disable_user_access(); \ \ __pu_err; \ @@ -293,13 +412,13 @@ unsigned long __must_check __asm_copy_from_user(void *to, static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __asm_copy_from_user(to, from, n); + return __asm_copy_from_user(to, untagged_addr(from), n); } static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - return __asm_copy_to_user(to, from, n); + return __asm_copy_to_user(untagged_addr(to), from, n); } extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -314,27 +433,49 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); return access_ok(to, n) ? - __clear_user(to, n) : n; + __clear_user(untagged_addr(to), n) : n; } #define __get_kernel_nofault(dst, src, type, err_label) \ -do { \ - long __kr_err; \ - \ - __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \ - if (unlikely(__kr_err)) \ - goto err_label; \ -} while (0) + __get_user_nocheck(*((type *)(dst)), (type *)(src), err_label) #define __put_kernel_nofault(dst, src, type, err_label) \ -do { \ - long __kr_err; \ - \ - __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \ - if (unlikely(__kr_err)) \ - goto err_label; \ + __put_user_nocheck(*((type *)(src)), (type *)(dst), err_label) + +static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len) +{ + if (unlikely(!access_ok(ptr, len))) + return 0; + __enable_user_access(); + return 1; +} +#define user_access_begin user_access_begin +#define user_access_end __disable_user_access + +static inline unsigned long user_access_save(void) { return 0UL; } +static inline void user_access_restore(unsigned long enabled) { } + +/* + * We want the unsafe accessors to always be inlined and use + * the error labels - thus the macro games. + */ +#define unsafe_put_user(x, ptr, label) \ + __put_user_nocheck(x, (ptr), label) + +#define unsafe_get_user(x, ptr, label) do { \ + __inttype(*(ptr)) __gu_val; \ + __get_user_nocheck(__gu_val, (ptr), label); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) +#define unsafe_copy_to_user(_dst, _src, _len, label) \ + if (__asm_copy_to_user_sum_enabled(_dst, _src, _len)) \ + goto label; + +#define unsafe_copy_from_user(_dst, _src, _len, label) \ + if (__asm_copy_from_user_sum_enabled(_dst, _src, _len)) \ + goto label; + #else /* CONFIG_MMU */ #include <asm-generic/uaccess.h> #endif /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h index 221630bdbd07..e6d904fa67c5 100644 --- a/arch/riscv/include/asm/unistd.h +++ b/arch/riscv/include/asm/unistd.h @@ -3,11 +3,6 @@ * Copyright (C) 2012 Regents of the University of California */ -/* - * There is explicitly no include guard here because this file is expected to - * be included multiple times. 
- */ - #define __ARCH_WANT_SYS_CLONE #ifdef CONFIG_COMPAT @@ -21,6 +16,14 @@ #define __ARCH_WANT_COMPAT_FADVISE64_64 #endif +#if defined(__LP64__) && !defined(__SYSCALL_COMPAT) +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SET_GET_RLIMIT +#endif /* __LP64__ */ + +#define __ARCH_WANT_MEMFD_SECRET + + #include <uapi/asm/unistd.h> #define NR_syscalls (__NR_syscalls) diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h index f2183e00fdd2..5008f76cdc27 100644 --- a/arch/riscv/include/asm/uprobes.h +++ b/arch/riscv/include/asm/uprobes.h @@ -4,7 +4,7 @@ #define _ASM_RISCV_UPROBES_H #include <asm/probes.h> -#include <asm/patch.h> +#include <asm/text-patching.h> #include <asm/bug.h> #define MAX_UINSN_BYTES 8 @@ -34,7 +34,18 @@ struct arch_uprobe { bool simulate; }; +#ifdef CONFIG_UPROBES bool uprobe_breakpoint_handler(struct pt_regs *regs); bool uprobe_single_step_handler(struct pt_regs *regs); - +#else +static inline bool uprobe_breakpoint_handler(struct pt_regs *regs) +{ + return false; +} + +static inline bool uprobe_single_step_handler(struct pt_regs *regs) +{ + return false; +} +#endif /* CONFIG_UPROBES */ #endif /* _ASM_RISCV_UPROBES_H */ diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h index af981426fe0f..c130d8100232 100644 --- a/arch/riscv/include/asm/vdso.h +++ b/arch/riscv/include/asm/vdso.h @@ -10,11 +10,11 @@ /* * All systems with an MMU have a VDSO, but systems without an MMU don't - * support shared libraries and therefor don't have one. + * support shared libraries and therefore don't have one. */ #ifdef CONFIG_MMU -#define __VVAR_PAGES 2 +#define __VDSO_PAGES 4 #ifndef __ASSEMBLY__ #include <generated/vdso-offsets.h> @@ -28,8 +28,12 @@ #define COMPAT_VDSO_SYMBOL(base, name) \ (void __user *)((unsigned long)(base) + compat__vdso_##name##_offset) +extern char compat_vdso_start[], compat_vdso_end[]; + #endif /* CONFIG_COMPAT */ +extern char vdso_start[], vdso_end[]; + #endif /* !__ASSEMBLY__ */ #endif /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/vdso/arch_data.h b/arch/riscv/include/asm/vdso/arch_data.h new file mode 100644 index 000000000000..da57a3786f7a --- /dev/null +++ b/arch/riscv/include/asm/vdso/arch_data.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __RISCV_ASM_VDSO_ARCH_DATA_H +#define __RISCV_ASM_VDSO_ARCH_DATA_H + +#include <linux/types.h> +#include <vdso/datapage.h> +#include <asm/hwprobe.h> + +struct vdso_arch_data { + /* Stash static answers to the hwprobe queries when all CPUs are selected. */ + __u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1]; + + /* Boolean indicating all CPUs have the same static hwprobe values. */ + __u8 homogeneous_cpus; +}; + +#endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */ diff --git a/arch/riscv/include/asm/vdso/getrandom.h b/arch/riscv/include/asm/vdso/getrandom.h new file mode 100644 index 000000000000..8dc92441702a --- /dev/null +++ b/arch/riscv/include/asm/vdso/getrandom.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved. 
+ */ +#ifndef __ASM_VDSO_GETRANDOM_H +#define __ASM_VDSO_GETRANDOM_H + +#ifndef __ASSEMBLY__ + +#include <asm/unistd.h> + +static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags) +{ + register long ret asm("a0"); + register long nr asm("a7") = __NR_getrandom; + register void *buffer asm("a0") = _buffer; + register size_t len asm("a1") = _len; + register unsigned int flags asm("a2") = _flags; + + asm volatile ("ecall\n" + : "+r" (ret) + : "r" (nr), "r" (buffer), "r" (len), "r" (flags) + : "memory"); + + return ret; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h index 77d9c2f721c4..29164f84f93c 100644 --- a/arch/riscv/include/asm/vdso/gettimeofday.h +++ b/arch/riscv/include/asm/vdso/gettimeofday.h @@ -9,6 +9,12 @@ #include <asm/csr.h> #include <uapi/linux/time.h> +/* + * 32-bit land is lacking generic time vsyscalls as well as the legacy 32-bit + * time syscalls like gettimeofday. Skip these definitions since on 32-bit. + */ +#ifdef CONFIG_GENERIC_TIME_VSYSCALL + #define VDSO_HAS_CLOCK_GETRES 1 static __always_inline @@ -60,8 +66,10 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) return ret; } +#endif /* CONFIG_GENERIC_TIME_VSYSCALL */ + static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { /* * The purpose of csr_read(CSR_TIME) is to trap the system into @@ -71,18 +79,6 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, return csr_read(CSR_TIME); } -static __always_inline const struct vdso_data *__arch_get_vdso_data(void) -{ - return _vdso_data; -} - -#ifdef CONFIG_TIME_NS -static __always_inline -const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) -{ - return _timens_data; -} -#endif #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h index 134388cbaaa1..8f383f05a290 100644 --- a/arch/riscv/include/asm/vdso/processor.h +++ b/arch/riscv/include/asm/vdso/processor.h @@ -5,6 +5,7 @@ #ifndef __ASSEMBLY__ #include <asm/barrier.h> +#include <asm/insn-def.h> static inline void cpu_relax(void) { @@ -13,6 +14,12 @@ static inline void cpu_relax(void) /* In lieu of a halt instruction, induce a long-latency stall. */ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); #endif + + /* + * Reduce instruction retirement. + * This assumes the PC changes. + */ + __asm__ __volatile__ (RISCV_PAUSE); barrier(); } diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h index 82fd5d83bd60..1140b54b4bc8 100644 --- a/arch/riscv/include/asm/vdso/vsyscall.h +++ b/arch/riscv/include/asm/vdso/vsyscall.h @@ -4,21 +4,8 @@ #ifndef __ASSEMBLY__ -#include <linux/timekeeper_internal.h> #include <vdso/datapage.h> -extern struct vdso_data *vdso_data; - -/* - * Update the vDSO data page to keep in sync with kernel timekeeping. 
- */ -static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void) -{ - return vdso_data; -} - -#define __arch_get_k_vdso_data __riscv_get_k_vdso_data - /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h new file mode 100644 index 000000000000..45c9b426fcc5 --- /dev/null +++ b/arch/riscv/include/asm/vector.h @@ -0,0 +1,440 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 SiFive + */ + +#ifndef __ASM_RISCV_VECTOR_H +#define __ASM_RISCV_VECTOR_H + +#include <linux/types.h> +#include <uapi/asm-generic/errno.h> + +#ifdef CONFIG_RISCV_ISA_V + +#include <linux/stringify.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <asm/ptrace.h> +#include <asm/cpufeature.h> +#include <asm/csr.h> +#include <asm/asm.h> +#include <asm/vendorid_list.h> +#include <asm/vendor_extensions.h> +#include <asm/vendor_extensions/thead.h> + +#define __riscv_v_vstate_or(_val, TYPE) ({ \ + typeof(_val) _res = _val; \ + if (has_xtheadvector()) \ + _res = (_res & ~SR_VS_THEAD) | SR_VS_##TYPE##_THEAD; \ + else \ + _res = (_res & ~SR_VS) | SR_VS_##TYPE; \ + _res; \ +}) + +#define __riscv_v_vstate_check(_val, TYPE) ({ \ + bool _res; \ + if (has_xtheadvector()) \ + _res = ((_val) & SR_VS_THEAD) == SR_VS_##TYPE##_THEAD; \ + else \ + _res = ((_val) & SR_VS) == SR_VS_##TYPE; \ + _res; \ +}) + +extern unsigned long riscv_v_vsize; +int riscv_v_setup_vsize(void); +bool insn_is_vector(u32 insn_buf); +bool riscv_v_first_use_handler(struct pt_regs *regs); +void kernel_vector_begin(void); +void kernel_vector_end(void); +void get_cpu_vector_context(void); +void put_cpu_vector_context(void); +void riscv_v_thread_free(struct task_struct *tsk); +void __init riscv_v_setup_ctx_cache(void); +void riscv_v_thread_alloc(struct task_struct *tsk); + +static inline u32 riscv_v_flags(void) +{ + return READ_ONCE(current->thread.riscv_v_flags); +} + +static __always_inline bool has_vector(void) +{ + return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X); +} + +static __always_inline bool has_xtheadvector_no_alternatives(void) +{ + if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR)) + return riscv_isa_vendor_extension_available(THEAD_VENDOR_ID, XTHEADVECTOR); + else + return false; +} + +static __always_inline bool has_xtheadvector(void) +{ + if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR)) + return riscv_has_vendor_extension_unlikely(THEAD_VENDOR_ID, + RISCV_ISA_VENDOR_EXT_XTHEADVECTOR); + else + return false; +} + +static inline void __riscv_v_vstate_clean(struct pt_regs *regs) +{ + regs->status = __riscv_v_vstate_or(regs->status, CLEAN); +} + +static inline void __riscv_v_vstate_dirty(struct pt_regs *regs) +{ + regs->status = __riscv_v_vstate_or(regs->status, DIRTY); +} + +static inline void riscv_v_vstate_off(struct pt_regs *regs) +{ + regs->status = __riscv_v_vstate_or(regs->status, OFF); +} + +static inline void riscv_v_vstate_on(struct pt_regs *regs) +{ + regs->status = __riscv_v_vstate_or(regs->status, INITIAL); +} + +static inline bool riscv_v_vstate_query(struct pt_regs *regs) +{ + return !__riscv_v_vstate_check(regs->status, OFF); +} + +static __always_inline void riscv_v_enable(void) +{ + if (has_xtheadvector()) + csr_set(CSR_SSTATUS, SR_VS_THEAD); + else + csr_set(CSR_SSTATUS, SR_VS); +} + +static __always_inline void riscv_v_disable(void) +{ + if (has_xtheadvector()) + csr_clear(CSR_SSTATUS, SR_VS_THEAD); + else + csr_clear(CSR_SSTATUS, 
SR_VS); +} + +static __always_inline bool riscv_v_is_on(void) +{ + return !!(csr_read(CSR_SSTATUS) & SR_VS); +} + +static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest) +{ + asm volatile ( + "csrr %0, " __stringify(CSR_VSTART) "\n\t" + "csrr %1, " __stringify(CSR_VTYPE) "\n\t" + "csrr %2, " __stringify(CSR_VL) "\n\t" + : "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl), + "=r" (dest->vcsr) : :); + + if (has_xtheadvector()) { + unsigned long status; + + /* + * CSR_VCSR is defined as + * [2:1] - vxrm[1:0] + * [0] - vxsat + * The earlier vector spec implemented by T-Head uses separate + * registers for the same bit-elements, so just combine those + * into the existing output field. + * + * Additionally T-Head cores need FS to be enabled when accessing + * the VXRM and VXSAT CSRs, otherwise ending in illegal instructions. + * Though the cores do not implement the VXRM and VXSAT fields in the + * FCSR CSR that vector-0.7.1 specifies. + */ + status = csr_read_set(CSR_STATUS, SR_FS_DIRTY); + dest->vcsr = csr_read(CSR_VXSAT) | csr_read(CSR_VXRM) << CSR_VXRM_SHIFT; + + dest->vlenb = riscv_v_vsize / 32; + + if ((status & SR_FS) != SR_FS_DIRTY) + csr_write(CSR_STATUS, status); + } else { + dest->vcsr = csr_read(CSR_VCSR); + dest->vlenb = csr_read(CSR_VLENB); + } +} + +static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src) +{ + asm volatile ( + ".option push\n\t" + ".option arch, +zve32x\n\t" + "vsetvl x0, %2, %1\n\t" + ".option pop\n\t" + "csrw " __stringify(CSR_VSTART) ", %0\n\t" + : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl)); + + if (has_xtheadvector()) { + unsigned long status = csr_read(CSR_SSTATUS); + + /* + * Similar to __vstate_csr_save above, restore values for the + * separate VXRM and VXSAT CSRs from the vcsr variable. 
+ */ + status = csr_read_set(CSR_STATUS, SR_FS_DIRTY); + + csr_write(CSR_VXRM, (src->vcsr >> CSR_VXRM_SHIFT) & CSR_VXRM_MASK); + csr_write(CSR_VXSAT, src->vcsr & CSR_VXSAT_MASK); + + if ((status & SR_FS) != SR_FS_DIRTY) + csr_write(CSR_STATUS, status); + } else { + csr_write(CSR_VCSR, src->vcsr); + } +} + +static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to, + void *datap) +{ + unsigned long vl; + + riscv_v_enable(); + __vstate_csr_save(save_to); + if (has_xtheadvector()) { + asm volatile ( + "mv t0, %0\n\t" + THEAD_VSETVLI_T4X0E8M8D1 + THEAD_VSB_V_V0T0 + "add t0, t0, t4\n\t" + THEAD_VSB_V_V0T0 + "add t0, t0, t4\n\t" + THEAD_VSB_V_V0T0 + "add t0, t0, t4\n\t" + THEAD_VSB_V_V0T0 + : : "r" (datap) : "memory", "t0", "t4"); + } else { + asm volatile ( + ".option push\n\t" + ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vse8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" + "vse8.v v8, (%1)\n\t" + "add %1, %1, %0\n\t" + "vse8.v v16, (%1)\n\t" + "add %1, %1, %0\n\t" + "vse8.v v24, (%1)\n\t" + ".option pop\n\t" + : "=&r" (vl) : "r" (datap) : "memory"); + } + riscv_v_disable(); +} + +static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_from, + void *datap) +{ + unsigned long vl; + + riscv_v_enable(); + if (has_xtheadvector()) { + asm volatile ( + "mv t0, %0\n\t" + THEAD_VSETVLI_T4X0E8M8D1 + THEAD_VLB_V_V0T0 + "add t0, t0, t4\n\t" + THEAD_VLB_V_V0T0 + "add t0, t0, t4\n\t" + THEAD_VLB_V_V0T0 + "add t0, t0, t4\n\t" + THEAD_VLB_V_V0T0 + : : "r" (datap) : "memory", "t0", "t4"); + } else { + asm volatile ( + ".option push\n\t" + ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vle8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" + "vle8.v v8, (%1)\n\t" + "add %1, %1, %0\n\t" + "vle8.v v16, (%1)\n\t" + "add %1, %1, %0\n\t" + "vle8.v v24, (%1)\n\t" + ".option pop\n\t" + : "=&r" (vl) : "r" (datap) : "memory"); + } + __vstate_csr_restore(restore_from); + riscv_v_disable(); +} + +static inline void __riscv_v_vstate_discard(void) +{ + unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1); + + riscv_v_enable(); + if (has_xtheadvector()) + asm volatile (THEAD_VSETVLI_T4X0E8M8D1 : : : "t4"); + else + asm volatile ( + ".option push\n\t" + ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + ".option pop\n\t": "=&r" (vl)); + + asm volatile ( + ".option push\n\t" + ".option arch, +zve32x\n\t" + "vmv.v.i v0, -1\n\t" + "vmv.v.i v8, -1\n\t" + "vmv.v.i v16, -1\n\t" + "vmv.v.i v24, -1\n\t" + "vsetvl %0, x0, %1\n\t" + ".option pop\n\t" + : "=&r" (vl) : "r" (vtype_inval)); + + riscv_v_disable(); +} + +static inline void riscv_v_vstate_discard(struct pt_regs *regs) +{ + if (riscv_v_vstate_query(regs)) { + __riscv_v_vstate_discard(); + __riscv_v_vstate_dirty(regs); + } +} + +static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate, + struct pt_regs *regs) +{ + if (__riscv_v_vstate_check(regs->status, DIRTY)) { + __riscv_v_vstate_save(vstate, vstate->datap); + __riscv_v_vstate_clean(regs); + } +} + +static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate, + struct pt_regs *regs) +{ + if (riscv_v_vstate_query(regs)) { + __riscv_v_vstate_restore(vstate, vstate->datap); + __riscv_v_vstate_clean(regs); + } +} + +static inline void riscv_v_vstate_set_restore(struct task_struct *task, + struct pt_regs *regs) +{ + if (riscv_v_vstate_query(regs)) { + set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE); + riscv_v_vstate_on(regs); + } +} + +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +static inline bool 
riscv_preempt_v_dirty(struct task_struct *task) +{ + return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY); +} + +static inline bool riscv_preempt_v_restore(struct task_struct *task) +{ + return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE); +} + +static inline void riscv_preempt_v_clear_dirty(struct task_struct *task) +{ + barrier(); + task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY; +} + +static inline void riscv_preempt_v_set_restore(struct task_struct *task) +{ + barrier(); + task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE; +} + +static inline bool riscv_preempt_v_started(struct task_struct *task) +{ + return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V); +} + +#else /* !CONFIG_RISCV_ISA_V_PREEMPTIVE */ +static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; } +static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; } +static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; } +#define riscv_preempt_v_clear_dirty(tsk) do {} while (0) +#define riscv_preempt_v_set_restore(tsk) do {} while (0) +#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */ + +static inline void __switch_to_vector(struct task_struct *prev, + struct task_struct *next) +{ + struct pt_regs *regs; + + if (riscv_preempt_v_started(prev)) { + if (riscv_v_is_on()) { + WARN_ON(prev->thread.riscv_v_flags & RISCV_V_CTX_DEPTH_MASK); + riscv_v_disable(); + prev->thread.riscv_v_flags |= RISCV_PREEMPT_V_IN_SCHEDULE; + } + if (riscv_preempt_v_dirty(prev)) { + __riscv_v_vstate_save(&prev->thread.kernel_vstate, + prev->thread.kernel_vstate.datap); + riscv_preempt_v_clear_dirty(prev); + } + } else { + regs = task_pt_regs(prev); + riscv_v_vstate_save(&prev->thread.vstate, regs); + } + + if (riscv_preempt_v_started(next)) { + if (next->thread.riscv_v_flags & RISCV_PREEMPT_V_IN_SCHEDULE) { + next->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_IN_SCHEDULE; + riscv_v_enable(); + } else { + riscv_preempt_v_set_restore(next); + } + } else { + riscv_v_vstate_set_restore(next, task_pt_regs(next)); + } +} + +void riscv_v_vstate_ctrl_init(struct task_struct *tsk); +bool riscv_v_vstate_ctrl_user_allowed(void); + +#else /* ! CONFIG_RISCV_ISA_V */ + +struct pt_regs; + +static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; } +static __always_inline bool has_vector(void) { return false; } +static __always_inline bool insn_is_vector(u32 insn_buf) { return false; } +static __always_inline bool has_xtheadvector_no_alternatives(void) { return false; } +static __always_inline bool has_xtheadvector(void) { return false; } +static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; } +static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; } +static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; } +#define riscv_v_vsize (0) +#define riscv_v_vstate_discard(regs) do {} while (0) +#define riscv_v_vstate_save(vstate, regs) do {} while (0) +#define riscv_v_vstate_restore(vstate, regs) do {} while (0) +#define __switch_to_vector(__prev, __next) do {} while (0) +#define riscv_v_vstate_off(regs) do {} while (0) +#define riscv_v_vstate_on(regs) do {} while (0) +#define riscv_v_thread_free(tsk) do {} while (0) +#define riscv_v_setup_ctx_cache() do {} while (0) +#define riscv_v_thread_alloc(tsk) do {} while (0) + +#endif /* CONFIG_RISCV_ISA_V */ + +/* + * Return the implementation's vlen value. 
+ * + * riscv_v_vsize contains the value of "32 vector registers with vlenb length" + * so rebuild the vlen value in bits from it. + */ +static inline int riscv_vector_vlen(void) +{ + return riscv_v_vsize / 32 * 8; +} + +#endif /* ! __ASM_RISCV_VECTOR_H */ diff --git a/arch/riscv/include/asm/vendor_extensions.h b/arch/riscv/include/asm/vendor_extensions.h new file mode 100644 index 000000000000..7437304a71b9 --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2024 Rivos, Inc + */ + +#ifndef _ASM_VENDOR_EXTENSIONS_H +#define _ASM_VENDOR_EXTENSIONS_H + +#include <asm/cpufeature.h> + +#include <linux/array_size.h> +#include <linux/types.h> + +/* + * The extension keys of each vendor must be strictly less than this value. + */ +#define RISCV_ISA_VENDOR_EXT_MAX 32 + +struct riscv_isavendorinfo { + DECLARE_BITMAP(isa, RISCV_ISA_VENDOR_EXT_MAX); +}; + +struct riscv_isa_vendor_ext_data_list { + bool is_initialized; + const size_t ext_data_count; + const struct riscv_isa_ext_data *ext_data; + struct riscv_isavendorinfo per_hart_isa_bitmap[NR_CPUS]; + struct riscv_isavendorinfo all_harts_isa_bitmap; +}; + +extern struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[]; + +extern const size_t riscv_isa_vendor_ext_list_size; + +/* + * The alternatives need some way of distinguishing between vendor extensions + * and errata. Incrementing all of the vendor extension keys so they are at + * least 0x8000 accomplishes that. + */ +#define RISCV_VENDOR_EXT_ALTERNATIVES_BASE 0x8000 + +#define VENDOR_EXT_ALL_CPUS -1 + +bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit); +#define riscv_cpu_isa_vendor_extension_available(cpu, vendor, ext) \ + __riscv_isa_vendor_extension_available(cpu, vendor, RISCV_ISA_VENDOR_EXT_##ext) +#define riscv_isa_vendor_extension_available(vendor, ext) \ + __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, \ + RISCV_ISA_VENDOR_EXT_##ext) + +static __always_inline bool riscv_has_vendor_extension_likely(const unsigned long vendor, + const unsigned long ext) +{ + if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) + return false; + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_likely(vendor, + ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); + + return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); +} + +static __always_inline bool riscv_has_vendor_extension_unlikely(const unsigned long vendor, + const unsigned long ext) +{ + if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) + return false; + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_unlikely(vendor, + ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); + + return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); +} + +static __always_inline bool riscv_cpu_has_vendor_extension_likely(const unsigned long vendor, + int cpu, const unsigned long ext) +{ + if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) + return false; + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && + __riscv_has_extension_likely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) + return true; + + return __riscv_isa_vendor_extension_available(cpu, vendor, ext); +} + +static __always_inline bool riscv_cpu_has_vendor_extension_unlikely(const unsigned long vendor, + int cpu, + const unsigned long ext) +{ + if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) + return false; + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && + 
__riscv_has_extension_unlikely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) + return true; + + return __riscv_isa_vendor_extension_available(cpu, vendor, ext); +} + +#endif /* _ASM_VENDOR_EXTENSIONS_H */ diff --git a/arch/riscv/include/asm/vendor_extensions/andes.h b/arch/riscv/include/asm/vendor_extensions/andes.h new file mode 100644 index 000000000000..7bb2fc43438f --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/andes.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H +#define _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H + +#include <asm/vendor_extensions.h> + +#include <linux/types.h> + +#define RISCV_ISA_VENDOR_EXT_XANDESPMU 0 + +/* + * Extension keys should be strictly less than max. + * It is safe to increment this when necessary. + */ +#define RISCV_ISA_VENDOR_EXT_MAX_ANDES 32 + +extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes; + +#endif diff --git a/arch/riscv/include/asm/vendor_extensions/sifive.h b/arch/riscv/include/asm/vendor_extensions/sifive.h new file mode 100644 index 000000000000..ac00e500361c --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/sifive.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_SIFIVE_H +#define _ASM_RISCV_VENDOR_EXTENSIONS_SIFIVE_H + +#include <asm/vendor_extensions.h> + +#include <linux/types.h> + +#define RISCV_ISA_VENDOR_EXT_XSFVQMACCDOD 0 +#define RISCV_ISA_VENDOR_EXT_XSFVQMACCQOQ 1 +#define RISCV_ISA_VENDOR_EXT_XSFVFNRCLIPXFQF 2 +#define RISCV_ISA_VENDOR_EXT_XSFVFWMACCQQQ 3 + +extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_sifive; + +#endif diff --git a/arch/riscv/include/asm/vendor_extensions/sifive_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/sifive_hwprobe.h new file mode 100644 index 000000000000..90a61abd033c --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/sifive_hwprobe.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_SIFIVE_HWPROBE_H +#define _ASM_RISCV_VENDOR_EXTENSIONS_SIFIVE_HWPROBE_H + +#include <linux/cpumask.h> + +#include <uapi/asm/hwprobe.h> + +#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_SIFIVE +void hwprobe_isa_vendor_ext_sifive_0(struct riscv_hwprobe *pair, const struct cpumask *cpus); +#else +static inline void hwprobe_isa_vendor_ext_sifive_0(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + pair->value = 0; +} +#endif + +#endif diff --git a/arch/riscv/include/asm/vendor_extensions/thead.h b/arch/riscv/include/asm/vendor_extensions/thead.h new file mode 100644 index 000000000000..e85c75b3b340 --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/thead.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H +#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H + +#include <asm/vendor_extensions.h> + +#include <linux/types.h> + +/* + * Extension keys must be strictly less than RISCV_ISA_VENDOR_EXT_MAX. + */ +#define RISCV_ISA_VENDOR_EXT_XTHEADVECTOR 0 + +extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_thead; + +#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD +void disable_xtheadvector(void); +#else +static inline void disable_xtheadvector(void) { } +#endif + +/* Extension specific helpers */ + +/* + * Vector 0.7.1 as used for example on T-Head Xuantie cores, uses an older + * encoding for vsetvli (ta, ma vs. 
d1), so provide an instruction for + * vsetvli t4, x0, e8, m8, d1 + */ +#define THEAD_VSETVLI_T4X0E8M8D1 ".long 0x00307ed7\n\t" + +/* + * While in theory, the vector-0.7.1 vsb.v and vlb.v result in the same + * encoding as the standard vse8.v and vle8.v, compilers seem to optimize + * the call resulting in a different encoding and then using a value for + * the "mop" field that is not part of vector-0.7.1 + * So encode specific variants for vstate_save and _restore. + */ +#define THEAD_VSB_V_V0T0 ".long 0x02028027\n\t" +#define THEAD_VSB_V_V8T0 ".long 0x02028427\n\t" +#define THEAD_VSB_V_V16T0 ".long 0x02028827\n\t" +#define THEAD_VSB_V_V24T0 ".long 0x02028c27\n\t" +#define THEAD_VLB_V_V0T0 ".long 0x012028007\n\t" +#define THEAD_VLB_V_V8T0 ".long 0x012028407\n\t" +#define THEAD_VLB_V_V16T0 ".long 0x012028807\n\t" +#define THEAD_VLB_V_V24T0 ".long 0x012028c07\n\t" + +#endif diff --git a/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h new file mode 100644 index 000000000000..65a9c5612466 --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H +#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H + +#include <linux/cpumask.h> + +#include <uapi/asm/hwprobe.h> + +#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD +void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus); +#else +static inline void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + pair->value = 0; +} +#endif + +#endif diff --git a/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h new file mode 100644 index 000000000000..6b9293e984a9 --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2024 Rivos, Inc + */ + +#ifndef _ASM_RISCV_SYS_HWPROBE_H +#define _ASM_RISCV_SYS_HWPROBE_H + +#include <asm/cpufeature.h> + +#define VENDOR_EXT_KEY(ext) \ + do { \ + if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_VENDOR_EXT_##ext)) \ + pair->value |= RISCV_HWPROBE_VENDOR_EXT_##ext; \ + else \ + missing |= RISCV_HWPROBE_VENDOR_EXT_##ext; \ + } while (false) + +/* + * Loop through and record extensions that 1) anyone has, and 2) anyone + * doesn't have. + * + * _extension_checks is an arbitrary C block to set the values of pair->value + * and missing. It should be filled with VENDOR_EXT_KEY expressions. 
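To show how the hwprobe glue is meant to fit together: a per-vendor resolver such as the hwprobe_isa_vendor_ext_thead_0() declared above would plausibly be built from the VENDOR_EXT_KEY()/VENDOR_EXTENSION_SUPPORTED() macros of this header. A rough sketch, not a hunk from this diff, assuming the generic hwprobe code has already zeroed pair->value:

#include <asm/vendor_extensions/thead.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <asm/vendor_extensions/vendor_hwprobe.h>
#include <uapi/asm/vendor/thead.h>

void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair,
				    const struct cpumask *cpus)
{
	/* Report only the T-Head bits that every requested CPU supports. */
	VENDOR_EXTENSION_SUPPORTED(pair, cpus,
				   riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap, {
		VENDOR_EXT_KEY(XTHEADVECTOR);
	});
}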
+ */ +#define VENDOR_EXTENSION_SUPPORTED(pair, cpus, per_hart_vendor_bitmap, _extension_checks) \ + do { \ + int cpu; \ + u64 missing = 0; \ + for_each_cpu(cpu, (cpus)) { \ + struct riscv_isavendorinfo *isainfo = &(per_hart_vendor_bitmap)[cpu]; \ + _extension_checks \ + } \ + (pair)->value &= ~missing; \ + } while (false) \ + +#endif /* _ASM_RISCV_SYS_HWPROBE_H */ diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h index cb89af3f0704..a5150cdf34d8 100644 --- a/arch/riscv/include/asm/vendorid_list.h +++ b/arch/riscv/include/asm/vendorid_list.h @@ -5,6 +5,8 @@ #ifndef ASM_VENDOR_LIST_H #define ASM_VENDOR_LIST_H +#define ANDES_VENDOR_ID 0x31e +#define MICROCHIP_VENDOR_ID 0x029 #define SIFIVE_VENDOR_ID 0x489 #define THEAD_VENDOR_ID 0x5b7 diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h index ff9abc00d139..fefe94dc98e2 100644 --- a/arch/riscv/include/asm/vmalloc.h +++ b/arch/riscv/include/asm/vmalloc.h @@ -1,4 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _ASM_RISCV_VMALLOC_H #define _ASM_RISCV_VMALLOC_H +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +extern bool pgtable_l4_enabled, pgtable_l5_enabled; + +#define IOREMAP_MAX_ORDER (PUD_SHIFT) + +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return pgtable_l4_enabled || pgtable_l5_enabled; +} + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return true; +} + +#endif + #endif /* _ASM_RISCV_VMALLOC_H */ diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h index 7c086ac6ecd4..3802cda71ab7 100644 --- a/arch/riscv/include/asm/word-at-a-time.h +++ b/arch/riscv/include/asm/word-at-a-time.h @@ -9,7 +9,9 @@ #define _ASM_RISCV_WORD_AT_A_TIME_H -#include <linux/kernel.h> +#include <asm/asm-extable.h> +#include <linux/bitops.h> +#include <linux/wordpart.h> struct word_at_a_time { const unsigned long one_bits, high_bits; @@ -45,4 +47,30 @@ static inline unsigned long find_zero(unsigned long mask) /* The mask we created is directly usable as a bytemask */ #define zero_bytemask(mask) (mask) +#ifdef CONFIG_DCACHE_WORD_ACCESS + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. + */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret; + + /* Load word from unaligned pointer addr */ + asm( + "1: " REG_L " %0, %2\n" + "2:\n" + _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1) + : "=&r" (ret) + : "r" (addr), "m" (*(unsigned long *)addr)); + + return ret; +} + +#endif /* CONFIG_DCACHE_WORD_ACCESS */ + #endif /* _ASM_RISCV_WORD_AT_A_TIME_H */ diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h index d4ffc3c37649..f3d56299bc22 100644 --- a/arch/riscv/include/asm/xip_fixup.h +++ b/arch/riscv/include/asm/xip_fixup.h @@ -9,18 +9,36 @@ #ifdef CONFIG_XIP_KERNEL .macro XIP_FIXUP_OFFSET reg - REG_L t0, _xip_fixup + /* Fix-up address in Flash into address in RAM early during boot before + * MMU is up. Because generated code "thinks" data is in Flash, but it + * is actually in RAM (actually data is also in Flash, but Flash is + * read-only, thus we need to use the data residing in RAM). 
+ * + * The start of data in Flash is _sdata and the start of data in RAM is + * CONFIG_PHYS_RAM_BASE. So this fix-up essentially does this: + * reg += CONFIG_PHYS_RAM_BASE - _start + */ + li t0, CONFIG_PHYS_RAM_BASE add \reg, \reg, t0 + la t0, _sdata + sub \reg, \reg, t0 .endm .macro XIP_FIXUP_FLASH_OFFSET reg - la t1, __data_loc - REG_L t1, _xip_phys_offset - sub \reg, \reg, t1 + /* In linker script, at the transition from read-only section to + * writable section, the VMA is increased while LMA remains the same. + * (See in linker script how _sdata, __data_loc and LOAD_OFFSET is + * changed) + * + * Consequently, early during boot before MMU is up, the generated code + * reads the "writable" section at wrong addresses, because VMA is used + * by compiler to generate code, but the data is located in Flash using + * LMA. + */ + la t0, _sdata + sub \reg, \reg, t0 + la t0, __data_loc add \reg, \reg, t0 .endm - -_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET -_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET #else .macro XIP_FIXUP_OFFSET reg .endm diff --git a/arch/riscv/include/asm/xor.h b/arch/riscv/include/asm/xor.h new file mode 100644 index 000000000000..96011861e46b --- /dev/null +++ b/arch/riscv/include/asm/xor.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021 SiFive + */ + +#include <linux/hardirq.h> +#include <asm-generic/xor.h> +#ifdef CONFIG_RISCV_ISA_V +#include <asm/vector.h> +#include <asm/switch_to.h> +#include <asm/asm-prototypes.h> + +static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2) +{ + kernel_vector_begin(); + xor_regs_2_(bytes, p1, p2); + kernel_vector_end(); +} + +static void xor_vector_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3) +{ + kernel_vector_begin(); + xor_regs_3_(bytes, p1, p2, p3); + kernel_vector_end(); +} + +static void xor_vector_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4) +{ + kernel_vector_begin(); + xor_regs_4_(bytes, p1, p2, p3, p4); + kernel_vector_end(); +} + +static void xor_vector_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5) +{ + kernel_vector_begin(); + xor_regs_5_(bytes, p1, p2, p3, p4, p5); + kernel_vector_end(); +} + +static struct xor_block_template xor_block_rvv = { + .name = "rvv", + .do_2 = xor_vector_2, + .do_3 = xor_vector_3, + .do_4 = xor_vector_4, + .do_5 = xor_vector_5 +}; + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + if (has_vector()) { \ + xor_speed(&xor_block_rvv);\ + } \ + } while (0) +#endif diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild index f66554cd5c45..89ac01faa5ae 100644 --- a/arch/riscv/include/uapi/asm/Kbuild +++ b/arch/riscv/include/uapi/asm/Kbuild @@ -1 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += unistd_32.h +syscall-y += unistd_64.h diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h index 32c73ba1d531..95050ebe9ad0 100644 --- a/arch/riscv/include/uapi/asm/auxvec.h +++ b/arch/riscv/include/uapi/asm/auxvec.h @@ -30,8 +30,11 @@ #define 
AT_L1D_CACHEGEOMETRY 43 #define AT_L2_CACHESIZE 44 #define AT_L2_CACHEGEOMETRY 45 +#define AT_L3_CACHESIZE 46 +#define AT_L3_CACHEGEOMETRY 47 /* entries in ARCH_DLINFO */ -#define AT_VECTOR_SIZE_ARCH 7 +#define AT_VECTOR_SIZE_ARCH 10 +#define AT_MINSIGSTKSZ 51 #endif /* _UAPI_ASM_RISCV_AUXVEC_H */ diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h index d696d6610231..11a71b8533d5 100644 --- a/arch/riscv/include/uapi/asm/elf.h +++ b/arch/riscv/include/uapi/asm/elf.h @@ -49,6 +49,7 @@ typedef union __riscv_fp_state elf_fpregset_t; #define R_RISCV_TLS_DTPREL64 9 #define R_RISCV_TLS_TPREL32 10 #define R_RISCV_TLS_TPREL64 11 +#define R_RISCV_IRELATIVE 58 /* Relocation types not used by the dynamic linker */ #define R_RISCV_BRANCH 16 @@ -81,7 +82,6 @@ typedef union __riscv_fp_state elf_fpregset_t; #define R_RISCV_ALIGN 43 #define R_RISCV_RVC_BRANCH 44 #define R_RISCV_RVC_JUMP 45 -#define R_RISCV_LUI 46 #define R_RISCV_GPREL_I 47 #define R_RISCV_GPREL_S 48 #define R_RISCV_TPREL_I 49 @@ -93,6 +93,9 @@ typedef union __riscv_fp_state elf_fpregset_t; #define R_RISCV_SET16 55 #define R_RISCV_SET32 56 #define R_RISCV_32_PCREL 57 +#define R_RISCV_PLT32 59 +#define R_RISCV_SET_ULEB128 60 +#define R_RISCV_SUB_ULEB128 61 #endif /* _UAPI_ASM_RISCV_ELF_H */ diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h index 46dc3f5ee99f..c52bb7bbbabe 100644 --- a/arch/riscv/include/uapi/asm/hwcap.h +++ b/arch/riscv/include/uapi/asm/hwcap.h @@ -21,5 +21,6 @@ #define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A')) #define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A')) #define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A')) +#define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) #endif /* _UAPI_ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h new file mode 100644 index 000000000000..aaf6ad970499 --- /dev/null +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright 2023-2024 Rivos, Inc + */ + +#ifndef _UAPI_ASM_HWPROBE_H +#define _UAPI_ASM_HWPROBE_H + +#include <linux/types.h> + +/* + * Interface for probing hardware capabilities from userspace, see + * Documentation/arch/riscv/hwprobe.rst for more information. 
+ */ +struct riscv_hwprobe { + __s64 key; + __u64 value; +}; + +#define RISCV_HWPROBE_KEY_MVENDORID 0 +#define RISCV_HWPROBE_KEY_MARCHID 1 +#define RISCV_HWPROBE_KEY_MIMPID 2 +#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3 +#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0) +#define RISCV_HWPROBE_KEY_IMA_EXT_0 4 +#define RISCV_HWPROBE_IMA_FD (1 << 0) +#define RISCV_HWPROBE_IMA_C (1 << 1) +#define RISCV_HWPROBE_IMA_V (1 << 2) +#define RISCV_HWPROBE_EXT_ZBA (1 << 3) +#define RISCV_HWPROBE_EXT_ZBB (1 << 4) +#define RISCV_HWPROBE_EXT_ZBS (1 << 5) +#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6) +#define RISCV_HWPROBE_EXT_ZBC (1 << 7) +#define RISCV_HWPROBE_EXT_ZBKB (1 << 8) +#define RISCV_HWPROBE_EXT_ZBKC (1 << 9) +#define RISCV_HWPROBE_EXT_ZBKX (1 << 10) +#define RISCV_HWPROBE_EXT_ZKND (1 << 11) +#define RISCV_HWPROBE_EXT_ZKNE (1 << 12) +#define RISCV_HWPROBE_EXT_ZKNH (1 << 13) +#define RISCV_HWPROBE_EXT_ZKSED (1 << 14) +#define RISCV_HWPROBE_EXT_ZKSH (1 << 15) +#define RISCV_HWPROBE_EXT_ZKT (1 << 16) +#define RISCV_HWPROBE_EXT_ZVBB (1 << 17) +#define RISCV_HWPROBE_EXT_ZVBC (1 << 18) +#define RISCV_HWPROBE_EXT_ZVKB (1 << 19) +#define RISCV_HWPROBE_EXT_ZVKG (1 << 20) +#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21) +#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22) +#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23) +#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24) +#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25) +#define RISCV_HWPROBE_EXT_ZVKT (1 << 26) +#define RISCV_HWPROBE_EXT_ZFH (1 << 27) +#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28) +#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29) +#define RISCV_HWPROBE_EXT_ZVFH (1 << 30) +#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31) +#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32) +#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) +#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) +#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35) +#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36) +#define RISCV_HWPROBE_EXT_ZVE32X (1ULL << 37) +#define RISCV_HWPROBE_EXT_ZVE32F (1ULL << 38) +#define RISCV_HWPROBE_EXT_ZVE64X (1ULL << 39) +#define RISCV_HWPROBE_EXT_ZVE64F (1ULL << 40) +#define RISCV_HWPROBE_EXT_ZVE64D (1ULL << 41) +#define RISCV_HWPROBE_EXT_ZIMOP (1ULL << 42) +#define RISCV_HWPROBE_EXT_ZCA (1ULL << 43) +#define RISCV_HWPROBE_EXT_ZCB (1ULL << 44) +#define RISCV_HWPROBE_EXT_ZCD (1ULL << 45) +#define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) +#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) +#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) +#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) +#define RISCV_HWPROBE_EXT_ZICNTR (1ULL << 50) +#define RISCV_HWPROBE_EXT_ZIHPM (1ULL << 51) +#define RISCV_HWPROBE_EXT_ZFBFMIN (1ULL << 52) +#define RISCV_HWPROBE_EXT_ZVFBFMIN (1ULL << 53) +#define RISCV_HWPROBE_EXT_ZVFBFWMA (1ULL << 54) +#define RISCV_HWPROBE_EXT_ZICBOM (1ULL << 55) +#define RISCV_HWPROBE_EXT_ZAAMO (1ULL << 56) +#define RISCV_HWPROBE_EXT_ZALRSC (1ULL << 57) +#define RISCV_HWPROBE_EXT_ZABHA (1ULL << 58) +#define RISCV_HWPROBE_KEY_CPUPERF_0 5 +#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) +#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) +#define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0) +#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0) +#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0) +#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0) +#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 +#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7 +#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8 +#define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF 9 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED 1 +#define 
RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4 +#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4 +#define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 11 +#define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE 12 +#define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0 13 +/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ + +/* Flags */ +#define RISCV_HWPROBE_WHICH_CPUS (1 << 0) + +#endif diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 6119368ba6d5..5f59fd226cc5 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -12,9 +12,10 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> +#include <asm/bitsperlong.h> #include <asm/ptrace.h> -#define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_IRQ_LINE #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -48,6 +49,12 @@ struct kvm_sregs { /* CONFIG registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ struct kvm_riscv_config { unsigned long isa; + unsigned long zicbom_block_size; + unsigned long mvendorid; + unsigned long marchid; + unsigned long mimpid; + unsigned long zicboz_block_size; + unsigned long satp_mode; }; /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ @@ -60,7 +67,7 @@ struct kvm_riscv_core { #define KVM_RISCV_MODE_S 1 #define KVM_RISCV_MODE_U 0 -/* CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +/* General CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ struct kvm_riscv_csr { unsigned long sstatus; unsigned long sie; @@ -72,6 +79,23 @@ struct kvm_riscv_csr { unsigned long sip; unsigned long satp; unsigned long scounteren; + unsigned long senvcfg; +}; + +/* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_aia_csr { + unsigned long siselect; + unsigned long iprio1; + unsigned long iprio2; + unsigned long sieh; + unsigned long siph; + unsigned long iprio1h; + unsigned long iprio2h; +}; + +/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_smstateen_csr { + unsigned long sstateen0; }; /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ @@ -96,19 +120,108 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_H, KVM_RISCV_ISA_EXT_I, KVM_RISCV_ISA_EXT_M, + KVM_RISCV_ISA_EXT_SVPBMT, + KVM_RISCV_ISA_EXT_SSTC, + KVM_RISCV_ISA_EXT_SVINVAL, + KVM_RISCV_ISA_EXT_ZIHINTPAUSE, + KVM_RISCV_ISA_EXT_ZICBOM, + KVM_RISCV_ISA_EXT_ZICBOZ, + KVM_RISCV_ISA_EXT_ZBB, + KVM_RISCV_ISA_EXT_SSAIA, + KVM_RISCV_ISA_EXT_V, + KVM_RISCV_ISA_EXT_SVNAPOT, + KVM_RISCV_ISA_EXT_ZBA, + KVM_RISCV_ISA_EXT_ZBS, + KVM_RISCV_ISA_EXT_ZICNTR, + KVM_RISCV_ISA_EXT_ZICSR, + KVM_RISCV_ISA_EXT_ZIFENCEI, + KVM_RISCV_ISA_EXT_ZIHPM, + KVM_RISCV_ISA_EXT_SMSTATEEN, + KVM_RISCV_ISA_EXT_ZICOND, + KVM_RISCV_ISA_EXT_ZBC, + KVM_RISCV_ISA_EXT_ZBKB, + KVM_RISCV_ISA_EXT_ZBKC, + KVM_RISCV_ISA_EXT_ZBKX, + KVM_RISCV_ISA_EXT_ZKND, + KVM_RISCV_ISA_EXT_ZKNE, + KVM_RISCV_ISA_EXT_ZKNH, + KVM_RISCV_ISA_EXT_ZKR, + KVM_RISCV_ISA_EXT_ZKSED, + KVM_RISCV_ISA_EXT_ZKSH, + KVM_RISCV_ISA_EXT_ZKT, + KVM_RISCV_ISA_EXT_ZVBB, + KVM_RISCV_ISA_EXT_ZVBC, + KVM_RISCV_ISA_EXT_ZVKB, + KVM_RISCV_ISA_EXT_ZVKG, + KVM_RISCV_ISA_EXT_ZVKNED, + KVM_RISCV_ISA_EXT_ZVKNHA, + KVM_RISCV_ISA_EXT_ZVKNHB, + KVM_RISCV_ISA_EXT_ZVKSED, + KVM_RISCV_ISA_EXT_ZVKSH, + KVM_RISCV_ISA_EXT_ZVKT, + KVM_RISCV_ISA_EXT_ZFH, + 
KVM_RISCV_ISA_EXT_ZFHMIN, + KVM_RISCV_ISA_EXT_ZIHINTNTL, + KVM_RISCV_ISA_EXT_ZVFH, + KVM_RISCV_ISA_EXT_ZVFHMIN, + KVM_RISCV_ISA_EXT_ZFA, + KVM_RISCV_ISA_EXT_ZTSO, + KVM_RISCV_ISA_EXT_ZACAS, + KVM_RISCV_ISA_EXT_SSCOFPMF, + KVM_RISCV_ISA_EXT_ZIMOP, + KVM_RISCV_ISA_EXT_ZCA, + KVM_RISCV_ISA_EXT_ZCB, + KVM_RISCV_ISA_EXT_ZCD, + KVM_RISCV_ISA_EXT_ZCF, + KVM_RISCV_ISA_EXT_ZCMOP, + KVM_RISCV_ISA_EXT_ZAWRS, + KVM_RISCV_ISA_EXT_SMNPM, + KVM_RISCV_ISA_EXT_SSNPM, + KVM_RISCV_ISA_EXT_SVADE, + KVM_RISCV_ISA_EXT_SVADU, + KVM_RISCV_ISA_EXT_SVVPTC, + KVM_RISCV_ISA_EXT_ZABHA, + KVM_RISCV_ISA_EXT_ZICCRSE, + KVM_RISCV_ISA_EXT_ZAAMO, + KVM_RISCV_ISA_EXT_ZALRSC, KVM_RISCV_ISA_EXT_MAX, }; +/* + * SBI extension IDs specific to KVM. This is not the same as the SBI + * extension IDs defined by the RISC-V SBI specification. + */ +enum KVM_RISCV_SBI_EXT_ID { + KVM_RISCV_SBI_EXT_V01 = 0, + KVM_RISCV_SBI_EXT_TIME, + KVM_RISCV_SBI_EXT_IPI, + KVM_RISCV_SBI_EXT_RFENCE, + KVM_RISCV_SBI_EXT_SRST, + KVM_RISCV_SBI_EXT_HSM, + KVM_RISCV_SBI_EXT_PMU, + KVM_RISCV_SBI_EXT_EXPERIMENTAL, + KVM_RISCV_SBI_EXT_VENDOR, + KVM_RISCV_SBI_EXT_DBCN, + KVM_RISCV_SBI_EXT_STA, + KVM_RISCV_SBI_EXT_SUSP, + KVM_RISCV_SBI_EXT_MAX, +}; + +/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_sbi_sta { + unsigned long shmem_lo; + unsigned long shmem_hi; +}; + /* Possible states for kvm_riscv_timer */ #define KVM_RISCV_TIMER_STATE_OFF 0 #define KVM_RISCV_TIMER_STATE_ON 1 -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - /* If you need to interpret the index values, here is the key: */ #define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000 #define KVM_REG_RISCV_TYPE_SHIFT 24 +#define KVM_REG_RISCV_SUBTYPE_MASK 0x0000000000FF0000 +#define KVM_REG_RISCV_SUBTYPE_SHIFT 16 /* Config registers are mapped as type 1 */ #define KVM_REG_RISCV_CONFIG (0x01 << KVM_REG_RISCV_TYPE_SHIFT) @@ -122,8 +235,15 @@ enum KVM_RISCV_ISA_EXT_ID { /* Control and status registers are mapped as type 3 */ #define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) #define KVM_REG_RISCV_CSR_REG(name) \ (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) +#define KVM_REG_RISCV_CSR_AIA_REG(name) \ + (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long)) +#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \ + (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long)) /* Timer registers are mapped as type 4 */ #define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) @@ -142,6 +262,111 @@ enum KVM_RISCV_ISA_EXT_ID { /* ISA Extension registers are mapped as type 7 */ #define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_ISA_SINGLE (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_ISA_MULTI_EN (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_ISA_MULTI_DIS (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_ISA_MULTI_REG(__ext_id) \ + ((__ext_id) / __BITS_PER_LONG) +#define KVM_REG_RISCV_ISA_MULTI_MASK(__ext_id) \ + (1UL << ((__ext_id) % __BITS_PER_LONG)) +#define KVM_REG_RISCV_ISA_MULTI_REG_LAST \ + KVM_REG_RISCV_ISA_MULTI_REG(KVM_RISCV_ISA_EXT_MAX - 1) + +/* SBI extension registers are mapped as type 8 */ +#define KVM_REG_RISCV_SBI_EXT (0x08 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_SBI_SINGLE 
(0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_SBI_MULTI_EN (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_SBI_MULTI_DIS (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_SBI_MULTI_REG(__ext_id) \ + ((__ext_id) / __BITS_PER_LONG) +#define KVM_REG_RISCV_SBI_MULTI_MASK(__ext_id) \ + (1UL << ((__ext_id) % __BITS_PER_LONG)) +#define KVM_REG_RISCV_SBI_MULTI_REG_LAST \ + KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_MAX - 1) + +/* V extension registers are mapped as type 9 */ +#define KVM_REG_RISCV_VECTOR (0x09 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_VECTOR_CSR_REG(name) \ + (offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long)) +#define KVM_REG_RISCV_VECTOR_REG(n) \ + ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long)) + +/* Registers for specific SBI extensions are mapped as type 10 */ +#define KVM_REG_RISCV_SBI_STATE (0x0a << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_SBI_STA_REG(name) \ + (offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long)) + +/* Device Control API: RISC-V AIA */ +#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000 +#define KVM_DEV_RISCV_APLIC_SIZE 0x4000 +#define KVM_DEV_RISCV_APLIC_MAX_HARTS 0x4000 +#define KVM_DEV_RISCV_IMSIC_ALIGN 0x1000 +#define KVM_DEV_RISCV_IMSIC_SIZE 0x1000 + +#define KVM_DEV_RISCV_AIA_GRP_CONFIG 0 +#define KVM_DEV_RISCV_AIA_CONFIG_MODE 0 +#define KVM_DEV_RISCV_AIA_CONFIG_IDS 1 +#define KVM_DEV_RISCV_AIA_CONFIG_SRCS 2 +#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS 3 +#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT 4 +#define KVM_DEV_RISCV_AIA_CONFIG_HART_BITS 5 +#define KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS 6 + +/* + * Modes of RISC-V AIA device: + * 1) EMUL (aka Emulation): Trap-n-emulate IMSIC + * 2) HWACCEL (aka HW Acceleration): Virtualize IMSIC using IMSIC guest files + * 3) AUTO (aka Automatic): Virtualize IMSIC using IMSIC guest files whenever + * available otherwise fallback to trap-n-emulation + */ +#define KVM_DEV_RISCV_AIA_MODE_EMUL 0 +#define KVM_DEV_RISCV_AIA_MODE_HWACCEL 1 +#define KVM_DEV_RISCV_AIA_MODE_AUTO 2 + +#define KVM_DEV_RISCV_AIA_IDS_MIN 63 +#define KVM_DEV_RISCV_AIA_IDS_MAX 2048 +#define KVM_DEV_RISCV_AIA_SRCS_MAX 1024 +#define KVM_DEV_RISCV_AIA_GROUP_BITS_MAX 8 +#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN 24 +#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX 56 +#define KVM_DEV_RISCV_AIA_HART_BITS_MAX 16 +#define KVM_DEV_RISCV_AIA_GUEST_BITS_MAX 8 + +#define KVM_DEV_RISCV_AIA_GRP_ADDR 1 +#define KVM_DEV_RISCV_AIA_ADDR_APLIC 0 +#define KVM_DEV_RISCV_AIA_ADDR_IMSIC(__vcpu) (1 + (__vcpu)) +#define KVM_DEV_RISCV_AIA_ADDR_MAX \ + (1 + KVM_DEV_RISCV_APLIC_MAX_HARTS) + +#define KVM_DEV_RISCV_AIA_GRP_CTRL 2 +#define KVM_DEV_RISCV_AIA_CTRL_INIT 0 + +/* + * The device attribute type contains the memory mapped offset of the + * APLIC register (range 0x0000-0x3FFF) and it must be 4-byte aligned. + */ +#define KVM_DEV_RISCV_AIA_GRP_APLIC 3 + +/* + * The lower 12-bits of the device attribute type contains the iselect + * value of the IMSIC register (range 0x70-0xFF) whereas the higher order + * bits contains the VCPU id. 
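For orientation, a hypothetical VMM-side sketch of how the AIA device-control attributes above are exercised: create the in-kernel AIA device, pick a mode, hand it guest-physical addresses, then initialize it. The constant names come from this header and linux/kvm.h; the error handling, vCPU count and attribute data widths are simplifying assumptions, not something this patch defines:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumed: vm_fd is a KVM VM fd and the guest-physical addresses are valid. */
static int create_and_init_aia(int vm_fd, uint64_t aplic_gpa, uint64_t imsic_gpa)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_RISCV_AIA };
	uint32_t mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
	uint64_t addr;
	struct kvm_device_attr attr;

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))	/* on success cd.fd refers to the AIA device */
		return -1;

	attr = (struct kvm_device_attr) {
		.group = KVM_DEV_RISCV_AIA_GRP_CONFIG,
		.attr  = KVM_DEV_RISCV_AIA_CONFIG_MODE,
		.addr  = (uintptr_t)&mode,
	};
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	addr = aplic_gpa;
	attr = (struct kvm_device_attr) {
		.group = KVM_DEV_RISCV_AIA_GRP_ADDR,
		.attr  = KVM_DEV_RISCV_AIA_ADDR_APLIC,
		.addr  = (uintptr_t)&addr,
	};
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	addr = imsic_gpa;
	attr = (struct kvm_device_attr) {
		.group = KVM_DEV_RISCV_AIA_GRP_ADDR,
		.attr  = KVM_DEV_RISCV_AIA_ADDR_IMSIC(0),	/* vCPU 0 */
		.addr  = (uintptr_t)&addr,
	};
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	attr = (struct kvm_device_attr) {
		.group = KVM_DEV_RISCV_AIA_GRP_CTRL,
		.attr  = KVM_DEV_RISCV_AIA_CTRL_INIT,
	};
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}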
+ */ +#define KVM_DEV_RISCV_AIA_GRP_IMSIC 4 +#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS 12 +#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK \ + ((1U << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) - 1) +#define KVM_DEV_RISCV_AIA_IMSIC_MKATTR(__vcpu, __isel) \ + (((__vcpu) << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) | \ + ((__isel) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK)) +#define KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(__attr) \ + ((__attr) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK) +#define KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(__attr) \ + ((__attr) >> KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) + +/* One single KVM irqchip, ie. the AIA */ +#define KVM_NR_IRQCHIPS 1 #endif diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h index 882547f6bd5c..a38268b19c3d 100644 --- a/arch/riscv/include/uapi/asm/ptrace.h +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -10,6 +10,11 @@ #include <linux/types.h> +#define PTRACE_GETFDPIC 33 + +#define PTRACE_GETFDPIC_EXEC 0 +#define PTRACE_GETFDPIC_INTERP 1 + /* * User-mode register state for core dumps, ptrace, sigcontext * @@ -71,12 +76,57 @@ struct __riscv_q_ext_state { __u32 reserved[3]; }; +struct __riscv_ctx_hdr { + __u32 magic; + __u32 size; +}; + +struct __riscv_extra_ext_header { + __u32 __padding[129] __attribute__((aligned(16))); + /* + * Reserved for expansion of sigcontext structure. Currently zeroed + * upon signal, and must be zero upon sigreturn. + */ + __u32 reserved; + struct __riscv_ctx_hdr hdr; +}; + union __riscv_fp_state { struct __riscv_f_ext_state f; struct __riscv_d_ext_state d; struct __riscv_q_ext_state q; }; +struct __riscv_v_ext_state { + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb; + void *datap; + /* + * In signal handler, datap will be set a correct user stack offset + * and vector registers will be copied to the address of datap + * pointer. + */ +}; + +struct __riscv_v_regset_state { + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb; + char vreg[]; +}; + +/* + * According to spec: The number of bits in a single vector register, + * VLEN >= ELEN, which must be a power of 2, and must be no greater than + * 2^16 = 65536bits = 8192bytes + */ +#define RISCV_MAX_VLENB (8192) + #endif /* __ASSEMBLY__ */ #endif /* _UAPI_ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h new file mode 100644 index 000000000000..66b13a522880 --- /dev/null +++ b/arch/riscv/include/uapi/asm/setup.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ + +#ifndef _UAPI_ASM_RISCV_SETUP_H +#define _UAPI_ASM_RISCV_SETUP_H + +#define COMMAND_LINE_SIZE 1024 + +#endif /* _UAPI_ASM_RISCV_SETUP_H */ diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h index 84f2dfcfdbce..cd4f175dc837 100644 --- a/arch/riscv/include/uapi/asm/sigcontext.h +++ b/arch/riscv/include/uapi/asm/sigcontext.h @@ -8,15 +8,33 @@ #include <asm/ptrace.h> +/* The Magic number for signal context frame header. */ +#define RISCV_V_MAGIC 0x53465457 +#define END_MAGIC 0x0 + +/* The size of END signal context header. */ +#define END_HDR_SIZE 0x0 + +#ifndef __ASSEMBLY__ + +struct __sc_riscv_v_state { + struct __riscv_v_ext_state v_state; +} __attribute__((aligned(16))); + /* * Signal context structure * * This contains the context saved before a signal handler is invoked; - * it is restored by sys_sigreturn / sys_rt_sigreturn. 
+ * it is restored by sys_rt_sigreturn. */ struct sigcontext { struct user_regs_struct sc_regs; - union __riscv_fp_state sc_fpregs; + union { + union __riscv_fp_state sc_fpregs; + struct __riscv_extra_ext_header sc_extdesc; + }; }; +#endif /*!__ASSEMBLY__*/ + #endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */ diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h index 44eb993950e5..516bd0bb0da5 100644 --- a/arch/riscv/include/uapi/asm/ucontext.h +++ b/arch/riscv/include/uapi/asm/ucontext.h @@ -15,19 +15,23 @@ struct ucontext { struct ucontext *uc_link; stack_t uc_stack; sigset_t uc_sigmask; - /* There's some padding here to allow sigset_t to be expanded in the + /* + * There's some padding here to allow sigset_t to be expanded in the * future. Though this is unlikely, other architectures put uc_sigmask * at the end of this structure and explicitly state it can be - * expanded, so we didn't want to box ourselves in here. */ + * expanded, so we didn't want to box ourselves in here. + */ __u8 __unused[1024 / 8 - sizeof(sigset_t)]; - /* We can't put uc_sigmask at the end of this structure because we need + /* + * We can't put uc_sigmask at the end of this structure because we need * to be able to expand sigcontext in the future. For example, the * vector ISA extension will almost certainly add ISA state. We want * to ensure all user-visible ISA state can be saved and restored via a * ucontext, so we're putting this at the end in order to allow for * infinite extensibility. Since we know this will be extended and we * assume sigset_t won't be extended an extreme amount, we're - * prioritizing this. */ + * prioritizing this. + */ struct sigcontext uc_mcontext; }; diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h index 73d7cdd2ec49..81896bbbf727 100644 --- a/arch/riscv/include/uapi/asm/unistd.h +++ b/arch/riscv/include/uapi/asm/unistd.h @@ -14,32 +14,10 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ +#include <asm/bitsperlong.h> -#if defined(__LP64__) && !defined(__SYSCALL_COMPAT) -#define __ARCH_WANT_NEW_STAT -#define __ARCH_WANT_SET_GET_RLIMIT -#endif /* __LP64__ */ - -#define __ARCH_WANT_SYS_CLONE3 -#define __ARCH_WANT_MEMFD_SECRET - -#include <asm-generic/unistd.h> - -/* - * Allows the instruction cache to be flushed from userspace. Despite RISC-V - * having a direct 'fence.i' instruction available to userspace (which we - * can't trap!), that's not actually viable when running on Linux because the - * kernel might schedule a process on another hart. There is no way for - * userspace to handle this without invoking the kernel (as it doesn't know the - * thread->hart mappings), so we've defined a RISC-V specific system call to - * flush the instruction cache. - * - * __NR_riscv_flush_icache is defined to flush the instruction cache over an - * address range, with the flush applying to either all threads or just the - * caller. We don't currently do anything with the address range, that's just - * in there for forwards compatibility. 
- */ -#ifndef __NR_riscv_flush_icache -#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15) +#if __BITS_PER_LONG == 64 +#include <asm/unistd_64.h> +#else +#include <asm/unistd_32.h> #endif -__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache) diff --git a/arch/riscv/include/uapi/asm/vendor/sifive.h b/arch/riscv/include/uapi/asm/vendor/sifive.h new file mode 100644 index 000000000000..9f3278a4b298 --- /dev/null +++ b/arch/riscv/include/uapi/asm/vendor/sifive.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#define RISCV_HWPROBE_VENDOR_EXT_XSFVQMACCDOD (1 << 0) +#define RISCV_HWPROBE_VENDOR_EXT_XSFVQMACCQOQ (1 << 1) +#define RISCV_HWPROBE_VENDOR_EXT_XSFVFNRCLIPXFQF (1 << 2) +#define RISCV_HWPROBE_VENDOR_EXT_XSFVFWMACCQQQ (1 << 3) diff --git a/arch/riscv/include/uapi/asm/vendor/thead.h b/arch/riscv/include/uapi/asm/vendor/thead.h new file mode 100644 index 000000000000..43790ebe5faf --- /dev/null +++ b/arch/riscv/include/uapi/asm/vendor/thead.h @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#define RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR (1 << 0) |
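As a closing illustration, a hypothetical userspace sketch of how the vendor hwprobe keys above are consumed through the riscv_hwprobe syscall. It assumes installed kernel headers that provide __NR_riscv_hwprobe and the asm/vendor/thead.h UAPI header; it is not part of the patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>
#include <asm/vendor/thead.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 };

	/* pairs, pair_count, cpusetsize, cpus, flags: NULL/0 for cpus means "all CPUs". */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	/* The kernel sets key to -1 if it does not recognise the requested key. */
	if (pair.key >= 0 && (pair.value & RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR))
		printf("xtheadvector is supported on every CPU\n");

	return 0;
}

The same pattern applies to RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0 and the XSFV* bits from asm/vendor/sifive.h.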