From 3c9c1dcde7c3a6c6203686ccd620620ae5b2a905 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Tue, 31 Dec 2019 01:54:57 -0800
Subject: arm64: Kconfig: Remove CONFIG_ prefix from ARM64_PSEUDO_NMI section

Remove the CONFIG_ prefix from the select statement for ARM_GIC_V3.

Acked-by: Catalin Marinas
Signed-off-by: Joe Perches
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/Kconfig')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1b4476ddb83..e9b1fc22f72e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1544,7 +1544,7 @@ config ARM64_MODULE_PLTS
 
 config ARM64_PSEUDO_NMI
 	bool "Support for NMI-like interrupts"
-	select CONFIG_ARM_GIC_V3
+	select ARM_GIC_V3
 	help
 	  Adds support for mimicking Non-Maskable Interrupts through the use of
 	  GIC interrupt priority. This support requires version 3 or later of
--
cgit v1.2.3-59-g8ed1b
From 395af861377d14616c221831430f58e5786b92f1 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 15 Jan 2020 11:30:08 +0000
Subject: arm64: Move the LSE gas support detection to Kconfig

As the Kconfig syntax gained support for $(as-instr) tests, move the
LSE gas support detection from the Makefile to the main arm64 Kconfig
and remove the additional CONFIG_AS_LSE definition and check.

Cc: Will Deacon
Reviewed-by: Vladimir Murzin
Tested-by: Vladimir Murzin
Signed-off-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig                    |  5 +++++
 arch/arm64/Makefile                   | 11 ++++-------
 arch/arm64/include/asm/atomic_ll_sc.h |  2 +-
 arch/arm64/include/asm/lse.h          |  6 +++---
 arch/arm64/kernel/cpufeature.c        |  4 ++--
 5 files changed, 15 insertions(+), 13 deletions(-)

(limited to 'arch/arm64/Kconfig')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1b4476ddb83..cf3b6d2a67cf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1363,6 +1363,11 @@ config ARM64_PAN
 	  instruction if the cpu does not implement the feature.
 
 config ARM64_LSE_ATOMICS
+	bool
+	default ARM64_USE_LSE_ATOMICS
+	depends on $(as-instr,.arch_extension lse)
+
+config ARM64_USE_LSE_ATOMICS
 	bool "Atomic instructions"
 	depends on JUMP_LABEL
 	default y
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1fbe24d4fdb6..6dd8ecacc428 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -30,11 +30,8 @@ LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
 endif
 endif
 
-# Check for binutils support for specific extensions
-lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
-
-ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-  ifeq ($(lseinstr),)
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
+  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
 $(warning LSE atomics not supported by binutils)
   endif
 endif
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 7b012148bfd6..13869b76b58c 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -12,7 +12,7 @@
 
 #include <linux/stringify.h>
 
-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
 #define __LL_SC_FALLBACK(asm_ops)					\
 "	b	3f\n"							\
 "	.subsection	1\n"						\
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 80b388278149..4e1009fff686 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -4,7 +4,7 @@
 
 #include <asm/atomic_ll_sc.h>
 
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
 
 #include <linux/compiler_types.h>
 #include <linux/export.h>
@@ -36,7 +36,7 @@ static inline bool system_uses_lse_atomics(void)
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
 	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
 
-#else	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#else	/* CONFIG_ARM64_LSE_ATOMICS */
 
 static inline bool system_uses_lse_atomics(void) { return false; }
 
@@ -44,5 +44,5 @@ static inline bool system_uses_lse_atomics(void) { return false; }
 
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)	llsc
 
-#endif	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif	/* CONFIG_ARM64_LSE_ATOMICS */
 #endif	/* __ASM_LSE_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 04cf64e9f0c9..2595c2886d3f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1291,7 +1291,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_pan,
 	},
 #endif /* CONFIG_ARM64_PAN */
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
 	{
 		.desc = "LSE atomic instructions",
 		.capability = ARM64_HAS_LSE_ATOMICS,
@@ -1302,7 +1302,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 2,
 	},
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
 	{
 		.desc = "Software prefetching using PRFM",
 		.capability = ARM64_HAS_NO_HW_PREFETCH,
--
cgit v1.2.3-59-g8ed1b
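With the assembler check folded into CONFIG_ARM64_LSE_ATOMICS itself, a
single #ifdef now means both "the user asked for LSE" and "the toolchain
can emit LSE". As a rough sketch of how a caller can dispatch on the
static-key test from the patched lse.h — the wrapper name below is made
up, and the __lse_atomic_add()/__ll_sc_atomic_add() callees are the
names used by the 5.5-era arm64 atomic headers, so treat this as
illustrative rather than in-tree code:

	#include <asm/atomic.h>
	#include <asm/lse.h>

	/*
	 * Illustrative sketch, not part of the series: dispatch between
	 * the LSE and LL/SC flavours of an atomic add. With the Kconfig
	 * change above, a kernel built with CONFIG_ARM64_LSE_ATOMICS=y
	 * is guaranteed to have an assembler that accepts the LSE
	 * encodings.
	 */
	static inline void example_atomic_add(int i, atomic_t *v)
	{
		if (system_uses_lse_atomics())
			__lse_atomic_add(i, v);		/* single STADD instruction */
		else
			__ll_sc_atomic_add(i, v);	/* LDXR/STXR retry loop */
	}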
From 3e6c69a058deaa50d33c3dac36cde80b4ce590e8 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Mon, 9 Dec 2019 18:12:14 +0000
Subject: arm64: Add initial support for E0PD

Kernel Page Table Isolation (KPTI) is used to mitigate some
speculation-based security issues by ensuring that the kernel is not
mapped when userspace is running, but this approach is expensive and is
incompatible with SPE. E0PD, introduced in the ARMv8.5 extensions,
provides an alternative: it ensures that accesses from userspace to the
kernel's half of the memory map always fault in constant time,
preventing timing attacks without requiring constant unmapping and
remapping or blocking legitimate accesses.

Currently this feature will only be enabled if all CPUs in the system
support E0PD; if some CPUs do not support the feature at boot time then
the feature will not be enabled, and in the unlikely event that a late
CPU is the first CPU to lack the feature then we will reject that CPU.

This initial patch does not yet integrate with KPTI; this will be dealt
with in followup patches. Ideally we could ensure that by default we
don't use KPTI on CPUs where E0PD is present.

Signed-off-by: Mark Brown
Reviewed-by: Suzuki K Poulose
[will: Fixed typo in Kconfig text]
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig                     | 16 ++++++++++++++++
 arch/arm64/include/asm/cpucaps.h       |  3 ++-
 arch/arm64/include/asm/pgtable-hwdef.h |  2 ++
 arch/arm64/include/asm/sysreg.h        |  1 +
 arch/arm64/kernel/cpufeature.c         | 22 ++++++++++++++++++++++
 5 files changed, 43 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/Kconfig')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1b4476ddb83..9cee2008ea9e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1484,6 +1484,22 @@ config ARM64_PTR_AUTH
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARM64_E0PD
+	bool "Enable support for E0PD"
+	default y
+	help
+	   E0PD (part of the ARMv8.5 extensions) allows us to ensure
+	   that EL0 accesses made via TTBR1 always fault in constant time,
+	   providing similar benefits to KASLR as those provided by KPTI, but
+	   with lower overhead and without disrupting legitimate access to
+	   kernel memory such as SPE.
+
+	   This option enables E0PD for TTBR1 where available.
+
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index b92683871119..33ff25c1ab1b 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -56,7 +56,8 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
 #define ARM64_WORKAROUND_1542419		47
 #define ARM64_WORKAROUND_1319367		48
+#define ARM64_HAS_E0PD				49
 
-#define ARM64_NCAPS				49
+#define ARM64_NCAPS				50
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d9fbd433cc17..378566f4882e 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -292,6 +292,8 @@
 #define TCR_HD			(UL(1) << 40)
 #define TCR_NFD0		(UL(1) << 53)
 #define TCR_NFD1		(UL(1) << 54)
+#define TCR_E0PD0		(UL(1) << 55)
+#define TCR_E0PD1		(UL(1) << 56)
 
 /*
  * TTBR.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6e919fafb43d..b085258cfe4e 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -655,6 +655,7 @@
 #define ID_AA64MMFR1_VMIDBITS_16	2
 
 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT		60
 #define ID_AA64MMFR2_FWB_SHIFT		40
 #define ID_AA64MMFR2_AT_SHIFT		32
 #define ID_AA64MMFR2_LVA_SHIFT		16
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 04cf64e9f0c9..9d578e720168 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -225,6 +225,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
@@ -1251,6 +1252,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+	if (this_cpu_has_cap(ARM64_HAS_E0PD))
+		sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 static bool enable_pseudo_nmi;
 
@@ -1566,6 +1575,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
 	},
+#endif
+#ifdef CONFIG_ARM64_E0PD
+	{
+		.desc = "E0PD",
+		.capability = ARM64_HAS_E0PD,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+		.matches = has_cpuid_feature,
+		.min_field_value = 1,
+		.cpu_enable = cpu_enable_e0pd,
+	},
 #endif
 	{},
 };
--
cgit v1.2.3-59-g8ed1b
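For context, the capability matching above boils down to reading the new
4-bit E0PD field at bits [63:60] of ID_AA64MMFR2_EL1 and comparing it
against .min_field_value. A hand-written equivalent, as a sketch using
the existing cpufeature helpers (the function name is hypothetical):

	#include <asm/cpufeature.h>
	#include <asm/sysreg.h>

	/*
	 * Sketch only: roughly what has_cpuid_feature() does for the
	 * ARM64_HAS_E0PD entry above. The field is registered as
	 * FTR_LOWER_SAFE, so the sanitised value is the lowest seen
	 * across all boot CPUs; any non-zero value means every CPU
	 * so far implements E0PD.
	 */
	static bool example_system_has_e0pd(void)
	{
		u64 mmfr2 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);

		return cpuid_feature_extract_unsigned_field(mmfr2,
						ID_AA64MMFR2_E0PD_SHIFT) >= 1;
	}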
From 8bf9284d99dcb1c8fbdfabde1979350cf41fa5f5 Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Wed, 15 Jan 2020 14:18:25 +0000
Subject: arm64: Turn "broken gas inst" into real config option

Use the new 'as-instr' Kconfig macro to define CONFIG_BROKEN_GAS_INST
directly, making it available everywhere.

Signed-off-by: Vladimir Murzin
[will: Drop redundant 'y if' logic]
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig  |  3 +++
 arch/arm64/Makefile | 10 +++-------
 2 files changed, 6 insertions(+), 7 deletions(-)

(limited to 'arch/arm64/Kconfig')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index cf3b6d2a67cf..8a35f776c7f1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -301,6 +301,9 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 
+config BROKEN_GAS_INST
+	def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
+
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 6dd8ecacc428..dca1a97751ab 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -42,19 +42,15 @@ cc_has_k_constraint := $(call try-run,echo				\
 		return 0;					\
 	}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
 
-ifeq ($(CONFIG_ARM64), y)
-brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
-
-  ifneq ($(brokengasinst),)
+ifeq ($(CONFIG_BROKEN_GAS_INST),y)
 $(warning Detected assembler with broken .inst; disassembly will be unreliable)
-  endif
 endif
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only $(brokengasinst)	\
+KBUILD_CFLAGS	+= -mgeneral-regs-only			\
 		   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
-KBUILD_AFLAGS	+= $(brokengasinst) $(compat_vdso)
+KBUILD_AFLAGS	+= $(compat_vdso)
 
 KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)
--
cgit v1.2.3-59-g8ed1b
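Since BROKEN_GAS_INST is now an ordinary Kconfig symbol rather than a
-D flag private to the arm64 Makefile, any source file can test
CONFIG_BROKEN_GAS_INST directly. A hypothetical illustration — the macro
and function below are invented for this sketch, loosely modelled on the
__emit_inst() fallback in asm/sysreg.h:

	#include <linux/stringify.h>

	/*
	 * Hypothetical illustration, not kernel code: if the assembler's
	 * .inst directive is broken, emit the instruction word with
	 * .long instead. Note that .long is only byte-order-correct for
	 * instructions on little-endian kernels, which is why the real
	 * fallback also byte-swaps on big-endian.
	 */
	#ifdef CONFIG_BROKEN_GAS_INST
	#define EXAMPLE_EMIT_INSN(insn)	".long " __stringify(insn) "\n"
	#else
	#define EXAMPLE_EMIT_INSN(insn)	".inst " __stringify(insn) "\n"
	#endif

	static inline void example_emit_nop(void)
	{
		asm volatile(EXAMPLE_EMIT_INSN(0xd503201f));	/* AArch64 NOP */
	}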
From 98346023365931948b78436ae761544b09035b3b Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Mon, 20 Jan 2020 10:36:02 +0000
Subject: arm64: Kconfig: select HAVE_FUTEX_CMPXCHG

arm64 provides an always-working implementation of
futex_atomic_cmpxchg_inatomic(), so there is no need to check it at
runtime.

Reported-by: Piyush swami
Signed-off-by: Vladimir Murzin
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/Kconfig')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e9b1fc22f72e..6c27b8126817 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -161,6 +161,7 @@ config ARM64
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUNCTION_ARG_ACCESS_API
+	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_RCU_TABLE_FREE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
--
cgit v1.2.3-59-g8ed1b

From e717d93b1c3f5e263c6c43b6ff0835a1279cb6fc Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 22 Jan 2020 11:23:54 +0000
Subject: arm64: kconfig: Fix alignment of E0PD help text

Remove the additional space.

Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/arm64/Kconfig')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9cee2008ea9e..aca103ef52b4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1490,13 +1490,13 @@ config ARM64_E0PD
 	bool "Enable support for E0PD"
 	default y
 	help
-	   E0PD (part of the ARMv8.5 extensions) allows us to ensure
-	   that EL0 accesses made via TTBR1 always fault in constant time,
-	   providing similar benefits to KASLR as those provided by KPTI, but
-	   with lower overhead and without disrupting legitimate access to
-	   kernel memory such as SPE.
+	  E0PD (part of the ARMv8.5 extensions) allows us to ensure
+	  that EL0 accesses made via TTBR1 always fault in constant time,
+	  providing similar benefits to KASLR as those provided by KPTI, but
+	  with lower overhead and without disrupting legitimate access to
+	  kernel memory such as SPE.
 
-	   This option enables E0PD for TTBR1 where available.
+	  This option enables E0PD for TTBR1 where available.
 
 endmenu
--
cgit v1.2.3-59-g8ed1b
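As background to the HAVE_FUTEX_CMPXCHG change above: without that
symbol, the futex core probes the operation once at boot. A sketch of
that probe, modelled on futex_detect_cmpxchg() in the 5.5-era
kernel/futex.c (the wrapper name here is invented):

	#include <linux/futex.h>
	#include <asm/futex.h>

	/*
	 * Sketch of the boot-time probe that selecting HAVE_FUTEX_CMPXCHG
	 * skips: the core deliberately passes a NULL user address and
	 * treats the resulting -EFAULT as proof that
	 * futex_atomic_cmpxchg_inatomic() is actually implemented.
	 */
	static bool example_probe_futex_cmpxchg(void)
	{
		u32 curval;

		return futex_atomic_cmpxchg_inatomic(&curval, NULL, 0, 0) == -EFAULT;
	}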