From 3e29ead500137fe0733eecb901707f986aaf3e30 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 May 2019 12:57:54 +0100 Subject: arm64: Remove useless message during oops During an oops, we print the name of the current task and its pid twice. We also helpfully advertise its stack limit as "0x(____ptrval____)". Drop these useless messages. Signed-off-by: Will Deacon --- arch/arm64/kernel/traps.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index ade32046f3fe..e6be1a6efc0a 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -168,7 +168,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) static int __die(const char *str, int err, struct pt_regs *regs) { - struct task_struct *tsk = current; static int die_counter; int ret; @@ -181,9 +180,6 @@ static int __die(const char *str, int err, struct pt_regs *regs) return ret; print_modules(); - pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), - end_of_stack(tsk)); show_regs(regs); if (!user_mode(regs)) -- cgit v1.2.3-59-g8ed1b From 969f5ea627570e91c9d54403287ee3ed657f58fe Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Apr 2019 13:03:57 +0100 Subject: arm64: errata: Add workaround for Cortex-A76 erratum #1463225 Revisions of the Cortex-A76 CPU prior to r4p0 are affected by an erratum that can prevent interrupts from being taken when single-stepping. This patch implements a software workaround to prevent userspace from effectively being able to disable interrupts. Cc: Cc: Marc Zyngier Cc: Catalin Marinas Signed-off-by: Will Deacon --- Documentation/arm64/silicon-errata.txt | 1 + arch/arm64/Kconfig | 18 ++++++++++++++++++ arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/kernel/cpu_errata.c | 24 ++++++++++++++++++++++++ arch/arm64/kernel/syscall.c | 31 +++++++++++++++++++++++++++++++ arch/arm64/mm/fault.c | 33 +++++++++++++++++++++++++++++++++ 6 files changed, 109 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 68d9b74fd751..b29a32805ad0 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -62,6 +62,7 @@ stable kernels. | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 | | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 | | ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 | +| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 | | ARM | MMU-500 | #841119,#826419 | N/A | | | | | | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4780eb7af842..5d99f492869b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -520,6 +520,24 @@ config ARM64_ERRATUM_1286807 If unsure, say Y. +config ARM64_ERRATUM_1463225 + bool "Cortex-A76: Software Step might prevent interrupt recognition" + default y + help + This option adds a workaround for Arm Cortex-A76 erratum 1463225. + + On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping + of a system call instruction (SVC) can prevent recognition of + subsequent interrupts when software stepping is disabled in the + exception handler of the system call and either kernel debugging + is enabled or VHE is in use. + + Work around the erratum by triggering a dummy step exception + when handling a system call from a task that is being stepped + in a VHE configuration of the kernel. 
+
+ If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 bool "Cavium erratum 22375, 24313"
 default y
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index defdc67d9ab4..73faee64e498 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -62,7 +62,8 @@
 #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
 #define ARM64_HAS_IRQ_PRIO_MASKING 42
 #define ARM64_HAS_DCPODP 43
+#define ARM64_WORKAROUND_1463225 44
-#define ARM64_NCAPS 44
+#define ARM64_NCAPS 45
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index e88d4e7bdfc7..ac6432bfc1e4 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -502,6 +502,22 @@ static const struct midr_range arm64_ssb_cpus[] = {
 {},
 };
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ /* Cortex-A76 r0p0 - r3p1 */
+ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+}
+#endif
+
 static void __maybe_unused
 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 {
@@ -823,6 +839,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 .capability = ARM64_WORKAROUND_1165522,
 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
 },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+ {
+ .desc = "ARM erratum 1463225",
+ .capability = ARM64_WORKAROUND_1463225,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
 #endif
 { }
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5610ac01c1ec..871c739f060a 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 #include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
 #include <asm/fpsimd.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
 int syscall_trace_enter(struct pt_regs *regs);
 void syscall_trace_exit(struct pt_regs *regs);
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+ u32 reg, val;
+
+ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+ return;
+
+ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+ return;
+
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+ reg = read_sysreg(mdscr_el1);
+ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ write_sysreg(val, mdscr_el1);
+ asm volatile("msr daifclr, #8");
+ isb();
+
+ /* We will have taken a single-step exception by this point */
+
+ write_sysreg(reg, mdscr_el1);
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 const syscall_fn_t syscall_table[])
 {
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 regs->orig_x0 = regs->regs[0];
 regs->syscallno = scno;
+ cortex_a76_erratum_1463225_svc_handler();
 local_daif_restore(DAIF_PROCCTX);
 user_exit();
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0cb0e09995e1..9a84a4071561 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -810,6 +810,36 @@ void __init hook_debug_fault_code(int nr,
 debug_fault_info[nr].name = name;
 }
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ return 0;
+
+ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
+ return 0;
+
+ /*
+ * We've taken a dummy step exception from the kernel to ensure
+ * that interrupts are re-enabled on the syscall path. Return back
+ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
+ * masked so that we can safely restore the mdscr and get on with
+ * handling the syscall.
+ */
+ regs->pstate |= PSR_D_BIT;
+ return 1;
+}
+#else
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
 asmlinkage void __exception
 do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
 struct pt_regs *regs)
@@ -817,6 +847,9 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
 const struct fault_info *inf = esr_to_debug_fault_info(esr);
 unsigned long pc = instruction_pointer(regs);
+ if (cortex_a76_erratum_1463225_debug_handler(regs))
+ return;
+
 /*
 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
 * already disabled to preserve the last enabled/disabled addresses.
-- cgit v1.2.3-59-g8ed1b

From b2eed9b58811283d00fa861944cb75797d4e52a7 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 23 May 2019 10:17:37 +0100
Subject: arm64/kernel: kaslr: reduce module randomization range to 2 GB

The following commit

 7290d5809571 ("module: use relative references for __ksymtab entries")

updated the ksymtab handling of some KASLR-capable architectures so that
ksymtab entries are emitted as pairs of 32-bit relative references. This
reduces the size of the entries, but more importantly, it gets rid of
statically assigned absolute addresses, which require fixing up at boot
time if the kernel is self-relocating (which takes a 24-byte RELA entry
for each member of the ksymtab struct).

Since ksymtab entries are always part of the same module as the symbol
they export, it was assumed at the time that a 32-bit relative reference
is always sufficient to capture the offset between a ksymtab entry and
its target symbol.

Unfortunately, this is not always true: in the case of per-CPU variables,
a per-CPU variable's base address (which usually differs from the actual
address of any of its per-CPU copies) is allocated in the vicinity of
the .data..percpu section in the core kernel (i.e., in the per-CPU
reserved region which follows the section containing the core kernel's
statically allocated per-CPU variables).

Since we randomize the module space over a 4 GB window covering the core
kernel (based on the -/+ 4 GB range of an ADRP/ADD pair), we may end up
putting the core kernel out of the -/+ 2 GB range of 32-bit relative
references of module ksymtab entries that refer to per-CPU variables.

So reduce the module randomization range a bit further. We lose 1 bit of
randomization this way, but this is something we can tolerate.
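As an illustration of the failure mode (a sketch based on the relative
kernel_symbol layout from commit 7290d5809571, not code added by this
patch; the offset/ok names are only for exposition):

 /* With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS, a ksymtab entry is a
  * pair of 32-bit self-relative references rather than absolute
  * pointers:
  */
 struct kernel_symbol {
 	int value_offset;	/* offset from &value_offset to the symbol */
 	int name_offset;
 };

 /*
  * The entry lives in the module, while the target may be a per-CPU
  * base address near the core kernel, so the reference is only
  * representable when the distance fits in a signed 32-bit quantity:
  *
  *	s64 offset = sym_addr - (u64)&entry->value_offset;
  *	bool ok = (offset >= S32_MIN && offset <= S32_MAX);
  *
  * which is what limits the usable randomization window to 2 GB.
  */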
Cc: # v4.19+ Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/kaslr.c | 6 +++--- arch/arm64/kernel/module.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index b09b6f75f759..06941c1fe418 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys) if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { /* - * Randomize the module region over a 4 GB window covering the + * Randomize the module region over a 2 GB window covering the * kernel. This reduces the risk of modules leaking information * about the address of the kernel itself, but results in * branches between modules and the core kernel that are * resolved via PLTs. (Branches between modules will be * resolved normally.) */ - module_range = SZ_4G - (u64)(_end - _stext); - module_alloc_base = max((u64)_end + offset - SZ_4G, + module_range = SZ_2G - (u64)(_end - _stext); + module_alloc_base = max((u64)_end + offset - SZ_2G, (u64)MODULES_VADDR); } else { /* diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index f713e2fc4d75..1e418e69b58c 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -56,7 +56,7 @@ void *module_alloc(unsigned long size) * can simply omit this fallback in that case. */ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, - module_alloc_base + SZ_4G, GFP_KERNEL, + module_alloc_base + SZ_2G, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); -- cgit v1.2.3-59-g8ed1b From 1cf24a2cc3fd40942b0f9e6199aaec579e89a832 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 23 May 2019 11:38:54 +0100 Subject: arm64/module: deal with ambiguity in PRELxx relocation ranges The R_AARCH64_PREL16 and R_AARCH64_PREL32 relocations are documented as permitting a range of [-2^15 .. 2^16), resp. [-2^31 .. 2^32). It is also documented that this means we cannot detect overflow in some cases, which is bad. Since we always interpret the targets of these relocations as signed quantities (e.g., in the ksymtab handling code), let's tighten the overflow checks so that targets that are out of range for our signed interpretation of the relocated quantity get flagged. Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/module.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 1e418e69b58c..f32359cffb01 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -96,15 +96,27 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) { s64 sval = do_reloc(op, place, val); + /* + * The ELF psABI for AArch64 documents the 16-bit and 32-bit place + * relative relocations as having a range of [-2^15, 2^16) or + * [-2^31, 2^32), respectively. However, in order to be able to detect + * overflows reliably, we have to choose whether we interpret such + * quantities as signed or as unsigned, and stick with it. + * The way we organize our address space requires a signed + * interpretation of 32-bit relative references, so let's use that + * for all R_AARCH64_PRELxx relocations. This means our upper + * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX. 
+ */
+
 switch (len) {
 case 16:
 *(s16 *)place = sval;
- if (sval < S16_MIN || sval > U16_MAX)
+ if (sval < S16_MIN || sval > S16_MAX)
 return -ERANGE;
 break;
 case 32:
 *(s32 *)place = sval;
- if (sval < S32_MIN || sval > U32_MAX)
+ if (sval < S32_MIN || sval > S32_MAX)
 return -ERANGE;
 break;
 case 64:
-- cgit v1.2.3-59-g8ed1b

From a5325089bd05a7b0259cc4038479d36308edbda2 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 23 May 2019 11:24:50 +0100
Subject: arm64: Handle erratum 1418040 as a superset of erratum 1188873

We already mitigate erratum 1188873 affecting Cortex-A76 and
Neoverse-N1 r0p0 to r2p0. It turns out that revisions r0p0 to r3p1 of
the same cores are affected by erratum 1418040, which has the same
workaround as 1188873.

Let's expand the range of affected revisions to match 1418040, and
repaint all occurrences of 1188873 to 1418040. Whilst we're there, do a
bit of reformatting in silicon-errata.txt and drop a now unnecessary
dependency on ARM_ARCH_TIMER_OOL_WORKAROUND.

Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 Documentation/arm64/silicon-errata.txt | 8 ++++----
 arch/arm64/Kconfig | 7 +++----
 arch/arm64/include/asm/cpucaps.h | 2 +-
 arch/arm64/kernel/cpu_errata.c | 24 ++++++++++++++----------
 arch/arm64/kernel/entry.S | 4 ++--
 5 files changed, 24 insertions(+), 21 deletions(-)
(limited to 'arch')

diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index b29a32805ad0..2735462d5958 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -58,14 +58,14 @@ stable kernels.
 | ARM | Cortex-A72 | #853709 | N/A |
 | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
 | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
-| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+| ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 |
 | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
 | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
-| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
 | ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
-| ARM | MMU-500 | #841119,#826419 | N/A |
+| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+| ARM | MMU-500 | #841119,826419 | N/A |
 | | | | |
-| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+| Cavium | ThunderX ITS | #22375,24313 | CAVIUM_ERRATUM_22375 |
 | Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
 | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
 | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d99f492869b..6a9544606da3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -475,16 +475,15 @@ config ARM64_ERRATUM_1024718
 If unsure, say Y.
-config ARM64_ERRATUM_1188873
+config ARM64_ERRATUM_1418040
 bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
 default y
 depends on COMPAT
- select ARM_ARCH_TIMER_OOL_WORKAROUND
 help
 This option adds a workaround for ARM Cortex-A76/Neoverse-N1
- erratum 1188873.
+ errata 1188873 and 1418040.
- Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
+ Affected Cortex-A76/Neoverse-N1 cores (r0p0 to r3p1) could
 cause register corruption when accessing the timer registers
 from AArch32 userspace.
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 73faee64e498..33401ebc187c 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -53,7 +53,7 @@ #define ARM64_HAS_STAGE2_FWB 32 #define ARM64_HAS_CRC32 33 #define ARM64_SSBS 34 -#define ARM64_WORKAROUND_1188873 35 +#define ARM64_WORKAROUND_1418040 35 #define ARM64_HAS_SB 36 #define ARM64_WORKAROUND_1165522 37 #define ARM64_HAS_ADDRESS_AUTH_ARCH 38 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ac6432bfc1e4..d61beedba101 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -698,12 +698,16 @@ static const struct midr_range workaround_clean_cache[] = { }; #endif -#ifdef CONFIG_ARM64_ERRATUM_1188873 -static const struct midr_range erratum_1188873_list[] = { - /* Cortex-A76 r0p0 to r2p0 */ - MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), - /* Neoverse-N1 r0p0 to r2p0 */ - MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0), +#ifdef CONFIG_ARM64_ERRATUM_1418040 +/* + * - 1188873 affects r0p0 to r2p0 + * - 1418040 affects r0p0 to r3p1 + */ +static const struct midr_range erratum_1418040_list[] = { + /* Cortex-A76 r0p0 to r3p1 */ + MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1), + /* Neoverse-N1 r0p0 to r3p1 */ + MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1), {}, }; #endif @@ -825,11 +829,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .matches = has_ssbd_mitigation, .midr_range_list = arm64_ssb_cpus, }, -#ifdef CONFIG_ARM64_ERRATUM_1188873 +#ifdef CONFIG_ARM64_ERRATUM_1418040 { - .desc = "ARM erratum 1188873", - .capability = ARM64_WORKAROUND_1188873, - ERRATA_MIDR_RANGE_LIST(erratum_1188873_list), + .desc = "ARM erratum 1418040", + .capability = ARM64_WORKAROUND_1418040, + ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), }, #endif #ifdef CONFIG_ARM64_ERRATUM_1165522 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 1a7811b7e3c4..cd0c7af8e4a8 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -336,8 +336,8 @@ alternative_if ARM64_WORKAROUND_845719 alternative_else_nop_endif #endif 3: -#ifdef CONFIG_ARM64_ERRATUM_1188873 -alternative_if_not ARM64_WORKAROUND_1188873 +#ifdef CONFIG_ARM64_ERRATUM_1418040 +alternative_if_not ARM64_WORKAROUND_1418040 b 4f alternative_else_nop_endif /* -- cgit v1.2.3-59-g8ed1b From 96a13f57b946be7a6c10405e4bd780c0b6b6fe63 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 24 May 2019 14:15:34 +0100 Subject: arm64: Kconfig: Make ARM64_PSEUDO_NMI depend on BROKEN for now Although we merged support for pseudo-nmi using interrupt priority masking in 5.1, we've since uncovered a number of non-trivial issues with the implementation. Although there are patches pending to address these problems, we're facing issues that prevent us from merging them at this current time: https://lkml.kernel.org/r/1556553607-46531-1-git-send-email-julien.thierry@arm.com For now, simply mark this optional feature as BROKEN in the hope that we can fix things properly in the near future. 
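For context, BROKEN is an existing promptless symbol defined in
init/Kconfig that nothing selects in a normal configuration, so gating
an option on it hides the option from menuconfig/oldconfig entirely; a
minimal sketch of the mechanism (not the full option as it appears in
the tree):

 config BROKEN
 	bool

 config ARM64_PSEUDO_NMI
 	bool "Support for NMI-like interrupts"
 	depends on BROKEN	# never satisfiable, so the prompt never appears

The code stays in the tree, but the option cannot be enabled until the
dependency is dropped again.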
Cc: # 5.1
Cc: Julien Thierry
Acked-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6a9544606da3..f6275c265d41 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1422,6 +1422,7 @@ config ARM64_MODULE_PLTS
 config ARM64_PSEUDO_NMI
 bool "Support for NMI-like interrupts"
+ depends on BROKEN # 1556553607-46531-1-git-send-email-julien.thierry@arm.com
 select CONFIG_ARM_GIC_V3
 help
 Adds support for mimicking Non-Maskable Interrupts through the use of
-- cgit v1.2.3-59-g8ed1b

From c5e2edeb01ae9ffbdde95bdcdb6d3614ba1eb195 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Fri, 24 May 2019 13:52:19 +0100
Subject: arm64: insn: Fix ldadd instruction encoding

GCC 8.1.0 reports that the ldadd instruction encoding, recently added
to insn.c, doesn't match the mask and couldn't possibly be identified:

 linux/arch/arm64/include/asm/insn.h: In function 'aarch64_insn_is_ldadd':
 linux/arch/arm64/include/asm/insn.h:280:257: warning: bitwise comparison always evaluates to false [-Wtautological-compare]

Bits [31:30] normally encode the size of the instruction (1 to 8 bytes)
and the current instruction value only encodes the 4- and 8-byte
variants. At the moment only the BPF JIT needs this instruction, and
doesn't require the 1- and 2-byte variants, but to be consistent with
our other ldr and str instruction encodings, clear the size field in
the insn value.

Fixes: 34b8ab091f9ef57a ("bpf, arm64: use more scalable stadd over ldxr / stxr loop in xadd")
Acked-by: Daniel Borkmann
Reported-by: Kuninori Morimoto
Signed-off-by: Yoshihiro Shimoda
Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/insn.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index ec894de0ed4e..f71b84d9f294 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -277,7 +277,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
 __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
 __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
 __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
-__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0xB8200000)
+__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
 __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
 __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
 __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
-- cgit v1.2.3-59-g8ed1b

From edbcf50eb8aea5f81ae6d83bb969cb0bc02805a1 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Fri, 24 May 2019 13:52:20 +0100
Subject: arm64: insn: Add BUILD_BUG_ON() for invalid masks

Detect invalid instruction masks at build time. Some versions of GCC
can warn about the situation, but not all of them, it seems.

Suggested-by: Will Deacon
Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/insn.h | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
(limited to 'arch')

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f71b84d9f294..87fdfba13a30 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -18,6 +18,7 @@
 */
 #ifndef __ASM_INSN_H
 #define __ASM_INSN_H
+#include <linux/build_bug.h>
 #include <linux/types.h>
 /* A64 instructions are always 32 bits.
*/ @@ -266,11 +267,16 @@ enum aarch64_insn_adr_type { AARCH64_INSN_ADR_TYPE_ADR, }; -#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ -static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ -{ return (code & (mask)) == (val); } \ -static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ -{ return (val); } +#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ +static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ +{ \ + BUILD_BUG_ON(~(mask) & (val)); \ + return (code & (mask)) == (val); \ +} \ +static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ +{ \ + return (val); \ +} __AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000) __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000) -- cgit v1.2.3-59-g8ed1b
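As a worked example of what the new BUILD_BUG_ON() catches, consider the
ldadd encoding fixed earlier in this series (a sketch, not code from the
patch; in practice the check runs inside the generated
aarch64_insn_is_ldadd() helper):

 /* Old, buggy encoding: value 0xB8200000 has bit 31 set, but mask
  * 0x3F20FC00 clears it, so (code & mask) == val can never be true.
  */
 BUILD_BUG_ON(~0x3F20FC00 & 0xB8200000);	/* == 0x80000000: build fails */

 /* Fixed encoding: every value bit lies inside the mask. */
 BUILD_BUG_ON(~0x3F20FC00 & 0x38200000);	/* == 0: compiles fine */

This turns GCC's unreliable -Wtautological-compare warning into a hard
build failure the first time an invalid mask/value pair is compiled.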