From b36506787cf19a3eb1116d5dcdef2e2ad5b2933a Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Fri, 18 Jan 2019 14:02:27 +0000 Subject: arm64: perf: remove misleading comment The comment for the armv8pmu_set_event_filter function suggests that it only works for PMUv2 PMUs - this is incorrect. Let's remove the incorrect comment. Acked-by: Mark Rutland Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1620a371b1f5..4addb38bc250 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -810,7 +810,7 @@ static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, } /* - * Add an event filter to a given event. This will only work for PMUv2 PMUs. + * Add an event filter to a given event. */ static int armv8pmu_set_event_filter(struct hw_perf_event *event, struct perf_event_attr *attr) -- cgit v1.2.3-59-g8ed1b From 846a415bf4408ccd38d7c0b2a036249737a5ee56 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 14 Jan 2019 11:41:25 +0000 Subject: arm64: default NR_CPUS to 256 There are shipping arm64 platforms with 256 hardware threads. So that we can make use of these with defconfig, bump the arm64 default NR_CPUS to 256. At the same time, drop a redundant comment. We only have one default for NR_CPUS, so there's nothing to sort. Acked-by: Will Deacon Signed-off-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a4168d366127..4cad67b9ec0a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -792,8 +792,7 @@ config SCHED_SMT config NR_CPUS int "Maximum number of CPUs (2-4096)" range 2 4096 - # These have to remain sorted largest to smallest - default "64" + default "256" config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" -- cgit v1.2.3-59-g8ed1b From 83a680dd97ad2d1ed7a6355aa5baddbc0c8ae2ae Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Fri, 18 Jan 2019 13:52:42 +0000 Subject: arm64: asm-offsets: remove unused offsets There are a number of offsets defined in asm-offsets.c which no longer have any users. Let's clean this up by removing them. All the remaining offsets are in use. 
Acked-by: Mark Rutland Signed-off-by: Andrew Murray Signed-off-by: Catalin Marinas --- arch/arm64/kernel/asm-offsets.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 65b8afc84466..0552b91d7666 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -53,13 +53,9 @@ int main(void) DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); - DEFINE(S_X1, offsetof(struct pt_regs, regs[1])); DEFINE(S_X2, offsetof(struct pt_regs, regs[2])); - DEFINE(S_X3, offsetof(struct pt_regs, regs[3])); DEFINE(S_X4, offsetof(struct pt_regs, regs[4])); - DEFINE(S_X5, offsetof(struct pt_regs, regs[5])); DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); - DEFINE(S_X7, offsetof(struct pt_regs, regs[7])); DEFINE(S_X8, offsetof(struct pt_regs, regs[8])); DEFINE(S_X10, offsetof(struct pt_regs, regs[10])); DEFINE(S_X12, offsetof(struct pt_regs, regs[12])); @@ -73,12 +69,8 @@ int main(void) DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); -#ifdef CONFIG_COMPAT - DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp)); -#endif DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PC, offsetof(struct pt_regs, pc)); - DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); @@ -93,7 +85,6 @@ int main(void) BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); - DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); BLANK(); @@ -110,25 +101,18 @@ int main(void) BLANK(); DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last)); DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec)); - DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec)); DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec)); - DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec)); - DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec)); DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count)); DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult)); - DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult)); DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift)); DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall)); BLANK(); DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec)); DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec)); BLANK(); DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); @@ -142,13 +126,9 @@ int main(void) DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); 
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); - DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); - DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); - DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); #endif #ifdef CONFIG_CPU_PM - DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); -- cgit v1.2.3-59-g8ed1b From 83504032e6ddcc8b0942aa24dfad5db849090c9f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Jan 2019 14:22:24 +0000 Subject: arm64: Remove asm/memblock.h The arm64 asm/memblock.h header exists only to provide a function prototype for arm64_memblock_init(), which is called only from setup_arch(). Move the declaration into mmu.h, where it can live alongside other init functions such as paging_init() and bootmem_init() without the need for its own special header file. Acked-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/memblock.h | 21 --------------------- arch/arm64/include/asm/mmu.h | 1 + arch/arm64/kernel/setup.c | 1 - arch/arm64/mm/mmu.c | 1 - 4 files changed, 1 insertion(+), 23 deletions(-) delete mode 100644 arch/arm64/include/asm/memblock.h (limited to 'arch') diff --git a/arch/arm64/include/asm/memblock.h b/arch/arm64/include/asm/memblock.h deleted file mode 100644 index 6afeed2467f1..000000000000 --- a/arch/arm64/include/asm/memblock.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -#ifndef __ASM_MEMBLOCK_H -#define __ASM_MEMBLOCK_H - -extern void arm64_memblock_init(void); - -#endif diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 3e8063f4f9d3..67ef25d037ea 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -129,6 +129,7 @@ static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) static inline void arm64_apply_bp_hardening(void) { } #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ +extern void arm64_memblock_init(void); extern void paging_init(void); extern void bootmem_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..71f5fbb12608 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b6f5aa52ac67..d6b6f1b169bb 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3-59-g8ed1b From a2c801c53d1682871fba1e037c9d3b0c9fffee8a Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 9 Jan 2019 13:21:00 -0700 Subject: arm64: mm: make use of new memblocks_present() helper Clean up the arm64_memory_present() function, since it is very similar to those of other arches. memblocks_present() is a direct replacement of arm64_memory_present(). Acked-by: Will Deacon Acked-by: Catalin Marinas Signed-off-by: Logan Gunthorpe Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7205a9085b4d..2302b4093a63 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -285,24 +285,6 @@ int pfn_valid(unsigned long pfn) } EXPORT_SYMBOL(pfn_valid); -#ifndef CONFIG_SPARSEMEM -static void __init arm64_memory_present(void) -{ -} -#else -static void __init arm64_memory_present(void) -{ - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - int nid = memblock_get_region_node(reg); - - memory_present(nid, memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } -} -#endif - static phys_addr_t memory_limit = PHYS_ADDR_MAX; /* @@ -489,7 +471,7 @@ void __init bootmem_init(void) * Sparsemem tries to allocate bootmem in memory_present(), so must be * done after the fixed reservations. */ - arm64_memory_present(); + memblocks_present(); sparse_init(); zone_sizes_init(min, max); -- cgit v1.2.3-59-g8ed1b From 13e4cdd785867a632516f9cb5dfbe5ba20822820 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Tue, 15 Jan 2019 13:58:26 +0000 Subject: arm64: uaccess: Cleanup get/put_user() The __get/put_user_check() macros are made to return a value, but this is never used. Get rid of them and just use __get/put_user_error() directly as a statement, reducing macro indirection. Also, take this opportunity to rename __get/put_user_err(), as it gets a bit confusing having them alongside __get/put_user_error(). 
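To illustrate the new shape, here is a minimal sketch of a hypothetical caller (uptr is assumed to be a u32 __user pointer; the snippet is illustrative, not taken from the patch). __get_user_error() is now used as a plain statement rather than an expression:

	u32 val;
	int err = 0;

	__get_user_error(val, uptr, err);	/* a statement now, not an expression */
	if (err)
		return err;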
Signed-off-by: Julien Thierry Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 547d7a0c9d05..8e408084b8c3 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -268,7 +268,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) -#define __get_user_err(x, ptr, err) \ +#define __raw_get_user(x, ptr, err) \ do { \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ @@ -297,28 +297,22 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_check(x, ptr, err) \ -({ \ +#define __get_user_error(x, ptr, err) \ +do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ - __get_user_err((x), __p, (err)); \ + __raw_get_user((x), __p, (err)); \ } else { \ (x) = 0; (err) = -EFAULT; \ } \ -}) - -#define __get_user_error(x, ptr, err) \ -({ \ - __get_user_check((x), (ptr), (err)); \ - (void)0; \ -}) +} while (0) #define __get_user(x, ptr) \ ({ \ int __gu_err = 0; \ - __get_user_check((x), (ptr), __gu_err); \ + __get_user_error((x), (ptr), __gu_err); \ __gu_err; \ }) @@ -338,7 +332,7 @@ do { \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __put_user_err(x, ptr, err) \ +#define __raw_put_user(x, ptr, err) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ @@ -366,28 +360,22 @@ do { \ uaccess_disable_not_uao(); \ } while (0) -#define __put_user_check(x, ptr, err) \ -({ \ +#define __put_user_error(x, ptr, err) \ +do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ - __put_user_err((x), __p, (err)); \ + __raw_put_user((x), __p, (err)); \ } else { \ (err) = -EFAULT; \ } \ -}) - -#define __put_user_error(x, ptr, err) \ -({ \ - __put_user_check((x), (ptr), (err)); \ - (void)0; \ -}) +} while (0) #define __put_user(x, ptr) \ ({ \ int __pu_err = 0; \ - __put_user_check((x), (ptr), __pu_err); \ + __put_user_error((x), (ptr), __pu_err); \ __pu_err; \ }) -- cgit v1.2.3-59-g8ed1b From 0bd3ef34d2a8dd4056560567073d8bfc5da92e39 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Tue, 15 Jan 2019 13:58:27 +0000 Subject: arm64: uaccess: Implement unsafe accessors The current implementation of get/put_user_unsafe defaults to get/put_user, which toggles PAN before each access, despite having been told by the caller that multiple accesses to user memory were about to happen. Provide implementations for user_access_begin/end to turn PAN off/on and implement unsafe accessors that assume PAN was already turned off. 
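As an illustration of the intended usage pattern, here is a hypothetical caller (read_pair and its arguments are invented for this sketch) that keeps PAN disabled across several accesses via the macros introduced below:

	int read_pair(u32 __user *uptr, u32 *a, u32 *b)
	{
		if (!user_access_begin(uptr, 2 * sizeof(u32)))
			return -EFAULT;

		unsafe_get_user(*a, &uptr[0], efault);	/* jumps to efault on fault */
		unsafe_get_user(*b, &uptr[1], efault);	/* PAN stays off in between */

		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}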
Signed-off-by: Julien Thierry Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 79 ++++++++++++++++++++++++++++++---------- 1 file changed, 59 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 8e408084b8c3..6a70c75ed9f4 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -270,31 +270,26 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) #define __raw_get_user(x, ptr, err) \ do { \ - unsigned long __gu_val; \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ + __get_user_asm("ldrb", "ldtrb", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ + __get_user_asm("ldrh", "ldtrh", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ + __get_user_asm("ldr", "ldtr", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ + __get_user_asm("ldr", "ldtr", "%x", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ - uaccess_disable_not_uao(); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) #define __get_user_error(x, ptr, err) \ @@ -302,8 +297,13 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ + unsigned long __gu_val; \ + __chk_user_ptr(__p); \ __p = uaccess_mask_ptr(__p); \ - __raw_get_user((x), __p, (err)); \ + uaccess_enable_not_uao(); \ + __raw_get_user(__gu_val, __p, (err)); \ + uaccess_disable_not_uao(); \ + (x) = (__force __typeof__(*__p)) __gu_val; \ } else { \ (x) = 0; (err) = -EFAULT; \ } \ @@ -334,30 +334,26 @@ do { \ #define __raw_put_user(x, ptr, err) \ do { \ - __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ + __put_user_asm("strb", "sttrb", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ + __put_user_asm("strh", "sttrh", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ + __put_user_asm("str", "sttr", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ + __put_user_asm("str", "sttr", "%x", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ - uaccess_disable_not_uao(); \ } while (0) #define __put_user_error(x, ptr, err) \ @@ -365,9 +361,13 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ + __typeof__(*(ptr)) __pu_val = (x); \ + __chk_user_ptr(__p); \ __p = uaccess_mask_ptr(__p); \ - __raw_put_user((x), __p, (err)); \ - } else { \ + uaccess_enable_not_uao(); \ + __raw_put_user(__pu_val, __p, (err)); \ + uaccess_disable_not_uao(); \ + } else { \ (err) = -EFAULT; \ } \ } while (0) @@ -381,6 +381,45 @@ do { \ #define put_user __put_user +static __must_check inline bool user_access_begin(const void __user *ptr, + size_t len) +{ + if (unlikely(!access_ok(ptr, len))) + return false; + + uaccess_enable_not_uao(); + return true; +} +#define user_access_begin(ptr, len) 
user_access_begin(ptr, len) +#define user_access_end() uaccess_disable_not_uao() + +#define unsafe_get_user(x, ptr, err) \ +do { \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + unsigned long __gu_val; \ + int __gu_err = 0; \ + might_fault(); \ + __chk_user_ptr(__p); \ + __p = uaccess_mask_ptr(__p); \ + __raw_get_user(__gu_val, __p, __gu_err); \ + (x) = (__force __typeof__(*__p)) __gu_val; \ + if (__gu_err != 0) \ + goto err; \ +} while (0) + +#define unsafe_put_user(x, ptr, err) \ +do { \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __pu_err = 0; \ + might_fault(); \ + __chk_user_ptr(__p); \ + __p = uaccess_mask_ptr(__p); \ + __raw_put_user(__pu_val, __p, __pu_err); \ + if (__pu_err != 0) \ + goto err; \ +} while (0) + extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); #define raw_copy_from_user(to, from, n) \ ({ \ -- cgit v1.2.3-59-g8ed1b From e2a2e56e40822ab78e304198387f61314af7d7ce Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 22 Jan 2019 15:41:11 +0100 Subject: arm64: dump: no need to check return value of debugfs_create functions When calling debugfs functions, there is no need to ever check the return value. The function can work or not, but the code logic should never do something different based on this. Cc: Will Deacon Cc: Marc Zyngier Cc: Peng Donglin Cc: Signed-off-by: Greg Kroah-Hartman Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/ptdump.h | 9 +++------ arch/arm64/mm/dump.c | 4 ++-- arch/arm64/mm/ptdump_debugfs.c | 7 ++----- 3 files changed, 7 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index 6afd8476c60c..9e948a93d26c 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -34,13 +34,10 @@ struct ptdump_info { void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info); #ifdef CONFIG_ARM64_PTDUMP_DEBUGFS -int ptdump_debugfs_register(struct ptdump_info *info, const char *name); +void ptdump_debugfs_register(struct ptdump_info *info, const char *name); #else -static inline int ptdump_debugfs_register(struct ptdump_info *info, - const char *name) -{ - return 0; -} +static inline void ptdump_debugfs_register(struct ptdump_info *info, + const char *name) { } #endif void ptdump_check_wx(void); #endif /* CONFIG_ARM64_PTDUMP_CORE */ diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index fcb1f2a6d7c6..08c250350b8a 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -407,7 +407,7 @@ void ptdump_check_wx(void) static int ptdump_init(void) { ptdump_initialize(); - return ptdump_debugfs_register(&kernel_ptdump_info, - "kernel_page_tables"); + ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); + return 0; } device_initcall(ptdump_init); diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 24d786fc3a4c..064163f25592 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -12,10 +12,7 @@ static int ptdump_show(struct seq_file *m, void *v) } DEFINE_SHOW_ATTRIBUTE(ptdump); -int ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void ptdump_debugfs_register(struct ptdump_info *info, const char *name) { - struct dentry *pe; - pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); - return pe ? 
0 : -ENOMEM; - + debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } -- cgit v1.2.3-59-g8ed1b From d0a060be573bfbf8753a15dca35497db5e968bb0 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Wed, 30 Jan 2019 12:02:44 +0000 Subject: arm64: add ptrace regsets for ptrauth key management Add two new ptrace regsets, which can be used to request and change the pointer authentication keys of a thread. NT_ARM_PACA_KEYS gives access to the instruction/data address keys, and NT_ARM_PACG_KEYS to the generic authentication key. The keys are also part of the core dump file of the process. The regsets are only exposed if the kernel is compiled with CONFIG_CHECKPOINT_RESTORE=y, as the only intended use case is checkpointing and restoring processes that are using pointer authentication. (This can be changed later if there are other use cases.) Reviewed-by: Dave Martin Signed-off-by: Kristina Martsenko Signed-off-by: Catalin Marinas --- Documentation/arm64/pointer-authentication.txt | 5 + arch/arm64/include/uapi/asm/ptrace.h | 13 +++ arch/arm64/kernel/ptrace.c | 147 +++++++++++++++++++++++++ include/uapi/linux/elf.h | 2 + 4 files changed, 167 insertions(+) (limited to 'arch') diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt index a25cd21290e9..5baca42ba146 100644 --- a/Documentation/arm64/pointer-authentication.txt +++ b/Documentation/arm64/pointer-authentication.txt @@ -78,6 +78,11 @@ bits can vary between the two. Note that the masks apply to TTBR0 addresses, and are not valid to apply to TTBR1 addresses (e.g. kernel pointers). +Additionally, when CONFIG_CHECKPOINT_RESTORE is also set, the kernel +will expose the NT_ARM_PACA_KEYS and NT_ARM_PACG_KEYS regsets (struct +user_pac_address_keys and struct user_pac_generic_keys). These can be +used to get and set the keys for a thread. 
+ Virtualization -------------- diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 28d77c9ed531..d78623acb649 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -233,6 +233,19 @@ struct user_pac_mask { __u64 insn_mask; }; +/* pointer authentication keys (NT_ARM_PACA_KEYS, NT_ARM_PACG_KEYS) */ + +struct user_pac_address_keys { + __uint128_t apiakey; + __uint128_t apibkey; + __uint128_t apdakey; + __uint128_t apdbkey; +}; + +struct user_pac_generic_keys { + __uint128_t apgakey; +}; + #endif /* __ASSEMBLY__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9dce33b0e260..a86413be5a2d 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -979,6 +979,131 @@ static int pac_mask_get(struct task_struct *target, return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1); } + +#ifdef CONFIG_CHECKPOINT_RESTORE +static __uint128_t pac_key_to_user(const struct ptrauth_key *key) +{ + return (__uint128_t)key->hi << 64 | key->lo; +} + +static struct ptrauth_key pac_key_from_user(__uint128_t ukey) +{ + struct ptrauth_key key = { + .lo = (unsigned long)ukey, + .hi = (unsigned long)(ukey >> 64), + }; + + return key; +} + +static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, + const struct ptrauth_keys *keys) +{ + ukeys->apiakey = pac_key_to_user(&keys->apia); + ukeys->apibkey = pac_key_to_user(&keys->apib); + ukeys->apdakey = pac_key_to_user(&keys->apda); + ukeys->apdbkey = pac_key_to_user(&keys->apdb); +} + +static void pac_address_keys_from_user(struct ptrauth_keys *keys, + const struct user_pac_address_keys *ukeys) +{ + keys->apia = pac_key_from_user(ukeys->apiakey); + keys->apib = pac_key_from_user(ukeys->apibkey); + keys->apda = pac_key_from_user(ukeys->apdakey); + keys->apdb = pac_key_from_user(ukeys->apdbkey); +} + +static int pac_address_keys_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_address_keys user_keys; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); +} + +static int pac_address_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_address_keys user_keys; + int ret; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_address_keys_from_user(keys, &user_keys); + + return 0; +} + +static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, + const struct ptrauth_keys *keys) +{ + ukeys->apgakey = pac_key_to_user(&keys->apga); +} + +static void pac_generic_keys_from_user(struct ptrauth_keys *keys, + const struct user_pac_generic_keys *ukeys) +{ + keys->apga = pac_key_from_user(ukeys->apgakey); +} + +static int pac_generic_keys_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct 
user_pac_generic_keys user_keys; + + if (!system_supports_generic_auth()) + return -EINVAL; + + pac_generic_keys_to_user(&user_keys, keys); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); +} + +static int pac_generic_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_generic_keys user_keys; + int ret; + + if (!system_supports_generic_auth()) + return -EINVAL; + + pac_generic_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_generic_keys_from_user(keys, &user_keys); + + return 0; +} +#endif /* CONFIG_CHECKPOINT_RESTORE */ #endif /* CONFIG_ARM64_PTR_AUTH */ enum aarch64_regset { @@ -995,6 +1120,10 @@ enum aarch64_regset { #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, +#ifdef CONFIG_CHECKPOINT_RESTORE + REGSET_PACA_KEYS, + REGSET_PACG_KEYS, +#endif #endif }; @@ -1074,6 +1203,24 @@ static const struct user_regset aarch64_regsets[] = { .get = pac_mask_get, /* this cannot be set dynamically */ }, +#ifdef CONFIG_CHECKPOINT_RESTORE + [REGSET_PACA_KEYS] = { + .core_note_type = NT_ARM_PACA_KEYS, + .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .get = pac_address_keys_get, + .set = pac_address_keys_set, + }, + [REGSET_PACG_KEYS] = { + .core_note_type = NT_ARM_PACG_KEYS, + .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .get = pac_generic_keys_get, + .set = pac_generic_keys_set, + }, +#endif #endif }; diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index e4d6ddd93567..34c02e4290fe 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -421,6 +421,8 @@ typedef struct elf64_shdr { #define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ #define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */ #define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */ +#define NT_ARM_PACA_KEYS 0x407 /* ARM pointer authentication address keys */ +#define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ -- cgit v1.2.3-59-g8ed1b From 8aa67d18a466f338f79bac99f44e7891f4c71bbb Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Thu, 31 Jan 2019 18:23:37 +0000 Subject: arm64: entry: Remove unneeded need_resched() loop Since the enabling and disabling of IRQs within preempt_schedule_irq() is contained in a need_resched() loop, we don't need the outer arch code loop. 
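For reference, the loop inside the generic helper looks roughly like this (trimmed sketch of kernel/sched/core.c from this era, not verbatim):

	do {
		preempt_disable();
		local_irq_enable();
		__schedule(true);	/* preempt = true */
		local_irq_disable();
		sched_preempt_enable_no_resched();
	} while (need_resched());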
Reported-by: Julien Thierry Reported-by: Will Deacon Reviewed-by: Julien Thierry Acked-by: Will Deacon Signed-off-by: Valentin Schneider Cc: Mark Rutland Cc: Marc Zyngier Cc: Julien Grall Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 0ec0c46b2c0c..4d0c81f29a60 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -611,7 +611,7 @@ el1_irq: #ifdef CONFIG_PREEMPT ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count cbnz x24, 1f // preempt count != 0 - bl el1_preempt + bl preempt_schedule_irq // irq en/disable is done inside 1: #endif #ifdef CONFIG_TRACE_IRQFLAGS @@ -620,15 +620,6 @@ el1_irq: kernel_exit 1 ENDPROC(el1_irq) -#ifdef CONFIG_PREEMPT -el1_preempt: - mov x24, lr -1: bl preempt_schedule_irq // irq en/disable is done inside - ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS - tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? - ret x24 -#endif - /* * EL0 mode handlers. */ -- cgit v1.2.3-59-g8ed1b From 5870970b9a828d8693aa6d15742573289d7dbcd0 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:39 +0000 Subject: arm64: Fix HCR.TGE status for NMI contexts When using VHE, the host needs to clear the HCR_EL2.TGE bit in order to interact with guest TLBs, switching from the EL2&0 translation regime to EL1&0. However, a non-maskable asynchronous event, such as SDEI, could happen while TGE is cleared. Because of this, address translation operations relying on the EL2&0 translation regime could fail (TLB invalidation, userspace access, ...). Fix this by properly setting HCR_EL2.TGE when entering NMI context and clearing it if necessary when returning to the interrupted context. 
Signed-off-by: Julien Thierry Suggested-by: Marc Zyngier Reviewed-by: Marc Zyngier Reviewed-by: James Morse Cc: Arnd Bergmann Cc: Will Deacon Cc: Marc Zyngier Cc: James Morse Cc: linux-arch@vger.kernel.org Cc: stable@vger.kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hardirq.h | 31 +++++++++++++++++++++++++++++++ arch/arm64/kernel/irq.c | 3 +++ include/linux/hardirq.h | 7 +++++++ 3 files changed, 41 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 1473fc2f7ab7..89691c86640a 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -17,8 +17,12 @@ #define __ASM_HARDIRQ_H #include +#include #include +#include #include +#include +#include #define NR_IPI 7 @@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu); #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 +struct nmi_ctx { + u64 hcr; +}; + +DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts); + +#define arch_nmi_enter() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + nmi_ctx->hcr = read_sysreg(hcr_el2); \ + if (!(nmi_ctx->hcr & HCR_TGE)) { \ + write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \ + isb(); \ + } \ + } \ + } while (0) + +#define arch_nmi_exit() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + if (!(nmi_ctx->hcr & HCR_TGE)) \ + write_sysreg(nmi_ctx->hcr, hcr_el2); \ + } \ + } while (0) + static inline void ack_bad_irq(unsigned int irq) { extern unsigned long irq_err_count; diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 780a12f59a8f..92fa81798fb9 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -33,6 +33,9 @@ unsigned long irq_err_count; +/* Only access this in an NMI enter/exit */ +DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts); + DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); int arch_show_interrupts(struct seq_file *p, int prec) diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 0fbbcdf0c178..da0af631ded5 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -60,8 +60,14 @@ extern void irq_enter(void); */ extern void irq_exit(void); +#ifndef arch_nmi_enter +#define arch_nmi_enter() do { } while (0) +#define arch_nmi_exit() do { } while (0) +#endif + #define nmi_enter() \ do { \ + arch_nmi_enter(); \ printk_nmi_enter(); \ lockdep_off(); \ ftrace_nmi_enter(); \ @@ -80,6 +86,7 @@ extern void irq_exit(void); ftrace_nmi_exit(); \ lockdep_on(); \ printk_nmi_exit(); \ + arch_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ -- cgit v1.2.3-59-g8ed1b From a82785a953e03444fe38616aed4d27b01da79a97 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:40 +0000 Subject: arm64: Remove unused daif related functions/macros There are some helpers to modify PSR.[DAIF] bits that are not referenced anywhere. The less these bits are available outside of local_irq_* functions the better. Get rid of those unused helpers. 
Signed-off-by: Julien Thierry Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: James Morse Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 10 +--------- arch/arm64/include/asm/daifflags.h | 10 ---------- 2 files changed, 1 insertion(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 4feb6119c3c9..7acf2436b578 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -62,16 +62,8 @@ .endm /* - * Enable and disable interrupts. + * Save/restore interrupts. */ - .macro disable_irq - msr daifset, #2 - .endm - - .macro enable_irq - msr daifclr, #2 - .endm - .macro save_and_disable_irq, flags mrs \flags, daif msr daifset, #2 diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 8d91f2233135..546bc398553e 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -43,16 +43,6 @@ static inline unsigned long local_daif_save(void) return flags; } -static inline void local_daif_unmask(void) -{ - trace_hardirqs_on(); - asm volatile( - "msr daifclr, #0xf // local_daif_unmask" - : - : - : "memory"); -} - static inline void local_daif_restore(unsigned long flags) { if (!arch_irqs_disabled_flags(flags)) -- cgit v1.2.3-59-g8ed1b From c9bfdf734d4c0e7dc25f39dd636ba1952994865a Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:41 +0000 Subject: arm64: cpufeature: Set SYSREG_GIC_CPUIF as a boot system feature It is not supported to have some CPUs using the GICv3 sysreg CPU interface while others do not. Once ICC_SRE_EL1.SRE is set on a CPU, the bit cannot be cleared. Since matching this feature requires setting ICC_SRE_EL1.SRE, it cannot be turned off if found on a CPU. Set the feature as STRICT_BOOT: if the boot CPU has it, all other CPUs are required to have it. Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Reviewed-by: Suzuki K Poulose Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: Suzuki K Poulose Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f6d84e2c92fe..b9c0adf71a54 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1207,7 +1207,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, .field_pos = ID_AA64PFR0_GIC_SHIFT, -- cgit v1.2.3-59-g8ed1b From b90d2b22afdc7ce150a9ee7a8d82378bcfc395a5 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:42 +0000 Subject: arm64: cpufeature: Add cpufeature for IRQ priority masking Add a cpufeature indicating whether a CPU supports masking interrupts by priority. The feature will be properly enabled in a later patch. 
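A sketch of how later patches in this series are expected to consume the capability (gic_write_pmr() and GIC_PRIO_IRQOFF only appear further down the series and are assumed here for illustration):

	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);		/* mask IRQs by priority */
	else
		asm volatile("msr daifset, #2");	/* classic PSTATE.I masking */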
Signed-off-by: Julien Thierry Reviewed-by: Suzuki K Poulose Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: Marc Zyngier Cc: Suzuki K Poulose Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/include/asm/cpufeature.h | 6 ++++++ arch/arm64/kernel/cpufeature.c | 23 +++++++++++++++++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 82e9099834ae..f6a76e43f39e 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -60,7 +60,8 @@ #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39 #define ARM64_HAS_GENERIC_AUTH_ARCH 40 #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41 +#define ARM64_HAS_IRQ_PRIO_MASKING 42 -#define ARM64_NCAPS 42 +#define ARM64_NCAPS 43 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index dfcfba725d72..89c3f318f6be 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -612,6 +612,12 @@ static inline bool system_supports_generic_auth(void) cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); } +static inline bool system_uses_irq_prio_masking(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); +} + #define ARM64_SSBD_UNKNOWN -1 #define ARM64_SSBD_FORCE_DISABLE 0 #define ARM64_SSBD_KERNEL 1 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index b9c0adf71a54..6f56e0ab63a1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1203,6 +1203,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) } #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PSEUDO_NMI +static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, + int scope) +{ + return false; +} +#endif + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -1480,6 +1488,21 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, }, #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PSEUDO_NMI + { + /* + * Depends on having GICv3 + */ + .desc = "IRQ priority masking", + .capability = ARM64_HAS_IRQ_PRIO_MASKING, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = can_use_gic_priorities, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .field_pos = ID_AA64PFR0_GIC_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = 1, + }, +#endif {}, }; -- cgit v1.2.3-59-g8ed1b From e99da7c6f51b487280406d8dc31cc7532cfb2017 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:43 +0000 Subject: arm/arm64: gic-v3: Add PMR and RPR accessors Add helper functions to access system registers related to interrupt priorities: PMR and RPR. 
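A minimal sketch of the save/modify/restore pattern these accessors enable (GIC_PRIO_IRQON is only introduced later in this series; the shape matches the cpuidle change further down):

	unsigned long pmr = gic_read_pmr();

	gic_write_pmr(GIC_PRIO_IRQON);	/* unmask all priorities */
	/* ... window where any interrupt priority can be signalled ... */
	gic_write_pmr(pmr);		/* restore the previous mask */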
Signed-off-by: Julien Thierry Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Reviewed-by: Marc Zyngier Cc: Russell King Cc: Will Deacon Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm/include/asm/arch_gicv3.h | 16 ++++++++++++++++ arch/arm64/include/asm/arch_gicv3.h | 15 +++++++++++++++ 2 files changed, 31 insertions(+) (limited to 'arch') diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index 0bd530702118..bef0b5de8dca 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -34,6 +34,7 @@ #define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5) #define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7) #define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3) +#define ICC_RPR __ACCESS_CP15(c12, 0, c11, 3) #define __ICC_AP0Rx(x) __ACCESS_CP15(c12, 0, c8, 4 | x) #define ICC_AP0R0 __ICC_AP0Rx(0) @@ -245,6 +246,21 @@ static inline void gic_write_bpr1(u32 val) write_sysreg(val, ICC_BPR1); } +static inline u32 gic_read_pmr(void) +{ + return read_sysreg(ICC_PMR); +} + +static inline void gic_write_pmr(u32 val) +{ + write_sysreg(val, ICC_PMR); +} + +static inline u32 gic_read_rpr(void) +{ + return read_sysreg(ICC_RPR); +} + /* * Even in 32bit systems that use LPAE, there is no guarantee that the I/O * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index e278f94df0c9..37193e224a50 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -114,6 +114,21 @@ static inline void gic_write_bpr1(u32 val) write_sysreg_s(val, SYS_ICC_BPR1_EL1); } +static inline u32 gic_read_pmr(void) +{ + return read_sysreg_s(SYS_ICC_PMR_EL1); +} + +static inline void gic_write_pmr(u32 val) +{ + write_sysreg_s(val, SYS_ICC_PMR_EL1); +} + +static inline u32 gic_read_rpr(void) +{ + return read_sysreg_s(SYS_ICC_RPR_EL1); +} + #define gic_read_typer(c) readq_relaxed(c) #define gic_write_irouter(v, c) writeq_relaxed(v, c) #define gic_read_lpir(c) readq_relaxed(c) -- cgit v1.2.3-59-g8ed1b From 3f1f3234bc2db1c16b9818b9a15a5d58ad45251c Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:44 +0000 Subject: irqchip/gic-v3: Switch to PMR masking before calling IRQ handler Mask the IRQ priority through PMR and re-enable IRQs at CPU level, allowing only higher priority interrupts to be received during interrupt handling. Signed-off-by: Julien Thierry Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: Thomas Gleixner Cc: Jason Cooper Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm/include/asm/arch_gicv3.h | 17 +++++++++++++++++ arch/arm64/include/asm/arch_gicv3.h | 17 +++++++++++++++++ drivers/irqchip/irq-gic-v3.c | 5 +++++ 3 files changed, 39 insertions(+) (limited to 'arch') diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index bef0b5de8dca..f6f485f4744e 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -363,5 +363,22 @@ static inline void gits_write_vpendbaser(u64 val, void * __iomem addr) #define gits_read_vpendbaser(c) __gic_readq_nonatomic(c) +static inline bool gic_prio_masking_enabled(void) +{ + return false; +} + +static inline void gic_pmr_mask_irqs(void) +{ + /* Should not get called. */ + WARN_ON_ONCE(true); +} + +static inline void gic_arch_enable_irqs(void) +{ + /* Should not get called. 
*/ + WARN_ON_ONCE(true); +} + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 37193e224a50..b5f8142bf802 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -155,5 +155,22 @@ static inline u32 gic_read_rpr(void) #define gits_write_vpendbaser(v, c) writeq_relaxed(v, c) #define gits_read_vpendbaser(c) readq_relaxed(c) +static inline bool gic_prio_masking_enabled(void) +{ + return system_uses_irq_prio_masking(); +} + +static inline void gic_pmr_mask_irqs(void) +{ + /* Should not get called yet. */ + WARN_ON_ONCE(true); +} + +static inline void gic_arch_enable_irqs(void) +{ + /* Should not get called yet. */ + WARN_ON_ONCE(true); +} + #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 0868a9d81c3c..8148a9250be2 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -356,6 +356,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs irqnr = gic_read_iar(); + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); + } + if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { int err; -- cgit v1.2.3-59-g8ed1b From cdbc81ddef43c8fdcbd3a26e1a7530c70b629cfc Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:45 +0000 Subject: arm64: ptrace: Provide definitions for PMR values Introduce fixed values for PMR that are going to be used to mask and unmask interrupts by priority. The current priority given to GIC interrupts is 0xa0, so clearing PMR's most significant bit is enough to mask interrupts. Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Oleg Nesterov Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/ptrace.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index fce22c4b2f73..8b131bc8984d 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -25,6 +25,18 @@ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) +/* + * PMR values used to mask/unmask interrupts. + * + * GIC priority masking works as follows: if an IRQ's priority is a higher value + * than the value held in PMR, that IRQ is masked. Lowering the value of PMR + * means masking more IRQs (or at least that the same IRQs remain masked). + * + * To mask interrupts, we clear the most significant bit of PMR. + */ +#define GIC_PRIO_IRQON 0xf0 +#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) + /* Additional SPSR bits not exposed in the UABI */ #define PSR_IL_BIT (1 << 20) -- cgit v1.2.3-59-g8ed1b From 133d05186325ce04494ea6488a6b86e50a446c12 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:46 +0000 Subject: arm64: Make PMR part of task context In order to replace PSR.I interrupt disabling/enabling with ICC_PMR_EL1 interrupt masking, ICC_PMR_EL1 needs to be saved/restored when taking/returning from an exception. This mimics the way hardware saves and restores PSR.I bit in spsr_el1 for exceptions and ERET. Add PMR to the registers to save in the pt_regs struct upon kernel entry, and restore it before ERET. Also, initialize it to a sane value when creating new tasks. 
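In C terms, the entry.S hunks below behave roughly as follows (illustrative only; the real code is alternative-patched assembly, not a runtime check):

	/* kernel_entry */
	if (system_uses_irq_prio_masking())
		regs->pmr_save = read_sysreg_s(SYS_ICC_PMR_EL1);

	/* kernel_exit, before eret */
	if (system_uses_irq_prio_masking()) {
		write_sysreg_s(regs->pmr_save, SYS_ICC_PMR_EL1);
		dsb(sy);	/* ensure the redistributor sees the priority change */
	}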
Signed-off-by: Julien Thierry Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Cc: Will Deacon Cc: Oleg Nesterov Cc: Dave Martin Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/processor.h | 3 +++ arch/arm64/include/asm/ptrace.h | 14 +++++++++++--- arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kernel/entry.S | 14 ++++++++++++++ arch/arm64/kernel/process.c | 6 ++++++ 5 files changed, 35 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index f1a7ab18faf3..5d9ce62bdebd 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -191,6 +191,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) memset(regs, 0, sizeof(*regs)); forget_syscall(regs); regs->pc = pc; + + if (system_uses_irq_prio_masking()) + regs->pmr_save = GIC_PRIO_IRQON; } static inline void start_thread(struct pt_regs *regs, unsigned long pc, diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 8b131bc8984d..ec60174c8c18 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -19,6 +19,8 @@ #ifndef __ASM_PTRACE_H #define __ASM_PTRACE_H +#include + #include /* Current Exception Level values, as contained in CurrentEL */ @@ -179,7 +181,8 @@ struct pt_regs { #endif u64 orig_addr_limit; - u64 unused; // maintain 16 byte alignment + /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */ + u64 pmr_save; u64 stackframe[2]; }; @@ -214,8 +217,13 @@ static inline void forget_syscall(struct pt_regs *regs) #define processor_mode(regs) \ ((regs)->pstate & PSR_MODE_MASK) -#define interrupts_enabled(regs) \ - (!((regs)->pstate & PSR_I_BIT)) +#define irqs_priority_unmasked(regs) \ + (system_uses_irq_prio_masking() ? 
\
 (regs)->pmr_save == GIC_PRIO_IRQON : \
 true) + +#define interrupts_enabled(regs) \
 (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs)) #define fast_interrupts_enabled(regs) \ (!((regs)->pstate & PSR_F_BIT)) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 0552b91d7666..7f40dcbdd51d 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -73,6 +73,7 @@ int main(void) DEFINE(S_PC, offsetof(struct pt_regs, pc)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); + DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 4d0c81f29a60..02f809a5c823 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -249,6 +249,12 @@ alternative_else_nop_endif msr sp_el0, tsk .endif + /* Save pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + mrs_s x20, SYS_ICC_PMR_EL1 + str x20, [sp, #S_PMR_SAVE] +alternative_else_nop_endif + /* * Registers that may be useful after this macro is invoked: * @@ -269,6 +275,14 @@ alternative_else_nop_endif /* No need to restore UAO, it will be restored from SPSR_EL1 */ .endif + /* Restore pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + ldr x20, [sp, #S_PMR_SAVE] + msr_s SYS_ICC_PMR_EL1, x20 + /* Ensure priority change is seen by redistributor */ + dsb sy +alternative_else_nop_endif + ldp x21, x22, [sp, #S_PC] // load ELR, SPSR .if \el == 0 ct_user_enter diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index a0f985a6ac50..6d410fc2849b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -232,6 +232,9 @@ void __show_regs(struct pt_regs *regs) printk("sp : %016llx\n", sp); + if (system_uses_irq_prio_masking()) + printk("pmr_save: %08llx\n", regs->pmr_save); + i = top_reg; while (i >= 0) { @@ -363,6 +366,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) childregs->pstate |= PSR_SSBS_BIT; + if (system_uses_irq_prio_masking()) + childregs->pmr_save = GIC_PRIO_IRQON; + p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } -- cgit v1.2.3-59-g8ed1b From a9806aa259feb2f6fd582b6342c835a3482fccc6 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:47 +0000 Subject: arm64: Unmask PMR before going idle The CPU does not receive signals for interrupts with a priority masked by ICC_PMR_EL1. This means the CPU might not come back from a WFI instruction. Make sure ICC_PMR_EL1 does not mask interrupts when doing a WFI. Since the logic of cpu_do_idle is becoming a bit more complex than just two instructions, let's turn it from ASM to C. 
Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/process.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ arch/arm64/mm/proc.S | 11 ----------- 2 files changed, 45 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6d410fc2849b..3767fb21a5b8 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -51,6 +51,7 @@ #include #include +#include #include #include #include @@ -74,6 +75,50 @@ EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); +static void __cpu_do_idle(void) +{ + dsb(sy); + wfi(); +} + +static void __cpu_do_idle_irqprio(void) +{ + unsigned long pmr; + unsigned long daif_bits; + + daif_bits = read_sysreg(daif); + write_sysreg(daif_bits | PSR_I_BIT, daif); + + /* + * Unmask PMR before going idle to make sure interrupts can + * be raised. + */ + pmr = gic_read_pmr(); + gic_write_pmr(GIC_PRIO_IRQON); + + __cpu_do_idle(); + + gic_write_pmr(pmr); + write_sysreg(daif_bits, daif); +} + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). + * + * If the CPU supports priority masking we must do additional work to + * ensure that interrupts are not masked at the PMR (because the core will + * not wake up if we block the wake up signal in the interrupt controller). + */ +void cpu_do_idle(void) +{ + if (system_uses_irq_prio_masking()) + __cpu_do_idle_irqprio(); + else + __cpu_do_idle(); +} + /* * This is our default idle handler. */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 73886a5f1f30..3ea4f3b84a8b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -55,17 +55,6 @@ #define MAIR(attr, mt) ((attr) << ((mt) * 8)) -/* - * cpu_do_idle() - * - * Idle the processor (wait for interrupt). - */ -ENTRY(cpu_do_idle) - dsb sy // WFI may enter a low-power mode - wfi - ret -ENDPROC(cpu_do_idle) - #ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context -- cgit v1.2.3-59-g8ed1b From 85738e05dc38a80921e1e1944e5b835f6668fc30 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:48 +0000 Subject: arm64: kvm: Unmask PMR before entering guest Interrupts masked by ICC_PMR_EL1 will not be signaled to the CPU. This means that the hypervisor will not receive masked interrupts while running a guest. We need to make sure that all maskable interrupts are masked from the time we call local_irq_disable() in the main run loop, and remain so until we call local_irq_enable() after returning from the guest, and we need to ensure that we see no interrupts at all (including pseudo-NMIs) in the middle of the VM world-switch, while at the same time we need to ensure we exit the guest when there are interrupts for the host. We can accomplish this with pseudo-NMIs enabled by:
  (1) local_irq_disable: set the priority mask
  (2) enter guest: set PSTATE.I
  (3) clear the priority mask
  (4) eret to guest
  (5) exit guest: set the priority mask, clear PSTATE.I (and restore other host PSTATE bits)
  (6) local_irq_enable: clear the priority mask. 
Signed-off-by: Julien Thierry Acked-by: Catalin Marinas Reviewed-by: Marc Zyngier Reviewed-by: Christoffer Dall Cc: Christoffer Dall Cc: Marc Zyngier Cc: Will Deacon Cc: kvmarm@lists.cs.columbia.edu Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_host.h | 16 ++++++++++++++++ arch/arm64/kvm/hyp/switch.c | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..292c88263fd1 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -474,10 +475,25 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) static inline void kvm_arm_vhe_guest_enter(void) { local_daif_mask(); + + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + */ + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON); + dsb(sy); + } } static inline void kvm_arm_vhe_guest_exit(void) { + /* + * local_daif_restore() takes care to properly restore PSTATE.DAIF + * and the GIC PMR if the host is using IRQ priorities. + */ local_daif_restore(DAIF_PROCCTX_NOIRQ); /* diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..6a4c2d6c3287 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -22,6 +22,7 @@ #include +#include #include #include #include @@ -521,6 +522,17 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt; u64 exit_code; + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + */ + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON); + dsb(sy); + } + vcpu = kern_hyp_va(vcpu); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); @@ -573,6 +585,10 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) */ __debug_switch_to_host(vcpu); + /* Returning to host will clear PSR.I, remask PMR if needed */ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQOFF); + return exit_code; } -- cgit v1.2.3-59-g8ed1b From 4a503217ce37e1f4f3d9b681bbcbbac103776bf1 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:50 +0000 Subject: arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking Instead of disabling interrupts by setting the PSR.I bit, use a priority higher than the one used for interrupts to mask them via PMR. When using PMR to disable interrupts, the value of PMR will be used instead of PSR.[DAIF] for the irqflags. 
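A worked sketch of what this means for callers (the calling code does not change, only the meaning of the flags word does; the concrete values assume the GIC_PRIO_* constants from earlier in this series):

	unsigned long flags;

	local_irq_save(flags);		/* flags now holds ICC_PMR_EL1, e.g. GIC_PRIO_IRQON */
	/* critical section: IRQs masked via PMR == GIC_PRIO_IRQOFF, PSTATE.I clear */
	local_irq_restore(flags);	/* writes flags back to ICC_PMR_EL1 */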
Signed-off-by: Julien Thierry
Suggested-by: Daniel Thompson
Acked-by: Ard Biesheuvel
Reviewed-by: Catalin Marinas
Cc: Will Deacon
Cc: Ard Biesheuvel
Cc: Oleg Nesterov
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/efi.h      |  11 +++++
 arch/arm64/include/asm/irqflags.h | 100 +++++++++++++++++++++++++++-----------
 2 files changed, 83 insertions(+), 28 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed320895d1f..c9e9a6978e73 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -44,6 +44,17 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
 
 #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 
+/*
+ * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not.
+ * And EFI shouldn't really play around with priority masking as it is not aware
+ * which priorities the OS has assigned to its interrupts.
+ */
+#define arch_efi_save_flags(state_flags)		\
+	((void)((state_flags) = read_sysreg(daif)))
+
+#define arch_efi_restore_flags(state_flags)	write_sysreg(state_flags, daif)
+
+
 /* arch specific definitions used by the stub code */
 
 /*
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 24692edf1a69..d4597b2c5729 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -18,7 +18,9 @@
 
 #ifdef __KERNEL__
 
+#include
 #include
+#include
 
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -36,33 +38,27 @@
 /*
  * CPU interrupt mask handling.
  */
-static inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	asm volatile(
-		"mrs	%0, daif		// arch_local_irq_save\n"
-		"msr	daifset, #2"
-		: "=r" (flags)
-		:
-		: "memory");
-	return flags;
-}
-
 static inline void arch_local_irq_enable(void)
 {
-	asm volatile(
-		"msr	daifclr, #2		// arch_local_irq_enable"
-		:
+	asm volatile(ALTERNATIVE(
+		"msr	daifclr, #2		// arch_local_irq_enable\n"
+		"nop",
+		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+		"dsb	sy",
+		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
+		: "r" (GIC_PRIO_IRQON)
 		: "memory");
 }
 
 static inline void arch_local_irq_disable(void)
 {
-	asm volatile(
-		"msr	daifset, #2		// arch_local_irq_disable"
-		:
+	asm volatile(ALTERNATIVE(
+		"msr	daifset, #2		// arch_local_irq_disable",
+		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0",
+		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
+		: "r" (GIC_PRIO_IRQOFF)
 		: "memory");
 }
 
@@ -71,12 +67,44 @@ static inline void arch_local_irq_disable(void)
  */
 static inline unsigned long arch_local_save_flags(void)
 {
+	unsigned long daif_bits;
 	unsigned long flags;
-	asm volatile(
-		"mrs	%0, daif		// arch_local_save_flags"
-		: "=r" (flags)
-		:
+
+	daif_bits = read_sysreg(daif);
+
+	/*
+	 * The asm is logically equivalent to:
+	 *
+	 * if (system_uses_irq_prio_masking())
+	 *	flags = (daif_bits & PSR_I_BIT) ?
+	 *		GIC_PRIO_IRQOFF :
+	 *		read_sysreg_s(SYS_ICC_PMR_EL1);
+	 * else
+	 *	flags = daif_bits;
+	 */
+	asm volatile(ALTERNATIVE(
+		"mov	%0, %1\n"
+		"nop\n"
+		"nop",
+		"mrs_s	%0, " __stringify(SYS_ICC_PMR_EL1) "\n"
+		"ands	%1, %1, " __stringify(PSR_I_BIT) "\n"
+		"csel	%0, %0, %2, eq",
+		ARM64_HAS_IRQ_PRIO_MASKING)
+		: "=&r" (flags), "+r" (daif_bits)
+		: "r" (GIC_PRIO_IRQOFF)
 		: "memory");
+
+	return flags;
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	flags = arch_local_save_flags();
+
+	arch_local_irq_disable();
+
 	return flags;
 }
 
@@ -85,16 +113,32 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile(
-		"msr	daif, %0		// arch_local_irq_restore"
-		:
-		: "r" (flags)
-		: "memory");
+	asm volatile(ALTERNATIVE(
+		"msr	daif, %0\n"
+		"nop",
+		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
+		"dsb	sy",
+		ARM64_HAS_IRQ_PRIO_MASKING)
+		: "+r" (flags)
+		:
+		: "memory");
 }
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags & PSR_I_BIT;
+	int res;
+
+	asm volatile(ALTERNATIVE(
+		"and	%w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+		"nop",
+		"cmp	%w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
+		"cset	%w0, ls",
+		ARM64_HAS_IRQ_PRIO_MASKING)
+		: "=&r" (res)
+		: "r" ((int) flags)
+		: "memory");
+
+	return res;
 }
 #endif
 #endif
-- cgit v1.2.3-59-g8ed1b


From 8cb7eff32cc00697d4a37b1ed569c72ee2039ca4 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:58:51 +0000
Subject: arm64: daifflags: Include PMR in daifflags restore operations

The addition of PMR should not bypass the semantics of daifflags.

When DA_F are set, the I bit is also set, as no interrupts (even of
higher priority) are allowed.

When DA_F are cleared, the I bit is cleared and interrupt
enabling/disabling goes through ICC_PMR_EL1.

Signed-off-by: Julien Thierry
Reviewed-by: Catalin Marinas
Reviewed-by: Marc Zyngier
Cc: Will Deacon
Cc: James Morse
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/daifflags.h | 50 +++++++++++++++++++++++++++++++++++---
 1 file changed, 46 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 546bc398553e..1dd3d7a38d34 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -18,6 +18,8 @@
 
 #include
 
+#include
+
 #define DAIF_PROCCTX		0
 #define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
 
@@ -36,7 +38,13 @@ static inline unsigned long local_daif_save(void)
 {
 	unsigned long flags;
 
-	flags = arch_local_save_flags();
+	flags = read_sysreg(daif);
+
+	if (system_uses_irq_prio_masking()) {
+		/* If IRQs are masked with PMR, reflect it in the flags */
+		if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
+			flags |= PSR_I_BIT;
+	}
 
 	local_daif_mask();
 
@@ -45,12 +53,46 @@ static inline unsigned long local_daif_save(void)
 
 static inline void local_daif_restore(unsigned long flags)
 {
-	if (!arch_irqs_disabled_flags(flags))
+	bool irq_disabled = flags & PSR_I_BIT;
+
+	if (!irq_disabled) {
 		trace_hardirqs_on();
 
-	arch_local_irq_restore(flags);
+		if (system_uses_irq_prio_masking())
+			arch_local_irq_enable();
+	} else if (!(flags & PSR_A_BIT)) {
+		/*
+		 * If interrupts are disabled but we can take
+		 * asynchronous errors, we can take NMIs
+		 */
+		if (system_uses_irq_prio_masking()) {
+			flags &= ~PSR_I_BIT;
+			/*
+			 * There has been concern that the write to daif
+			 * might be reordered before this write to PMR.
+			 * From the ARM ARM DDI 0487D.a, section D1.7.1
+			 * "Accessing PSTATE fields":
+			 *   Writes to the PSTATE fields have side-effects on
+			 *   various aspects of the PE operation. All of these
+			 *   side-effects are guaranteed:
+			 *   - Not to be visible to earlier instructions in
+			 *     the execution stream.
+			 *   - To be visible to later instructions in the
+			 *     execution stream
+			 *
+			 * Also, writes to PMR are self-synchronizing, so no
+			 * interrupts with a lower priority than PMR are
+			 * signaled to the PE after the write.
+			 *
+			 * So we don't need additional synchronization here.
+			 */
+			arch_local_irq_disable();
+		}
+	}
+
+	write_sysreg(flags, daif);
 
-	if (arch_irqs_disabled_flags(flags))
+	if (irq_disabled)
 		trace_hardirqs_off();
 }
-- cgit v1.2.3-59-g8ed1b


From e9ab7a2e333615497b3fc426c379c330230c2b50 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:58:52 +0000
Subject: arm64: alternative: Allow alternative status checking per cpufeature

In preparation for the application of alternatives at different points
during the boot process, provide the possibility to check whether
alternatives for a feature of interest were already applied instead of
having a global boolean for all alternatives.

Make VHE enablement code check for the VHE feature instead of
considering all alternatives.

Signed-off-by: Julien Thierry
Acked-by: Marc Zyngier
Cc: Will Deacon
Cc: Suzuki K Poulose
Cc: Marc Zyngier
Cc: Christoffer Dall
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/alternative.h |  3 +--
 arch/arm64/kernel/alternative.c      | 21 +++++++++++++++++----
 arch/arm64/kernel/cpufeature.c       |  2 +-
 3 files changed, 19 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 4b650ec1d7dd..9806a2357e70 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -14,8 +14,6 @@
 #include
 #include
 
-extern int alternatives_applied;
-
 struct alt_instr {
 	s32 orig_offset;	/* offset to original instruction */
 	s32 alt_offset;		/* offset to replacement instruction */
@@ -28,6 +26,7 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
 				 __le32 *origptr, __le32 *updptr, int nr_inst);
 
 void __init apply_alternatives_all(void);
+bool alternative_is_applied(u16 cpufeature);
 
 #ifdef CONFIG_MODULES
 void apply_alternatives_module(void *start, size_t length);
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d603992d40..c947d2246017 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,13 +32,23 @@
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
 
-int alternatives_applied;
+static int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
 
 struct alt_region {
 	struct alt_instr *begin;
 	struct alt_instr *end;
 };
 
+bool alternative_is_applied(u16 cpufeature)
+{
+	if (WARN_ON(cpufeature >= ARM64_NCAPS))
+		return false;
+
+	return test_bit(cpufeature, applied_alternatives);
+}
+
 /*
  * Check if the target PC is within an alternative block.
 */
@@ -192,6 +202,9 @@ static void __apply_alternatives(void *alt_region, bool is_module)
 		dsb(ish);
 		__flush_icache_all();
 		isb();
+
+		/* We applied all that was available */
+		bitmap_copy(applied_alternatives, cpu_hwcaps, ARM64_NCAPS);
 	}
 }
 
@@ -208,14 +221,14 @@ static int __apply_alternatives_multi_stop(void *unused)
 
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
-		while (!READ_ONCE(alternatives_applied))
+		while (!READ_ONCE(all_alternatives_applied))
 			cpu_relax();
 		isb();
 	} else {
-		BUG_ON(alternatives_applied);
+		BUG_ON(all_alternatives_applied);
 		__apply_alternatives(&region, false);
 		/* Barriers provided by the cache flushing */
-		WRITE_ONCE(alternatives_applied, 1);
+		WRITE_ONCE(all_alternatives_applied, 1);
 	}
 
 	return 0;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f56e0ab63a1..d607ea33228c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1118,7 +1118,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
 	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
 	 * do anything here.
 	 */
-	if (!alternatives_applied)
+	if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
 }
 #endif
-- cgit v1.2.3-59-g8ed1b


From 0ceb0d56905e3d141fae77e5936d00eee9233473 Mon Sep 17 00:00:00 2001
From: Daniel Thompson
Date: Thu, 31 Jan 2019 14:58:53 +0000
Subject: arm64: alternative: Apply alternatives early in boot process

Currently alternatives are applied very late in the boot process (and
a long time after we enable scheduling). Some alternative sequences,
such as those that alter the way CPU context is stored, must be applied
much earlier in the boot sequence.

Introduce apply_boot_alternatives() to allow some alternatives to be
applied immediately after we detect the CPU features of the boot CPU.
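A sketch of the resulting two-phase flow (illustrative only; the names
follow the diff below):

	/* Early, boot CPU only: patch SCOPE_BOOT_CPU features. */
	apply_boot_alternatives();	/* consumes 'boot_capabilities' */

	/* Later, under stop_machine() once all CPUs are up: patch
	 * everything the boot pass did not cover. */
	DECLARE_BITMAP(remaining, ARM64_NPATCHABLE);
	bitmap_complement(remaining, boot_capabilities, ARM64_NPATCHABLE);
	__apply_alternatives(&region, false, remaining);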
Signed-off-by: Daniel Thompson
[julien.thierry@arm.com: rename to fit new cpufeature framework better,
 apply BOOT_SCOPE feature early in boot]
Signed-off-by: Julien Thierry
Reviewed-by: Suzuki K Poulose
Reviewed-by: Marc Zyngier
Cc: Will Deacon
Cc: Christoffer Dall
Cc: Suzuki K Poulose
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/alternative.h |  1 +
 arch/arm64/include/asm/cpufeature.h  |  4 ++++
 arch/arm64/kernel/alternative.c      | 43 +++++++++++++++++++++++++++++-----
 arch/arm64/kernel/cpufeature.c       |  6 +++++
 arch/arm64/kernel/smp.c              |  7 ++++++
 5 files changed, 56 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 9806a2357e70..b9f8d787eea9 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -25,6 +25,7 @@ struct alt_instr {
 typedef void (*alternative_cb_t)(struct alt_instr *alt,
 				 __le32 *origptr, __le32 *updptr, int nr_inst);
 
+void __init apply_boot_alternatives(void);
 void __init apply_alternatives_all(void);
 bool alternative_is_applied(u16 cpufeature);
 
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 89c3f318f6be..e505e1fbd2b9 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -391,6 +391,10 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
 
+/* ARM64 CAPS + alternative_cb */
+#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
+extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
 #define for_each_available_cap(cap)		\
 	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
 
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index c947d2246017..a9b467763153 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -155,7 +155,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
 	} while (cur += d_size, cur < end);
 }
 
-static void __apply_alternatives(void *alt_region, bool is_module)
+static void __apply_alternatives(void *alt_region,  bool is_module,
+				 unsigned long *feature_mask)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
@@ -165,6 +166,9 @@ static void __apply_alternatives(void *alt_region, bool is_module)
 	for (alt = region->begin; alt < region->end; alt++) {
 		int nr_inst;
 
+		if (!test_bit(alt->cpufeature, feature_mask))
+			continue;
+
 		/* Use ARM64_CB_PATCH as an unconditional patch */
 		if (alt->cpufeature < ARM64_CB_PATCH &&
 		    !cpus_have_cap(alt->cpufeature))
@@ -203,8 +207,11 @@ static void __apply_alternatives(void *alt_region, bool is_module)
 		__flush_icache_all();
 		isb();
 
-		/* We applied all that was available */
-		bitmap_copy(applied_alternatives, cpu_hwcaps, ARM64_NCAPS);
+		/* Ignore ARM64_CB bit from feature mask */
+		bitmap_or(applied_alternatives, applied_alternatives,
+			  feature_mask, ARM64_NCAPS);
+		bitmap_and(applied_alternatives, applied_alternatives,
+			   cpu_hwcaps, ARM64_NCAPS);
 	}
 }
 
@@ -225,8 +232,13 @@ static int __apply_alternatives_multi_stop(void *unused)
 			cpu_relax();
 		isb();
 	} else {
+		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+
+		bitmap_complement(remaining_capabilities, boot_capabilities,
+				  ARM64_NPATCHABLE);
+
 		BUG_ON(all_alternatives_applied);
-		__apply_alternatives(&region, false);
+		__apply_alternatives(&region, false, remaining_capabilities);
 		/* Barriers provided by the cache flushing */
 		WRITE_ONCE(all_alternatives_applied, 1);
 	}
@@ -240,6 +252,24 @@ void __init apply_alternatives_all(void)
 	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void __init apply_boot_alternatives(void)
+{
+	struct alt_region region = {
+		.begin	= (struct alt_instr *)__alt_instructions,
+		.end	= (struct alt_instr *)__alt_instructions_end,
+	};
+
+	/* If called on non-boot cpu things could go wrong */
+	WARN_ON(smp_processor_id() != 0);
+
+	__apply_alternatives(&region, false, &boot_capabilities[0]);
+}
+
 #ifdef CONFIG_MODULES
 void apply_alternatives_module(void *start, size_t length)
 {
@@ -247,7 +277,10 @@ void apply_alternatives_module(void *start, size_t length)
 		.begin	= start,
 		.end	= start + length,
 	};
+	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+
+	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
 
-	__apply_alternatives(&region, true);
+	__apply_alternatives(&region, true, &all_capabilities[0]);
 }
 #endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d607ea33228c..b530fb24e6c6 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -54,6 +54,9 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcaps);
 static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
 
+/* Need also bit for ARM64_CB_PATCH */
+DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
@@ -1677,6 +1680,9 @@ static void update_cpu_capabilities(u16 scope_mask)
 		if (caps->desc)
 			pr_info("detected: %s\n", caps->desc);
 		cpus_set_cap(caps->capability);
+
+		if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
+			set_bit(caps->capability, boot_capabilities);
 	}
 }
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 1598d6f7200a..a944edd39d2d 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -419,6 +419,13 @@ void __init smp_prepare_boot_cpu(void)
 	 */
 	jump_label_init();
 	cpuinfo_store_boot_cpu();
+
+	/*
+	 * We now know enough about the boot CPU to apply the
+	 * alternatives that cannot wait until interrupt handling
+	 * and/or scheduling is enabled.
+	 */
+	apply_boot_alternatives();
 }
 
 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
-- cgit v1.2.3-59-g8ed1b


From e79321883842ca7b77d8a58fe8303e8da35c085e Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:58:55 +0000
Subject: arm64: Switch to PMR masking when starting CPUs

Once the boot CPU has been prepared or a new secondary CPU has been
brought up, use ICC_PMR_EL1 to mask interrupts on that CPU and clear
the PSR.I bit.

Since ICC_PMR_EL1 is initialized at CPU bringup, avoid overwriting
it in the GICv3 driver.
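The ordering is the important part: the PMR must already be masking
interrupts before PSTATE.I is cleared, or there would be a window in
which an interrupt could be taken with neither mask in place. A C
sketch mirroring init_gic_priority_masking() in the diff below:

	u32 cpuflags = read_sysreg(daif);

	/* Mask via priority first... */
	gic_write_pmr(GIC_PRIO_IRQOFF);

	/* ...then it is safe to clear PSTATE.I (if aborts can be taken). */
	if (!(cpuflags & PSR_A_BIT))
		write_sysreg(cpuflags & ~PSR_I_BIT, daif);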
Signed-off-by: Julien Thierry
Suggested-by: Daniel Thompson
Acked-by: Marc Zyngier
Cc: Will Deacon
Cc: James Morse
Cc: Marc Zyngier
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/smp.c      | 26 ++++++++++++++++++++++++++
 drivers/irqchip/irq-gic-v3.c |  8 +++++++-
 2 files changed, 33 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a944edd39d2d..824de7038967 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -180,6 +181,24 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	return ret;
 }
 
+static void init_gic_priority_masking(void)
+{
+	u32 cpuflags;
+
+	if (WARN_ON(!gic_enable_sre()))
+		return;
+
+	cpuflags = read_sysreg(daif);
+
+	WARN_ON(!(cpuflags & PSR_I_BIT));
+
+	gic_write_pmr(GIC_PRIO_IRQOFF);
+
+	/* We can only unmask PSR.I if we can take aborts */
+	if (!(cpuflags & PSR_A_BIT))
+		write_sysreg(cpuflags & ~PSR_I_BIT, daif);
+}
+
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -206,6 +225,9 @@ asmlinkage notrace void secondary_start_kernel(void)
 	 */
 	cpu_uninstall_idmap();
 
+	if (system_uses_irq_prio_masking())
+		init_gic_priority_masking();
+
 	preempt_disable();
 	trace_hardirqs_off();
 
@@ -426,6 +448,10 @@ void __init smp_prepare_boot_cpu(void)
 	 * and/or scheduling is enabled.
	 */
 	apply_boot_alternatives();
+
+	/* Conditionally switch to GIC PMR for interrupt masking */
+	if (system_uses_irq_prio_masking())
+		init_gic_priority_masking();
 }
 
 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index da547e013c1a..5a703ae09ce5 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -415,6 +415,9 @@ static u32 gic_get_pribits(void)
 static bool gic_has_group0(void)
 {
 	u32 val;
+	u32 old_pmr;
+
+	old_pmr = gic_read_pmr();
 
 	/*
 	 * Let's find out if Group0 is under control of EL3 or not by
@@ -430,6 +433,8 @@ static bool gic_has_group0(void)
 	gic_write_pmr(BIT(8 - gic_get_pribits()));
 	val = gic_read_pmr();
 
+	gic_write_pmr(old_pmr);
+
 	return val != 0;
 }
 
@@ -591,7 +596,8 @@ static void gic_cpu_sys_reg_init(void)
 	group0 = gic_has_group0();
 
 	/* Set priority mask register */
-	write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+	if (!gic_prio_masking_enabled())
+		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
 
 	/*
 	 * Some firmwares hand over to the kernel with the BPR changed from
-- cgit v1.2.3-59-g8ed1b


From b334481ab76b2a9031aef5393b07de6d21a08244 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:58:56 +0000
Subject: arm64: gic-v3: Implement arch support for priority masking

Implement the architecture-specific primitives allowing the GICv3
driver to use priorities to mask interrupts.
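A hedged sketch of how the irqchip side is expected to combine the two
primitives (the GICv3 driver changes themselves sit largely outside
this 'arch'-limited view):

	/* Once priority masking is the masking mechanism: */
	gic_pmr_mask_irqs();	/* PMR = GIC_PRIO_IRQOFF: normal IRQs masked */
	gic_arch_enable_irqs();	/* clear PSTATE.I so the PMR now gates IRQs */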
Signed-off-by: Julien Thierry
Suggested-by: Daniel Thompson
Acked-by: Marc Zyngier
Cc: Marc Zyngier
Cc: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/arch_gicv3.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index b5f8142bf802..14b41ddc68ba 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -22,6 +22,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include
 #include
 #include
 #include
@@ -162,14 +163,13 @@ static inline bool gic_prio_masking_enabled(void)
 
 static inline void gic_pmr_mask_irqs(void)
 {
-	/* Should not get called yet. */
-	WARN_ON_ONCE(true);
+	BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF);
+	gic_write_pmr(GIC_PRIO_IRQOFF);
 }
 
 static inline void gic_arch_enable_irqs(void)
 {
-	/* Should not get called yet. */
-	WARN_ON_ONCE(true);
+	asm volatile ("msr daifclr, #2" : : : "memory");
 }
 
 #endif /* __ASSEMBLY__ */
-- cgit v1.2.3-59-g8ed1b


From 7d31464adf20fb8c075a3a3dfe2002a195566510 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:59:00 +0000
Subject: arm64: Handle serror in NMI context

Per the definition of the daifflags, SErrors can occur in any interrupt
context, which includes NMI contexts. Trying to nmi_enter() in an NMI
context will crash.

Skip nmi_enter()/nmi_exit() when the SError occurred during an NMI.

Suggested-by: James Morse
Signed-off-by: Julien Thierry
Acked-by: Marc Zyngier
Cc: Will Deacon
Cc: Mark Rutland
Cc: Dave Martin
Cc: James Morse
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/traps.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4e2fb877f8d5..8ad119c3f665 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -898,13 +898,17 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
 
 asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
 {
-	nmi_enter();
+	const bool was_in_nmi = in_nmi();
+
+	if (!was_in_nmi)
+		nmi_enter();
 
 	/* non-RAS errors are not containable */
 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
 		arm64_serror_panic(regs, esr);
 
-	nmi_exit();
+	if (!was_in_nmi)
+		nmi_exit();
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
-- cgit v1.2.3-59-g8ed1b


From 1234ad686fb1bde5a9c2447fc4c9df8430358763 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:59:01 +0000
Subject: arm64: Skip preemption when exiting an NMI

Handling of an NMI should not set any TIF flags. For NMIs received from
EL0 the current exit path is safe to use.

However, an NMI received at EL1 could have interrupted some task context
that has set the TIF_NEED_RESCHED flag. Preempting a task should not
happen as a result of an NMI.

Skip preemption after handling an NMI from EL1.
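In C terms, the entry.S change below amounts to something like the
following (a sketch; the real code is assembly and folds the NMI check
into the preempt-count test):

	u64 daif_bits = read_sysreg(daif);

	/* DA_F were cleared on IRQ entry; if any DAIF bit is still set,
	 * this "IRQ" is really an NMI, so never preempt from here. */
	if (!preempt_count() && !daif_bits)
		preempt_schedule_irq();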
Signed-off-by: Julien Thierry
Acked-by: Marc Zyngier
Cc: Will Deacon
Cc: Marc Zyngier
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/entry.S | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 02f809a5c823..35ed48469506 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -624,7 +624,15 @@ el1_irq:
 
 #ifdef CONFIG_PREEMPT
 	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
-	cbnz	x24, 1f				// preempt count != 0
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+	/*
+	 * DA_F were cleared at start of handling. If anything is set in DAIF,
+	 * we come back from an NMI, so skip preemption
+	 */
+	mrs	x0, daif
+	orr	x24, x24, x0
+alternative_else_nop_endif
+	cbnz	x24, 1f				// preempt count != 0 || NMI return path
 	bl	preempt_schedule_irq		// irq en/disable is done inside
 1:
 #endif
-- cgit v1.2.3-59-g8ed1b


From c25349fd3c8024cfebcc9b01ee6cfb093fab9be0 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:59:02 +0000
Subject: arm64: Skip irqflags tracing for NMI in IRQs disabled context

When an NMI is raised while interrupts were disabled, the IRQ tracing
is already in the correct state (i.e. hardirqs_off) and should be left
as such when returning to the interrupted context.

Check whether PMR was masking interrupts when the NMI was raised and
skip IRQ tracing if necessary.

Signed-off-by: Julien Thierry
Acked-by: Marc Zyngier
Cc: Will Deacon
Cc: Marc Zyngier
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/entry.S | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'arch')

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 35ed48469506..6bf7e12f9a2b 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -617,7 +617,18 @@ el1_irq:
 	kernel_entry 1
 	enable_da_f
 #ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+	ldr	x20, [sp, #S_PMR_SAVE]
+alternative_else
+	mov	x20, #GIC_PRIO_IRQON
+alternative_endif
+	cmp	x20, #GIC_PRIO_IRQOFF
+	/* Irqs were disabled, don't trace */
+	b.ls	1f
+#endif
 	bl	trace_hardirqs_off
+1:
 #endif
 
 	irq_handler
@@ -637,8 +648,18 @@ alternative_else_nop_endif
 1:
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+	/*
+	 * if IRQs were disabled when we received the interrupt, we have an NMI
+	 * and we are not re-enabling interrupt upon eret. Skip tracing.
+	 */
+	cmp	x20, #GIC_PRIO_IRQOFF
+	b.ls	1f
+#endif
 	bl	trace_hardirqs_on
+1:
 #endif
+
 	kernel_exit 1
 ENDPROC(el1_irq)
-- cgit v1.2.3-59-g8ed1b


From bc3c03ccb4641fb940b27a0d369431876923a8fe Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:59:03 +0000
Subject: arm64: Enable the support of pseudo-NMIs

Add a build option and a command line parameter to build and enable the
support of pseudo-NMIs.

Signed-off-by: Julien Thierry
Suggested-by: Daniel Thompson
Cc: Will Deacon
Signed-off-by: Catalin Marinas
---
 Documentation/admin-guide/kernel-parameters.txt |  5 +++++
 arch/arm64/Kconfig                              | 14 ++++++++++++++
 arch/arm64/kernel/cpufeature.c                  | 10 +++++++++-
 3 files changed, 28 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b799bcf67d7b..4d85fa524109 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1831,6 +1831,11 @@
 			to let secondary kernels in charge of setting up
 			LPIs.
+	irqchip.gicv3_pseudo_nmi= [ARM64]
+			Enables support for pseudo-NMIs in the kernel. This
+			requires the kernel to be built with
+			CONFIG_ARM64_PSEUDO_NMI.
+
 	irqfixup	[HW]
 			When an interrupt is not handled search all handlers
 			for it. Intended to get systems with badly broken
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 4cad67b9ec0a..c7a44bcfc385 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1327,6 +1327,20 @@ config ARM64_MODULE_PLTS
 	bool
 	select HAVE_MOD_ARCH_SPECIFIC
 
+config ARM64_PSEUDO_NMI
+	bool "Support for NMI-like interrupts"
+	select CONFIG_ARM_GIC_V3
+	help
+	  Adds support for mimicking Non-Maskable Interrupts through the use of
+	  GIC interrupt priority. This support requires version 3 or later of
+	  Arm GIC.
+
+	  This high priority configuration for interrupts needs to be
+	  explicitly enabled by setting the kernel parameter
+	  "irqchip.gicv3_pseudo_nmi" to 1.
+
+	  If unsure, say N
+
 config RELOCATABLE
 	bool
 	help
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b530fb24e6c6..e24e94d28767 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1207,10 +1207,18 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
+static bool enable_pseudo_nmi;
+
+static int __init early_enable_pseudo_nmi(char *p)
+{
+	return strtobool(p, &enable_pseudo_nmi);
+}
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
+
 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
 				   int scope)
 {
-	return false;
+	return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
 }
 #endif
-- cgit v1.2.3-59-g8ed1b


From a80554fc36ba41d96af8e72fb54cd5d490e06c54 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Fri, 8 Feb 2019 09:36:48 +0000
Subject: arm64: irqflags: Fix clang build warnings

Clang complains when passing asm operands that are smaller than the
registers they are mapped to:

  arch/arm64/include/asm/irqflags.h:50:10: warning: value size does not
  match register size specified by the constraint and modifier
  [-Wasm-operand-widths]
                 : "r" (GIC_PRIO_IRQON)

Fix it by casting the affected input operands to a type of the correct
size.
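The warning pattern being fixed can be sketched as (illustrative):

	/* GIC_PRIO_IRQON/IRQOFF are int-sized constants; binding one to a
	 * 64-bit "r" operand makes clang warn, so cast it explicitly: */
	asm volatile("msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0"
		     : : "r" ((unsigned long) GIC_PRIO_IRQON) : "memory");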
Reported-by: Nathan Chancellor
Tested-by: Nathan Chancellor
Signed-off-by: Julien Thierry
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/irqflags.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index d4597b2c5729..43d8366c1e87 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -47,7 +47,7 @@ static inline void arch_local_irq_enable(void)
 		"dsb	sy",
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
-		: "r" (GIC_PRIO_IRQON)
+		: "r" ((unsigned long) GIC_PRIO_IRQON)
 		: "memory");
 }
 
@@ -58,7 +58,7 @@ static inline void arch_local_irq_disable(void)
 		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0",
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
-		: "r" (GIC_PRIO_IRQOFF)
+		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
 		: "memory");
 }
 
@@ -91,7 +91,7 @@ static inline unsigned long arch_local_save_flags(void)
 		"csel	%0, %0, %2, eq",
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		: "=&r" (flags), "+r" (daif_bits)
-		: "r" (GIC_PRIO_IRQOFF)
+		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
 		: "memory");
 
 	return flags;
-- cgit v1.2.3-59-g8ed1b


From 47224e51ab778e918257b96c1a1b3735e4b8c15d Mon Sep 17 00:00:00 2001
From: Julien Grall
Date: Fri, 8 Feb 2019 17:04:25 +0000
Subject: arm64: Remove documentation about TIF_USEDFPU

TIF_USEDFPU is not defined as a thread flag on arm64, so drop it from
the documentation.

Acked-by: Will Deacon
Signed-off-by: Julien Grall
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/thread_info.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index bbca68b54732..eb3ef73e07cf 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -79,7 +79,6 @@ void arch_release_task_struct(struct task_struct *tsk);
  *  TIF_SIGPENDING	- signal pending
  *  TIF_NEED_RESCHED	- rescheduling necessary
  *  TIF_NOTIFY_RESUME	- callback before returning to user
- *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  */
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
-- cgit v1.2.3-59-g8ed1b


From 4caf8758b60b6f7f9773fd1d265cb5a7cf935c97 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Fri, 22 Feb 2019 09:32:50 +0000
Subject: arm64: Rename get_thread_info()

The assembly macro get_thread_info() actually returns a task_struct and
is analogous to the current/get_current macro/function.

While it could be argued that thread_info sits at the start of
task_struct and the intention could have been to return a thread_info,
instances of loads from/stores to the address obtained from
get_thread_info() use offsets that are generated with
offsetof(struct task_struct, [...]).

Rename get_thread_info() to state it returns a task_struct.
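In C terms the macro is simply the following (sketch; arm64 keeps the
current task_struct pointer in sp_el0):

	struct task_struct *tsk =
		(struct task_struct *)read_sysreg(sp_el0);
	/* ...and the TSK_* offsets used with it are all
	 * offsetof(struct task_struct, ...) values. */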
Acked-by: Mark Rutland
Signed-off-by: Julien Thierry
Cc: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/asm-uaccess.h | 2 +-
 arch/arm64/include/asm/assembler.h   | 6 +++---
 arch/arm64/kernel/entry.S            | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 4128bec033f6..f74909ba29bd 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -24,7 +24,7 @@
 	.endm
 
 	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
-	get_thread_info \tmp1
+	get_current_task \tmp1
 	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
 	mrs	\tmp2, ttbr1_el1
 	extr    \tmp2, \tmp2, \tmp1, #48
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 7acf2436b578..9c5c876a9ff2 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -528,9 +528,9 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	.endm
 
 /*
- * Return the current thread_info.
+ * Return the current task_struct.
 */
-	.macro	get_thread_info, rd
+	.macro	get_current_task, rd
 	mrs	\rd, sp_el0
 	.endm
 
@@ -713,7 +713,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	.macro		if_will_cond_yield_neon
 #ifdef CONFIG_PREEMPT
-	get_thread_info	x0
+	get_current_task	x0
 	ldr		x0, [x0, #TSK_TI_PREEMPT]
 	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
 	cbz		x0, .Lyield_\@
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6bf7e12f9a2b..c50a7a75f2e0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -185,7 +185,7 @@ alternative_cb_end
 	.else
 	add	x21, sp, #S_FRAME_SIZE
-	get_thread_info tsk
+	get_current_task tsk
 	/* Save the task's original addr_limit and set USER_DS */
 	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
@@ -1104,7 +1104,7 @@ ENTRY(ret_from_fork)
 	cbz	x19, 1f				// not a kernel thread
 	mov	x0, x20
 	blr	x19
-1:	get_thread_info tsk
+1:	get_current_task tsk
 	b	ret_to_user
 ENDPROC(ret_from_fork)
 NOKPROBE(ret_from_fork)
-- cgit v1.2.3-59-g8ed1b


From 3e32131abc311a5cb9fddecc72cbd0b95ffcc10d Mon Sep 17 00:00:00 2001
From: Zhang Lei
Date: Tue, 26 Feb 2019 18:43:41 +0000
Subject: arm64: Add workaround for Fujitsu A64FX erratum 010001

On the Fujitsu-A64FX cores ver(1.0, 1.1), memory access may cause an
undefined fault (Data abort, DFSC=0b111111). This fault occurs under a
specific hardware condition when a load/store instruction performs an
address translation. Any load/store instruction, except for non-fault
accesses (including Armv8 and SVE ones), might cause this undefined
fault.

The TCR_ELx.NFD1 bit is used by the kernel when CONFIG_RANDOMIZE_BASE
is enabled to mitigate timing attacks against KASLR where the kernel
address space could be probed using the FFR and suppressed fault on
SVE loads.

Since this erratum causes spurious exceptions, which may corrupt the
exception registers, we clear the TCR_ELx.NFDx bits when booting on an
affected CPU.
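The workaround can be sketched in C as follows (illustrative; the actual
change is the tcr_clear_errata_bits assembly macro in the diff below, and
compute_tcr() is a hypothetical stand-in for the TCR value being set up):

	u64 tcr = compute_tcr();	/* hypothetical helper */

	if ((read_cpuid_id() & MIDR_FUJITSU_ERRATUM_010001_MASK) ==
	    MIDR_FUJITSU_ERRATUM_010001)
		tcr &= ~TCR_CLEAR_FUJITSU_ERRATUM_010001;	/* NFD0 | NFD1 */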
Signed-off-by: Zhang Lei
[Generated MIDR value/mask for __cpu_setup(), removed spurious-fault handler
 and always disabled the NFDx bits on affected CPUs]
Signed-off-by: James Morse
Tested-by: zhang.lei
Signed-off-by: Catalin Marinas
---
 Documentation/arm64/silicon-errata.txt |  1 +
 arch/arm64/Kconfig                     | 19 +++++++++++++++++++
 arch/arm64/include/asm/assembler.h     | 20 ++++++++++++++++++++
 arch/arm64/include/asm/cputype.h       |  9 +++++++++
 arch/arm64/include/asm/pgtable-hwdef.h |  1 +
 arch/arm64/mm/proc.S                   |  1 +
 6 files changed, 51 insertions(+)

(limited to 'arch')

diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 1f09d043d086..26d64e9f3a35 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -80,3 +80,4 @@ stable kernels.
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
 | Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
+| Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c7a44bcfc385..3fd266a177b5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -643,6 +643,25 @@ config QCOM_FALKOR_ERRATUM_E1041
 
 	  If unsure, say Y.
 
+config FUJITSU_ERRATUM_010001
+	bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly"
+	default y
+	help
+	  This option adds a workaround for Fujitsu-A64FX erratum E#010001.
+	  On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory
+	  accesses may cause an undefined fault (Data abort, DFSC=0b111111).
+	  This fault occurs under a specific hardware condition when a
+	  load/store instruction performs an address translation using:
+	  case-1  TTBR0_EL1 with TCR_EL1.NFD0 == 1.
+	  case-2  TTBR0_EL2 with TCR_EL2.NFD0 == 1.
+	  case-3  TTBR1_EL1 with TCR_EL1.NFD1 == 1.
+	  case-4  TTBR1_EL2 with TCR_EL2.NFD1 == 1.
+
+	  The workaround is to ensure these bits are clear in TCR_ELx.
+	  The workaround only affects the Fujitsu-A64FX.
+
+	  If unsure, say Y.
+
 endmenu
 
 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 9c5c876a9ff2..c5308d01e228 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -27,6 +27,7 @@
 
 #include
 #include
+#include
 #include
 #include
 #include
@@ -596,6 +597,25 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 #endif
 	.endm
 
+/*
+ * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
+ */
+	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
+#ifdef CONFIG_FUJITSU_ERRATUM_010001
+	mrs	\tmp1, midr_el1
+
+	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
+	and	\tmp1, \tmp1, \tmp2
+	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
+	cmp	\tmp1, \tmp2
+	b.ne	10f
+
+	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
+	bic	\tcr, \tcr, \tmp2
+10:
+#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
+	.endm
+
 /**
 * Errata workaround prior to disable MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 951ed1a4e5c9..2afb1338b48a 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -76,6 +76,7 @@
 #define ARM_CPU_IMP_BRCM		0x42
 #define ARM_CPU_IMP_QCOM		0x51
 #define ARM_CPU_IMP_NVIDIA		0x4E
+#define ARM_CPU_IMP_FUJITSU		0x46
 
 #define ARM_CPU_PART_AEM_V8		0xD0F
 #define ARM_CPU_PART_FOUNDATION		0xD00
@@ -104,6 +105,8 @@
 #define NVIDIA_CPU_PART_DENVER		0x003
 #define NVIDIA_CPU_PART_CARMEL		0x004
 
+#define FUJITSU_CPU_PART_A64FX		0x001
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -122,6 +125,12 @@
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+
+/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+#define MIDR_FUJITSU_ERRATUM_010001		MIDR_FUJITSU_A64FX
+#define MIDR_FUJITSU_ERRATUM_010001_MASK	(~MIDR_VARIANT(1))
+#define TCR_CLEAR_FUJITSU_ERRATUM_010001	(TCR_NFD1 | TCR_NFD0)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e9b0a7d75184..a69259cc1f16 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -302,6 +302,7 @@
 #define TCR_TBI1		(UL(1) << 38)
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
+#define TCR_NFD0		(UL(1) << 53)
 #define TCR_NFD1		(UL(1) << 54)
 
 /*
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 3ea4f3b84a8b..aa0817c9c4c3 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -445,6 +445,7 @@ ENTRY(__cpu_setup)
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
+	tcr_clear_errata_bits x10, x9, x5
 
 #ifdef CONFIG_ARM64_USER_VA_BITS_52
 	ldr_l		x9, vabits_user
-- cgit v1.2.3-59-g8ed1b


From ce246c444a08e03bc95c9b1aa111527690d1ae13 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 22 Feb 2019 18:04:53 +0000
Subject: riscv: io: Update __io_[p]ar() macros to take an argument

The definitions of the __io_[p]ar() macros in asm-generic/io.h take
the value returned by the preceding I/O read as an argument so that
architectures can use this to create order with a subsequent delayX()
routine using a dependency.

Update the riscv barrier definitions to match, although the argument
is currently unused.

Suggested-by: Arnd Bergmann
Reviewed-by: Palmer Dabbelt
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/riscv/include/asm/io.h | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

(limited to 'arch')

diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index b269451e7e85..1d9c1376dc64 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -163,20 +163,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 * doesn't define any ordering between the memory space and the I/O space.
 */
 #define __io_br()	do {} while (0)
-#define __io_ar()	__asm__ __volatile__ ("fence i,r" : : : "memory");
+#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
 #define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
 #define __io_aw()	do {} while (0)
 
-#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(); __v; })
-#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(); __v; })
-#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(); __v; })
+#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
 
 #define writeb(v,c)	({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
 #define writew(v,c)	({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
 #define writel(v,c)	({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
 
 #ifdef CONFIG_64BIT
-#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(); __v; })
+#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
 #define writeq(v,c)	({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
 #endif
 
@@ -198,20 +198,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 * writes.
 */
 #define __io_pbr()	__asm__ __volatile__ ("fence io,i"  : : : "memory");
-#define __io_par()	__asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_par(v)	__asm__ __volatile__ ("fence i,ior" : : : "memory");
 #define __io_pbw()	__asm__ __volatile__ ("fence iow,o" : : : "memory");
 #define __io_paw()	__asm__ __volatile__ ("fence o,io"  : : : "memory");
 
-#define inb(c)		({ u8  __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
-#define inw(c)		({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
-#define inl(c)		({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
+#define inb(c)		({ u8  __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inw(c)		({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inl(c)		({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
 
 #define outb(v,c)	({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
 #define outw(v,c)	({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
 #define outl(v,c)	({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
 
 #ifdef CONFIG_64BIT
-#define inq(c)		({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(); __v; })
+#define inq(c)		({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; })
 #define outq(v,c)	({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
 #endif
 
@@ -254,16 +254,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 		afence;							\
 	}
 
-__io_reads_ins(reads,  u8, b, __io_br(), __io_ar())
-__io_reads_ins(reads, u16, w, __io_br(), __io_ar())
-__io_reads_ins(reads, u32, l, __io_br(), __io_ar())
+__io_reads_ins(reads,  u8, b, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
 #define readsb(addr, buffer, count) __readsb(addr, buffer, count)
 #define readsw(addr, buffer, count) __readsw(addr, buffer, count)
 #define readsl(addr, buffer, count) __readsl(addr, buffer, count)
 
-__io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
-__io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
-__io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
+__io_reads_ins(ins,  u8, b, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
 #define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
 #define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
 #define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
 
@@ -283,10 +283,10 @@ __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
 #define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
 
 #ifdef CONFIG_64BIT
-__io_reads_ins(reads, u64, q, __io_br(), __io_ar())
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
 #define readsq(addr, buffer, count) __readsq(addr, buffer, count)
 
-__io_reads_ins(ins, u64, q, __io_pbr(), __io_par())
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
 #define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
 
 __io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
-- cgit v1.2.3-59-g8ed1b


From 2c97a9cc35a7a73a7580a8e2632419ff3c0b0fe5 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 22 Feb 2019 18:04:54 +0000
Subject: arm64: io: Hook up __io_par() for inX() ordering

Ensure that inX() provides the same ordering guarantees as readX()
by hooking up __io_par() so that it maps directly to __iormb().

Reported-by: Andrew Murray
Reviewed-by: Palmer Dabbelt
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/io.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index ee723835c1f4..8bb7210ac286 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,6 +121,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 						: "memory");		\
 })
 
+#define __io_par(v)		__iormb(v)
 #define __iowmb()		wmb()
 
 #define mmiowb()		do { } while (0)
-- cgit v1.2.3-59-g8ed1b


From a29c78234942fcfba2c5c305adc85b64332f9a95 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 15 Jan 2019 20:18:39 +0100
Subject: arm64: Kconfig.platforms: fix warning unmet direct dependencies

When ARCH_MXC gets enabled, ARM64_ERRATUM_845719 will be selected and
this warning will happen when COMPAT isn't set.

WARNING: unmet direct dependencies detected for ARM64_ERRATUM_845719
  Depends on [n]: COMPAT [=n]
  Selected by [y]:
  - ARCH_MXC [=y]

Rework to add 'if COMPAT' before ARM64_ERRATUM_845719 gets selected,
since ARM64_ERRATUM_845719 depends on COMPAT.

Acked-by: Will Deacon
Signed-off-by: Anders Roxell
Signed-off-by: Catalin Marinas
---
 arch/arm64/Kconfig.platforms | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 251ecf34cb02..d4faca775d9c 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -145,7 +145,7 @@ config ARCH_MVEBU
 config ARCH_MXC
 	bool "ARMv8 based NXP i.MX SoC family"
 	select ARM64_ERRATUM_843419
-	select ARM64_ERRATUM_845719
+	select ARM64_ERRATUM_845719 if COMPAT
 	help
 	  This enables support for the ARMv8 based SoCs in the
 	  NXP i.MX family.
-- cgit v1.2.3-59-g8ed1b


From 366e37e4da23f9df498cc9577cadcb354f7bd431 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 22 Feb 2019 15:42:23 +0100
Subject: arm64: avoid clang warning about self-assignment

Building a preprocessed source file for arm64 now always produces a
warning with clang because of the page_to_virt() macro assigning a
variable to itself.

Adding a new temporary variable avoids this issue.

Fixes: 2813b9c02962 ("kasan, mm, arm64: tag non slab memory allocated via pagealloc")
Reviewed-by: Andrey Konovalov
Signed-off-by: Arnd Bergmann
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/memory.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index e1ec947e7c0c..6340aa8350d9 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -316,8 +316,9 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define page_to_virt(page)	({					\
 	unsigned long __addr =						\
 		((__page_to_voff(page)) | PAGE_OFFSET);			\
-	__addr = __tag_set(__addr, page_kasan_tag(page));		\
-	((void *)__addr);						\
+	unsigned long __addr_tag =					\
+		__tag_set(__addr, page_kasan_tag(page));		\
+	((void *)__addr_tag);						\
 })
 
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
-- cgit v1.2.3-59-g8ed1b


From 3cd0ddb3deec43bd1cfcdd39d3dde37a0135d0c6 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 1 Mar 2019 14:19:06 +0000
Subject: Revert "arm64: uaccess: Implement unsafe accessors"

This reverts commit 0bd3ef34d2a8dd4056560567073d8bfc5da92e39.

There is ongoing work on objtool to identify incorrect uses of
user_access_{begin,end}. Until this is sorted, do not enable the
functionality on arm64. Also, on ARMv8.2 CPUs with hardware PAN and UAO
support, there is no obvious performance benefit to the unsafe user
accessors.
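For reference, the usage pattern the reverted API supported (and which
objtool will need to be able to validate before this can come back)
looks like the following generic sketch, with uptr/val as placeholder
names:

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_get_user(val, uptr, efault);	/* branches to 'efault' on fault */
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;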
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/uaccess.h | 79 ++++++++++------------------------------
 1 file changed, 20 insertions(+), 59 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6a70c75ed9f4..8e408084b8c3 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -270,26 +270,31 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
 
 #define __raw_get_user(x, ptr, err)					\
 do {									\
+	unsigned long __gu_val;						\
+	__chk_user_ptr(ptr);						\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__get_user_asm("ldrb", "ldtrb", "%w", (x), (ptr),	\
+		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 2:								\
-		__get_user_asm("ldrh", "ldtrh", "%w", (x), (ptr),	\
+		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 4:								\
-		__get_user_asm("ldr", "ldtr", "%w", (x), (ptr),		\
+		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__get_user_asm("ldr", "ldtr", "%x", (x), (ptr),		\
+		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	uaccess_disable_not_uao();					\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
 
 #define __get_user_error(x, ptr, err)					\
@@ -297,13 +302,8 @@ do {									\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
 	if (access_ok(__p, sizeof(*__p))) {				\
-		unsigned long __gu_val;					\
-		__chk_user_ptr(__p);					\
 		__p = uaccess_mask_ptr(__p);				\
-		uaccess_enable_not_uao();				\
-		__raw_get_user(__gu_val, __p, (err));			\
-		uaccess_disable_not_uao();				\
-		(x) = (__force __typeof__(*__p)) __gu_val;		\
+		__raw_get_user((x), __p, (err));			\
 	} else {							\
 		(x) = 0; (err) = -EFAULT;				\
 	}								\
@@ -334,26 +334,30 @@ do {									\
 
 #define __raw_put_user(x, ptr, err)					\
 do {									\
+	__typeof__(*(ptr)) __pu_val = (x);				\
+	__chk_user_ptr(ptr);						\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__put_user_asm("strb", "sttrb", "%w", (x), (ptr),	\
+		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 2:								\
-		__put_user_asm("strh", "sttrh", "%w", (x), (ptr),	\
+		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 4:								\
-		__put_user_asm("str", "sttr", "%w", (x), (ptr),		\
+		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__put_user_asm("str", "sttr", "%x", (x), (ptr),		\
+		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	uaccess_disable_not_uao();					\
} while (0)
 
 #define __put_user_error(x, ptr, err)					\
@@ -361,13 +365,9 @@ do {									\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
 	if (access_ok(__p, sizeof(*__p))) {				\
-		__typeof__(*(ptr)) __pu_val = (x);			\
-		__chk_user_ptr(__p);					\
 		__p = uaccess_mask_ptr(__p);				\
-		uaccess_enable_not_uao();				\
-		__raw_put_user(__pu_val, __p, (err));			\
-		uaccess_disable_not_uao();				\
-	} else	{							\
+		__raw_put_user((x), __p, (err));			\
+	} else	{							\
 		(err) = -EFAULT;					\
 	}								\
} while (0)
@@ -381,45 +381,6 @@ do {									\
 
 #define put_user	__put_user
 
-static __must_check inline bool user_access_begin(const void __user *ptr,
-						  size_t len)
-{
-	if (unlikely(!access_ok(ptr, len)))
-		return false;
-
-	uaccess_enable_not_uao();
-	return true;
-}
-#define user_access_begin(ptr, len)	user_access_begin(ptr, len)
-#define user_access_end()		uaccess_disable_not_uao()
-
-#define unsafe_get_user(x, ptr, err)					\
-do {									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	unsigned long __gu_val;						\
-	int __gu_err = 0;						\
-	might_fault();							\
-	__chk_user_ptr(__p);						\
-	__p = uaccess_mask_ptr(__p);					\
-	__raw_get_user(__gu_val, __p, __gu_err);			\
-	(x) = (__force __typeof__(*__p)) __gu_val;			\
-	if (__gu_err != 0)						\
-		goto err;						\
-} while (0)
-
-#define unsafe_put_user(x, ptr, err)					\
-do {									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	__typeof__(*(ptr)) __pu_val = (x);				\
-	int __pu_err = 0;						\
-	might_fault();							\
-	__chk_user_ptr(__p);						\
-	__p = uaccess_mask_ptr(__p);					\
-	__raw_put_user(__pu_val, __p, __pu_err);			\
-	if (__pu_err != 0)						\
-		goto err;						\
-} while (0)
-
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 #define raw_copy_from_user(to, from, n)					\
({									\
-- cgit v1.2.3-59-g8ed1b


From b9a4b9d084d978f80eb9210727c81804588b42ff Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 1 Mar 2019 13:28:00 +0000
Subject: arm64: debug: Don't propagate UNKNOWN FAR into si_code for debug signals

FAR_EL1 is UNKNOWN for all debug exceptions other than those caused by
taking a hardware watchpoint. Unfortunately, if a debug handler returns
a non-zero value, then we will propagate the UNKNOWN FAR value to
userspace via the si_addr field of the SIGTRAP siginfo_t.

Instead, let's set si_addr to take on the PC of the faulting instruction,
which we have available in the current pt_regs.

Cc:
Reviewed-by: Mark Rutland
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index efb7b2cbead5..ef46925096f0 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -824,11 +824,12 @@ void __init hook_debug_fault_code(int nr,
 	debug_fault_info[nr].name	= name;
 }
 
-asmlinkage int __exception do_debug_exception(unsigned long addr,
+asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
 					      unsigned int esr,
 					      struct pt_regs *regs)
 {
 	const struct fault_info *inf = esr_to_debug_fault_info(esr);
+	unsigned long pc = instruction_pointer(regs);
 	int rv;
 
 	/*
@@ -838,14 +839,14 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 	if (interrupts_enabled(regs))
 		trace_hardirqs_off();
 
-	if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
+	if (user_mode(regs) && !is_ttbr0_addr(pc))
 		arm64_apply_bp_hardening();
 
-	if (!inf->fn(addr, esr, regs)) {
+	if (!inf->fn(addr_if_watchpoint, esr, regs)) {
 		rv = 1;
 	} else {
 		arm64_notify_die(inf->name, regs,
-				 inf->sig, inf->code, (void __user *)addr, esr);
+				 inf->sig, inf->code, (void __user *)pc, esr);
 		rv = 0;
 	}
 
-- cgit v1.2.3-59-g8ed1b


From 6bd288569b50bc89fa5513031086746968f585cb Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 1 Mar 2019 13:28:01 +0000
Subject: arm64: debug: Ensure debug handlers check triggering exception level

Debug exception handlers may be called for exceptions generated both by
user and kernel code. In many cases, this is checked explicitly, but
in other cases things either happen to work by happy accident or they
go slightly wrong. For example, executing 'brk #4' from userspace will
enter the kprobes code and be ignored, but the instruction will be
retried forever in userspace instead of delivering a SIGTRAP.
Fix this issue in the most stable-friendly fashion by simply adding
explicit checks of the triggering exception level to all of our debug
exception handlers.

Cc:
Reviewed-by: Mark Rutland
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/kgdb.c           | 14 ++++++++++----
 arch/arm64/kernel/probes/kprobes.c |  6 ++++++
 2 files changed, 16 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index ce46c4cdf368..691854b77c7f 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
 
 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_brk_fn)
 
 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	compiled_break = 1;
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
 
 static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
-	if (!kgdb_single_step)
+	if (user_mode(regs) || !kgdb_single_step)
 		return DBG_HOOK_ERROR;
 
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
 
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a5b338b2542..ea32379de331 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	int retval;
 
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	/* return error if this is not our step */
 	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
 
@@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
 int __kprobes
 kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	kprobe_handler(regs);
 	return DBG_HOOK_HANDLED;
 }
-- cgit v1.2.3-59-g8ed1b


From b855b58ac1b7891b219e1d9ef60c45c774cadefe Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Wed, 13 Feb 2019 12:10:09 +0000
Subject: arm64: mmu: drop paging_init comments

The comments do not reflect the code, and it is easy to see what this
function does from a straight-line reading of it, so let's drop them.

Signed-off-by: Peng Fan
Acked-by: Mark Rutland
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d6b6f1b169bb..402b6495ff58 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -654,10 +654,6 @@ static void __init map_kernel(pgd_t *pgdp)
 	kasan_copy_shadow(pgdp);
 }
 
-/*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps and sets up the zero page.
- */
 void __init paging_init(void)
 {
 	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
-- cgit v1.2.3-59-g8ed1b