30 files changed, 247 insertions, 346 deletions
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 6493bd479ddc..fe2fb1ddd771 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -26,13 +26,13 @@ #include <asm/cputype.h> /* arm64 compatibility macros */ -#define COMPAT_PSR_MODE_ABT ABT_MODE -#define COMPAT_PSR_MODE_UND UND_MODE -#define COMPAT_PSR_T_BIT PSR_T_BIT -#define COMPAT_PSR_I_BIT PSR_I_BIT -#define COMPAT_PSR_A_BIT PSR_A_BIT -#define COMPAT_PSR_E_BIT PSR_E_BIT -#define COMPAT_PSR_IT_MASK PSR_IT_MASK +#define PSR_AA32_MODE_ABT ABT_MODE +#define PSR_AA32_MODE_UND UND_MODE +#define PSR_AA32_T_BIT PSR_T_BIT +#define PSR_AA32_I_BIT PSR_I_BIT +#define PSR_AA32_A_BIT PSR_A_BIT +#define PSR_AA32_E_BIT PSR_E_BIT +#define PSR_AA32_IT_MASK PSR_IT_MASK unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 42c090cf0292..476de9b1d239 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -42,8 +42,19 @@ config ARM64 select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT + select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT + select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT + select ARCH_INLINE_SPIN_LOCK if !PREEMPT + select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT + select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT + select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 3a9b84d39d71..6cd5d77b6b44 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += preempt.h generic-y += qrwlock.h +generic-y += qspinlock.h generic-y += rwsem.h generic-y += segment.h generic-y += serial.h diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index f11518af96a9..822a9192c551 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -128,6 +128,19 @@ do { \ __u.__val; \ }) +#define smp_cond_load_relaxed(ptr, cond_expr) \ +({ \ + typeof(ptr) __PTR = (ptr); \ + typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + __cmpwait_relaxed(__PTR, VAL); \ + } \ + VAL; \ +}) + #define smp_cond_load_acquire(ptr, cond_expr) \ ({ \ typeof(ptr) __PTR = (ptr); \ diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 5df5cfe1c143..5ee5bca8c24b 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -21,12 +21,16 @@ #define CTR_L1IP_SHIFT 14 #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 +#define CTR_IMINLINE_SHIFT 0 #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 #define CTR_IDC_SHIFT 28 #define CTR_DIC_SHIFT 29 +#define CTR_CACHE_MINLINE_MASK \ + (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) #define ICACHE_POLICY_VPIPT 0 diff --git a/arch/arm64/include/asm/cacheflush.h 
b/arch/arm64/include/asm/cacheflush.h index d264a7274811..19844211a4e6 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -19,6 +19,7 @@ #ifndef __ASM_CACHEFLUSH_H #define __ASM_CACHEFLUSH_H +#include <linux/kgdb.h> #include <linux/mm.h> /* @@ -71,7 +72,7 @@ * - kaddr - page address * - size - region size */ -extern void flush_icache_range(unsigned long start, unsigned long end); +extern void __flush_icache_range(unsigned long start, unsigned long end); extern int invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); extern void __inval_dcache_area(void *addr, size_t len); @@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len); extern long __flush_cache_user_range(unsigned long start, unsigned long end); extern void sync_icache_aliases(void *kaddr, unsigned long len); +static inline void flush_icache_range(unsigned long start, unsigned long end) +{ + __flush_icache_range(start, end); + + /* + * IPI all online CPUs so that they undergo a context synchronization + * event and are forced to refetch the new instructions. + */ +#ifdef CONFIG_KGDB + /* + * KGDB performs cache maintenance with interrupts disabled, so we + * will deadlock trying to IPI the secondary CPUs. In theory, we can + * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that + * just means that KGDB will elide the maintenance altogether! As it + * turns out, KGDB uses IPIs to round-up the secondary CPUs during + * the patching operation, so we don't need extra IPIs here anyway. + * In which case, add a KGDB-specific bodge and return early. + */ + if (kgdb_connected && irqs_disabled()) + return; +#endif + kick_all_cpus_sync(); +} + static inline void flush_cache_mm(struct mm_struct *mm) { } diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8a699c708fc9..be3bf3d08916 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -49,7 +49,8 @@ #define ARM64_HAS_CACHE_DIC 28 #define ARM64_HW_DBM 29 #define ARM64_SSBD 30 +#define ARM64_MISMATCHED_CACHE_TYPE 31 -#define ARM64_NCAPS 31 +#define ARM64_NCAPS 32 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index f62c56b1793f..c6802dea6cab 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base, s32 aarch64_get_branch_offset(u32 insn); u32 aarch64_set_branch_offset(u32 insn, s32 offset); -bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); - int aarch64_insn_patch_text_nosync(void *addr, u32 insn); int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 1dab3a984608..0c97e45d1dc3 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) { - *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; + *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT; } /* @@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) u32 mode; if (vcpu_mode_is_32bit(vcpu)) { - mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; - return mode > COMPAT_PSR_MODE_USR; + mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; + return mode > PSR_AA32_MODE_USR; } mode = 
*vcpu_cpsr(vcpu) & PSR_MODE_MASK; @@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) { - *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT; + *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; } else { u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); sctlr |= (1 << 25); @@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) - return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT); + return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT); return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index a73ae1e49200..e02612105d78 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { start_thread_common(regs, pc); - regs->pstate = COMPAT_PSR_MODE_USR; + regs->pstate = PSR_AA32_MODE_USR; if (pc & 1) - regs->pstate |= COMPAT_PSR_T_BIT; + regs->pstate |= PSR_AA32_T_BIT; #ifdef __AARCH64EB__ - regs->pstate |= COMPAT_PSR_E_BIT; + regs->pstate |= PSR_AA32_E_BIT; #endif regs->compat_sp = sp; diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 6069d66e0bc2..177b851ca6d9 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -35,36 +35,39 @@ #define COMPAT_PTRACE_GETHBPREGS 29 #define COMPAT_PTRACE_SETHBPREGS 30 -/* AArch32 CPSR bits */ -#define COMPAT_PSR_MODE_MASK 0x0000001f -#define COMPAT_PSR_MODE_USR 0x00000010 -#define COMPAT_PSR_MODE_FIQ 0x00000011 -#define COMPAT_PSR_MODE_IRQ 0x00000012 -#define COMPAT_PSR_MODE_SVC 0x00000013 -#define COMPAT_PSR_MODE_ABT 0x00000017 -#define COMPAT_PSR_MODE_HYP 0x0000001a -#define COMPAT_PSR_MODE_UND 0x0000001b -#define COMPAT_PSR_MODE_SYS 0x0000001f -#define COMPAT_PSR_T_BIT 0x00000020 -#define COMPAT_PSR_F_BIT 0x00000040 -#define COMPAT_PSR_I_BIT 0x00000080 -#define COMPAT_PSR_A_BIT 0x00000100 -#define COMPAT_PSR_E_BIT 0x00000200 -#define COMPAT_PSR_J_BIT 0x01000000 -#define COMPAT_PSR_Q_BIT 0x08000000 -#define COMPAT_PSR_V_BIT 0x10000000 -#define COMPAT_PSR_C_BIT 0x20000000 -#define COMPAT_PSR_Z_BIT 0x40000000 -#define COMPAT_PSR_N_BIT 0x80000000 -#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ -#define COMPAT_PSR_GE_MASK 0x000f0000 +/* SPSR_ELx bits for exceptions taken from AArch32 */ +#define PSR_AA32_MODE_MASK 0x0000001f +#define PSR_AA32_MODE_USR 0x00000010 +#define PSR_AA32_MODE_FIQ 0x00000011 +#define PSR_AA32_MODE_IRQ 0x00000012 +#define PSR_AA32_MODE_SVC 0x00000013 +#define PSR_AA32_MODE_ABT 0x00000017 +#define PSR_AA32_MODE_HYP 0x0000001a +#define PSR_AA32_MODE_UND 0x0000001b +#define PSR_AA32_MODE_SYS 0x0000001f +#define PSR_AA32_T_BIT 0x00000020 +#define PSR_AA32_F_BIT 0x00000040 +#define PSR_AA32_I_BIT 0x00000080 +#define PSR_AA32_A_BIT 0x00000100 +#define PSR_AA32_E_BIT 0x00000200 +#define PSR_AA32_DIT_BIT 0x01000000 +#define PSR_AA32_Q_BIT 0x08000000 +#define PSR_AA32_V_BIT 0x10000000 +#define PSR_AA32_C_BIT 0x20000000 +#define PSR_AA32_Z_BIT 0x40000000 +#define PSR_AA32_N_BIT 0x80000000 +#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */ +#define PSR_AA32_GE_MASK 0x000f0000 #ifdef CONFIG_CPU_BIG_ENDIAN -#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT +#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT #else -#define 
COMPAT_PSR_ENDSTATE 0 +#define PSR_AA32_ENDSTATE 0 #endif +/* AArch32 CPSR bits, as seen in AArch32 */ +#define COMPAT_PSR_DIT_BIT 0x00200000 + /* * These are 'magic' values for PTRACE_PEEKUSR that return info about where a * process is located in memory. @@ -111,6 +114,30 @@ #define compat_sp_fiq regs[29] #define compat_lr_fiq regs[30] +static inline unsigned long compat_psr_to_pstate(const unsigned long psr) +{ + unsigned long pstate; + + pstate = psr & ~COMPAT_PSR_DIT_BIT; + + if (psr & COMPAT_PSR_DIT_BIT) + pstate |= PSR_AA32_DIT_BIT; + + return pstate; +} + +static inline unsigned long pstate_to_compat_psr(const unsigned long pstate) +{ + unsigned long psr; + + psr = pstate & ~PSR_AA32_DIT_BIT; + + if (pstate & PSR_AA32_DIT_BIT) + psr |= COMPAT_PSR_DIT_BIT; + + return psr; +} + /* * This struct defines the way the registers are stored on the stack during an * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for @@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs) #ifdef CONFIG_COMPAT #define compat_thumb_mode(regs) \ - (((regs)->pstate & COMPAT_PSR_T_BIT)) + (((regs)->pstate & PSR_AA32_T_BIT)) #else #define compat_thumb_mode(regs) (0) #endif diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 26c5bd7d88d8..38116008d18b 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -16,123 +16,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#include <asm/lse.h> -#include <asm/spinlock_types.h> -#include <asm/processor.h> - -/* - * Spinlock implementation. - * - * The memory barriers are implicit with the load-acquire and store-release - * instructions. - */ - -static inline void arch_spin_lock(arch_spinlock_t *lock) -{ - unsigned int tmp; - arch_spinlock_t lockval, newval; - - asm volatile( - /* Atomically increment the next ticket. */ - ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ -" prfm pstl1strm, %3\n" -"1: ldaxr %w0, %3\n" -" add %w1, %w0, %w5\n" -" stxr %w2, %w1, %3\n" -" cbnz %w2, 1b\n", - /* LSE atomics */ -" mov %w2, %w5\n" -" ldadda %w2, %w0, %3\n" - __nops(3) - ) - - /* Did we get the lock? */ -" eor %w1, %w0, %w0, ror #16\n" -" cbz %w1, 3f\n" - /* - * No: spin on the owner. Send a local event to avoid missing an - * unlock before the exclusive load. - */ -" sevl\n" -"2: wfe\n" -" ldaxrh %w2, %4\n" -" eor %w1, %w2, %w0, lsr #16\n" -" cbnz %w1, 2b\n" - /* We got the lock. Critical section starts here. 
*/ -"3:" - : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock) - : "Q" (lock->owner), "I" (1 << TICKET_SHIFT) - : "memory"); -} - -static inline int arch_spin_trylock(arch_spinlock_t *lock) -{ - unsigned int tmp; - arch_spinlock_t lockval; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " prfm pstl1strm, %2\n" - "1: ldaxr %w0, %2\n" - " eor %w1, %w0, %w0, ror #16\n" - " cbnz %w1, 2f\n" - " add %w0, %w0, %3\n" - " stxr %w1, %w0, %2\n" - " cbnz %w1, 1b\n" - "2:", - /* LSE atomics */ - " ldr %w0, %2\n" - " eor %w1, %w0, %w0, ror #16\n" - " cbnz %w1, 1f\n" - " add %w1, %w0, %3\n" - " casa %w0, %w1, %2\n" - " sub %w1, %w1, %3\n" - " eor %w1, %w1, %w0\n" - "1:") - : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) - : "I" (1 << TICKET_SHIFT) - : "memory"); - - return !tmp; -} - -static inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - unsigned long tmp; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " ldrh %w1, %0\n" - " add %w1, %w1, #1\n" - " stlrh %w1, %0", - /* LSE atomics */ - " mov %w1, #1\n" - " staddlh %w1, %0\n" - __nops(1)) - : "=Q" (lock->owner), "=&r" (tmp) - : - : "memory"); -} - -static inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - return lock.owner == lock.next; -} - -static inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - return !arch_spin_value_unlocked(READ_ONCE(*lock)); -} - -static inline int arch_spin_is_contended(arch_spinlock_t *lock) -{ - arch_spinlock_t lockval = READ_ONCE(*lock); - return (lockval.next - lockval.owner) > 1; -} -#define arch_spin_is_contended arch_spin_is_contended - #include <asm/qrwlock.h> +#include <asm/qspinlock.h> /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h index 6b856012c51b..a157ff465e27 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -20,22 +20,7 @@ # error "please don't include this file directly" #endif -#include <linux/types.h> - -#define TICKET_SHIFT 16 - -typedef struct { -#ifdef __AARCH64EB__ - u16 next; - u16 owner; -#else - u16 owner; - u16 next; -#endif -} __aligned(4) arch_spinlock_t; - -#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 } - +#include <asm-generic/qspinlock_types.h> #include <asm-generic/qrwlock_types.h> #endif diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index d4707abb2f16..d2c0938ccaec 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -441,8 +441,8 @@ static struct undef_hook swp_hooks[] = { { .instr_mask = 0x0fb00ff0, .instr_val = 0x01000090, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = swp_handler }, { } @@ -521,15 +521,15 @@ static struct undef_hook cp15_barrier_hooks[] = { { .instr_mask = 0x0fff0fdf, .instr_val = 0x0e070f9a, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = cp15barrier_handler, }, { .instr_mask = 0x0fff0fff, .instr_val = 0x0e070f95, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = cp15barrier_handler, }, { } @@ -562,10 +562,10 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) if (big_endian) { insn = "setend be"; - regs->pstate |= COMPAT_PSR_E_BIT; + 
regs->pstate |= PSR_AA32_E_BIT; } else { insn = "setend le"; - regs->pstate &= ~COMPAT_PSR_E_BIT; + regs->pstate &= ~PSR_AA32_E_BIT; } trace_instruction_emulation(insn, regs->pc); @@ -593,16 +593,16 @@ static struct undef_hook setend_hooks[] = { { .instr_mask = 0xfffffdff, .instr_val = 0xf1010000, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = a32_setend_handler, }, { /* Thumb mode */ .instr_mask = 0x0000fff7, .instr_val = 0x0000b650, - .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK), - .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR), + .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK), + .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR), .fn = t16_setend_handler, }, {} diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h index 6c2b1b4f57c9..fad90e4935fb 100644 --- a/arch/arm64/kernel/cpu-reset.h +++ b/arch/arm64/kernel/cpu-reset.h @@ -16,13 +16,14 @@ void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry, unsigned long arg0, unsigned long arg1, unsigned long arg2); -static inline void __noreturn cpu_soft_restart(unsigned long el2_switch, - unsigned long entry, unsigned long arg0, unsigned long arg1, - unsigned long arg2) +static inline void __noreturn cpu_soft_restart(unsigned long entry, + unsigned long arg0, + unsigned long arg1, + unsigned long arg2) { typeof(__cpu_soft_restart) *restart; - el2_switch = el2_switch && !is_kernel_in_hyp_mode() && + unsigned long el2_switch = !is_kernel_in_hyp_mode() && is_hyp_mode_available(); restart = (void *)__pa_symbol(__cpu_soft_restart); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 1d2b6d768efe..459129712dfa 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -65,12 +65,18 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) } static bool -has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, - int scope) +has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, + int scope) { + u64 mask = CTR_CACHE_MINLINE_MASK; + + /* Skip matching the min line sizes for cache type check */ + if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE) + mask ^= arm64_ftr_reg_ctrel0.strict_mask; + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); - return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) != - (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask); + return (read_cpuid_cachetype() & mask) != + (arm64_ftr_reg_ctrel0.sys_val & mask); } static void @@ -101,7 +107,7 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, for (i = 0; i < SZ_2K; i += 0x80) memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); - flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); + __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); } static void __install_bp_hardening_cb(bp_hardening_cb_t fn, @@ -613,7 +619,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "Mismatched cache line size", .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE, - .matches = has_mismatched_cache_line_size, + .matches = has_mismatched_cache_type, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .cpu_enable = cpu_enable_trap_ctr_access, + }, + { + .desc = "Mismatched cache type", + .capability = ARM64_MISMATCHED_CACHE_TYPE, + .matches = has_mismatched_cache_type, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .cpu_enable = cpu_enable_trap_ctr_access, 
}, diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f24892a40d2c..4b3dc9fc9c68 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -214,7 +214,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = { * If we have differing I-cache policies, report it as the weakest - VIPT. */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */ - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -1723,7 +1723,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn) static struct undef_hook mrs_hook = { .instr_mask = 0xfff00000, .instr_val = 0xd5300000, - .pstate_mask = COMPAT_PSR_MODE_MASK, + .pstate_mask = PSR_AA32_MODE_MASK, .pstate_val = PSR_MODE_EL0t, .fn = emulate_mrs, }; diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 816d03c4c913..2b3413549734 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -149,20 +149,6 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn) return __aarch64_insn_write(addr, cpu_to_le32(insn)); } -static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) -{ - if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS) - return false; - - return aarch64_insn_is_b(insn) || - aarch64_insn_is_bl(insn) || - aarch64_insn_is_svc(insn) || - aarch64_insn_is_hvc(insn) || - aarch64_insn_is_smc(insn) || - aarch64_insn_is_brk(insn) || - aarch64_insn_is_nop(insn); -} - bool __kprobes aarch64_insn_uses_literal(u32 insn) { /* ldr/ldrsw (literal), prfm */ @@ -189,22 +175,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn) aarch64_insn_is_bcond(insn); } -/* - * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a - * Section B2.6.5 "Concurrent modification and execution of instructions": - * Concurrent modification and execution of instructions can lead to the - * resulting instruction performing any behavior that can be achieved by - * executing any sequence of instructions that can be executed from the - * same Exception level, except where the instruction before modification - * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC, - * or SMC instruction. - */ -bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn) -{ - return __aarch64_insn_hotpatch_safe(old_insn) && - __aarch64_insn_hotpatch_safe(new_insn); -} - int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) { u32 *tp = addr; @@ -216,8 +186,8 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) ret = aarch64_insn_write(tp, insn); if (ret == 0) - flush_icache_range((uintptr_t)tp, - (uintptr_t)tp + AARCH64_INSN_SIZE); + __flush_icache_range((uintptr_t)tp, + (uintptr_t)tp + AARCH64_INSN_SIZE); return ret; } @@ -239,11 +209,6 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) for (i = 0; ret == 0 && i < pp->insn_cnt; i++) ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], pp->new_insns[i]); - /* - * aarch64_insn_patch_text_nosync() calls flush_icache_range(), - * which ends with "dsb; isb" pair guaranteeing global - * visibility. - */ /* Notify other processors with an additional increment. 
*/ atomic_inc(&pp->cpu_count); } else { @@ -255,8 +220,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) return ret; } -static -int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) +int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) { struct aarch64_insn_patch patch = { .text_addrs = addrs, @@ -272,34 +236,6 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) cpu_online_mask); } -int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) -{ - int ret; - u32 insn; - - /* Unsafe to patch multiple instructions without synchronizaiton */ - if (cnt == 1) { - ret = aarch64_insn_read(addrs[0], &insn); - if (ret) - return ret; - - if (aarch64_insn_hotpatch_safe(insn, insns[0])) { - /* - * ARMv8 architecture doesn't guarantee all CPUs see - * the new instruction after returning from function - * aarch64_insn_patch_text_nosync(). So send IPIs to - * all other CPUs to achieve instruction - * synchronization. - */ - ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]); - kick_all_cpus_sync(); - return ret; - } - } - - return aarch64_insn_patch_text_sync(addrs, insns, cnt); -} - static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, u32 *maskp, int *shiftp) { diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index f76ea92dff91..f62effc6e064 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -207,8 +207,7 @@ void machine_kexec(struct kimage *kimage) * relocation is complete. */ - cpu_soft_restart(kimage != kexec_crash_image, - reboot_code_buffer_phys, kimage->head, kimage->start, 0); + cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0); BUG(); /* Should never get here. */ } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index e10bc363f533..740b31f77ade 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -177,16 +177,16 @@ static void print_pstate(struct pt_regs *regs) if (compat_user_mode(regs)) { printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n", pstate, - pstate & COMPAT_PSR_N_BIT ? 'N' : 'n', - pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z', - pstate & COMPAT_PSR_C_BIT ? 'C' : 'c', - pstate & COMPAT_PSR_V_BIT ? 'V' : 'v', - pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q', - pstate & COMPAT_PSR_T_BIT ? "T32" : "A32", - pstate & COMPAT_PSR_E_BIT ? "BE" : "LE", - pstate & COMPAT_PSR_A_BIT ? 'A' : 'a', - pstate & COMPAT_PSR_I_BIT ? 'I' : 'i', - pstate & COMPAT_PSR_F_BIT ? 'F' : 'f'); + pstate & PSR_AA32_N_BIT ? 'N' : 'n', + pstate & PSR_AA32_Z_BIT ? 'Z' : 'z', + pstate & PSR_AA32_C_BIT ? 'C' : 'c', + pstate & PSR_AA32_V_BIT ? 'V' : 'v', + pstate & PSR_AA32_Q_BIT ? 'Q' : 'q', + pstate & PSR_AA32_T_BIT ? "T32" : "A32", + pstate & PSR_AA32_E_BIT ? "BE" : "LE", + pstate & PSR_AA32_A_BIT ? 'A' : 'a', + pstate & PSR_AA32_I_BIT ? 'I' : 'i', + pstate & PSR_AA32_F_BIT ? 
'F' : 'f'); } else { printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n", pstate, diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 5c338ce5a7fa..42ff28aa5a00 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1076,6 +1076,7 @@ static int compat_gpr_get(struct task_struct *target, break; case 16: reg = task_pt_regs(target)->pstate; + reg = pstate_to_compat_psr(reg); break; case 17: reg = task_pt_regs(target)->orig_x0; @@ -1143,6 +1144,7 @@ static int compat_gpr_set(struct task_struct *target, newregs.pc = reg; break; case 16: + reg = compat_psr_to_pstate(reg); newregs.pstate = reg; break; case 17: @@ -1659,15 +1661,19 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) } /* - * Bits which are always architecturally RES0 per ARM DDI 0487A.h + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a + * We also take into account DIT (bit 24), which is not yet documented, and + * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be + * allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. + * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. * We also reserve IL for the kernel; SS is handled dynamically. */ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \ - GENMASK_ULL(5, 5)) + (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) static int valid_compat_regs(struct user_pt_regs *regs) { @@ -1675,15 +1681,15 @@ static int valid_compat_regs(struct user_pt_regs *regs) if (!system_supports_mixed_endian_el0()) { if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) - regs->pstate |= COMPAT_PSR_E_BIT; + regs->pstate |= PSR_AA32_E_BIT; else - regs->pstate &= ~COMPAT_PSR_E_BIT; + regs->pstate &= ~PSR_AA32_E_BIT; } if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && - (regs->pstate & COMPAT_PSR_A_BIT) == 0 && - (regs->pstate & COMPAT_PSR_I_BIT) == 0 && - (regs->pstate & COMPAT_PSR_F_BIT) == 0) { + (regs->pstate & PSR_AA32_A_BIT) == 0 && + (regs->pstate & PSR_AA32_I_BIT) == 0 && + (regs->pstate & PSR_AA32_F_BIT) == 0) { return 1; } @@ -1691,11 +1697,11 @@ static int valid_compat_regs(struct user_pt_regs *regs) * Force PSR to a valid 32-bit EL0t, preserving the same bits as * arch/arm. 
*/ - regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT | - COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT | - COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK | - COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT | - COMPAT_PSR_T_BIT; + regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT | + PSR_AA32_C_BIT | PSR_AA32_V_BIT | + PSR_AA32_Q_BIT | PSR_AA32_IT_MASK | + PSR_AA32_GE_MASK | PSR_AA32_E_BIT | + PSR_AA32_T_BIT; regs->pstate |= PSR_MODE32_BIT; return 0; diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 77b91f478995..fca761be18c8 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -243,6 +243,7 @@ static int compat_restore_sigframe(struct pt_regs *regs, int err; sigset_t set; struct compat_aux_sigframe __user *aux; + unsigned long psr; err = get_sigset_t(&set, &sf->uc.uc_sigmask); if (err == 0) { @@ -266,7 +267,9 @@ static int compat_restore_sigframe(struct pt_regs *regs, __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); - __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); + __get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); + + regs->pstate = compat_psr_to_pstate(psr); /* * Avoid compat_sys_sigreturn() restarting. @@ -372,22 +375,22 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, { compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); compat_ulong_t retcode; - compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT); + compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT); int thumb; /* Check if the handler is written for ARM or Thumb */ thumb = handler & 1; if (thumb) - spsr |= COMPAT_PSR_T_BIT; + spsr |= PSR_AA32_T_BIT; else - spsr &= ~COMPAT_PSR_T_BIT; + spsr &= ~PSR_AA32_T_BIT; /* The IT state must be cleared for both ARM and Thumb-2 */ - spsr &= ~COMPAT_PSR_IT_MASK; + spsr &= ~PSR_AA32_IT_MASK; /* Restore the original endianness */ - spsr |= COMPAT_PSR_ENDSTATE; + spsr |= PSR_AA32_ENDSTATE; if (ka->sa.sa_flags & SA_RESTORER) { retcode = ptr_to_compat(ka->sa.sa_restorer); @@ -414,6 +417,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { struct compat_aux_sigframe __user *aux; + unsigned long psr = pstate_to_compat_psr(regs->pstate); int err = 0; __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); @@ -432,7 +436,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); - __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); + __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); /* set the compat FSR WnR */ diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d5718a060672..e160ca123da3 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -56,6 +56,9 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + if (frame->fp <= fp) + return -EINVAL; + #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && (frame->pc == (unsigned long)return_to_handler)) { diff --git 
a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 56a0260ceb11..cdd4d9d6d575 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -107,14 +107,14 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) } if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { - u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK; + u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK; switch (mode) { - case COMPAT_PSR_MODE_USR: - case COMPAT_PSR_MODE_FIQ: - case COMPAT_PSR_MODE_IRQ: - case COMPAT_PSR_MODE_SVC: - case COMPAT_PSR_MODE_ABT: - case COMPAT_PSR_MODE_UND: + case PSR_AA32_MODE_USR: + case PSR_AA32_MODE_FIQ: + case PSR_AA32_MODE_IRQ: + case PSR_AA32_MODE_SVC: + case PSR_AA32_MODE_ABT: + case PSR_AA32_MODE_UND: case PSR_MODE_EL0t: case PSR_MODE_EL1t: case PSR_MODE_EL1h: diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 39be799d0417..215c7c0eb3b0 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -27,7 +27,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) - return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT); + return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT); return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE); } diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c index eefe403a2e63..7a5173ea2276 100644 --- a/arch/arm64/kvm/regmap.c +++ b/arch/arm64/kvm/regmap.c @@ -112,22 +112,22 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = { unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) { unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; - unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; + unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; switch (mode) { - case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC: + case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC: mode &= ~PSR_MODE32_BIT; /* 0 ... 
3 */ break; - case COMPAT_PSR_MODE_ABT: + case PSR_AA32_MODE_ABT: mode = 4; break; - case COMPAT_PSR_MODE_UND: + case PSR_AA32_MODE_UND: mode = 5; break; - case COMPAT_PSR_MODE_SYS: + case PSR_AA32_MODE_SYS: mode = 0; /* SYS maps to USR */ break; @@ -143,13 +143,13 @@ unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) */ static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu) { - unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; + unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; switch (mode) { - case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC; - case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT; - case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND; - case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ; - case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ; + case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC; + case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT; + case PSR_AA32_MODE_UND: return KVM_SPSR_UND; + case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ; + case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ; default: BUG(); } } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index a74311beda35..4e4aedaf7ab7 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -42,8 +42,8 @@ static const struct kvm_regs default_regs_reset = { }; static const struct kvm_regs default_regs_reset32 = { - .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT | - COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT), + .regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | + PSR_AA32_I_BIT | PSR_AA32_F_BIT), }; static bool cpu_has_32bit_el1(void) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 30334d81b021..0c22ede52f90 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -35,7 +35,7 @@ * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(flush_icache_range) +ENTRY(__flush_icache_range) /* FALLTHROUGH */ /* @@ -77,7 +77,7 @@ alternative_else_nop_endif 9: mov x0, #-EFAULT b 1b -ENDPROC(flush_icache_range) +ENDPROC(__flush_icache_range) ENDPROC(__flush_cache_user_range) /* diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 02b18f8b2905..24d786fc3a4c 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -10,18 +10,7 @@ static int ptdump_show(struct seq_file *m, void *v) ptdump_walk_pgd(m, info); return 0; } - -static int ptdump_open(struct inode *inode, struct file *file) -{ - return single_open(file, ptdump_show, inode->i_private); -} - -static const struct file_operations ptdump_fops = { - .open = ptdump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(ptdump); int ptdump_debugfs_register(struct ptdump_info *info, const char *name) { diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c index efc84cbe8277..5abbe9b3c652 100644 --- a/virt/kvm/arm/aarch32.c +++ b/virt/kvm/arm/aarch32.c @@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) { unsigned long itbits, cond; unsigned long cpsr = *vcpu_cpsr(vcpu); - bool is_arm = !(cpsr & COMPAT_PSR_T_BIT); + bool is_arm = !(cpsr & PSR_AA32_T_BIT); - if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK)) + if (is_arm || !(cpsr & PSR_AA32_IT_MASK)) return; cond = (cpsr & 0xe000) >> 13; @@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) else itbits = (itbits << 1) & 0x1f; - cpsr &= ~COMPAT_PSR_IT_MASK; + cpsr &= ~PSR_AA32_IT_MASK; cpsr |= cond << 13; cpsr |= (itbits & 0x1c) << (10 - 2); cpsr |= (itbits & 0x3) << 25; @@ -138,7 
+138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) { bool is_thumb; - is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT); + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); if (is_thumb && !is_wide_instr) *vcpu_pc(vcpu) += 2; else @@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) { unsigned long cpsr; unsigned long new_spsr_value = *vcpu_cpsr(vcpu); - bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); + bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT); u32 return_offset = return_offsets[vect_offset >> 2][is_thumb]; u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); - cpsr = mode | COMPAT_PSR_I_BIT; + cpsr = mode | PSR_AA32_I_BIT; if (sctlr & (1 << 30)) - cpsr |= COMPAT_PSR_T_BIT; + cpsr |= PSR_AA32_T_BIT; if (sctlr & (1 << 25)) - cpsr |= COMPAT_PSR_E_BIT; + cpsr |= PSR_AA32_E_BIT; *vcpu_cpsr(vcpu) = cpsr; @@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) void kvm_inject_undef32(struct kvm_vcpu *vcpu) { - prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); + prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); } /* @@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, fsr = &vcpu_cp15(vcpu, c5_DFSR); } - prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset); + prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset); *far = addr; |
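The barrier.h hunk above adds an arm64 smp_cond_load_relaxed() that waits in __cmpwait_relaxed() (WFE) between polls instead of busy-spinning on the cache line; the queued spinlock code below relies on it. A minimal usage sketch, not taken from this series — the wait_for_nonzero() helper and its flag are made up for illustration:

#include <asm/barrier.h>

/* Spin until *flag becomes non-zero. VAL is the name the macro gives to
 * the freshly loaded value inside cond_expr; the loaded value is returned. */
static inline int wait_for_nonzero(int *flag)
{
	return smp_cond_load_relaxed(flag, VAL != 0);
}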
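With ARCH_USE_QUEUED_SPINLOCKS selected and the ticket-lock assembly removed from spinlock.h, arch_spin_lock() now comes from the generic queued spinlock implementation. Roughly what the generic fast path in include/asm-generic/qspinlock.h looks like at this point in time (simplified, shown for context only, not part of this patch):

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	/* Fast path: try to move the lock word from 0 to _Q_LOCKED_VAL. */
	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;

	/* Contended: queue on an MCS node and spin locally in the slow path. */
	queued_spin_lock_slowpath(lock, val);
}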
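The new compat_psr_to_pstate()/pstate_to_compat_psr() helpers in ptrace.h exist because DIT sits at bit 21 in the AArch32 CPSR (COMPAT_PSR_DIT_BIT) but at bit 24 in the SPSR_ELx view (PSR_AA32_DIT_BIT); every other bit is passed through unchanged. A small worked example, purely illustrative:

/* Illustration only: translating a user-visible AArch32 CPSR with DIT set
 * into the SPSR_ELx layout the kernel keeps in pt_regs::pstate. */
static void dit_translation_example(void)
{
	unsigned long cpsr = PSR_AA32_MODE_USR | COMPAT_PSR_DIT_BIT; /* 0x00200010 */
	unsigned long spsr = compat_psr_to_pstate(cpsr);             /* 0x01000010 */

	WARN_ON(pstate_to_compat_psr(spsr) != cpsr);                 /* round-trips */
}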
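After this series, arm64 has two I-cache maintenance entry points: __flush_icache_range() performs the by-VA maintenance only, while the new flush_icache_range() wrapper additionally calls kick_all_cpus_sync() so every online CPU takes a context-synchronizing IPI (with the KGDB bodge described in its comment). A hedged caller-side sketch — patch_branch() is a hypothetical helper, not part of the patch:

/* Hypothetical example: after writing an instruction that other CPUs may
 * execute concurrently, flush_icache_range() now broadcasts the IPI itself;
 * paths that already serialise all CPUs (e.g. stop_machine-style patching)
 * keep using __flush_icache_range() and avoid the redundant IPIs. */
static int patch_branch(void *addr, u32 new_insn)
{
	int ret = aarch64_insn_write(addr, new_insn);

	if (ret == 0)
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + AARCH64_INSN_SIZE);
	return ret;
}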
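The stacktrace.c hunk makes unwind_frame() return -EINVAL when the next frame pointer does not strictly increase, so a walk terminates even on a corrupted or looping stack. A caller-side sketch, assuming a struct stackframe already initialised with the task's fp/pc (the dump_pcs() name is made up):

/* Made-up helper: print every return address until the unwinder stops,
 * which now also happens when a frame fails the fp monotonicity check. */
static void dump_pcs(struct task_struct *tsk, struct stackframe *frame)
{
	do {
		pr_info(" pc: %pS\n", (void *)frame->pc);
	} while (unwind_frame(tsk, frame) == 0);
}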
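The ptdump_debugfs.c hunk replaces the hand-rolled single_open() boilerplate with DEFINE_SHOW_ATTRIBUTE(ptdump). Roughly what that macro expands to (simplified from include/linux/seq_file.h, shown for reference only):

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, inode->i_private);
}

static const struct file_operations ptdump_fops = {
	.owner   = THIS_MODULE,
	.open    = ptdump_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};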