Diffstat (limited to 'arch')
-rw-r--r-- arch/arm64/Kconfig | 11
-rw-r--r-- arch/arm64/include/asm/acpi.h | 5
-rw-r--r-- arch/arm64/include/asm/atomic.h | 6
-rw-r--r-- arch/arm64/include/asm/kvm_asm.h | 33
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 6
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 9
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 20
-rw-r--r-- arch/arm64/kernel/debug-monitors.c | 2
-rw-r--r-- arch/arm64/kernel/ftrace.c | 3
-rw-r--r-- arch/arm64/kernel/setup.c | 4
-rw-r--r-- arch/arm64/kernel/vdso32/Makefile | 8
-rw-r--r-- arch/arm64/kvm/aarch32.c | 28
-rw-r--r-- arch/arm64/kvm/arm.c | 25
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 32
-rw-r--r-- arch/arm64/kvm/hyp/debug-sr.c | 4
-rw-r--r-- arch/arm64/kvm/hyp/switch.c | 65
-rw-r--r-- arch/arm64/kvm/hyp/sysreg-sr.c | 8
-rw-r--r-- arch/arm64/kvm/pmu.c | 8
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 25
-rw-r--r-- arch/arm64/kvm/sys_regs_generic_v8.c | 10
-rw-r--r-- arch/m68k/coldfire/pci.c | 4
-rw-r--r-- arch/m68k/configs/stmark2_defconfig | 1
-rw-r--r-- arch/m68k/include/asm/uaccess_no.h | 6
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/include/asm/cpu-features.h | 3
-rw-r--r-- arch/mips/include/asm/kvm_host.h | 52
-rw-r--r-- arch/mips/include/asm/mipsregs.h | 4
-rw-r--r-- arch/mips/include/uapi/asm/inst.h | 11
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 5
-rw-r--r-- arch/mips/kvm/Kconfig | 1
-rw-r--r-- arch/mips/kvm/Makefile | 5
-rw-r--r-- arch/mips/kvm/emulate.c | 503
-rw-r--r-- arch/mips/kvm/entry.c | 19
-rw-r--r-- arch/mips/kvm/interrupt.c | 93
-rw-r--r-- arch/mips/kvm/interrupt.h | 14
-rw-r--r-- arch/mips/kvm/loongson_ipi.c | 214
-rw-r--r-- arch/mips/kvm/mips.c | 47
-rw-r--r-- arch/mips/kvm/tlb.c | 41
-rw-r--r-- arch/mips/kvm/trap_emul.c | 3
-rw-r--r-- arch/mips/kvm/vz.c | 237
-rw-r--r-- arch/nios2/kernel/signal.c | 1
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 16
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 1
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 27
-rw-r--r-- arch/powerpc/kvm/book3s.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s.h | 2
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 12
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 36
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio.c | 18
-rw-r--r-- arch/powerpc/kvm/book3s_emulate.c | 10
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 75
-rw-r--r-- arch/powerpc/kvm/book3s_hv_nested.c | 15
-rw-r--r-- arch/powerpc/kvm/book3s_hv_uvmem.c | 14
-rw-r--r-- arch/powerpc/kvm/book3s_paired_singles.c | 72
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 30
-rw-r--r-- arch/powerpc/kvm/booke.c | 36
-rw-r--r-- arch/powerpc/kvm/booke.h | 8
-rw-r--r-- arch/powerpc/kvm/booke_emulate.c | 2
-rw-r--r-- arch/powerpc/kvm/e500_emulate.c | 15
-rw-r--r-- arch/powerpc/kvm/emulate.c | 10
-rw-r--r-- arch/powerpc/kvm/emulate_loadstore.c | 32
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 72
-rw-r--r-- arch/powerpc/kvm/trace_hv.h | 6
-rw-r--r-- arch/powerpc/platforms/powernv/vas-fault.c | 4
-rw-r--r-- arch/riscv/Kconfig | 74
-rw-r--r-- arch/riscv/include/asm/clocksource.h | 7
-rw-r--r-- arch/riscv/include/asm/irq.h | 5
-rw-r--r-- arch/riscv/include/asm/processor.h | 13
-rw-r--r-- arch/riscv/include/asm/smp.h | 3
-rw-r--r-- arch/riscv/include/asm/vdso.h | 2
-rw-r--r-- arch/riscv/include/asm/vdso/clocksource.h | 8
-rw-r--r-- arch/riscv/include/asm/vdso/gettimeofday.h | 79
-rw-r--r-- arch/riscv/include/asm/vdso/processor.h | 19
-rw-r--r-- arch/riscv/include/asm/vdso/vsyscall.h | 27
-rw-r--r-- arch/riscv/kernel/cpu.c | 16
-rw-r--r-- arch/riscv/kernel/entry.S | 4
-rw-r--r-- arch/riscv/kernel/irq.c | 33
-rw-r--r-- arch/riscv/kernel/patch.c | 1
-rw-r--r-- arch/riscv/kernel/smp.c | 11
-rw-r--r-- arch/riscv/kernel/time.c | 9
-rw-r--r-- arch/riscv/kernel/traps.c | 2
-rw-r--r-- arch/riscv/kernel/vdso.c | 24
-rw-r--r-- arch/riscv/kernel/vdso/Makefile | 12
-rw-r--r-- arch/riscv/kernel/vdso/clock_getres.S | 18
-rw-r--r-- arch/riscv/kernel/vdso/clock_gettime.S | 18
-rw-r--r-- arch/riscv/kernel/vdso/gettimeofday.S | 18
-rw-r--r-- arch/riscv/kernel/vdso/vdso.lds.S | 2
-rw-r--r-- arch/riscv/kernel/vdso/vgettimeofday.c | 25
-rw-r--r-- arch/riscv/mm/init.c | 11
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 2
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 4
-rw-r--r-- arch/x86/Kconfig | 10
-rw-r--r-- arch/x86/boot/Makefile | 2
-rw-r--r-- arch/x86/boot/compressed/Makefile | 2
-rw-r--r-- arch/x86/entry/Makefile | 8
-rw-r--r-- arch/x86/entry/calling.h | 25
-rw-r--r-- arch/x86/entry/common.c | 440
-rw-r--r-- arch/x86/entry/entry_32.S | 485
-rw-r--r-- arch/x86/entry/entry_64.S | 840
-rw-r--r-- arch/x86/entry/entry_64_compat.S | 55
-rw-r--r-- arch/x86/entry/thunk_64.S | 14
-rw-r--r-- arch/x86/entry/vdso/Makefile | 6
-rw-r--r-- arch/x86/hyperv/hv_init.c | 9
-rw-r--r-- arch/x86/include/asm/acrn.h | 11
-rw-r--r-- arch/x86/include/asm/amd_nb.h | 1
-rw-r--r-- arch/x86/include/asm/apic.h | 33
-rw-r--r-- arch/x86/include/asm/atomic.h | 31
-rw-r--r-- arch/x86/include/asm/atomic64_32.h | 9
-rw-r--r-- arch/x86/include/asm/atomic64_64.h | 15
-rw-r--r-- arch/x86/include/asm/bitops.h | 6
-rw-r--r-- arch/x86/include/asm/bug.h | 3
-rw-r--r-- arch/x86/include/asm/cpu_entry_area.h | 12
-rw-r--r-- arch/x86/include/asm/debugreg.h | 48
-rw-r--r-- arch/x86/include/asm/desc.h | 52
-rw-r--r-- arch/x86/include/asm/entry_arch.h | 56
-rw-r--r-- arch/x86/include/asm/hw_irq.h | 22
-rw-r--r-- arch/x86/include/asm/idtentry.h | 652
-rw-r--r-- arch/x86/include/asm/intel-family.h | 2
-rw-r--r-- arch/x86/include/asm/irq.h | 15
-rw-r--r-- arch/x86/include/asm/irq_regs.h | 32
-rw-r--r-- arch/x86/include/asm/irq_stack.h | 53
-rw-r--r-- arch/x86/include/asm/irq_work.h | 1
-rw-r--r-- arch/x86/include/asm/irqflags.h | 54
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 3
-rw-r--r-- arch/x86/include/asm/kvm_para.h | 2
-rw-r--r-- arch/x86/include/asm/mce.h | 30
-rw-r--r-- arch/x86/include/asm/mshyperv.h | 13
-rw-r--r-- arch/x86/include/asm/nospec-branch.h | 4
-rw-r--r-- arch/x86/include/asm/pgtable_32.h | 7
-rw-r--r-- arch/x86/include/asm/processor.h | 2
-rw-r--r-- arch/x86/include/asm/ptrace.h | 2
-rw-r--r-- arch/x86/include/asm/set_memory.h | 19
-rw-r--r-- arch/x86/include/asm/special_insns.h | 22
-rw-r--r-- arch/x86/include/asm/text-patching.h | 11
-rw-r--r-- arch/x86/include/asm/trace/common.h | 4
-rw-r--r-- arch/x86/include/asm/trace/irq_vectors.h | 17
-rw-r--r-- arch/x86/include/asm/trapnr.h | 31
-rw-r--r-- arch/x86/include/asm/traps.h | 118
-rw-r--r-- arch/x86/include/asm/uv/uv_bau.h | 8
-rw-r--r-- arch/x86/include/asm/vdso/gettimeofday.h | 18
-rw-r--r-- arch/x86/include/uapi/asm/mce.h | 1
-rw-r--r-- arch/x86/kernel/Makefile | 4
-rw-r--r-- arch/x86/kernel/alternative.c | 25
-rw-r--r-- arch/x86/kernel/amd_nb.c | 5
-rw-r--r-- arch/x86/kernel/apic/apic.c | 41
-rw-r--r-- arch/x86/kernel/apic/msi.c | 3
-rw-r--r-- arch/x86/kernel/apic/vector.c | 5
-rw-r--r-- arch/x86/kernel/asm-offsets_64.c | 3
-rw-r--r-- arch/x86/kernel/cpu/Makefile | 3
-rw-r--r-- arch/x86/kernel/cpu/acrn.c | 9
-rw-r--r-- arch/x86/kernel/cpu/bugs.c | 92
-rw-r--r-- arch/x86/kernel/cpu/common.c | 19
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 3
-rw-r--r-- arch/x86/kernel/cpu/mce/amd.c | 238
-rw-r--r-- arch/x86/kernel/cpu/mce/core.c | 206
-rw-r--r-- arch/x86/kernel/cpu/mce/dev-mcelog.c | 8
-rw-r--r-- arch/x86/kernel/cpu/mce/inject.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mce/internal.h | 12
-rw-r--r-- arch/x86/kernel/cpu/mce/p5.c | 8
-rw-r--r-- arch/x86/kernel/cpu/mce/severity.c | 6
-rw-r--r-- arch/x86/kernel/cpu/mce/therm_throt.c | 5
-rw-r--r-- arch/x86/kernel/cpu/mce/threshold.c | 5
-rw-r--r-- arch/x86/kernel/cpu/mce/winchip.c | 8
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 22
-rw-r--r-- arch/x86/kernel/doublefault_32.c | 10
-rw-r--r-- arch/x86/kernel/dumpstack_64.c | 7
-rw-r--r-- arch/x86/kernel/e820.c | 10
-rw-r--r-- arch/x86/kernel/ftrace_64.S | 2
-rw-r--r-- arch/x86/kernel/head_64.S | 7
-rw-r--r-- arch/x86/kernel/hw_breakpoint.c | 100
-rw-r--r-- arch/x86/kernel/idt.c | 226
-rw-r--r-- arch/x86/kernel/irq.c | 66
-rw-r--r-- arch/x86/kernel/irq_32.c | 2
-rw-r--r-- arch/x86/kernel/irq_64.c | 6
-rw-r--r-- arch/x86/kernel/irq_work.c | 6
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 7
-rw-r--r-- arch/x86/kernel/kprobes/opt.c | 4
-rw-r--r-- arch/x86/kernel/kvm.c | 16
-rw-r--r-- arch/x86/kernel/nmi.c | 75
-rw-r--r-- arch/x86/kernel/process.c | 28
-rw-r--r-- arch/x86/kernel/reboot.c | 8
-rw-r--r-- arch/x86/kernel/smp.c | 37
-rw-r--r-- arch/x86/kernel/sys_ia32.c | 40
-rw-r--r-- arch/x86/kernel/time.c | 4
-rw-r--r-- arch/x86/kernel/tracepoint.c | 17
-rw-r--r-- arch/x86/kernel/traps.c | 548
-rw-r--r-- arch/x86/kernel/unwind_frame.c | 8
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 5
-rw-r--r-- arch/x86/kvm/cpuid.c | 31
-rw-r--r-- arch/x86/kvm/debugfs.c | 10
-rw-r--r-- arch/x86/kvm/emulate.c | 8
-rw-r--r-- arch/x86/kvm/hyperv.c | 2
-rw-r--r-- arch/x86/kvm/i8254.c | 1
-rw-r--r-- arch/x86/kvm/svm/nested.c | 2
-rw-r--r-- arch/x86/kvm/svm/svm.c | 6
-rw-r--r-- arch/x86/kvm/vmx/nested.c | 84
-rw-r--r-- arch/x86/kvm/vmx/pmu_intel.c | 2
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 40
-rw-r--r-- arch/x86/kvm/vmx/vmx.h | 2
-rw-r--r-- arch/x86/kvm/x86.c | 139
-rw-r--r-- arch/x86/lib/Makefile | 9
-rw-r--r-- arch/x86/mm/Makefile | 4
-rw-r--r-- arch/x86/mm/cpu_entry_area.c | 1
-rw-r--r-- arch/x86/mm/extable.c | 15
-rw-r--r-- arch/x86/mm/fault.c | 78
-rw-r--r-- arch/x86/mm/pti.c | 4
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 2
-rw-r--r-- arch/x86/purgatory/.gitignore | 1
-rw-r--r-- arch/x86/purgatory/Makefile | 21
-rw-r--r-- arch/x86/realmode/Makefile | 3
-rw-r--r-- arch/x86/realmode/rm/Makefile | 3
-rw-r--r-- arch/x86/xen/enlighten_hvm.c | 12
-rw-r--r-- arch/x86/xen/enlighten_pv.c | 52
-rw-r--r-- arch/x86/xen/setup.c | 4
-rw-r--r-- arch/x86/xen/smp_pv.c | 3
-rw-r--r-- arch/x86/xen/suspend_hvm.c | 3
-rw-r--r-- arch/x86/xen/xen-asm_32.S | 14
-rw-r--r-- arch/x86/xen/xen-asm_64.S | 44
-rw-r--r-- arch/x86/xen/xen-ops.h | 2
219 files changed, 5040 insertions, 3392 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7f9d38444d6d..8a46ed3ab429 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1299,6 +1299,14 @@ config COMPAT_VDSO
You must have a 32-bit build of glibc 2.22 or later for programs
to seamlessly take advantage of this.
+config THUMB2_COMPAT_VDSO
+ bool "Compile the 32-bit vDSO for Thumb-2 mode" if EXPERT
+ depends on COMPAT_VDSO
+ default y
+ help
+ Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
+ otherwise with '-marm'.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on SYSCTL
@@ -1740,8 +1748,9 @@ config ARM64_DEBUG_PRIORITY_MASKING
endif
config RELOCATABLE
- bool
+ bool "Build a relocatable kernel image" if EXPERT
select ARCH_HAS_RELR
+ default y
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index b263e239cb59..a45366c3909b 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>
+#include <linux/stddef.h>
#include <asm/cputype.h>
#include <asm/io.h>
@@ -31,14 +32,14 @@
* is therefore used to delimit the MADT GICC structure minimum length
* appropriately.
*/
-#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \
+#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \
struct acpi_madt_generic_interrupt, efficiency_class)
#define BAD_MADT_GICC_ENTRY(entry, end) \
(!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
(unsigned long)(entry) + (entry)->header.length > (end))
-#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \
+#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \
spe_interrupt) + sizeof(u16))
/* Basic configuration for ACPI */
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9543b5e0534d..a08890da696c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -101,8 +101,8 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define ATOMIC_INIT(i) { (i) }
-#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) __READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
@@ -225,6 +225,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* __ASM_ATOMIC_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 0c9b5fc4ba0a..352aaebf4198 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to a loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s) \
+ ({ \
+ typeof(s) *addr; \
+ asm("adrp %0, %1\n" \
+ "add %0, %0, :lo12:%1\n" \
+ : "=r" (addr) : "S" (&s)); \
+ addr; \
+ })
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
#define __hyp_this_cpu_ptr(sym) \
({ \
- void *__ptr = hyp_symbol_addr(sym); \
+ void *__ptr; \
+ __verify_pcpu_ptr(&sym); \
+ __ptr = hyp_symbol_addr(sym); \
__ptr += read_sysreg(tpidr_el2); \
- (typeof(&sym))__ptr; \
+ (typeof(sym) __kernel __force *)__ptr; \
})
#define __hyp_this_cpu_read(sym) \
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6ea53e6e8b26..4d0f8ea600ba 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
- if (vcpu_has_ptrauth(vcpu))
- vcpu_ptrauth_disable(vcpu);
-}
-
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index abbdf9703e20..c3e6fcc664b1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- /* Pointer to host CPU context */
- struct kvm_cpu_context *host_cpu_context;
-
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
@@ -404,8 +401,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
* CP14 and CP15 live in the same array, as they are backed by the
* same system registers.
*/
-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
struct kvm_vm_stat {
ulong remote_tlb_flush;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 094260aaafdd..b12bfc1f051a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -108,26 +108,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
- ({ \
- typeof(s) *addr; \
- asm("adrp %0, %1\n" \
- "add %0, %0, :lo12:%1\n" \
- : "=r" (addr) : "S" (&s)); \
- addr; \
- })
-
-/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
*/
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 15e80c876d46..5df49366e9ab 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -130,7 +130,7 @@ static int clear_os_lock(unsigned int cpu)
return 0;
}
-static int debug_monitors_init(void)
+static int __init debug_monitors_init(void)
{
return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
"arm64/debug_monitors:starting",
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 8618faa82e6d..86a5cf9bc19a 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -69,7 +69,8 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
if (addr == FTRACE_ADDR)
return &plt[FTRACE_PLT_IDX];
- if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_FTRACE_WITH_REGS))
+ if (addr == FTRACE_REGS_ADDR &&
+ IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
return &plt[FTRACE_REGS_PLT_IDX];
#endif
return NULL;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3fd2c11c09fc..93b3844cf442 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -319,6 +319,10 @@ void __init setup_arch(char **cmdline_p)
xen_early_init();
efi_init();
+
+ if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
+ pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+
arm64_memblock_init();
paging_init();
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 3964738ebbde..7ea1e827e505 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -105,6 +105,14 @@ VDSO_CFLAGS += -D__uint128_t='void*'
VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
VDSO_CFLAGS += -Wno-int-to-pointer-cast
+# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
+# unreliable.
+ifeq ($(CONFIG_THUMB2_COMPAT_VDSO), y)
+VDSO_CFLAGS += -mthumb -fomit-frame-pointer
+else
+VDSO_CFLAGS += -marm
+endif
+
VDSO_AFLAGS := $(VDSO_CAFLAGS)
VDSO_AFLAGS += -D__ASSEMBLY__
diff --git a/arch/arm64/kvm/aarch32.c b/arch/arm64/kvm/aarch32.c
index 0a356aa91aa1..40a62a99fbf8 100644
--- a/arch/arm64/kvm/aarch32.c
+++ b/arch/arm64/kvm/aarch32.c
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (vcpu->arch.sysregs_loaded_on_cpu) {
+ kvm_arch_vcpu_put(vcpu);
+ return true;
+ }
+
+ preempt_enable();
+ return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+ if (loaded) {
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ preempt_enable();
+ }
+}
+
/*
* When an exception is taken, most CPSR fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
+ bool loaded = pre_fault_synchronize(vcpu);
+
prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+ post_fault_synchronize(vcpu, loaded);
}
/*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
u32 vect_offset;
u32 *far, *fsr;
bool is_lpae;
+ bool loaded;
+
+ loaded = pre_fault_synchronize(vcpu);
if (is_pabt) {
vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
/* no need to shuffle FS[4] into DFSR[10] as its 0 */
*fsr = DFSR_FSC_EXTABT_nLPAE;
}
+
+ post_fault_synchronize(vcpu, loaded);
}
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7a57381c05e8..90cb90561446 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -144,11 +144,6 @@ out_fail_alloc:
return ret;
}
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
@@ -340,10 +335,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
- kvm_host_data_t *cpu_data;
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
- cpu_data = this_cpu_ptr(&kvm_host_data);
/*
* We might get preempted before the vCPU actually runs, but
@@ -355,7 +348,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
@@ -370,7 +362,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
else
vcpu_set_wfx_traps(vcpu);
- vcpu_ptrauth_setup_lazy(vcpu);
+ if (vcpu_has_ptrauth(vcpu))
+ vcpu_ptrauth_disable(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -990,11 +983,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* Ensure a rebooted VM will fault in RAM pages and detect if the
* guest MMU is turned off and flush the caches as needed.
*
- * S2FWB enforces all memory accesses to RAM being cacheable, we
- * ensure that the cache is always coherent.
+ * S2FWB enforces all memory accesses to RAM being cacheable,
+ * ensuring that the data side is always coherent. We still
+ * need to invalidate the I-cache though, as FWB does *not*
+ * imply CTR_EL0.DIC.
*/
- if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
- stage2_unmap_vm(vcpu->kvm);
+ if (vcpu->arch.has_run_once) {
+ if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+ stage2_unmap_vm(vcpu->kvm);
+ else
+ __flush_icache_all();
+ }
vcpu_reset_hcr(vcpu);
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index eb194696ef62..5a02d4c90559 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
-#define __ptrauth_save_key(regs, key) \
-({ \
- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
-})
-
-/*
- * Handle the guest trying to use a ptrauth instruction, or trying to access a
- * ptrauth register.
- */
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpu_context *ctxt;
-
- if (vcpu_has_ptrauth(vcpu)) {
- vcpu_ptrauth_enable(vcpu);
- ctxt = vcpu->arch.host_cpu_context;
- __ptrauth_save_key(ctxt->sys_regs, APIA);
- __ptrauth_save_key(ctxt->sys_regs, APIB);
- __ptrauth_save_key(ctxt->sys_regs, APDA);
- __ptrauth_save_key(ctxt->sys_regs, APDB);
- __ptrauth_save_key(ctxt->sys_regs, APGA);
- } else {
- kvm_inject_undefined(vcpu);
- }
-}
-
/*
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP).
+ * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
+ * that we can do is give the guest an UNDEF.
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- kvm_arm_vcpu_ptrauth_trap(vcpu);
+ kvm_inject_undefined(vcpu);
return 1;
}
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 0fc9872a1467..e95af204fec7 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 676b6585e5ae..db1c4487d95d 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
+static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+{
+ u32 ec = ESR_ELx_EC(esr);
+
+ if (ec == ESR_ELx_EC_PAC)
+ return true;
+
+ if (ec != ESR_ELx_EC_SYS64)
+ return false;
+
+ switch (esr_sys64_to_sysreg(esr)) {
+ case SYS_APIAKEYLO_EL1:
+ case SYS_APIAKEYHI_EL1:
+ case SYS_APIBKEYLO_EL1:
+ case SYS_APIBKEYHI_EL1:
+ case SYS_APDAKEYLO_EL1:
+ case SYS_APDAKEYHI_EL1:
+ case SYS_APDBKEYLO_EL1:
+ case SYS_APDBKEYHI_EL1:
+ case SYS_APGAKEYLO_EL1:
+ case SYS_APGAKEYHI_EL1:
+ return true;
+ }
+
+ return false;
+}
+
+#define __ptrauth_save_key(regs, key) \
+({ \
+ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *ctxt;
+ u64 val;
+
+ if (!vcpu_has_ptrauth(vcpu) ||
+ !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+ return false;
+
+ ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ __ptrauth_save_key(ctxt->sys_regs, APIA);
+ __ptrauth_save_key(ctxt->sys_regs, APIB);
+ __ptrauth_save_key(ctxt->sys_regs, APDA);
+ __ptrauth_save_key(ctxt->sys_regs, APDB);
+ __ptrauth_save_key(ctxt->sys_regs, APGA);
+
+ vcpu_ptrauth_enable(vcpu);
+
+ val = read_sysreg(hcr_el2);
+ val |= (HCR_API | HCR_APK);
+ write_sysreg(val, hcr_el2);
+
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (__hyp_handle_fpsimd(vcpu))
return true;
+ if (__hyp_handle_ptrauth(vcpu))
+ return true;
+
if (!__populate_fault_info(vcpu))
return true;
@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = vcpu->arch.host_cpu_context;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu = kern_hyp_va(vcpu);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index ea5d22fbdacf..cc7e957f5b2c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -39,7 +39,6 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
- ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
@@ -123,7 +122,6 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
isb();
}
- write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
@@ -267,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
@@ -303,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index e71d00bb5271..b5ae3a5d509e 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 80985439bfb2..baf5ce9225ce 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,6 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
switch (reg) {
case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break;
case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
- case ACTLR_EL1: *val = read_sysreg_s(SYS_ACTLR_EL1); break;
case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
@@ -118,7 +117,6 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
switch (reg) {
case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break;
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
- case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); break;
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
@@ -1034,16 +1032,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- kvm_arm_vcpu_ptrauth_trap(vcpu);
-
/*
- * Return false for both cases as we never skip the trapped
- * instruction:
- *
- * - Either we re-execute the same key register access instruction
- * after enabling ptrauth.
- * - Or an UNDEF is injected as ptrauth is not supported/enabled.
+ * If we land here, that is because we didn't fixup the access on exit
+ * by allowing the PtrAuth sysregs. The only way this happens is when
+ * the guest does not have PtrAuth support enabled.
*/
+ kvm_inject_undefined(vcpu);
+
return false;
}
@@ -1319,10 +1314,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ int reg = r->reg;
+
+ /* See the 32bit mapping in kvm_host.h */
+ if (p->is_aarch32)
+ reg = r->reg / 2;
+
if (p->is_write)
- vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+ vcpu_write_sys_reg(vcpu, p->regval, reg);
else
- p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+ p->regval = vcpu_read_sys_reg(vcpu, reg);
return true;
}
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 9cb6b4c8355a..aa9d356451eb 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -27,6 +27,14 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
return ignore_write(vcpu, p);
p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
+
+ if (p->is_aarch32) {
+ if (r->Op2 & 2)
+ p->regval = upper_32_bits(p->regval);
+ else
+ p->regval = lower_32_bits(p->regval);
+ }
+
return true;
}
@@ -47,6 +55,8 @@ static const struct sys_reg_desc genericv8_cp15_regs[] = {
/* ACTLR */
{ Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
access_actlr },
+ { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b011),
+ access_actlr },
};
static struct kvm_sys_reg_target_table genericv8_target_table = {
diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c
index 62b0eb6cf69a..84eab0f5e00a 100644
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -216,8 +216,10 @@ static int __init mcf_pci_init(void)
/* Keep a virtual mapping to IO/config space active */
iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE);
- if (iospace == 0)
+ if (iospace == 0) {
+ pci_free_host_bridge(bridge);
return -ENODEV;
+ }
pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n",
(u32) iospace);
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index 27fa9465d19d..2b746f55f419 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -48,7 +48,6 @@ CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_ROM=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index a24cfe4a0d32..dcfb69361408 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -42,7 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
__put_user_asm(__pu_err, __pu_val, ptr, l); \
break; \
case 8: \
- memcpy(ptr, &__pu_val, sizeof (*(ptr))); \
+ memcpy((void __force *)ptr, &__pu_val, sizeof(*(ptr))); \
break; \
default: \
__pu_err = __put_user_bad(); \
@@ -60,7 +60,7 @@ extern int __put_user_bad(void);
* aliasing issues.
*/
-#define __ptr(x) ((unsigned long *)(x))
+#define __ptr(x) ((unsigned long __user *)(x))
#define __put_user_asm(err,x,ptr,bwl) \
__asm__ ("move" #bwl " %0,%1" \
@@ -85,7 +85,7 @@ extern int __put_user_bad(void);
u64 l; \
__typeof__(*(ptr)) t; \
} __gu_val; \
- memcpy(&__gu_val.l, ptr, sizeof(__gu_val.l)); \
+ memcpy(&__gu_val.l, (const void __force *)ptr, sizeof(__gu_val.l)); \
(x) = __gu_val.t; \
break; \
} \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index eb1e86c30d0c..26c63e8161f0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1403,6 +1403,7 @@ config CPU_LOONGSON64
select MIPS_L1_CACHE_SHIFT_6
select GPIOLIB
select SWIOTLB
+ select HAVE_KVM
help
The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor
cores implements the MIPS64R2 instruction set with many extensions,
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index caecbae4b599..724dfddcab92 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -682,6 +682,9 @@
#ifndef cpu_guest_has_htw
#define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
#endif
+#ifndef cpu_guest_has_ldpte
+#define cpu_guest_has_ldpte (cpu_data[0].guest.options & MIPS_CPU_LDPTE)
+#endif
#ifndef cpu_guest_has_mvh
#define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH)
#endif
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e28b5a946e26..363e7a89d173 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -23,6 +23,8 @@
#include <asm/inst.h>
#include <asm/mipsregs.h>
+#include <kvm/iodev.h>
+
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
@@ -66,9 +68,11 @@
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
+#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_DIAG MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
@@ -78,8 +82,8 @@
#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
-#define KVM_MAX_VCPUS 8
-#define KVM_USER_MEM_SLOTS 8
+#define KVM_MAX_VCPUS 16
+#define KVM_USER_MEM_SLOTS 16
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0
@@ -171,6 +175,9 @@ struct kvm_vcpu_stat {
u64 vz_ghfc_exits;
u64 vz_gpa_exits;
u64 vz_resvd_exits;
+#ifdef CONFIG_CPU_LOONGSON64
+ u64 vz_cpucfg_exits;
+#endif
#endif
u64 halt_successful_poll;
u64 halt_attempted_poll;
@@ -183,11 +190,39 @@ struct kvm_vcpu_stat {
struct kvm_arch_memory_slot {
};
+#ifdef CONFIG_CPU_LOONGSON64
+struct ipi_state {
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ uint64_t buf[4];
+};
+
+struct loongson_kvm_ipi;
+
+struct ipi_io_device {
+ int node_id;
+ struct loongson_kvm_ipi *ipi;
+ struct kvm_io_device device;
+};
+
+struct loongson_kvm_ipi {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct ipi_state ipistate[16];
+ struct ipi_io_device dev_ipi[4];
+};
+#endif
+
struct kvm_arch {
/* Guest physical mm */
struct mm_struct gpa_mm;
/* Mask of CPUs needing GPA ASID flush */
cpumask_t asid_flush_mask;
+#ifdef CONFIG_CPU_LOONGSON64
+ struct loongson_kvm_ipi ipi;
+#endif
};
#define N_MIPS_COPROC_REGS 32
@@ -225,6 +260,7 @@ struct mips_coproc {
#define MIPS_CP0_WATCH_LO 18
#define MIPS_CP0_WATCH_HI 19
#define MIPS_CP0_TLB_XCONTEXT 20
+#define MIPS_CP0_DIAG 22
#define MIPS_CP0_ECC 26
#define MIPS_CP0_CACHE_ERR 27
#define MIPS_CP0_TAG_LO 28
@@ -276,8 +312,12 @@ enum emulation_result {
#define MIPS3_PG_SHIFT 6
#define MIPS3_PG_FRAME 0x3fffffc0
+#if defined(CONFIG_64BIT)
+#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
+#else
#define VPN2_MASK 0xffffe000
-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
+#endif
+#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
@@ -892,6 +932,10 @@ void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count);
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void);
+void kvm_loongson_clear_guest_ftlb(void);
+#endif
#endif
void kvm_mips_suspend_mm(int cpu);
@@ -1131,6 +1175,8 @@ extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 796dbb86575b..20d6d40c59a4 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1038,6 +1038,8 @@
/* Disable Branch Return Cache */
#define R10K_DIAG_D_BRC (_ULCAST_(1) << 22)
+/* Flush BTB */
+#define LOONGSON_DIAG_BTB (_ULCAST_(1) << 1)
/* Flush ITLB */
#define LOONGSON_DIAG_ITLB (_ULCAST_(1) << 2)
/* Flush DTLB */
@@ -2874,7 +2876,9 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
+__BUILD_SET_C0(config6)
__BUILD_SET_C0(config7)
+__BUILD_SET_C0(diag)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 98f97c85e059..43d1faa02933 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -1012,6 +1012,16 @@ struct loongson3_lsdc2_format { /* Loongson-3 overridden ldc2/sdc2 Load/Store fo
;))))))
};
+struct loongson3_lscsr_format { /* Loongson-3 CPUCFG&CSR read/write format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
/*
* MIPS16e instruction formats (16-bit length)
*/
@@ -1114,6 +1124,7 @@ union mips_instruction {
struct mm16_r5_format mm16_r5_format;
struct loongson3_lswc2_format loongson3_lswc2_format;
struct loongson3_lsdc2_format loongson3_lsdc2_format;
+ struct loongson3_lscsr_format loongson3_lscsr_format;
};
union mips16e_instruction {
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6b93162d7c5a..def1659fe262 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -2017,8 +2017,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
if (cfg2 & LOONGSON_CFG2_LEXT2)
c->ases |= MIPS_ASE_LOONGSON_EXT2;
- if (cfg2 & LOONGSON_CFG2_LSPW)
+ if (cfg2 & LOONGSON_CFG2_LSPW) {
c->options |= MIPS_CPU_LDPTE;
+ c->guest.options |= MIPS_CPU_LDPTE;
+ }
if (cfg3 & LOONGSON_CFG3_LCAMP)
c->ases |= MIPS_ASE_LOONGSON_CAM;
@@ -2074,6 +2076,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
break;
case PRID_IMP_LOONGSON_64G:
c->cputype = CPU_LOONGSON64;
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index b91d145aa2d5..d697752a5723 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
select EXPORT_UASM
select PREEMPT_NOTIFIERS
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select HAVE_KVM_EVENTFD
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
select MMU_NOTIFIER
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 01affc1d21c5..506c4ac0ba1c 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for KVM support for MIPS
#
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
@@ -13,6 +13,9 @@ kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
fpu.o
kvm-objs += hypcall.o
kvm-objs += mmu.o
+ifdef CONFIG_CPU_LOONGSON64
+kvm-objs += loongson_ipi.o
+endif
ifdef CONFIG_KVM_MIPS_VZ
kvm-objs += vz.o
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 7ccf9b096783..5ae82d925197 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1600,9 +1600,11 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ int r;
enum emulation_result er;
u32 rt;
void *data = run->mmio.data;
+ unsigned int imme;
unsigned long curr_pc;
/*
@@ -1660,15 +1662,229 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
vcpu->arch.gprs[rt], *(u8 *)data);
break;
+ case swl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
+ (vcpu->arch.gprs[rt] >> 24);
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
+ (vcpu->arch.gprs[rt] >> 16);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
+ (vcpu->arch.gprs[rt] >> 8);
+ break;
+ case 3:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case swr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case sdl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
+ ((vcpu->arch.gprs[rt] >> 56) & 0xff);
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
+ ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
+ ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
+ ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
+ ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
+ ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
+ ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
+ break;
+ case 7:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+
+ case sdr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
+ (vcpu->arch.gprs[rt] << 32);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
+ (vcpu->arch.gprs[rt] << 40);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
+ (vcpu->arch.gprs[rt] << 48);
+ break;
+ case 7:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
+ (vcpu->arch.gprs[rt] << 56);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case sdc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden sdc2 instructions.
+ * opcode1 instruction
+ * 0x0 gssbx: store 1 byte from GPR
+ * 0x1 gsshx: store 2 bytes from GPR
+ * 0x2 gsswx: store 4 bytes from GPR
+ * 0x3 gssdx: store 8 bytes from GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ *(u8 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u8 *)data);
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ *(u16 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u16 *)data);
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ *(u32 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ *(u64 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+ default:
+ kvm_err("Godson Exteneded GS-Store not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
default:
kvm_err("Store not yet supported (inst=0x%08x)\n",
inst.word);
goto out_fail;
}
- run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
+ run->mmio.is_write = 1;
vcpu->mmio_is_write = 1;
+
+ r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, data);
+
+ if (!r) {
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
out_fail:
@@ -1681,9 +1897,11 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
u32 cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ int r;
enum emulation_result er;
unsigned long curr_pc;
u32 op, rt;
+ unsigned int imme;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
@@ -1736,6 +1954,162 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
run->mmio.len = 1;
break;
+ case lwl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 3; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 4; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 5; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 6; /* 4 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case lwr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 7; /* 4 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 8; /* 3 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 9; /* 2 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 10; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case ldl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 11; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 12; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 13; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 14; /* 4 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 15; /* 5 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 16; /* 6 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 17; /* 7 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 18; /* 8 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case ldr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 19; /* 8 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 20; /* 7 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 21; /* 6 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 22; /* 5 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 23; /* 4 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 24; /* 3 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 25; /* 2 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 26; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case ldc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden ldc2 instructions.
+ * opcode1 instruction
+ * 0x0 gslbx: load 1 byte to GPR
+ * 0x1 gslhx: load 2 bytes to GPR
+ * 0x2 gslwx: load 4 bytes to GPR
+ * 0x3 gsldx: load 8 bytes to GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ vcpu->mmio_needed = 27; /* signed */
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ vcpu->mmio_needed = 28; /* signed */
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ vcpu->mmio_needed = 29; /* signed */
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ vcpu->mmio_needed = 30; /* signed */
+ break;
+ default:
+ kvm_err("Godson Exteneded GS-Load for float not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
+
default:
kvm_err("Load not yet supported (inst=0x%08x)\n",
inst.word);
@@ -1745,6 +2119,16 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
+
+ r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, run->mmio.data);
+
+ if (!r) {
+ kvm_mips_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
}
@@ -2591,28 +2975,125 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
switch (run->mmio.len) {
case 8:
- *gpr = *(s64 *)run->mmio.data;
+ switch (vcpu->mmio_needed) {
+ case 11:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xff) << 56);
+ break;
+ case 12:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffff) << 48);
+ break;
+ case 13:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffff) << 40);
+ break;
+ case 14:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
+ break;
+ case 15:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
+ break;
+ case 16:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
+ break;
+ case 17:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
+ break;
+ case 18:
+ case 19:
+ *gpr = *(s64 *)run->mmio.data;
+ break;
+ case 20:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
+ break;
+ case 21:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
+ break;
+ case 22:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
+ break;
+ case 23:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
+ ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
+ break;
+ case 24:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
+ ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
+ break;
+ case 25:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
+ ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
+ break;
+ case 26:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
+ ((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
+ break;
+ default:
+ *gpr = *(s64 *)run->mmio.data;
+ }
break;
case 4:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s32 *)run->mmio.data;
- else
+ switch (vcpu->mmio_needed) {
+ case 1:
*gpr = *(u32 *)run->mmio.data;
+ break;
+ case 2:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 3:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s32 *)run->mmio.data) & 0xff) << 24);
+ break;
+ case 4:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s32 *)run->mmio.data) & 0xffff) << 16);
+ break;
+ case 5:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s32 *)run->mmio.data) & 0xffffff) << 8);
+ break;
+ case 6:
+ case 7:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 8:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
+ ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
+ break;
+ case 9:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
+ ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
+ break;
+ case 10:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
+ ((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
+ break;
+ default:
+ *gpr = *(s32 *)run->mmio.data;
+ }
break;
case 2:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s16 *) run->mmio.data;
- else
+ if (vcpu->mmio_needed == 1)
*gpr = *(u16 *)run->mmio.data;
+ else
+ *gpr = *(s16 *)run->mmio.data;
break;
case 1:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s8 *) run->mmio.data;
+ if (vcpu->mmio_needed == 1)
+ *gpr = *(u8 *)run->mmio.data;
else
- *gpr = *(u8 *) run->mmio.data;
+ *gpr = *(s8 *)run->mmio.data;
break;
}
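As a rough illustration of the len == 4 merge cases above (a sketch, not code from the patch; the helper name is made up): some of the bytes returned by the MMIO read are folded into one end of the destination GPR while the rest of the register keeps its old value, which is the MIPS "load left"/"load right" semantics carried over MMIO.

#include <stdint.h>

/*
 * Sketch only: generic form of the 32-bit "load word left" merges in
 * cases 3-5 above.  'nbytes' (1..3) is how many bytes the MMIO read
 * supplied; they land in the most-significant bytes of the GPR while
 * the low bytes of the old register value are preserved.
 */
static inline uint32_t mmio_lwl_merge(uint32_t old_gpr, uint32_t data,
				      unsigned int nbytes)
{
	unsigned int shift = 32 - 8 * nbytes;		/* low bits kept */
	uint32_t keep_mask = (1u << shift) - 1;		/* old GPR bytes */
	uint32_t data_mask = (1u << (8 * nbytes)) - 1;	/* new bytes */

	return (old_gpr & keep_mask) | ((data & data_mask) << shift);
}

For nbytes == 1 this is exactly case 3, (old & 0xffffff) | ((data & 0xff) << 24); the "load right" cases 8-10 are the mirror image, shifting the MMIO data down into the low bytes instead, and the len == 8 cases 11-26 repeat both patterns at 64-bit width.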
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..fd716942e302 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -56,6 +56,7 @@
#define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1
#define C0_BADINSTRP 8, 2
+#define C0_PGD 9, 7
#define C0_ENTRYHI 10, 0
#define C0_GUESTCTL1 10, 4
#define C0_STATUS 12, 0
@@ -307,7 +308,10 @@ static void *kvm_mips_build_enter_guest(void *addr)
#ifdef CONFIG_KVM_MIPS_VZ
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
- UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+ if (cpu_has_ldpte)
+ UASM_i_MFC0(&p, K0, C0_PWBASE);
+ else
+ UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
/*
@@ -469,8 +473,10 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
u32 *p = addr;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
+#endif
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -490,6 +496,16 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
*/
preempt_disable();
+#ifdef CONFIG_CPU_LOONGSON64
+ UASM_i_MFC0(&p, K1, C0_PGD);
+ uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+#endif
+ uasm_i_ldpte(&p, K1, 0); /* even */
+ uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_tlbwr(&p);
+#else
/*
* Now for the actual refill bit. A lot of this can be common with the
* Linux TLB refill handler, however we don't need to handle so many
@@ -512,6 +528,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
build_get_ptep(&p, K0, K1);
build_update_entries(&p, K0, K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
preempt_enable();
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index 7257e8b6f5a9..d28c2c9c343e 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -61,27 +61,8 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
* the EXC code will be set when we are actually
* delivering the interrupt:
*/
- switch (intr) {
- case 2:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- /* Queue up an INT exception for the core */
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));
+ kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -89,26 +70,8 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
{
int intr = (int)irq->irq;
- switch (intr) {
- case -2:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));
+ kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
/* Deliver the interrupt of the corresponding priority, if possible. */
@@ -116,50 +79,20 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
int allowed = 0;
- u32 exccode;
+ u32 exccode, ie;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0;
- switch (priority) {
- case MIPS_EXC_INT_TIMER:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IO:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_1:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_2:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
+ if (priority == MIPS_EXC_MAX)
+ return 0;
- default:
- break;
+ ie = kvm_priority_to_irq[priority];
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & ie)) {
+ allowed = 1;
+ exccode = EXCCODE_INT;
}
/* Are we allowed to deliver the interrupt ??? */
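The replacement above collapses the per-interrupt checks into a single table-driven test: an interrupt is deliverable when the guest has interrupts globally enabled, is not at exception or error level, and has the matching Status.IM bit set. A minimal standalone sketch of that condition (the helper name is invented; the ST0_* values are copied from the MIPS Status register layout):

#include <stdint.h>

#define ST0_IE	0x00000001	/* Status.IE  - global interrupt enable */
#define ST0_EXL	0x00000002	/* Status.EXL - exception level */
#define ST0_ERL	0x00000004	/* Status.ERL - error level */

/*
 * Sketch only: is the interrupt on line 'irq' (2..7, the numbers used by
 * the KVM interrupt ioctl and the queue/dequeue callbacks above)
 * deliverable to a guest whose CP0_Status is 'guest_status'?  The
 * Status.IM / Cause.IP bit for line n sits at bit (n + 8); the
 * kvm_priority_to_irq[] tables store these same masks as C_IRQn.
 */
static inline int guest_irq_deliverable(uint32_t guest_status,
					unsigned int irq)
{
	uint32_t im_bit = 1u << (irq + 8);

	return (guest_status & ST0_IE) &&
	       !(guest_status & (ST0_EXL | ST0_ERL)) &&
	       (guest_status & im_bit);
}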
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 3bf0a49725e8..c3e878ca3e07 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -21,11 +21,12 @@
#define MIPS_EXC_NMI 5
#define MIPS_EXC_MCHK 6
#define MIPS_EXC_INT_TIMER 7
-#define MIPS_EXC_INT_IO 8
-#define MIPS_EXC_EXECUTE 9
-#define MIPS_EXC_INT_IPI_1 10
-#define MIPS_EXC_INT_IPI_2 11
-#define MIPS_EXC_MAX 12
+#define MIPS_EXC_INT_IO_1 8
+#define MIPS_EXC_INT_IO_2 9
+#define MIPS_EXC_EXECUTE 10
+#define MIPS_EXC_INT_IPI_1 11
+#define MIPS_EXC_INT_IPI_2 12
+#define MIPS_EXC_MAX 13
/* XXXSL More to follow */
#define C_TI (_ULCAST_(1) << 30)
@@ -38,6 +39,9 @@
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
#endif
+extern u32 *kvm_priority_to_irq;
+u32 kvm_irq_to_priority(u32 irq);
+
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
new file mode 100644
index 000000000000..3681fc8fba38
--- /dev/null
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson-3 Virtual IPI interrupt support.
+ *
+ * Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved.
+ *
+ * Authors: Chen Zhu <zhuchen@loongson.cn>
+ * Authors: Huacai Chen <chenhc@lemote.com>
+ */
+
+#include <linux/kvm_host.h>
+
+#define IPI_BASE 0x3ff01000ULL
+
+#define CORE0_STATUS_OFF 0x000
+#define CORE0_EN_OFF 0x004
+#define CORE0_SET_OFF 0x008
+#define CORE0_CLEAR_OFF 0x00c
+#define CORE0_BUF_20 0x020
+#define CORE0_BUF_28 0x028
+#define CORE0_BUF_30 0x030
+#define CORE0_BUF_38 0x038
+
+#define CORE1_STATUS_OFF 0x100
+#define CORE1_EN_OFF 0x104
+#define CORE1_SET_OFF 0x108
+#define CORE1_CLEAR_OFF 0x10c
+#define CORE1_BUF_20 0x120
+#define CORE1_BUF_28 0x128
+#define CORE1_BUF_30 0x130
+#define CORE1_BUF_38 0x138
+
+#define CORE2_STATUS_OFF 0x200
+#define CORE2_EN_OFF 0x204
+#define CORE2_SET_OFF 0x208
+#define CORE2_CLEAR_OFF 0x20c
+#define CORE2_BUF_20 0x220
+#define CORE2_BUF_28 0x228
+#define CORE2_BUF_30 0x230
+#define CORE2_BUF_38 0x238
+
+#define CORE3_STATUS_OFF 0x300
+#define CORE3_EN_OFF 0x304
+#define CORE3_SET_OFF 0x308
+#define CORE3_CLEAR_OFF 0x30c
+#define CORE3_BUF_20 0x320
+#define CORE3_BUF_28 0x328
+#define CORE3_BUF_30 0x330
+#define CORE3_BUF_38 0x338
+
+static int loongson_vipi_read(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t offset = addr & 0xff;
+ void *pbuf;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ *(uint64_t *)val = s->status;
+ break;
+
+ case CORE0_EN_OFF:
+ *(uint64_t *)val = s->en;
+ break;
+
+ case CORE0_SET_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_CLEAR_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)val = *(uint64_t *)pbuf;
+ else /* Assume len == 4 */
+ *(uint32_t *)val = *(uint32_t *)pbuf;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, const void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t data, offset = addr & 0xff;
+ void *pbuf;
+ struct kvm *kvm = ipi->kvm;
+ struct kvm_mips_interrupt irq;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ data = *(uint64_t *)val;
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ break;
+
+ case CORE0_EN_OFF:
+ s->en = data;
+ break;
+
+ case CORE0_SET_OFF:
+ s->status |= data;
+ irq.cpu = id;
+ irq.irq = 6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ break;
+
+ case CORE0_CLEAR_OFF:
+ s->status &= ~data;
+ if (!s->status) {
+ irq.cpu = id;
+ irq.irq = -6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ }
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)pbuf = (uint64_t)data;
+ else /* Assume len == 4 */
+ *(uint32_t *)pbuf = (uint32_t)data;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_read(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_write(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static const struct kvm_io_device_ops kvm_ipi_ops = {
+ .read = kvm_ipi_read,
+ .write = kvm_ipi_write,
+};
+
+void kvm_init_loongson_ipi(struct kvm *kvm)
+{
+ int i;
+ unsigned long addr;
+ struct loongson_kvm_ipi *s;
+ struct kvm_io_device *device;
+
+ s = &kvm->arch.ipi;
+ s->kvm = kvm;
+ spin_lock_init(&s->lock);
+
+ /*
+ * Initialize IPI device
+ */
+ for (i = 0; i < 4; i++) {
+ device = &s->dev_ipi[i].device;
+ kvm_iodevice_init(device, &kvm_ipi_ops);
+ addr = (((unsigned long)i) << 44) + IPI_BASE;
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device);
+ mutex_unlock(&kvm->slots_lock);
+ s->dev_ipi[i].ipi = s;
+ s->dev_ipi[i].node_id = i;
+ }
+}
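The register map above is per core: each of the four cores on a node gets a 0x100-byte window holding STATUS/EN/SET/CLEAR plus four 64-bit mailbox buffers at 0x20-0x38. A standalone sketch of the address decode used by loongson_vipi_read()/loongson_vipi_write() (struct and function names are illustrative only):

#include <stdint.h>

/*
 * Sketch only: split a guest physical address on the virtual IPI bus
 * into (node, core, register offset), exactly as the handlers above do.
 */
struct ipi_target {
	uint32_t node;		/* bits 44-45 */
	uint32_t core;		/* bits 8-9 */
	uint32_t vcpu_id;	/* index into ipistate[]/vcpus[], 4 cores per node */
	uint64_t offset;	/* low byte: STATUS/EN/SET/CLEAR or BUF_20..BUF_38 */
};

static struct ipi_target decode_ipi_addr(uint64_t addr)
{
	struct ipi_target t;

	t.core    = (addr >> 8) & 3;
	t.node    = (addr >> 44) & 3;
	t.vcpu_id = t.core + t.node * 4;
	t.offset  = addr & 0xff;

	return t;
}

kvm_init_loongson_ipi() registers one 0x400-byte MMIO region per node at (node << 44) + IPI_BASE, which is why two bits of core select and the low byte of offset are all the decode needs inside a region.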
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 2416fa40b687..521bd5891e84 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -67,6 +67,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("vz_ghfc", vz_ghfc_exits),
VCPU_STAT("vz_gpa", vz_gpa_exits),
VCPU_STAT("vz_resvd", vz_resvd_exits),
+ VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
#endif
VCPU_STAT("halt_successful_poll", halt_successful_poll),
VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
@@ -129,6 +130,8 @@ int kvm_arch_check_processor_compat(void *opaque)
return 0;
}
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
@@ -148,6 +151,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.gpa_mm.pgd)
return -ENOMEM;
+#ifdef CONFIG_CPU_LOONGSON64
+ kvm_init_loongson_ipi(kvm);
+#endif
+
return 0;
}
@@ -490,7 +497,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
int intr = (int)irq->irq;
struct kvm_vcpu *dvcpu = NULL;
- if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+ if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
+ intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
(int)intr);
@@ -499,10 +509,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
else
dvcpu = vcpu->kvm->vcpus[irq->cpu];
- if (intr == 2 || intr == 3 || intr == 4) {
+ if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
- } else if (intr == -2 || intr == -3 || intr == -4) {
+ } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
} else {
kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
@@ -1620,6 +1630,34 @@ static struct notifier_block kvm_mips_csr_die_notifier = {
.notifier_call = kvm_mips_csr_die_notify,
};
+static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IO_2] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ4,
+};
+
+u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
+
+u32 kvm_irq_to_priority(u32 irq)
+{
+ int i;
+
+ for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
+ if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
+ return i;
+ }
+
+ return MIPS_EXC_MAX;
+}
+
static int __init kvm_mips_init(void)
{
int ret;
@@ -1638,6 +1676,9 @@ static int __init kvm_mips_init(void)
if (ret)
return ret;
+ if (boot_cpu_type() == CPU_LOONGSON64)
+ kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
+
register_die_notifier(&kvm_mips_csr_die_notifier);
return 0;
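The two tables above are the heart of the rework: the priority indices stay fixed (see interrupt.h), while the Cause/Status bit each priority maps to is chosen per platform, so Loongson-3 can route its second I/O line to IP3 and its IPI to IP6 (hence the new intr == 6 / -6 cases in the ioctl) without touching the delivery code. A standalone sketch of the lookup and its inverse, mirroring kvm_irq_to_priority(); the names and constants here are illustrative stand-ins:

#include <stdint.h>

#define C_IRQ(n)	(1u << ((n) + 10))	/* Cause.IP(n+2), as in mipsregs.h */

/* Priority indices as defined in interrupt.h above. */
enum { EXC_INT_TIMER = 7, EXC_INT_IO_1 = 8, EXC_INT_IO_2 = 9,
       EXC_INT_IPI_1 = 11, EXC_INT_IPI_2 = 12, EXC_MAX = 13 };

/* Sketch of the default (non-Loongson) table. */
static const uint32_t priority_to_irq[EXC_MAX] = {
	[EXC_INT_TIMER] = C_IRQ(5),
	[EXC_INT_IO_1]  = C_IRQ(0),
	[EXC_INT_IPI_1] = C_IRQ(1),
	[EXC_INT_IPI_2] = C_IRQ(2),
};

/* Inverse lookup: ioctl interrupt number (2..7) -> priority index. */
static uint32_t irq_to_priority(uint32_t irq)
{
	uint32_t i;

	for (i = EXC_INT_TIMER; i < EXC_MAX; i++)
		if (priority_to_irq[i] == (1u << (irq + 8)))
			return i;

	return EXC_MAX;		/* no match, cf. MIPS_EXC_MAX */
}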
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index fc8aee05d10b..1c1fbce3f566 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -20,6 +20,7 @@
#include <asm/cpu.h>
#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
@@ -621,6 +622,46 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void)
+{
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ write_gc0_index(0);
+ guest_tlbinvf();
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);
+
+void kvm_loongson_clear_guest_ftlb(void)
+{
+ int i;
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ for (i = current_cpu_data.tlbsizevtlb;
+ i < (current_cpu_data.tlbsizevtlb +
+ current_cpu_data.tlbsizeftlbsets);
+ i++) {
+ write_gc0_index(i);
+ guest_tlbinvf();
+ }
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
+#endif
+
#endif
/**
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 60763efa723e..34ad0b46e610 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -529,6 +529,9 @@ static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MIPS_TE:
r = 1;
break;
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
default:
r = 0;
break;
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 51f51009a53f..d9c462c14163 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -29,6 +29,7 @@
#include <linux/kvm_host.h>
#include "interrupt.h"
+#include "loongson_regs.h"
#include "trace.h"
@@ -126,6 +127,11 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
return mask;
}
+static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return MIPS_CONF6_LOONGSON_INTIMER | MIPS_CONF6_LOONGSON_EXTIMER;
+}
+
/*
* VZ optionally allows these additional Config bits to be written by root:
* Config: M, [MT]
@@ -180,6 +186,12 @@ static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}
+static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config6_guest_wrmask(vcpu) |
+ MIPS_CONF6_LOONGSON_SFBEN | MIPS_CONF6_LOONGSON_FTLBDIS;
+}
+
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
/* VZ guest has already converted gva to gpa */
@@ -225,23 +237,7 @@ static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case 2:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -253,44 +249,22 @@ static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case -2:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
-static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
- [MIPS_EXC_INT_TIMER] = C_IRQ5,
- [MIPS_EXC_INT_IO] = C_IRQ0,
- [MIPS_EXC_INT_IPI_1] = C_IRQ1,
- [MIPS_EXC_INT_IPI_2] = C_IRQ2,
-};
-
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
set_gc0_cause(C_TI);
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
if (cpu_has_guestctl2)
@@ -311,7 +285,7 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
@@ -329,7 +303,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
}
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
@@ -966,7 +941,8 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(sel == 2 || /* SRSCtl */
sel == 3)) || /* SRSMap */
(rd == MIPS_CP0_CONFIG &&
- (sel == 7)) || /* Config7 */
+ (sel == 6 || /* Config6 */
+ sel == 7)) || /* Config7 */
(rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
@@ -974,6 +950,11 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(rd == MIPS_CP0_ERRCTL &&
(sel == 0))) { /* ErrCtl */
val = cop0->reg[rd][sel];
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ val = cop0->reg[rd][sel];
+#endif
} else {
val = 0;
er = EMULATE_FAIL;
@@ -1036,9 +1017,40 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
kvm_write_maari(vcpu, val);
+ } else if (rd == MIPS_CP0_CONFIG &&
+ (sel == 6)) {
+ cop0->reg[rd][sel] = (int)val;
} else if (rd == MIPS_CP0_ERRCTL &&
(sel == 0)) { /* ErrCtl */
/* ignore the written value */
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (val & LOONGSON_DIAG_BTB) {
+ /* Flush BTB */
+ set_c0_diag(LOONGSON_DIAG_BTB);
+ }
+ if (val & LOONGSON_DIAG_ITLB) {
+ /* Flush ITLB */
+ set_c0_diag(LOONGSON_DIAG_ITLB);
+ }
+ if (val & LOONGSON_DIAG_DTLB) {
+ /* Flush DTLB */
+ set_c0_diag(LOONGSON_DIAG_DTLB);
+ }
+ if (val & LOONGSON_DIAG_VTLB) {
+ /* Flush VTLB */
+ kvm_loongson_clear_guest_vtlb();
+ }
+ if (val & LOONGSON_DIAG_FTLB) {
+ /* Flush FTLB */
+ kvm_loongson_clear_guest_ftlb();
+ }
+ local_irq_restore(flags);
+#endif
} else {
er = EMULATE_FAIL;
}
@@ -1129,6 +1141,77 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
return EMULATE_FAIL;
}
+#ifdef CONFIG_CPU_LOONGSON64
+static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned int rs, rd;
+ unsigned int hostcfg;
+ unsigned long curr_pc;
+ enum emulation_result er = EMULATE_DONE;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to roll back the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rs = inst.loongson3_lscsr_format.rs;
+ rd = inst.loongson3_lscsr_format.rd;
+ switch (inst.loongson3_lscsr_format.fr) {
+ case 0x8: /* Read CPUCFG */
+ ++vcpu->stat.vz_cpucfg_exits;
+ hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
+
+ switch (vcpu->arch.gprs[rs]) {
+ case LOONGSON_CFG0:
+ vcpu->arch.gprs[rd] = 0x14c000;
+ break;
+ case LOONGSON_CFG1:
+ hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
+ LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
+ LOONGSON_CFG1_SFBP);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG2:
+ hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
+ LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG3:
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ default:
+ /* Don't export any other advanced features to guest */
+ vcpu->arch.gprs[rd] = 0;
+ break;
+ }
+ break;
+
+ default:
+ kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
+ inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL) {
+ kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
+ curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
+
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+#endif
+
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
@@ -1158,6 +1241,11 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
break;
#endif
+#ifdef CONFIG_CPU_LOONGSON64
+ case lwc2_op:
+ er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu);
+ break;
+#endif
case spec3_op:
switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
@@ -1652,6 +1740,7 @@ static u64 kvm_vz_get_one_regs[] = {
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG4,
KVM_REG_MIPS_CP0_CONFIG5,
+ KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXT,
#endif
@@ -1706,7 +1795,7 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
- if (cpu_guest_has_htw)
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
@@ -1755,7 +1844,7 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
}
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
sizeof(kvm_vz_get_one_regs_htw)))
return -EFAULT;
@@ -1878,17 +1967,17 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = read_gc0_segctl2();
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwbase();
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwfield();
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwsize();
break;
@@ -1896,7 +1985,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = (long)read_gc0_wired();
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwctl();
break;
@@ -1979,6 +2068,9 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
return -EINVAL;
*v = read_gc0_config5();
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ *v = kvm_read_sw_gc0_config6(cop0);
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
@@ -2101,17 +2193,17 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_segctl2(v);
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwbase(v);
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwfield(v);
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwsize(v);
break;
@@ -2119,7 +2211,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
change_gc0_wired(MIPSR6_WIRED_WIRED, v);
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwctl(v);
break;
@@ -2248,6 +2340,14 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_config5(v);
}
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ cur = kvm_read_sw_gc0_config6(cop0);
+ change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_sw_gc0_config6(cop0, (int)v);
+ }
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
@@ -2580,7 +2680,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
/* restore HTW registers */
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
kvm_restore_gc0_pwbase(cop0);
kvm_restore_gc0_pwfield(cop0);
kvm_restore_gc0_pwsize(cop0);
@@ -2597,7 +2697,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* prevents a SC on the next VCPU from succeeding by matching a LL on
* the previous VCPU.
*/
- if (cpu_guest_has_rw_llb)
+ if (vcpu->kvm->created_vcpus > 1)
write_gc0_lladdr(0);
return 0;
@@ -2685,8 +2785,8 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
}
/* save HTW registers if enabled in guest */
- if (cpu_guest_has_htw &&
- kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
+ if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
+ kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
kvm_save_gc0_pwbase(cop0);
kvm_save_gc0_pwfield(cop0);
kvm_save_gc0_pwsize(cop0);
@@ -2853,8 +2953,12 @@ static int kvm_vz_hardware_enable(void)
write_c0_guestctl0(MIPS_GCTL0_CP0 |
(MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
MIPS_GCTL0_CG | MIPS_GCTL0_CF);
- if (cpu_has_guestctl0ext)
- set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ if (cpu_has_guestctl0ext) {
+ if (current_cpu_type() != CPU_LOONGSON64)
+ set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ else
+ clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ }
if (cpu_has_guestid) {
write_c0_guestctl1(0);
@@ -2871,6 +2975,12 @@ static int kvm_vz_hardware_enable(void)
if (cpu_has_guestctl2)
clear_c0_guestctl2(0x3f << 10);
+#ifdef CONFIG_CPU_LOONGSON64
+ /* Control guest CCA attribute */
+ if (cpu_has_csr())
+ csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
+#endif
+
return 0;
}
@@ -2927,6 +3037,9 @@ static int kvm_vz_check_extension(struct kvm *kvm, long ext)
r = 2;
break;
#endif
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
default:
r = 0;
break;
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index a42dd09c6578..d8a087cf2b42 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -252,6 +252,7 @@ static int do_signal(struct pt_regs *regs)
switch (retval) {
case ERESTART_RESTARTBLOCK:
restart = -2;
+ fallthrough;
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 8dd24c7692a0..d32ec9ae73bd 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu, unsigned long addr,
- unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store);
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 337047ba4a56..7e2d061d0445 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
struct mmio_hpte_cache_entry *pgfault_cache;
struct task_struct *run_task;
- struct kvm_run *kvm_run;
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 94f5a32acaf1..ccf66b3a4c1d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28 +58,28 @@ enum xlate_readwrite {
XLATE_WRITE /* check for write permissions */
};
-extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
-extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int rs, unsigned int bytes,
int is_default_endian);
@@ -90,10 +90,9 @@ extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
-extern int kvmppc_emulate_instruction(struct kvm_run *run,
- struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
-extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
@@ -267,7 +266,7 @@ struct kvmppc_ops {
void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
- int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
int (*vcpu_create)(struct kvm_vcpu *vcpu);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
int (*check_requests)(struct kvm_vcpu *vcpu);
@@ -291,7 +290,7 @@ struct kvmppc_ops {
int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm);
int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
- int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int (*emulate_op)(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 37508a356f28..41fedec69ac3 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -755,9 +755,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
- return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+ return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index eae259ee49af..9b6323ec8e60 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -18,7 +18,7 @@ extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
int sprn, ulong spr_val);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ddfc4c90ebb6..7c5a1812a1c3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -412,7 +412,7 @@ static int instruction_is_store(unsigned int instr)
return (instr & mask) != 0;
}
-int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store)
{
u32 last_inst;
@@ -472,10 +472,10 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.paddr_accessed = gpa;
vcpu->arch.vaddr_accessed = ea;
- return kvmppc_emulate_mmio(run, vcpu);
+ return kvmppc_emulate_mmio(vcpu);
}
-int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
@@ -498,7 +498,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte_t pte, *ptep;
if (kvm_is_radix(kvm))
- return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
+ return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);
/*
* Real-mode code has already searched the HPT and found the
@@ -518,7 +518,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_base = r & HPTE_R_RPN & ~(psize - 1);
gfn_base = gpa_base >> PAGE_SHIFT;
gpa = gpa_base | (ea & (psize - 1));
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
}
}
@@ -554,7 +554,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
/*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 2a2fad9a1c13..3cb0c9843d01 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -353,7 +353,13 @@ static struct kmem_cache *kvm_pmd_cache;
static pte_t *kvmppc_pte_alloc(void)
{
- return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ pte_t *pte;
+
+ pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ /* pmd_populate() will only reference _pa(pte). */
+ kmemleak_ignore(pte);
+
+ return pte;
}
static void kvmppc_pte_free(pte_t *ptep)
@@ -363,7 +369,13 @@ static void kvmppc_pte_free(pte_t *ptep)
static pmd_t *kvmppc_pmd_alloc(void)
{
- return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ pmd_t *pmd;
+
+ pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ /* pud_populate() will only reference _pa(pmd). */
+ kmemleak_ignore(pmd);
+
+ return pmd;
}
static void kvmppc_pmd_free(pmd_t *pmdp)
@@ -417,9 +429,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
* Callers are responsible for flushing the PWC.
*
* When page tables are being unmapped/freed as part of page fault path
- * (full == false), ptes are not expected. There is code to unmap them
- * and emit a warning if encountered, but there may already be data
- * corruption due to the unexpected mappings.
+ * (full == false), valid ptes are generally not expected; however, there
+ * is one situation where they arise, which is when dirty page logging is
+ * turned off for a memslot while the VM is running. The new memslot
+ * becomes visible to page faults before the memslot commit function
+ * gets to flush the memslot, which can lead to a 2MB page mapping being
+ * installed for a guest physical address where there are already 64kB
+ * (or 4kB) mappings (of sub-pages of the same 2MB page).
*/
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
unsigned int lpid)
@@ -433,7 +449,6 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
if (pte_val(*p) == 0)
continue;
- WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, p,
pte_pfn(*p) << PAGE_SHIFT,
PAGE_SHIFT, NULL, lpid);
@@ -891,7 +906,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
return ret;
}
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
@@ -937,7 +952,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
return RESUME_GUEST;
}
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
@@ -1142,6 +1157,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
kvm->arch.lpid);
gpa += PAGE_SIZE;
}
+ /*
+ * Increase the mmu notifier sequence number to prevent any page
+ * fault that read the memslot earlier from writing a PTE.
+ */
+ kvm->mmu_notifier_seq++;
spin_unlock(&kvm->mmu_lock);
}
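The sequence-number bump above closes a race with concurrent page faults that sampled the memslot before the flush. A minimal standalone model of that handshake (simplified; the real fault-side check is mmu_notifier_retry(), which also consults the in-progress invalidation count):

#include <stdbool.h>

/*
 * Sketch only: the flush side bumps the counter under the lock; the
 * fault side samples it before its lockless work and re-checks it under
 * the same lock, retrying rather than installing a PTE derived from
 * stale memslot state.
 */
struct seq_guard {
	unsigned long seq;	/* models kvm->mmu_notifier_seq */
};

/* Called with the lock held, as kvmppc_radix_flush_memslot() is. */
static void flush_side_invalidate(struct seq_guard *g)
{
	g->seq++;
}

/* Called with the lock held on the fault path, like mmu_notifier_retry(). */
static bool fault_side_must_retry(const struct seq_guard *g,
				  unsigned long sampled_seq)
{
	return g->seq != sampled_seq;
}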
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 50555ad1db93..1a529df0ab44 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -73,6 +73,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
struct iommu_table_group *table_group = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
table_group = iommu_group_get_iommudata(grp);
@@ -87,7 +88,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
}
}
+ cond_resched_rcu();
}
+ rcu_read_unlock();
}
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
@@ -105,12 +108,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!f.file)
return -EBADF;
+ rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt == f.file->private_data) {
found = true;
break;
}
}
+ rcu_read_unlock();
fdput(f);
@@ -143,6 +148,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!tbl)
return -EINVAL;
+ rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
if (tbl != stit->tbl)
continue;
@@ -150,14 +156,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!kref_get_unless_zero(&stit->kref)) {
/* stit is being destroyed */
iommu_tce_table_put(tbl);
+ rcu_read_unlock();
return -ENOTTY;
}
/*
* The table is already known to this KVM, we just increased
* its KVM reference counter and can return.
*/
+ rcu_read_unlock();
return 0;
}
+ rcu_read_unlock();
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
if (!stit) {
@@ -365,18 +374,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
return H_TOO_HARD;
+ rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
unsigned long hpa = 0;
struct mm_iommu_table_group_mem_t *mem;
long shift = stit->tbl->it_page_shift;
mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
- if (!mem)
- return H_TOO_HARD;
-
- if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
+ if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
+ rcu_read_unlock();
return H_TOO_HARD;
+ }
}
+ rcu_read_unlock();
return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index dad71d276b91..0effd48c8f4d 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -235,7 +235,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
#endif
-int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
@@ -371,13 +371,13 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
break;
- run->papr_hcall.nr = cmd;
+ vcpu->run->papr_hcall.nr = cmd;
for (i = 0; i < 9; ++i) {
ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
- run->papr_hcall.args[i] = gpr;
+ vcpu->run->papr_hcall.args[i] = gpr;
}
- run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
vcpu->arch.hcall_needed = 1;
emulated = EMULATE_EXIT_USER;
break;
@@ -629,7 +629,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
if (emulated == EMULATE_FAIL)
- emulated = kvmppc_emulate_paired_single(run, vcpu);
+ emulated = kvmppc_emulate_paired_single(vcpu);
return emulated;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7f5d58663f13..6bf66649ab92 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1094,9 +1094,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
ret = kvmppc_h_svm_init_done(vcpu->kvm);
break;
case H_SVM_INIT_ABORT:
- ret = H_UNSUPPORTED;
- if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+ /*
+ * Even if that call is made by the Ultravisor, the SRR1 value
+ * is the guest context one, with the secure bit clear as it has
+ * not yet been secured. So we can't check it here.
+ * Instead the kvm->arch.secure_guest flag is checked inside
+ * kvmppc_h_svm_init_abort().
+ */
+ ret = kvmppc_h_svm_init_abort(vcpu->kvm);
break;
default:
@@ -1151,8 +1156,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
return kvmppc_hcall_impl_hv_realmode(cmd);
}
-static int kvmppc_emulate_debug_inst(struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
{
u32 last_inst;
@@ -1166,8 +1170,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_run *run,
}
if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.address = kvmppc_get_pc(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
return RESUME_HOST;
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -1268,9 +1272,10 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
+ struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
vcpu->stat.sum_exits++;
@@ -1405,7 +1410,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
- r = kvmppc_emulate_debug_inst(run, vcpu);
+ r = kvmppc_emulate_debug_inst(vcpu);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1457,7 +1462,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
-static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
@@ -1515,7 +1520,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(run, vcpu);
+ r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
@@ -1525,7 +1530,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(run, vcpu);
+ r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
@@ -2929,7 +2934,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
ret = RESUME_GUEST;
if (vcpu->arch.trap)
- ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+ ret = kvmppc_handle_exit_hv(vcpu,
vcpu->arch.run_task);
vcpu->arch.ret = ret;
@@ -3894,15 +3899,16 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
return r;
}
-static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int n_ceded, i, r;
struct kvmppc_vcore *vc;
struct kvm_vcpu *v;
trace_kvmppc_run_vcpu_enter(vcpu);
- kvm_run->exit_reason = 0;
+ run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
kvmppc_update_vpas(vcpu);
@@ -3914,7 +3920,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
spin_lock(&vc->lock);
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
- vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -3947,8 +3952,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
r = kvmhv_setup_mmu(vcpu);
spin_lock(&vc->lock);
if (r) {
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- kvm_run->fail_entry.
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.
hardware_entry_failure_reason = 0;
vcpu->arch.ret = r;
break;
@@ -3967,7 +3972,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
v->stat.signal_exits++;
- v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+ v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
wake_up(&v->arch.cpu_run);
}
@@ -4008,7 +4013,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
kvmppc_remove_runnable(vc, vcpu);
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
}
@@ -4019,15 +4024,15 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
- trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+ trace_kvmppc_run_vcpu_exit(vcpu);
spin_unlock(&vc->lock);
return vcpu->arch.ret;
}
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
- struct kvm_vcpu *vcpu, u64 time_limit,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
+ struct kvm_run *run = vcpu->run;
int trap, r, pcpu;
int srcu_idx, lpid;
struct kvmppc_vcore *vc;
@@ -4036,14 +4041,13 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
trace_kvmppc_run_vcpu_enter(vcpu);
- kvm_run->exit_reason = 0;
+ run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
- vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -4161,9 +4165,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
r = RESUME_GUEST;
if (trap) {
if (!nested)
- r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
+ r = kvmppc_handle_exit_hv(vcpu, current);
else
- r = kvmppc_handle_nested_exit(kvm_run, vcpu);
+ r = kvmppc_handle_nested_exit(vcpu);
}
vcpu->arch.ret = r;
@@ -4173,7 +4177,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
if (signal_pending(current)) {
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
break;
}
@@ -4189,13 +4193,13 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
done:
kvmppc_remove_runnable(vc, vcpu);
- trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+ trace_kvmppc_run_vcpu_exit(vcpu);
return vcpu->arch.ret;
sigpend:
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
out:
local_irq_enable();
@@ -4203,8 +4207,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
goto done;
}
-static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int r;
int srcu_idx;
unsigned long ebb_regs[3] = {}; /* shut up GCC */
@@ -4288,10 +4293,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
*/
if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
!no_mixing_hpt_and_radix)
- r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
+ r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
- r = kvmppc_run_vcpu(run, vcpu);
+ r = kvmppc_run_vcpu(vcpu);
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
@@ -4301,7 +4306,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
srcu_idx = srcu_read_lock(&kvm->srcu);
- r = kvmppc_book3s_hv_page_fault(run, vcpu,
+ r = kvmppc_book3s_hv_page_fault(vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&kvm->srcu, srcu_idx);
} else if (r == RESUME_PASSTHROUGH) {
@@ -4975,7 +4980,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
}
/* We don't need to emulate any privileged instructions or dcbz */
-static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
return EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index a221cae792dc..0989751c9d5e 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
}
- r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
- lpcr);
+ r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
/* save L2 state for return */
@@ -1270,8 +1269,7 @@ static inline int kvmppc_radix_shift_to_level(int shift)
}
/* called with gp->tlb_lock held */
-static long int __kvmhv_nested_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp)
{
struct kvm *kvm = vcpu->kvm;
@@ -1354,7 +1352,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
}
/* passthrough of emulated MMIO case */
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
if (writing) {
@@ -1429,8 +1427,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
- if (n_rmap)
- kfree(n_rmap);
+ kfree(n_rmap);
if (ret == -EAGAIN)
ret = RESUME_GUEST; /* Let the guest try again */
@@ -1441,13 +1438,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
return RESUME_GUEST;
}
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu)
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
struct kvm_nested_guest *gp = vcpu->arch.nested;
long int ret;
mutex_lock(&gp->tlb_lock);
- ret = __kvmhv_nested_page_fault(run, vcpu, gp);
+ ret = __kvmhv_nested_page_fault(vcpu, gp);
mutex_unlock(&gp->tlb_lock);
return ret;
}
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index f91224ea034a..09d8119024db 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -749,6 +749,20 @@ static u64 kvmppc_get_secmem_size(void)
const __be32 *prop;
u64 size = 0;
+ /*
+ * First try the new ibm,secure-memory nodes which supersede the
+ * secure-memory-ranges property.
+ * If we find any, there is no need to read the deprecated property.
+ */
+ for_each_compatible_node(np, NULL, "ibm,secure-memory") {
+ prop = of_get_property(np, "reg", &len);
+ if (!prop)
+ continue;
+ size += of_read_number(prop + 2, 2);
+ }
+ if (size)
+ return size;
+
np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
if (!np)
goto out;
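
For context on the new ibm,secure-memory scan above: each matching node's "reg" property is a big-endian cell array, and of_read_number(prop + 2, 2) folds two size cells into a single 64-bit value after skipping the address cells. A rough user-space illustration of that cell decoding (plain C rather than the kernel's OF helpers; the 2-address-cell/2-size-cell layout and the values are assumed for the example):

/* Hedged illustration: decode and sum the size cells of each
 * <address, size> pair in a big-endian "reg"-style array. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint64_t read_number(const uint32_t *cells, int ncells)
{
        uint64_t val = 0;

        while (ncells--)
                val = (val << 32) | ntohl(*cells++);
        return val;
}

int main(void)
{
        /* Two example ranges: 2 address cells + 2 size cells each. */
        uint32_t reg[] = {
                htonl(0x0), htonl(0x80000000), htonl(0x0), htonl(0x10000000),
                htonl(0x1), htonl(0x00000000), htonl(0x0), htonl(0x20000000),
        };
        uint64_t size = 0;

        for (unsigned int i = 0; i < sizeof(reg) / sizeof(reg[0]); i += 4)
                size += read_number(&reg[i] + 2, 2);    /* skip 2 address cells */

        printf("total secure memory: 0x%llx bytes\n", (unsigned long long)size);
        return 0;
}
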
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index bf0282775e37..a11436720a8c 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -169,7 +169,7 @@ static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
-static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
@@ -188,7 +188,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs,
len, 1);
goto done_load;
}
@@ -213,7 +213,7 @@ done_load:
return emulated;
}
-static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_fpr_store(struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
@@ -248,7 +248,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
+ emulated = kvmppc_handle_store(vcpu, val, len, 1);
} else {
emulated = EMULATE_DONE;
}
@@ -259,7 +259,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_psq_load(struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
@@ -279,12 +279,12 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if ((r == EMULATE_DO_MMIO) && w) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs,
4, 1);
vcpu->arch.qpr[rs] = tmp[1];
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FQPR | rs,
8, 1);
goto done_load;
}
@@ -302,7 +302,7 @@ done_load:
return emulated;
}
-static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_psq_store(struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
@@ -318,10 +318,10 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if ((r == EMULATE_DO_MMIO) && w) {
- emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
+ emulated = kvmppc_handle_store(vcpu, tmp[0], 4, 1);
} else if (r == EMULATE_DO_MMIO) {
u64 val = ((u64)tmp[0] << 32) | tmp[1];
- emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
+ emulated = kvmppc_handle_store(vcpu, val, 8, 1);
} else {
emulated = EMULATE_DONE;
}
@@ -618,7 +618,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
return EMULATE_DONE;
}
-int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu)
{
u32 inst;
enum emulation_result emulated = EMULATE_DONE;
@@ -680,7 +680,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_LU:
@@ -690,7 +690,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -703,7 +703,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_STU:
@@ -713,7 +713,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -733,7 +733,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
break;
}
case OP_4X_PS_CMPO0:
@@ -747,7 +747,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -824,7 +824,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
break;
}
case OP_4XW_PSQ_STUX:
@@ -834,7 +834,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -922,7 +922,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
@@ -930,7 +930,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -941,7 +941,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
@@ -949,7 +949,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -960,7 +960,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
@@ -968,7 +968,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -979,7 +979,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
@@ -987,7 +987,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1001,7 +1001,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
@@ -1010,7 +1010,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -1022,7 +1022,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
@@ -1031,7 +1031,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1043,7 +1043,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
@@ -1052,7 +1052,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -1064,7 +1064,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
@@ -1073,7 +1073,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1085,7 +1085,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr,
FPU_LS_SINGLE_LOW);
break;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index a0f6813f4560..ef54f917bdaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -700,7 +700,7 @@ static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}
-int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
ulong eaddr, int vec)
{
bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -795,7 +795,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* The guest's PTE is not mapped yet. Map on the host */
if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
/* Exit KVM if mapping failed */
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
if (data)
@@ -808,7 +808,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->stat.mmio_exits++;
vcpu->arch.paddr_accessed = pte.raddr;
vcpu->arch.vaddr_accessed = pte.eaddr;
- r = kvmppc_emulate_mmio(run, vcpu);
+ r = kvmppc_emulate_mmio(vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
}
@@ -992,7 +992,7 @@ static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
enum emulation_result er = EMULATE_FAIL;
if (!(kvmppc_get_msr(vcpu) & MSR_PR))
- er = kvmppc_emulate_instruction(vcpu->run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
/* Couldn't emulate, trigger interrupt in guest */
@@ -1089,8 +1089,7 @@ static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
}
}
-static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
enum emulation_result er;
ulong flags;
@@ -1124,7 +1123,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
vcpu->stat.emulated_inst_exits++;
- er = kvmppc_emulate_instruction(run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
switch (er) {
case EMULATE_DONE:
r = RESUME_GUEST_NV;
@@ -1139,7 +1138,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
r = RESUME_HOST_NV;
break;
case EMULATE_EXIT_USER:
@@ -1198,7 +1197,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* only care about PTEG not found errors, but leave NX alone */
if (shadow_srr1 & 0x40000000) {
int idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+ r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -1248,7 +1247,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
int idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+ r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
} else {
kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
@@ -1292,7 +1291,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOK3S_INTERRUPT_PROGRAM:
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
- r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+ r = kvmppc_exit_pr_progint(vcpu, exit_nr);
break;
case BOOK3S_INTERRUPT_SYSCALL:
{
@@ -1370,7 +1369,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
&last_inst);
if (emul == EMULATE_DONE)
- r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+ r = kvmppc_exit_pr_progint(vcpu, exit_nr);
else
r = RESUME_GUEST;
@@ -1825,8 +1824,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
vfree(vcpu_book3s);
}
-static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int ret;
#ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1834,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
- kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1861,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_fix_ee_before_entry();
- ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ ret = __kvmppc_vcpu_run(run, vcpu);
kvmppc_clear_debug(vcpu);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 888afe8d35cc..c0d62a917e20 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -729,13 +729,14 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
return r;
}
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int ret, s;
struct debug_reg debug;
if (!vcpu->arch.sane) {
- kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
@@ -777,7 +778,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
kvmppc_fix_ee_before_entry();
- ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ ret = __kvmppc_vcpu_run(run, vcpu);
/* No need for guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */
@@ -799,11 +800,11 @@ out:
return ret;
}
-static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int emulation_exit(struct kvm_vcpu *vcpu)
{
enum emulation_result er;
- er = kvmppc_emulate_instruction(run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
switch (er) {
case EMULATE_DONE:
/* don't overwrite subtypes, just account kvm_stats */
@@ -820,8 +821,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
__func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
/* For debugging, encode the failing instruction and
* report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
+ vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST;
@@ -833,8 +834,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
-static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
u32 dbsr = vcpu->arch.dbsr;
@@ -953,7 +955,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
}
-static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
enum emulation_result emulated, u32 last_inst)
{
switch (emulated) {
@@ -965,8 +967,8 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
__func__, vcpu->arch.regs.nip);
/* For debugging, encode the failing instruction and
* report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= last_inst;
+ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
+ vcpu->run->hw.hardware_exit_reason |= last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST;
@@ -1023,7 +1025,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
if (emulated != EMULATE_DONE) {
- r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
+ r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
goto out;
}
@@ -1083,7 +1085,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_HV_PRIV:
- r = emulation_exit(run, vcpu);
+ r = emulation_exit(vcpu);
break;
case BOOKE_INTERRUPT_PROGRAM:
@@ -1093,7 +1095,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* We are here because of an SW breakpoint instr,
* so lets return to host to handle.
*/
- r = kvmppc_handle_debug(run, vcpu);
+ r = kvmppc_handle_debug(vcpu);
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
break;
@@ -1114,7 +1116,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
- r = emulation_exit(run, vcpu);
+ r = emulation_exit(vcpu);
break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -1281,7 +1283,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* actually RAM. */
vcpu->arch.paddr_accessed = gpaddr;
vcpu->arch.vaddr_accessed = eaddr;
- r = kvmppc_emulate_mmio(run, vcpu);
+ r = kvmppc_emulate_mmio(vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
@@ -1332,7 +1334,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOKE_INTERRUPT_DEBUG: {
- r = kvmppc_handle_debug(run, vcpu);
+ r = kvmppc_handle_debug(vcpu);
if (r == RESUME_HOST)
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
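
A small aside on the hardware_exit_reason encoding touched in emulation_exit() and kvmppc_resume_inst_load() above: the high 32 bits are set to all-ones as a marker and the failing instruction lands in the low 32 bits. A stand-alone arithmetic check (the opcode value here is just an example):

/* Hedged aside: reproduce the ~0ULL << 32 | last_inst packing. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint32_t last_inst = 0x7c0002a6;        /* example opcode */
        uint64_t reason = ~0ULL << 32;          /* 0xffffffff00000000 */

        reason |= last_inst;
        printf("reason=0x%016" PRIx64 " inst=0x%08" PRIx32 "\n",
               reason, (uint32_t)reason);
        return 0;
}
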
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 65b4d337d337..be9da96d9f06 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -70,7 +70,7 @@ void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
-int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
@@ -94,16 +94,12 @@ enum int_class {
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+extern int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 689ff5f90e9e..d8d38aca71bd 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -39,7 +39,7 @@ static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}
-int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 3d0d3ec5be96..64eb833e9f02 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -83,16 +83,16 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
}
#endif
-static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
switch (get_oc(inst)) {
case EHPRIV_OC_DEBUG:
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.address = vcpu->arch.regs.nip;
- run->debug.arch.status = 0;
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
+ vcpu->run->debug.arch.status = 0;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
emulated = EMULATE_EXIT_USER;
*advance = 0;
@@ -125,7 +125,7 @@ static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
@@ -182,8 +182,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_EHPRIV:
- emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
- advance);
+ emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
break;
default:
@@ -197,7 +196,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
if (emulated == EMULATE_FAIL)
- emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
+ emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);
return emulated;
}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 6fca38ca791f..ee1147c98cd8 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -191,7 +191,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
/* XXX Should probably auto-generate instruction decoding for a particular core
* from opcode tables in the future. */
-int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{
u32 inst;
int rs, rt, sprn;
@@ -270,9 +270,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
* these are illegal instructions.
*/
if (inst == KVMPPC_INST_SW_BREAKPOINT) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.status = 0;
- run->debug.arch.address = kvmppc_get_pc(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.status = 0;
+ vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
emulated = EMULATE_EXIT_USER;
advance = 0;
} else
@@ -285,7 +285,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
if (emulated == EMULATE_FAIL) {
- emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+ emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
&advance);
if (emulated == EMULATE_AGAIN) {
advance = 0;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 135d0e686622..48272a9b9c30 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -71,7 +71,6 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
*/
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 inst;
enum emulation_result emulated = EMULATE_FAIL;
int advance = 1;
@@ -104,10 +103,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
int instr_byte_swap = op.type & BYTEREV;
if (op.type & SIGNEXT)
- emulated = kvmppc_handle_loads(run, vcpu,
+ emulated = kvmppc_handle_loads(vcpu,
op.reg, size, !instr_byte_swap);
else
- emulated = kvmppc_handle_load(run, vcpu,
+ emulated = kvmppc_handle_load(vcpu,
op.reg, size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -124,10 +123,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
if (op.type & SIGNEXT)
- emulated = kvmppc_handle_loads(run, vcpu,
+ emulated = kvmppc_handle_loads(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1);
else
- emulated = kvmppc_handle_load(run, vcpu,
+ emulated = kvmppc_handle_load(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -164,12 +163,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2;
- emulated = kvmppc_handle_vmx_load(run,
- vcpu, KVM_MMIO_REG_VMX|op.reg,
+ emulated = kvmppc_handle_vmx_load(vcpu,
+ KVM_MMIO_REG_VMX|op.reg,
8, 1);
} else {
vcpu->arch.mmio_vmx_copy_nums = 1;
- emulated = kvmppc_handle_vmx_load(run, vcpu,
+ emulated = kvmppc_handle_vmx_load(vcpu,
KVM_MMIO_REG_VMX|op.reg,
size, 1);
}
@@ -217,7 +216,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size;
}
- emulated = kvmppc_handle_vsx_load(run, vcpu,
+ emulated = kvmppc_handle_vsx_load(vcpu,
KVM_MMIO_REG_VSX|op.reg, io_size_each,
1, op.type & SIGNEXT);
break;
@@ -227,8 +226,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
/* if need byte reverse, op.val has been reversed by
* analyse_instr().
*/
- emulated = kvmppc_handle_store(run, vcpu, op.val,
- size, 1);
+ emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
@@ -250,7 +248,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (op.type & FPCONV)
vcpu->arch.mmio_sp64_extend = 1;
- emulated = kvmppc_handle_store(run, vcpu,
+ emulated = kvmppc_handle_store(vcpu,
VCPU_FPR(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -290,12 +288,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2;
- emulated = kvmppc_handle_vmx_store(run,
- vcpu, op.reg, 8, 1);
+ emulated = kvmppc_handle_vmx_store(vcpu,
+ op.reg, 8, 1);
} else {
vcpu->arch.mmio_vmx_copy_nums = 1;
- emulated = kvmppc_handle_vmx_store(run,
- vcpu, op.reg, size, 1);
+ emulated = kvmppc_handle_vmx_store(vcpu,
+ op.reg, size, 1);
}
break;
@@ -338,7 +336,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size;
}
- emulated = kvmppc_handle_vsx_store(run, vcpu,
+ emulated = kvmppc_handle_vsx_store(vcpu,
op.reg, io_size_each, 1);
break;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 27ccff612903..dd7d141e33e8 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -279,7 +279,7 @@ out:
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
-int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
enum emulation_result er;
int r;
@@ -295,7 +295,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = RESUME_GUEST;
break;
case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
/* We must reload nonvolatiles because "update" load/store
* instructions modify register state. */
/* Future optimization: only reload non-volatiles if they were
@@ -1107,9 +1107,9 @@ static inline u32 dp_to_sp(u64 fprd)
#define dp_to_sp(x) (x)
#endif /* CONFIG_PPC_FPU */
-static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
u64 uninitialized_var(gpr);
if (run->mmio.len > sizeof(gpr)) {
@@ -1219,10 +1219,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
-static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int sign_extend)
{
+ struct kvm_run *run = vcpu->run;
int idx, ret;
bool host_swabbed;
@@ -1256,7 +1257,7 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (!ret) {
- kvmppc_complete_mmio_load(vcpu, run);
+ kvmppc_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
@@ -1264,24 +1265,24 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian)
{
- return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+ return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
-int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian)
{
- return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
+ return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}
#ifdef CONFIG_VSX
-int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend)
{
@@ -1292,13 +1293,13 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
while (vcpu->arch.mmio_vsx_copy_nums) {
- emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, mmio_sign_extend);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
@@ -1307,9 +1308,10 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
#endif /* CONFIG_VSX */
-int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_store(struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian)
{
+ struct kvm_run *run = vcpu->run;
void *data = run->mmio.data;
int idx, ret;
bool host_swabbed;
@@ -1423,7 +1425,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
return result;
}
-int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int rs, unsigned int bytes, int is_default_endian)
{
u64 val;
@@ -1439,13 +1441,13 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
return EMULATE_FAIL;
- emulated = kvmppc_handle_store(run, vcpu,
+ emulated = kvmppc_handle_store(vcpu,
val, bytes, is_default_endian);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
@@ -1454,19 +1456,19 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL;
int r;
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
- emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+ emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
} else {
- emulated = kvmppc_handle_vsx_store(run, vcpu,
+ emulated = kvmppc_handle_vsx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
}
@@ -1490,7 +1492,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
-int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian)
{
enum emulation_result emulated = EMULATE_DONE;
@@ -1499,13 +1501,13 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
while (vcpu->arch.mmio_vmx_copy_nums) {
- emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, 0);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++;
}
@@ -1585,7 +1587,7 @@ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian)
{
u64 val = 0;
@@ -1620,12 +1622,12 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
}
- emulated = kvmppc_handle_store(run, vcpu, val, bytes,
+ emulated = kvmppc_handle_store(vcpu, val, bytes,
is_default_endian);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++;
}
@@ -1633,19 +1635,19 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL;
int r;
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
- emulated = kvmppc_handle_vmx_load(run, vcpu,
+ emulated = kvmppc_handle_vmx_load(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
} else {
- emulated = kvmppc_handle_vmx_store(run, vcpu,
+ emulated = kvmppc_handle_vmx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
}
@@ -1775,7 +1777,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write)
- kvmppc_complete_mmio_load(vcpu, run);
+ kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
vcpu->arch.mmio_vsx_copy_nums--;
@@ -1783,7 +1785,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
- r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+ r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
goto out;
@@ -1797,7 +1799,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
if (vcpu->arch.mmio_vmx_copy_nums > 0) {
- r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+ r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
goto out;
@@ -1830,7 +1832,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (run->immediate_exit)
r = -EINTR;
else
- r = kvmppc_vcpu_run(run, vcpu);
+ r = kvmppc_vcpu_run(vcpu);
kvm_sigset_deactivate(vcpu);
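
The MMIO plumbing in powerpc.c above ultimately bounces to userspace with KVM_EXIT_MMIO and is completed on the next KVM_RUN via kvmppc_complete_mmio_load(). The userspace half of that handshake, sketched against the generic KVM UAPI and assuming the vCPU fd and its mmap()ed kvm_run page were set up elsewhere (VM/vCPU creation, memory setup and error handling omitted):

/* Hedged, compile-only sketch of the VMM side of an MMIO exit. */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;

        if (run->exit_reason == KVM_EXIT_MMIO) {
                if (run->mmio.is_write) {
                        /* Guest stored run->mmio.len bytes at run->mmio.phys_addr;
                         * hand run->mmio.data to the device model here. */
                } else {
                        /* Device read: fill the buffer the kernel copies into
                         * the guest register on the next KVM_RUN (the
                         * kvmppc_complete_mmio_load() path on PowerPC). */
                        memset(run->mmio.data, 0, run->mmio.len);
                }
        }
        return 0;
}
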
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 8a1e3b0047f1..4a61a971c34e 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -472,9 +472,9 @@ TRACE_EVENT(kvmppc_run_vcpu_enter,
);
TRACE_EVENT(kvmppc_run_vcpu_exit,
- TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
+ TP_PROTO(struct kvm_vcpu *vcpu),
- TP_ARGS(vcpu, run),
+ TP_ARGS(vcpu),
TP_STRUCT__entry(
__field(int, vcpu_id)
@@ -484,7 +484,7 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
- __entry->exit = run->exit_reason;
+ __entry->exit = vcpu->run->exit_reason;
__entry->ret = vcpu->arch.ret;
),
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 25db70be4c9c..266a6ca5e15e 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -127,7 +127,7 @@ static void update_csb(struct vas_window *window,
return;
}
- use_mm(window->mm);
+ kthread_use_mm(window->mm);
rc = copy_to_user(csb_addr, &csb, sizeof(csb));
/*
* User space polls on csb.flags (first byte). So add barrier
@@ -139,7 +139,7 @@ static void update_csb(struct vas_window *window,
smp_mb();
rc = copy_to_user(csb_addr, &csb, sizeof(u8));
}
- unuse_mm(window->mm);
+ kthread_unuse_mm(window->mm);
put_task_struct(tsk);
/* Success */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c733007b90ab..128192e14ff2 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -12,64 +12,70 @@ config 32BIT
config RISCV
def_bool y
- select OF
- select OF_EARLY_FLATTREE
- select OF_IRQ
+ select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_MMIOWB
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_DIRECT_MAP
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
select COMMON_CLK
+ select EDAC_SUPPORT
+ select GENERIC_ARCH_TOPOLOGY if SMP
+ select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
+ select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+ select GENERIC_IOREMAP
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
+ select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
+ select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
- select GENERIC_SMP_IDLE_THREAD
- select GENERIC_ATOMIC64 if !64BIT
- select GENERIC_IOREMAP
- select GENERIC_PTDUMP if MMU
+ select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB_QXFER_PKT
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
+ select HAVE_COPY_THREAD_TLS
select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_EBPF_JIT if MMU
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GENERIC_VDSO if MMU && 64BIT
+ select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
- select SPARSE_IRQ
- select SYSCTL_EXCEPTION_TRACE
- select HAVE_ARCH_TRACEHOOK
- select HAVE_PCI
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
- select THREAD_INFO_IN_TASK
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_IRQ
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
+ select RISCV_INTC
select RISCV_TIMER
- select GENERIC_IRQ_MULTI_HANDLER
- select GENERIC_ARCH_TOPOLOGY if SMP
- select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_MMIOWB
- select ARCH_HAS_DEBUG_VIRTUAL if MMU
- select HAVE_EBPF_JIT if MMU
- select EDAC_SUPPORT
- select ARCH_HAS_GIGANTIC_PAGE
- select ARCH_HAS_SET_DIRECT_MAP
- select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_STRICT_KERNEL_RWX if MMU
- select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT
- select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
- select HAVE_ARCH_MMAP_RND_BITS if MMU
- select ARCH_HAS_GCOV_PROFILE_ALL
- select HAVE_COPY_THREAD_TLS
- select HAVE_ARCH_KASAN if MMU && 64BIT
- select HAVE_ARCH_KGDB
- select HAVE_ARCH_KGDB_QXFER_PKT
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -196,11 +202,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_FUNCTION_TRACER
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select SWIOTLB if MMU
endchoice
diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h
new file mode 100644
index 000000000000..482185566b0c
--- /dev/null
+++ b/arch/riscv/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 6e1b0e0325eb..9807ad164015 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -10,11 +10,6 @@
#include <linux/interrupt.h>
#include <linux/linkage.h>
-#define NR_IRQS 0
-
-void riscv_timer_interrupt(void);
-void riscv_software_interrupt(void);
-
#include <asm-generic/irq.h>
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3ddb798264f1..bdddcd5c1b71 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -8,6 +8,8 @@
#include <linux/const.h>
+#include <vdso/processor.h>
+
#include <asm/ptrace.h>
/*
@@ -58,16 +60,6 @@ static inline void release_thread(struct task_struct *dead_task)
extern unsigned long get_wchan(struct task_struct *p);
-static inline void cpu_relax(void)
-{
-#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
-#endif
- barrier();
-}
-
static inline void wait_for_interrupt(void)
{
__asm__ __volatile__ ("wfi");
@@ -75,6 +67,7 @@ static inline void wait_for_interrupt(void)
struct device_node;
int riscv_of_processor_hartid(struct device_node *node);
+int riscv_of_parent_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index f4c7cfda6b7f..40bb1c15a731 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -28,6 +28,9 @@ void show_ipi_stats(struct seq_file *p, int prec);
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
+/* Called from C code, this handles an IPI. */
+void handle_IPI(struct pt_regs *regs);
+
/* Hook for the generic smp_call_function_many() routine. */
void arch_send_call_function_ipi_mask(struct cpumask *mask);
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 7a7fce63c474..8454f746bbfd 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -10,8 +10,10 @@
#include <linux/types.h>
+#ifndef GENERIC_TIME_VSYSCALL
struct vdso_data {
};
+#endif
/*
* The VDSO symbols are mapped into Linux so we can just use regular symbol
diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..df6ea65c1dec
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..c8e818688ec1
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_gettimeofday;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_gettime;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_getres;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+{
+ /*
+ * The purpose of csr_read(CSR_TIME) is to trap the system into
+ * M-mode to obtain the value of CSR_TIME. Hence, unlike on other
+ * architectures, no fence instructions surround the csr_read().
+ */
+ return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
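
The three fallback helpers above simply re-enter the kernel with an ecall when the vDSO fast path cannot be used, so their userspace-visible effect is the same as issuing the raw system call. A small illustration using the portable syscall(2) wrapper instead of hand-written ecall assembly:

/* Hedged illustration of what the clock_gettime fallback amounts to. */
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        struct timespec ts;

        if (syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &ts) == 0)
                printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
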
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..82a5693b1861
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+ barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..82fd5d83bd60
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 40a3c442ac5f..6d59e6906fdd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -44,6 +44,22 @@ int riscv_of_processor_hartid(struct device_node *node)
return hart;
}
+/*
+ * Find the hart ID of the CPU DT node under which the given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the hart ID from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv"))
+ return riscv_of_processor_hartid(node);
+ }
+
+ return -1;
+}
+
#ifdef CONFIG_PROC_FS
static void print_isa(struct seq_file *f, const char *isa)
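
riscv_of_parent_hartid() above just walks parent pointers until it reaches a node compatible with "riscv" and reuses riscv_of_processor_hartid() on it. The same walk in miniature, with stand-in node structures instead of struct device_node:

/* Hedged sketch with made-up types, not the kernel's OF API. */
#include <stdio.h>
#include <string.h>

struct node {
        const char *compatible;
        int hartid;
        struct node *parent;
};

static int parent_hartid(struct node *node)
{
        for (; node; node = node->parent) {
                if (strcmp(node->compatible, "riscv") == 0)
                        return node->hartid;
        }
        return -1;
}

int main(void)
{
        struct node cpu = { "riscv", 3, NULL };
        struct node intc = { "riscv,cpu-intc", -1, &cpu };

        printf("hartid=%d\n", parent_hartid(&intc));
        return 0;
}
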
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 56d071b2c0a1..cae7e6d4c7ef 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -106,7 +106,9 @@ _save_context:
/* Handle interrupts */
move a0, sp /* pt_regs */
- tail do_IRQ
+ la a1, handle_arch_irq
+ REG_L a1, (a1)
+ jr a1
1:
/*
* Exceptions run with interrupts enabled or disabled depending on the
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 345c4f2eba13..7207fa08d78f 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/irqchip.h>
-#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <asm/smp.h>
@@ -17,37 +16,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
- switch (regs->cause & ~CAUSE_IRQ_FLAG) {
- case RV_IRQ_TIMER:
- riscv_timer_interrupt();
- break;
-#ifdef CONFIG_SMP
- case RV_IRQ_SOFT:
- /*
- * We only use software interrupts to pass IPIs, so if a non-SMP
- * system gets one, then we don't know what to do.
- */
- riscv_software_interrupt();
- break;
-#endif
- case RV_IRQ_EXT:
- handle_arch_irq(regs);
- break;
- default:
- pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
- BUG();
- }
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
void __init init_IRQ(void)
{
irqchip_init();
+ if (!handle_arch_irq)
+ panic("No interrupt controller found.");
}
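
With do_IRQ() gone, the entry code above tail-calls whatever the interrupt-controller driver registered in handle_arch_irq, and init_IRQ() now only verifies that something did register. The shape of that single-handler dispatch, reduced to a stand-alone sketch with made-up names:

/* Hedged sketch: one top-level handler pointer, filled in by the
 * irqchip driver, with a hard failure at init time if none registered. */
#include <stdio.h>
#include <stdlib.h>

struct regs { unsigned long cause; };

static void (*arch_irq_handler)(struct regs *regs);

static void set_handle_irq(void (*fn)(struct regs *regs))
{
        arch_irq_handler = fn;
}

static void demo_intc_handler(struct regs *regs)
{
        printf("dispatching cause 0x%lx\n", regs->cause);
}

int main(void)
{
        set_handle_irq(demo_intc_handler);      /* done by the INTC driver probe */

        if (!arch_irq_handler) {
                fprintf(stderr, "No interrupt controller found.\n");
                exit(1);
        }

        struct regs r = { .cause = 5 };
        arch_irq_handler(&r);                   /* what the entry code tail-calls */
        return 0;
}
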
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 5805791cd5b5..d4a64dfed342 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -11,6 +11,7 @@
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
+#include <asm/patch.h>
struct patch_insn {
void *addr;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index a65a8fa0c22d..b1d4f452f843 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -123,11 +123,14 @@ static inline void clear_ipi(void)
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
}
-void riscv_software_interrupt(void)
+void handle_IPI(struct pt_regs *regs)
{
+ struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;
+ irq_enter();
+
clear_ipi();
while (true) {
@@ -138,7 +141,7 @@ void riscv_software_interrupt(void)
ops = xchg(pending_ipis, 0);
if (ops == 0)
- return;
+ goto done;
if (ops & (1 << IPI_RESCHEDULE)) {
stats[IPI_RESCHEDULE]++;
@@ -160,6 +163,10 @@ void riscv_software_interrupt(void)
/* Order data access and bit testing. */
mb();
}
+
+done:
+ irq_exit();
+ set_irq_regs(old_regs);
}
static const char * const ipi_names[] = {
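
handle_IPI() above drains the per-CPU pending bitmask by atomically swapping it to zero and acting on each set bit, looping in case new bits arrive in the meantime. The same pattern with plain C11 atomics standing in for the kernel's xchg():

/* Hedged sketch of the pending-IPI drain loop. */
#include <stdatomic.h>
#include <stdio.h>

enum { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_MAX };

static _Atomic unsigned long pending_ipis;

static void drain_ipis(void)
{
        for (;;) {
                unsigned long ops = atomic_exchange(&pending_ipis, 0);

                if (ops == 0)
                        return;
                if (ops & (1UL << IPI_RESCHEDULE))
                        printf("reschedule\n");
                if (ops & (1UL << IPI_CALL_FUNC))
                        printf("call function\n");
                if (ops & (1UL << IPI_CPU_STOP))
                        printf("cpu stop\n");
        }
}

int main(void)
{
        atomic_fetch_or(&pending_ipis, 1UL << IPI_RESCHEDULE);
        atomic_fetch_or(&pending_ipis, 1UL << IPI_CALL_FUNC);
        drain_ipis();
        return 0;
}
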
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 6a53c02e9c73..4d3a1048ad8b 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -26,3 +26,12 @@ void __init time_init(void)
lpj_fine = riscv_timebase / HZ;
timer_probe();
}
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 5080fdf8c296..ecec1778e3a4 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -183,6 +183,4 @@ void trap_init(void)
csr_write(CSR_SCRATCH, 0);
/* Set the exception vector address */
csr_write(CSR_TVEC, &handle_exception);
- /* Enable interrupts */
- csr_write(CSR_IE, IE_SIE);
}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index e827fae3bf90..678204231700 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -11,8 +11,12 @@
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
-
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
#include <asm/vdso.h>
+#endif
extern char vdso_start[], vdso_end[];
@@ -26,7 +30,7 @@ static union {
struct vdso_data data;
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
static int __init vdso_init(void)
{
@@ -75,13 +79,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
mm->context.vdso = (void *)vdso_base;
- ret = install_special_mapping(mm, vdso_base, vdso_len,
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
vdso_pagelist);
- if (unlikely(ret))
+ if (unlikely(ret)) {
mm->context.vdso = NULL;
+ goto end;
+ }
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
end:
mmap_write_unlock(mm);
return ret;
@@ -91,5 +104,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
return NULL;
}
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 4c8b2a4a6a70..38ba55b0eb9d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copied from arch/tile/kernel/vdso/Makefile
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
# Symbols present in the vdso
vdso-syms = rt_sigreturn
ifdef CONFIG_64BIT
-vdso-syms += gettimeofday
-vdso-syms += clock_gettime
-vdso-syms += clock_getres
+vdso-syms += vgettimeofday
endif
vdso-syms += getcpu
vdso-syms += flush_icache
@@ -14,6 +16,10 @@ vdso-syms += flush_icache
# Files to link into the vdso
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
deleted file mode 100644
index 91378a52eb22..000000000000
--- a/arch/riscv/kernel/vdso/clock_getres.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
-ENTRY(__vdso_clock_getres)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_getres
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
deleted file mode 100644
index 5371fd9bc01f..000000000000
--- a/arch/riscv/kernel/vdso/clock_gettime.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
-ENTRY(__vdso_clock_gettime)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_gettime
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
deleted file mode 100644
index e6fb8af88632..000000000000
--- a/arch/riscv/kernel/vdso/gettimeofday.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
-ENTRY(__vdso_gettimeofday)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_gettimeofday
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_gettimeofday)
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index f66a091cb890..e6f558bca71b 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -2,11 +2,13 @@
/*
* Copyright (C) 2012 Regents of the University of California
*/
+#include <asm/page.h>
OUTPUT_ARCH(riscv)
SECTIONS
{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..d264943e2e47
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copied from arch/arm64/kernel/vdso/vgettimeofday.c
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
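These wrappers only forward to the generic __cvdso_*() helpers that the Makefile change pulls in through -include $(c-gettimeofday-y). A small userspace smoke test, purely illustrative and not part of the patch: once the fast path works, running it under strace should show no clock_gettime or gettimeofday syscalls from the loop.

	/* vDSO smoke test: the time queries should stay in userspace. */
	#include <stdio.h>
	#include <sys/time.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts = { 0 };
		struct timeval tv = { 0 };

		for (int i = 0; i < 1000; i++) {
			clock_gettime(CLOCK_MONOTONIC, &ts);
			gettimeofday(&tv, NULL);
		}
		printf("monotonic %ld.%09ld, realtime %ld.%06ld\n",
		       (long)ts.tv_sec, ts.tv_nsec,
		       (long)tv.tv_sec, (long)tv.tv_usec);
		return 0;
	}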
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 9996f49959bd..f4adb3684f3d 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -480,17 +480,6 @@ static void __init setup_vm_final(void)
csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
-
-void free_initmem(void)
-{
- unsigned long init_begin = (unsigned long)__init_begin;
- unsigned long init_end = (unsigned long)__init_end;
-
- /* Make the region as non-execuatble. */
- set_memory_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
- free_initmem_default(POISON_FREE_INITMEM);
-}
-
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3d554887794e..cee3cb6455a2 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -978,7 +978,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d0ff26d157bc..d47c19718615 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3923,11 +3923,13 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
}
}
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+
+ return true;
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 10dae8b96ed5..dde744682e63 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -181,7 +181,6 @@ config X86
select HAVE_HW_BREAKPOINT
select HAVE_IDE
select HAVE_IOREMAP_PROT
- select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@@ -233,6 +232,7 @@ config X86
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
+ select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
@@ -822,14 +822,6 @@ config PVH
This option enables the PVH entry point for guest virtual machines
as specified in the x86/HVM direct boot ABI.
-config KVM_DEBUG_FS
- bool "Enable debug information for KVM Guests in debugfs"
- depends on KVM_GUEST && DEBUG_FS
- ---help---
- This option enables collection of various statistics for KVM guest.
- Statistics are displayed in debugfs filesystem. Enabling this option
- may incur significant overhead.
-
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
depends on PARAVIRT
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 4c5355684321..fe605205b4ce 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,7 +9,9 @@
# Changed by many, many contributors over the years.
#
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Kernel does not boot with kcov instrumentation here.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5f7c262bcc99..7619742f91c9 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -17,7 +17,9 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 85eb381259c2..b7a5790d8d63 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -3,7 +3,13 @@
# Makefile for the x86 low level entry code
#
-OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 1c7f13bb6728..4208c1e3f601 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -341,30 +341,13 @@ For 32-bit we have the following conventions - kernel is built with
#endif
.endm
-#endif /* CONFIG_X86_64 */
+#else /* CONFIG_X86_64 */
+# undef UNWIND_HINT_IRET_REGS
+# define UNWIND_HINT_IRET_REGS
+#endif /* !CONFIG_X86_64 */
.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
call stackleak_erase
#endif
.endm
-
-/*
- * This does 'call enter_from_user_mode' unless we can avoid it based on
- * kernel config or using the static jump infrastructure.
- */
-.macro CALL_enter_from_user_mode
-#ifdef CONFIG_CONTEXT_TRACKING
-#ifdef CONFIG_JUMP_LABEL
- STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_key, def=0
-#endif
- call enter_from_user_mode
-.Lafter_call_\@:
-#endif
-.endm
-
-#ifdef CONFIG_PARAVIRT_XXL
-#define GET_CR2_INTO(reg) GET_CR2_INTO_AX ; _ASM_MOV %_ASM_AX, reg
-#else
-#define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg
-#endif
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 76735ec813e6..bd3f14175193 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -27,6 +27,11 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_XEN_PV
+#include <xen/xen-ops.h>
+#include <xen/events.h>
+#endif
+
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
@@ -35,21 +40,67 @@
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
+#include <asm/irq_stack.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
#ifdef CONFIG_CONTEXT_TRACKING
-/* Called on entry from user mode with IRQs off. */
-__visible inline void enter_from_user_mode(void)
+/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ *
+ * Syscall entry disables interrupts, but user mode is traced as interrupts
+ * enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ */
+static noinstr void enter_from_user_mode(void)
{
- CT_WARN_ON(ct_state() != CONTEXT_USER);
+ enum ctx_state state = ct_state();
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
user_exit_irqoff();
+
+ instrumentation_begin();
+ CT_WARN_ON(state != CONTEXT_USER);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
}
#else
-static inline void enter_from_user_mode(void) {}
+static __always_inline void enter_from_user_mode(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
#endif
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall exit enables interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Clear CPU buffers if CPU is affected by MDS and the mitigation is on.
+ * 4) Tell lockdep that interrupts are enabled
+ */
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+
+ user_enter_irqoff();
+ mds_user_clear_cpu_buffers();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
@@ -179,8 +230,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
}
}
-/* Called with IRQs disabled. */
-__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+static void __prepare_exit_to_usermode(struct pt_regs *regs)
{
struct thread_info *ti = current_thread_info();
u32 cached_flags;
@@ -219,10 +269,14 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
*/
ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif
+}
- user_enter_irqoff();
-
- mds_user_clear_cpu_buffers();
+__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ __prepare_exit_to_usermode(regs);
+ instrumentation_end();
+ exit_to_user_mode();
}
#define SYSCALL_EXIT_WORK_FLAGS \
@@ -251,11 +305,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
tracehook_report_syscall_exit(regs, step);
}
-/*
- * Called with IRQs on and fully valid regs. Returns with IRQs off in a
- * state such that we can immediately switch to user mode.
- */
-__visible inline void syscall_return_slowpath(struct pt_regs *regs)
+static void __syscall_return_slowpath(struct pt_regs *regs)
{
struct thread_info *ti = current_thread_info();
u32 cached_flags = READ_ONCE(ti->flags);
@@ -276,15 +326,29 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
syscall_slow_exit_work(regs, cached_flags);
local_irq_disable();
- prepare_exit_to_usermode(regs);
+ __prepare_exit_to_usermode(regs);
+}
+
+/*
+ * Called with IRQs on and fully valid regs. Returns with IRQs off in a
+ * state such that we can immediately switch to user mode.
+ */
+__visible noinstr void syscall_return_slowpath(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ __syscall_return_slowpath(regs);
+ instrumentation_end();
+ exit_to_user_mode();
}
#ifdef CONFIG_X86_64
-__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
struct thread_info *ti;
enter_from_user_mode();
+ instrumentation_begin();
+
local_irq_enable();
ti = current_thread_info();
if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
@@ -301,8 +365,10 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
regs->ax = x32_sys_call_table[nr](regs);
#endif
}
+ __syscall_return_slowpath(regs);
- syscall_return_slowpath(regs);
+ instrumentation_end();
+ exit_to_user_mode();
}
#endif
@@ -313,7 +379,7 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
* extremely hot in workloads that use it, and it's usually called from
* do_fast_syscall_32, so forcibly inline it to improve performance.
*/
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
+static void do_syscall_32_irqs_on(struct pt_regs *regs)
{
struct thread_info *ti = current_thread_info();
unsigned int nr = (unsigned int)regs->orig_ax;
@@ -337,27 +403,62 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
regs->ax = ia32_sys_call_table[nr](regs);
}
- syscall_return_slowpath(regs);
+ __syscall_return_slowpath(regs);
}
/* Handles int $0x80 */
-__visible void do_int80_syscall_32(struct pt_regs *regs)
+__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
enter_from_user_mode();
+ instrumentation_begin();
+
local_irq_enable();
do_syscall_32_irqs_on(regs);
+
+ instrumentation_end();
+ exit_to_user_mode();
+}
+
+static bool __do_fast_syscall_32(struct pt_regs *regs)
+{
+ int res;
+
+ /* Fetch EBP from where the vDSO stashed it. */
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /*
+ * Micro-optimization: the pointer we're following is
+ * explicitly 32 bits, so it can't be out of range.
+ */
+ res = __get_user(*(u32 *)&regs->bp,
+ (u32 __user __force *)(unsigned long)(u32)regs->sp);
+ } else {
+ res = get_user(*(u32 *)&regs->bp,
+ (u32 __user __force *)(unsigned long)(u32)regs->sp);
+ }
+
+ if (res) {
+ /* User code screwed up. */
+ regs->ax = -EFAULT;
+ local_irq_disable();
+ __prepare_exit_to_usermode(regs);
+ return false;
+ }
+
+ /* Now this is just like a normal syscall. */
+ do_syscall_32_irqs_on(regs);
+ return true;
}
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
-__visible long do_fast_syscall_32(struct pt_regs *regs)
+__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
/*
* Called using the internal vDSO SYSENTER/SYSCALL32 calling
* convention. Adjust regs so it looks like we entered using int80.
*/
-
unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
- vdso_image_32.sym_int80_landing_pad;
+ vdso_image_32.sym_int80_landing_pad;
+ bool success;
/*
* SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
@@ -367,33 +468,17 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
regs->ip = landing_pad;
enter_from_user_mode();
+ instrumentation_begin();
local_irq_enable();
+ success = __do_fast_syscall_32(regs);
- /* Fetch EBP from where the vDSO stashed it. */
- if (
-#ifdef CONFIG_X86_64
- /*
- * Micro-optimization: the pointer we're following is explicitly
- * 32 bits, so it can't be out of range.
- */
- __get_user(*(u32 *)&regs->bp,
- (u32 __user __force *)(unsigned long)(u32)regs->sp)
-#else
- get_user(*(u32 *)&regs->bp,
- (u32 __user __force *)(unsigned long)(u32)regs->sp)
-#endif
- ) {
-
- /* User code screwed up. */
- local_irq_disable();
- regs->ax = -EFAULT;
- prepare_exit_to_usermode(regs);
- return 0; /* Keep it simple: use IRET. */
- }
+ instrumentation_end();
+ exit_to_user_mode();
- /* Now this is just like a normal syscall. */
- do_syscall_32_irqs_on(regs);
+ /* If it failed, keep it simple: use IRET. */
+ if (!success)
+ return 0;
#ifdef CONFIG_X86_64
/*
@@ -431,3 +516,266 @@ SYSCALL_DEFINE0(ni_syscall)
{
return -ENOSYS;
}
+
+/**
+ * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
+ * RCU handling
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking as low level ASM entry disabled
+ * interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent as low level ASM
+ * entry disabled interrupts.
+ *
+ * For kernel mode entries RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
+ * invoked on entry and rcu_irq_exit() on exit.
+ *
+ * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
+ * solves the problem of kernel mode pagefaults which can schedule, which
+ * is not possible after invoking rcu_irq_enter() without undoing it.
+ *
+ * For user mode entries enter_from_user_mode() must be invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: True if RCU has been adjusted on a kernel entry
+ * False otherwise
+ *
+ * The return value must be fed into the rcu_exit argument of
+ * idtentry_exit_cond_rcu().
+ */
+bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
+{
+ if (user_mode(regs)) {
+ enter_from_user_mode();
+ return false;
+ }
+
+ /*
+ * If this entry hit the idle task invoke rcu_irq_enter() whether
+ * RCU is watching or not.
+ *
+ * Interrupts can nest when the first interrupt invokes softirq
+ * processing on return, which enables interrupts.
+ *
+ * Scheduler ticks in the idle task can mark quiescent state and
+ * terminate a grace period, if and only if the timer interrupt is
+ * not nested into another interrupt.
+ *
+ * Checking for __rcu_is_watching() here would prevent the nesting
+ * interrupt from invoking rcu_irq_enter(). If that nested interrupt is
+ * the tick then rcu_flavor_sched_clock_irq() would wrongfully
+ * assume that it is the first interrupt and eventually claim
+ * quiescent state and end grace periods prematurely.
+ *
+ * Unconditionally invoke rcu_irq_enter() so RCU state stays
+ * consistent.
+ *
+ * TINY_RCU does not support EQS, so let the compiler eliminate
+ * this part when enabled.
+ */
+ if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+ /*
+ * If RCU is not watching then the same careful
+ * sequence vs. lockdep and tracing is required
+ * as in enter_from_user_mode().
+ */
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+
+ return true;
+ }
+
+ /*
+ * If RCU is watching then RCU only wants to check whether it needs
+ * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
+ * already contains a warning when RCU is not watching, so no point
+ * in having another one here.
+ */
+ instrumentation_begin();
+ rcu_irq_enter_check_tick();
+ /* Use the combo lockdep/tracing function */
+ trace_hardirqs_off();
+ instrumentation_end();
+
+ return false;
+}
+
+static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
+{
+ if (may_sched && !preempt_count()) {
+ /* Sanity check RCU and thread stack */
+ rcu_irq_exit_check_preempt();
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+ WARN_ON_ONCE(!on_thread_stack());
+ if (need_resched())
+ preempt_schedule_irq();
+ }
+ /* Covers both tracing and lockdep */
+ trace_hardirqs_on();
+}
+
+/**
+ * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
+ * handling
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @rcu_exit: Invoke rcu_irq_exit() if true
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and required and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
+ * function must be fed into the @rcu_exit argument.
+ */
+void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
+{
+ lockdep_assert_irqs_disabled();
+
+ /* Check whether this returns to user mode */
+ if (user_mode(regs)) {
+ prepare_exit_to_usermode(regs);
+ } else if (regs->flags & X86_EFLAGS_IF) {
+ /*
+ * If RCU was not watching on entry this needs to be done
+ * carefully and needs the same ordering of lockdep/tracing
+ * and RCU as the return to user mode path.
+ */
+ if (rcu_exit) {
+ instrumentation_begin();
+ /* Tell the tracer that IRET will enable interrupts */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+ rcu_irq_exit();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ return;
+ }
+
+ instrumentation_begin();
+ idtentry_exit_cond_resched(regs, IS_ENABLED(CONFIG_PREEMPTION));
+ instrumentation_end();
+ } else {
+ /*
+ * IRQ flags state is correct already. Just tell RCU if it
+ * was not watching on entry.
+ */
+ if (rcu_exit)
+ rcu_irq_exit();
+ }
+}
+
+/**
+ * idtentry_enter_user - Handle state tracking on idtentry from user mode
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes enter_from_user_mode() to establish the proper context for
+ * NOHZ_FULL. Otherwise scheduling on exit would not be possible.
+ */
+void noinstr idtentry_enter_user(struct pt_regs *regs)
+{
+ enter_from_user_mode();
+}
+
+/**
+ * idtentry_exit_user - Handle return from exception to user mode
+ * @regs: Pointer to pt_regs (exception entry regs)
+ *
+ * Runs the necessary preemption and work checks and returns to the caller
+ * with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to idtentry_enter_user().
+ */
+void noinstr idtentry_exit_user(struct pt_regs *regs)
+{
+ lockdep_assert_irqs_disabled();
+
+ prepare_exit_to_usermode(regs);
+}
+
+#ifdef CONFIG_XEN_PV
+#ifndef CONFIG_PREEMPTION
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+/*
+ * In case of scheduling the flag must be cleared and restored after
+ * returning from schedule as the task might move to a different CPU.
+ */
+static __always_inline bool get_and_clear_inhcall(void)
+{
+ bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+ return inhcall;
+}
+
+static __always_inline void restore_inhcall(bool inhcall)
+{
+ __this_cpu_write(xen_in_preemptible_hcall, inhcall);
+}
+#else
+static __always_inline bool get_and_clear_inhcall(void) { return false; }
+static __always_inline void restore_inhcall(bool inhcall) { }
+#endif
+
+static void __xen_pv_evtchn_do_upcall(void)
+{
+ irq_enter_rcu();
+ inc_irq_stat(irq_hv_callback_count);
+
+ xen_hvm_evtchn_do_upcall();
+
+ irq_exit_rcu();
+}
+
+__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+ bool inhcall, rcu_exit;
+
+ rcu_exit = idtentry_enter_cond_rcu(regs);
+ old_regs = set_irq_regs(regs);
+
+ instrumentation_begin();
+ run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+ instrumentation_end();
+
+ set_irq_regs(old_regs);
+
+ inhcall = get_and_clear_inhcall();
+ if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
+ instrumentation_begin();
+ idtentry_exit_cond_resched(regs, true);
+ instrumentation_end();
+ restore_inhcall(inhcall);
+ } else {
+ idtentry_exit_cond_rcu(regs, rcu_exit);
+ }
+}
+#endif /* CONFIG_XEN_PV */
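To make the intended use of the idtentry_enter_cond_rcu()/idtentry_exit_cond_rcu() pair introduced above concrete, a converted C handler is expected to bracket its body roughly like the sketch below. The handler name is made up; real handlers are generated through the DEFINE_IDTENTRY-style macros in <asm/idtentry.h>, and xen_pv_evtchn_do_upcall() above is a concrete in-tree user of the same pattern.

	/* Sketch of a handler using the new enter/exit helpers; the name and
	 * body are placeholders, only the bracketing pattern matters. */
	__visible noinstr void exc_example(struct pt_regs *regs)
	{
		bool rcu_exit = idtentry_enter_cond_rcu(regs);

		instrumentation_begin();
		/* ... instrumentable handler work goes here ... */
		instrumentation_end();

		idtentry_exit_cond_rcu(regs, rcu_exit);
	}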
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index a5eed844e948..024d7d276cd4 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,40 +44,13 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
+#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include "calling.h"
.section .entry.text, "ax"
-/*
- * We use macros for low-level operations which need to be overridden
- * for paravirtualization. The following will never clobber any registers:
- * INTERRUPT_RETURN (aka. "iret")
- * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
- * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
- *
- * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
- * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
- * Allowing a register to be clobbered can shrink the paravirt replacement
- * enough to patch inline, increasing performance.
- */
-
-#ifdef CONFIG_PREEMPTION
-# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
-#else
-# define preempt_stop(clobbers)
-#endif
-
-.macro TRACE_IRQS_IRET
-#ifdef CONFIG_TRACE_IRQFLAGS
- testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
- jz 1f
- TRACE_IRQS_ON
-1:
-#endif
-.endm
-
#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
/*
@@ -726,10 +699,68 @@
.Lend_\@:
.endm
+
+/**
+ * idtentry - Macro to generate entry stubs for simple IDT entries
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ * @has_error_code: Hardware pushed error code on stack
+ */
+.macro idtentry vector asmsym cfunc has_error_code:req
+SYM_CODE_START(\asmsym)
+ ASM_CLAC
+ cld
+
+ .if \has_error_code == 0
+ pushl $0 /* Clear the error code */
+ .endif
+
+ /* Push the C-function address into the GS slot */
+ pushl $\cfunc
+ /* Invoke the common exception entry */
+ jmp handle_exception
+SYM_CODE_END(\asmsym)
+.endm
+
+.macro idtentry_irq vector cfunc
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+SYM_CODE_START_LOCAL(asm_\cfunc)
+ ASM_CLAC
+ SAVE_ALL switch_stacks=1
+ ENCODE_FRAME_POINTER
+ movl %esp, %eax
+ movl PT_ORIG_EAX(%esp), %edx /* get the vector from stack */
+ movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */
+ call \cfunc
+ jmp handle_exception_return
+SYM_CODE_END(asm_\cfunc)
+.endm
+
+.macro idtentry_sysvec vector cfunc
+ idtentry \vector asm_\cfunc \cfunc has_error_code=0
+.endm
+
+/*
+ * Include the defines which emit the idt entries which are shared
+ * between 32 and 64 bit and emit the __irqentry_text_* markers
+ * so the stacktrace boundary checks work.
+ */
+ .align 16
+ .globl __irqentry_text_start
+__irqentry_text_start:
+
+#include <asm/idtentry.h>
+
+ .align 16
+ .globl __irqentry_text_end
+__irqentry_text_end:
+
/*
* %eax: prev task
* %edx: next task
*/
+.pushsection .text, "ax"
SYM_CODE_START(__switch_to_asm)
/*
* Save callee-saved registers
@@ -776,6 +807,7 @@ SYM_CODE_START(__switch_to_asm)
jmp __switch_to
SYM_CODE_END(__switch_to_asm)
+.popsection
/*
* The unwinder expects the last frame on the stack to always be at the same
@@ -784,6 +816,7 @@ SYM_CODE_END(__switch_to_asm)
* asmlinkage function so its argument has to be pushed on the stack. This
* wrapper creates a proper "end of stack" frame header before the call.
*/
+.pushsection .text, "ax"
SYM_FUNC_START(schedule_tail_wrapper)
FRAME_BEGIN
@@ -794,6 +827,8 @@ SYM_FUNC_START(schedule_tail_wrapper)
FRAME_END
ret
SYM_FUNC_END(schedule_tail_wrapper)
+.popsection
+
/*
* A newly forked process directly context switches into this address.
*
@@ -801,6 +836,7 @@ SYM_FUNC_END(schedule_tail_wrapper)
* ebx: kernel thread func (NULL for user thread)
* edi: kernel thread arg
*/
+.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
call schedule_tail_wrapper
@@ -811,8 +847,7 @@ SYM_CODE_START(ret_from_fork)
/* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
call syscall_return_slowpath
- STACKLEAK_ERASE
- jmp restore_all
+ jmp .Lsyscall_32_done
/* kernel thread */
1: movl %edi, %eax
@@ -825,38 +860,7 @@ SYM_CODE_START(ret_from_fork)
movl $0, PT_EAX(%esp)
jmp 2b
SYM_CODE_END(ret_from_fork)
-
-/*
- * Return to user mode is not as complex as all this looks,
- * but we want the default path for a system call return to
- * go as quickly as possible which is why some of this is
- * less clear than it otherwise should be.
- */
-
- # userspace resumption stub bypassing syscall exit tracing
-SYM_CODE_START_LOCAL(ret_from_exception)
- preempt_stop(CLBR_ANY)
-ret_from_intr:
-#ifdef CONFIG_VM86
- movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
- movb PT_CS(%esp), %al
- andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
-#else
- /*
- * We can be coming here from child spawned by kernel_thread().
- */
- movl PT_CS(%esp), %eax
- andl $SEGMENT_RPL_MASK, %eax
-#endif
- cmpl $USER_RPL, %eax
- jb restore_all_kernel # not returning to v8086 or userspace
-
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF
- movl %esp, %eax
- call prepare_exit_to_usermode
- jmp restore_all
-SYM_CODE_END(ret_from_exception)
+.popsection
SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
@@ -960,12 +964,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
jnz .Lsysenter_fix_flags
.Lsysenter_flags_fixed:
- /*
- * User mode is traced as though IRQs are on, and SYSENTER
- * turned them off.
- */
- TRACE_IRQS_OFF
-
movl %esp, %eax
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
@@ -974,8 +972,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
STACKLEAK_ERASE
-/* Opportunistic SYSEXIT */
- TRACE_IRQS_ON /* User mode traces as IRQs on. */
+ /* Opportunistic SYSEXIT */
/*
* Setup entry stack - we keep the pointer in %eax and do the
@@ -1075,20 +1072,12 @@ SYM_FUNC_START(entry_INT80_32)
SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
- /*
- * User mode is traced as though IRQs are on, and the interrupt gate
- * turned them off.
- */
- TRACE_IRQS_OFF
-
movl %esp, %eax
call do_int80_syscall_32
.Lsyscall_32_done:
-
STACKLEAK_ERASE
-restore_all:
- TRACE_IRQS_ON
+restore_all_switch_stack:
SWITCH_TO_ENTRY_STACK
CHECK_AND_APPLY_ESPFIX
@@ -1107,26 +1096,10 @@ restore_all:
*/
INTERRUPT_RETURN
-restore_all_kernel:
-#ifdef CONFIG_PREEMPTION
- DISABLE_INTERRUPTS(CLBR_ANY)
- cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz .Lno_preempt
- testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz .Lno_preempt
- call preempt_schedule_irq
-.Lno_preempt:
-#endif
- TRACE_IRQS_IRET
- PARANOID_EXIT_TO_KERNEL_MODE
- BUG_IF_WRONG_CR3
- RESTORE_REGS 4
- jmp .Lirq_return
-
.section .fixup, "ax"
-SYM_CODE_START(iret_exc)
+SYM_CODE_START(asm_iret_error)
pushl $0 # no error code
- pushl $do_iret_error
+ pushl $iret_error
#ifdef CONFIG_DEBUG_ENTRY
/*
@@ -1140,10 +1113,10 @@ SYM_CODE_START(iret_exc)
popl %eax
#endif
- jmp common_exception
-SYM_CODE_END(iret_exc)
+ jmp handle_exception
+SYM_CODE_END(asm_iret_error)
.previous
- _ASM_EXTABLE(.Lirq_return, iret_exc)
+ _ASM_EXTABLE(.Lirq_return, asm_iret_error)
SYM_FUNC_END(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
@@ -1193,192 +1166,21 @@ SYM_FUNC_END(entry_INT80_32)
#endif
.endm
-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
- .align 8
-SYM_CODE_START(irq_entries_start)
- vector=FIRST_EXTERNAL_VECTOR
- .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
- pushl $(~vector+0x80) /* Note: always in signed byte range */
- vector=vector+1
- jmp common_interrupt
- .align 8
- .endr
-SYM_CODE_END(irq_entries_start)
-
-#ifdef CONFIG_X86_LOCAL_APIC
- .align 8
-SYM_CODE_START(spurious_entries_start)
- vector=FIRST_SYSTEM_VECTOR
- .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
- pushl $(~vector+0x80) /* Note: always in signed byte range */
- vector=vector+1
- jmp common_spurious
- .align 8
- .endr
-SYM_CODE_END(spurious_entries_start)
-
-SYM_CODE_START_LOCAL(common_spurious)
- ASM_CLAC
- addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
- SAVE_ALL switch_stacks=1
- ENCODE_FRAME_POINTER
- TRACE_IRQS_OFF
- movl %esp, %eax
- call smp_spurious_interrupt
- jmp ret_from_intr
-SYM_CODE_END(common_spurious)
-#endif
-
-/*
- * the CPU automatically disables interrupts when executing an IRQ vector,
- * so IRQ-flags tracing has to follow that:
- */
- .p2align CONFIG_X86_L1_CACHE_SHIFT
-SYM_CODE_START_LOCAL(common_interrupt)
- ASM_CLAC
- addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
-
- SAVE_ALL switch_stacks=1
- ENCODE_FRAME_POINTER
- TRACE_IRQS_OFF
- movl %esp, %eax
- call do_IRQ
- jmp ret_from_intr
-SYM_CODE_END(common_interrupt)
-
-#define BUILD_INTERRUPT3(name, nr, fn) \
-SYM_FUNC_START(name) \
- ASM_CLAC; \
- pushl $~(nr); \
- SAVE_ALL switch_stacks=1; \
- ENCODE_FRAME_POINTER; \
- TRACE_IRQS_OFF \
- movl %esp, %eax; \
- call fn; \
- jmp ret_from_intr; \
-SYM_FUNC_END(name)
-
-#define BUILD_INTERRUPT(name, nr) \
- BUILD_INTERRUPT3(name, nr, smp_##name); \
-
-/* The include is where all of the SMP etc. interrupts come from */
-#include <asm/entry_arch.h>
-
-SYM_CODE_START(coprocessor_error)
- ASM_CLAC
- pushl $0
- pushl $do_coprocessor_error
- jmp common_exception
-SYM_CODE_END(coprocessor_error)
-
-SYM_CODE_START(simd_coprocessor_error)
- ASM_CLAC
- pushl $0
-#ifdef CONFIG_X86_INVD_BUG
- /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
- ALTERNATIVE "pushl $do_general_protection", \
- "pushl $do_simd_coprocessor_error", \
- X86_FEATURE_XMM
-#else
- pushl $do_simd_coprocessor_error
-#endif
- jmp common_exception
-SYM_CODE_END(simd_coprocessor_error)
-
-SYM_CODE_START(device_not_available)
- ASM_CLAC
- pushl $0
- pushl $do_device_not_available
- jmp common_exception
-SYM_CODE_END(device_not_available)
-
#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_iret)
iret
- _ASM_EXTABLE(native_iret, iret_exc)
+ _ASM_EXTABLE(native_iret, asm_iret_error)
SYM_CODE_END(native_iret)
#endif
-SYM_CODE_START(overflow)
- ASM_CLAC
- pushl $0
- pushl $do_overflow
- jmp common_exception
-SYM_CODE_END(overflow)
-
-SYM_CODE_START(bounds)
- ASM_CLAC
- pushl $0
- pushl $do_bounds
- jmp common_exception
-SYM_CODE_END(bounds)
-
-SYM_CODE_START(invalid_op)
- ASM_CLAC
- pushl $0
- pushl $do_invalid_op
- jmp common_exception
-SYM_CODE_END(invalid_op)
-
-SYM_CODE_START(coprocessor_segment_overrun)
- ASM_CLAC
- pushl $0
- pushl $do_coprocessor_segment_overrun
- jmp common_exception
-SYM_CODE_END(coprocessor_segment_overrun)
-
-SYM_CODE_START(invalid_TSS)
- ASM_CLAC
- pushl $do_invalid_TSS
- jmp common_exception
-SYM_CODE_END(invalid_TSS)
-
-SYM_CODE_START(segment_not_present)
- ASM_CLAC
- pushl $do_segment_not_present
- jmp common_exception
-SYM_CODE_END(segment_not_present)
-
-SYM_CODE_START(stack_segment)
- ASM_CLAC
- pushl $do_stack_segment
- jmp common_exception
-SYM_CODE_END(stack_segment)
-
-SYM_CODE_START(alignment_check)
- ASM_CLAC
- pushl $do_alignment_check
- jmp common_exception
-SYM_CODE_END(alignment_check)
-
-SYM_CODE_START(divide_error)
- ASM_CLAC
- pushl $0 # no error code
- pushl $do_divide_error
- jmp common_exception
-SYM_CODE_END(divide_error)
-
-#ifdef CONFIG_X86_MCE
-SYM_CODE_START(machine_check)
- ASM_CLAC
- pushl $0
- pushl $do_mce
- jmp common_exception
-SYM_CODE_END(machine_check)
-#endif
-
-SYM_CODE_START(spurious_interrupt_bug)
- ASM_CLAC
- pushl $0
- pushl $do_spurious_interrupt_bug
- jmp common_exception
-SYM_CODE_END(spurious_interrupt_bug)
-
#ifdef CONFIG_XEN_PV
-SYM_FUNC_START(xen_hypervisor_callback)
+/*
+ * See comment in entry_64.S for further explanation
+ *
+ * Note: This is not an actual IDT entry point. It's a XEN-specific entry
+ * point and therefore named to match the 64-bit trampoline counterpart.
+ */
+SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback)
/*
* Check to see if we got the event in the critical
* region in xen_iret_direct, after we've reenabled
@@ -1395,14 +1197,11 @@ SYM_FUNC_START(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
ENCODE_FRAME_POINTER
- TRACE_IRQS_OFF
+
mov %esp, %eax
- call xen_evtchn_do_upcall
-#ifndef CONFIG_PREEMPTION
- call xen_maybe_preempt_hcall
-#endif
- jmp ret_from_intr
-SYM_FUNC_END(xen_hypervisor_callback)
+ call xen_pv_evtchn_do_upcall
+ jmp handle_exception_return
+SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -1429,11 +1228,11 @@ SYM_FUNC_START(xen_failsafe_callback)
popl %eax
lea 16(%esp), %esp
jz 5f
- jmp iret_exc
+ jmp asm_iret_error
5: pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
ENCODE_FRAME_POINTER
- jmp ret_from_exception
+ jmp handle_exception_return
.section .fixup, "ax"
6: xorl %eax, %eax
@@ -1456,56 +1255,7 @@ SYM_FUNC_START(xen_failsafe_callback)
SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
-#ifdef CONFIG_XEN_PVHVM
-BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
- xen_evtchn_do_upcall)
-#endif
-
-
-#if IS_ENABLED(CONFIG_HYPERV)
-
-BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
- hyperv_vector_handler)
-
-BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
- hyperv_reenlightenment_intr)
-
-BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
- hv_stimer0_vector_handler)
-
-#endif /* CONFIG_HYPERV */
-
-SYM_CODE_START(page_fault)
- ASM_CLAC
- pushl $do_page_fault
- jmp common_exception_read_cr2
-SYM_CODE_END(page_fault)
-
-SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
- /* the function address is in %gs's slot on the stack */
- SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
-
- ENCODE_FRAME_POINTER
-
- /* fixup %gs */
- GS_TO_REG %ecx
- movl PT_GS(%esp), %edi
- REG_TO_PTGS %ecx
- SET_KERNEL_GS %ecx
-
- GET_CR2_INTO(%ecx) # might clobber %eax
-
- /* fixup orig %eax */
- movl PT_ORIG_EAX(%esp), %edx # get the error code
- movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
-
- TRACE_IRQS_OFF
- movl %esp, %eax # pt_regs pointer
- CALL_NOSPEC edi
- jmp ret_from_exception
-SYM_CODE_END(common_exception_read_cr2)
-
-SYM_CODE_START_LOCAL_NOALIGN(common_exception)
+SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
/* the function address is in %gs's slot on the stack */
SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER
@@ -1520,23 +1270,35 @@ SYM_CODE_START_LOCAL_NOALIGN(common_exception)
movl PT_ORIG_EAX(%esp), %edx # get the error code
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
- TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
CALL_NOSPEC edi
- jmp ret_from_exception
-SYM_CODE_END(common_exception)
-SYM_CODE_START(debug)
+handle_exception_return:
+#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+#else
/*
- * Entry from sysenter is now handled in common_exception
+ * We can be coming here from child spawned by kernel_thread().
*/
- ASM_CLAC
- pushl $0
- pushl $do_debug
- jmp common_exception
-SYM_CODE_END(debug)
+ movl PT_CS(%esp), %eax
+ andl $SEGMENT_RPL_MASK, %eax
+#endif
+ cmpl $USER_RPL, %eax # returning to v8086 or userspace ?
+ jnb ret_to_user
-SYM_CODE_START(double_fault)
+ PARANOID_EXIT_TO_KERNEL_MODE
+ BUG_IF_WRONG_CR3
+ RESTORE_REGS 4
+ jmp .Lirq_return
+
+ret_to_user:
+ movl %esp, %eax
+ jmp restore_all_switch_stack
+SYM_CODE_END(handle_exception)
+
+SYM_CODE_START(asm_exc_double_fault)
1:
/*
* This is a task gate handler, not an interrupt gate handler.
@@ -1574,7 +1336,7 @@ SYM_CODE_START(double_fault)
1:
hlt
jmp 1b
-SYM_CODE_END(double_fault)
+SYM_CODE_END(asm_exc_double_fault)
/*
* NMI is doubly nasty. It can happen on the first instruction of
@@ -1583,7 +1345,7 @@ SYM_CODE_END(double_fault)
* switched stacks. We handle both conditions by simply checking whether we
* interrupted kernel code running on the SYSENTER stack.
*/
-SYM_CODE_START(nmi)
+SYM_CODE_START(asm_exc_nmi)
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
@@ -1612,7 +1374,7 @@ SYM_CODE_START(nmi)
jb .Lnmi_from_sysenter_stack
/* Not on SYSENTER stack. */
- call do_nmi
+ call exc_nmi
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
@@ -1622,7 +1384,7 @@ SYM_CODE_START(nmi)
*/
movl %esp, %ebx
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
- call do_nmi
+ call exc_nmi
movl %ebx, %esp
.Lnmi_return:
@@ -1676,21 +1438,9 @@ SYM_CODE_START(nmi)
lss (1+5+6)*4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
-SYM_CODE_END(nmi)
-
-SYM_CODE_START(int3)
- ASM_CLAC
- pushl $0
- pushl $do_int3
- jmp common_exception
-SYM_CODE_END(int3)
-
-SYM_CODE_START(general_protection)
- ASM_CLAC
- pushl $do_general_protection
- jmp common_exception
-SYM_CODE_END(general_protection)
+SYM_CODE_END(asm_exc_nmi)
+.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@@ -1701,3 +1451,4 @@ SYM_CODE_START(rewind_stack_do_exit)
call do_exit
1: jmp 1b
SYM_CODE_END(rewind_stack_do_exit)
+.popsection
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index eead1e2bebd5..d2a00c97e53f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -16,7 +16,6 @@
*
* Some macro usage:
* - SYM_FUNC_START/END:Define functions in the symbol table.
- * - TRACE_IRQ_*: Trace hardirq state for lock debugging.
* - idtentry: Define exception entry points.
*/
#include <linux/linkage.h>
@@ -37,6 +36,7 @@
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
+#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>
@@ -53,57 +53,6 @@ SYM_CODE_START(native_usergs_sysret64)
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
-.macro TRACE_IRQS_FLAGS flags:req
-#ifdef CONFIG_TRACE_IRQFLAGS
- btl $9, \flags /* interrupts off? */
- jnc 1f
- TRACE_IRQS_ON
-1:
-#endif
-.endm
-
-.macro TRACE_IRQS_IRETQ
- TRACE_IRQS_FLAGS EFLAGS(%rsp)
-.endm
-
-/*
- * When dynamic function tracer is enabled it will add a breakpoint
- * to all locations that it is about to modify, sync CPUs, update
- * all the code, sync CPUs, then remove the breakpoints. In this time
- * if lockdep is enabled, it might jump back into the debug handler
- * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
- *
- * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
- * make sure the stack pointer does not get reset back to the top
- * of the debug stack, and instead just reuses the current stack.
- */
-#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
-
-.macro TRACE_IRQS_OFF_DEBUG
- call debug_stack_set_zero
- TRACE_IRQS_OFF
- call debug_stack_reset
-.endm
-
-.macro TRACE_IRQS_ON_DEBUG
- call debug_stack_set_zero
- TRACE_IRQS_ON
- call debug_stack_reset
-.endm
-
-.macro TRACE_IRQS_IRETQ_DEBUG
- btl $9, EFLAGS(%rsp) /* interrupts off? */
- jnc 1f
- TRACE_IRQS_ON_DEBUG
-1:
-.endm
-
-#else
-# define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF
-# define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON
-# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
-#endif
-
/*
* 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
*
@@ -144,11 +93,6 @@ SYM_CODE_END(native_usergs_sysret64)
SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
- /*
- * Interrupts are off on entry.
- * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
- * it is too small to ever cause noticeable irq latency.
- */
swapgs
/* tss.sp2 is scratch space. */
@@ -167,15 +111,11 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
- TRACE_IRQS_OFF
-
/* IRQs are off. */
movq %rax, %rdi
movq %rsp, %rsi
call do_syscall_64 /* returns with IRQs disabled */
- TRACE_IRQS_ON /* return enables interrupts */
-
/*
* Try to use SYSRET instead of IRET if we're returning to
* a completely clean 64-bit userspace context. If we're not,
@@ -279,6 +219,7 @@ SYM_CODE_END(entry_SYSCALL_64)
* %rdi: prev task
* %rsi: next task
*/
+.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
/*
* Save callee-saved registers
@@ -321,6 +262,7 @@ SYM_FUNC_START(__switch_to_asm)
jmp __switch_to
SYM_FUNC_END(__switch_to_asm)
+.popsection
/*
* A newly forked process directly context switches into this address.
@@ -329,6 +271,7 @@ SYM_FUNC_END(__switch_to_asm)
* rbx: kernel thread func (NULL for user thread)
* r12: kernel thread arg
*/
+.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq %rax, %rdi
@@ -341,7 +284,6 @@ SYM_CODE_START(ret_from_fork)
UNWIND_HINT_REGS
movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
- TRACE_IRQS_ON /* user mode is traced as IRQS on */
jmp swapgs_restore_regs_and_return_to_usermode
1:
@@ -357,34 +299,7 @@ SYM_CODE_START(ret_from_fork)
movq $0, RAX(%rsp)
jmp 2b
SYM_CODE_END(ret_from_fork)
-
-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
- .align 8
-SYM_CODE_START(irq_entries_start)
- vector=FIRST_EXTERNAL_VECTOR
- .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
- UNWIND_HINT_IRET_REGS
- pushq $(~vector+0x80) /* Note: always in signed byte range */
- jmp common_interrupt
- .align 8
- vector=vector+1
- .endr
-SYM_CODE_END(irq_entries_start)
-
- .align 8
-SYM_CODE_START(spurious_entries_start)
- vector=FIRST_SYSTEM_VECTOR
- .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
- UNWIND_HINT_IRET_REGS
- pushq $(~vector+0x80) /* Note: always in signed byte range */
- jmp common_spurious
- .align 8
- vector=vector+1
- .endr
-SYM_CODE_END(spurious_entries_start)
+.popsection
.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
@@ -398,228 +313,185 @@ SYM_CODE_END(spurious_entries_start)
#endif
.endm
-/*
- * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
- * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
- * Requires kernel GSBASE.
- *
- * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
+/**
+ * idtentry_body - Macro to emit code calling the C function
+ * @cfunc: C function to be called
+ * @has_error_code: Hardware pushed error code on stack
*/
-.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
- DEBUG_ENTRY_ASSERT_IRQS_OFF
+.macro idtentry_body cfunc has_error_code:req
- .if \save_ret
- /*
- * If save_ret is set, the original stack contains one additional
- * entry -- the return address. Therefore, move the address one
- * entry below %rsp to \old_rsp.
- */
- leaq 8(%rsp), \old_rsp
- .else
- movq %rsp, \old_rsp
- .endif
-
- .if \regs
- UNWIND_HINT_REGS base=\old_rsp
- .endif
+ call error_entry
+ UNWIND_HINT_REGS
- incl PER_CPU_VAR(irq_count)
- jnz .Lirq_stack_push_old_rsp_\@
+ movq %rsp, %rdi /* pt_regs pointer into 1st argument*/
- /*
- * Right now, if we just incremented irq_count to zero, we've
- * claimed the IRQ stack but we haven't switched to it yet.
- *
- * If anything is added that can interrupt us here without using IST,
- * it must be *extremely* careful to limit its stack usage. This
- * could include kprobes and a hypothetical future IST-less #DB
- * handler.
- *
- * The OOPS unwinder relies on the word at the top of the IRQ
- * stack linking back to the previous RSP for the entire time we're
- * on the IRQ stack. For this to work reliably, we need to write
- * it before we actually move ourselves to the IRQ stack.
- */
+ .if \has_error_code == 1
+ movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
+ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
+ .endif
- movq \old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
- movq PER_CPU_VAR(hardirq_stack_ptr), %rsp
+ call \cfunc
-#ifdef CONFIG_DEBUG_ENTRY
- /*
- * If the first movq above becomes wrong due to IRQ stack layout
- * changes, the only way we'll notice is if we try to unwind right
- * here. Assert that we set up the stack right to catch this type
- * of bug quickly.
- */
- cmpq -8(%rsp), \old_rsp
- je .Lirq_stack_okay\@
- ud2
- .Lirq_stack_okay\@:
-#endif
+ jmp error_return
+.endm
-.Lirq_stack_push_old_rsp_\@:
- pushq \old_rsp
+/**
+ * idtentry - Macro to generate entry stubs for simple IDT entries
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ * @has_error_code: Hardware pushed error code on stack
+ *
+ * The macro emits code to set up the kernel context for straightforward
+ * and simple IDT entries. No IST stack, no paranoid entry checks.
+ */
+.macro idtentry vector asmsym cfunc has_error_code:req
+SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
+ ASM_CLAC
- .if \regs
- UNWIND_HINT_REGS indirect=1
+ .if \has_error_code == 0
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif
- .if \save_ret
- /*
- * Push the return address to the stack. This return address can
- * be found at the "real" original RSP, which was offset by 8 at
- * the beginning of this macro.
- */
- pushq -8(\old_rsp)
+ .if \vector == X86_TRAP_BP
+ /*
+ * If coming from kernel space, create a 6-word gap to allow the
+ * int3 handler to emulate a call instruction.
+ */
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_no_gap_\@
+ .rept 6
+ pushq 5*8(%rsp)
+ .endr
+ UNWIND_HINT_IRET_REGS offset=8
+.Lfrom_usermode_no_gap_\@:
.endif
+
+ idtentry_body \cfunc \has_error_code
+
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
.endm
/*
- * Undoes ENTER_IRQ_STACK.
+ * Interrupt entry/exit.
+ *
+ * The interrupt stubs push (vector) onto the stack, which is the error_code
+ * position of idtentry exceptions, and jump to one of the two idtentry points
+ * (common/spurious).
+ *
+ * common_interrupt is a hotpath, align it to a cache line
*/
-.macro LEAVE_IRQ_STACK regs=1
- DEBUG_ENTRY_ASSERT_IRQS_OFF
- /* We need to be off the IRQ stack before decrementing irq_count. */
- popq %rsp
-
- .if \regs
- UNWIND_HINT_REGS
- .endif
-
- /*
- * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
- * the irq stack but we're not on it.
- */
-
- decl PER_CPU_VAR(irq_count)
+.macro idtentry_irq vector cfunc
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+ idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm
/*
- * Interrupt entry helper function.
+ * System vectors which invoke their handlers directly and are not
+ * going through the regular common device interrupt handling code.
+ */
+.macro idtentry_sysvec vector cfunc
+ idtentry \vector asm_\cfunc \cfunc has_error_code=0
+.endm
+
+/**
+ * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ *
+ * The macro emits code to set up the kernel context for #MC and #DB
+ *
+ * If the entry comes from user space it uses the normal entry path
+ * including the return to user space work and preemption checks on
+ * exit.
*
- * Entry runs with interrupts off. Stack layout at entry:
- * +----------------------------------------------------+
- * | regs->ss |
- * | regs->rsp |
- * | regs->eflags |
- * | regs->cs |
- * | regs->ip |
- * +----------------------------------------------------+
- * | regs->orig_ax = ~(interrupt number) |
- * +----------------------------------------------------+
- * | return address |
- * +----------------------------------------------------+
+ * If it hits in kernel mode then it needs to go through the paranoid
+ * entry as the exception can hit any random state. No preemption
+ * check on exit to keep the paranoid path simple.
*/
-SYM_CODE_START(interrupt_entry)
- UNWIND_HINT_IRET_REGS offset=16
+.macro idtentry_mce_db vector asmsym cfunc
+SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS
ASM_CLAC
- cld
- testb $3, CS-ORIG_RAX+8(%rsp)
- jz 1f
- SWAPGS
- FENCE_SWAPGS_USER_ENTRY
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+
/*
- * Switch to the thread stack. The IRET frame and orig_ax are
- * on the stack, as well as the return address. RDI..R12 are
- * not (yet) on the stack and space has not (yet) been
- * allocated for them.
+ * If the entry is from userspace, switch stacks and treat it as
+ * a normal entry.
*/
- pushq %rdi
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_switch_stack_\@
- /* Need to switch before accessing the thread stack. */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
- movq %rsp, %rdi
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ /*
+ * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
+ * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
+ */
+ call paranoid_entry
- /*
- * We have RDI, return address, and orig_ax on the stack on
- * top of the IRET frame. That means offset=24
- */
- UNWIND_HINT_IRET_REGS base=%rdi offset=24
-
- pushq 7*8(%rdi) /* regs->ss */
- pushq 6*8(%rdi) /* regs->rsp */
- pushq 5*8(%rdi) /* regs->eflags */
- pushq 4*8(%rdi) /* regs->cs */
- pushq 3*8(%rdi) /* regs->ip */
- UNWIND_HINT_IRET_REGS
- pushq 2*8(%rdi) /* regs->orig_ax */
- pushq 8(%rdi) /* return address */
+ UNWIND_HINT_REGS
- movq (%rdi), %rdi
- jmp 2f
-1:
- FENCE_SWAPGS_KERNEL_ENTRY
-2:
- PUSH_AND_CLEAR_REGS save_ret=1
- ENCODE_FRAME_POINTER 8
+ movq %rsp, %rdi /* pt_regs pointer */
- testb $3, CS+8(%rsp)
- jz 1f
+ call \cfunc
- /*
- * IRQ from user mode.
- *
- * We need to tell lockdep that IRQs are off. We can't do this until
- * we fix gsbase, and we should do it before enter_from_user_mode
- * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
- * the simplest way to handle it is to just call it twice if
- * we enter from user mode. There's no reason to optimize this since
- * TRACE_IRQS_OFF is a no-op if lockdep is off.
- */
- TRACE_IRQS_OFF
+ jmp paranoid_exit
- CALL_enter_from_user_mode
+ /* Switch to the regular task stack and use the noist entry point */
+.Lfrom_usermode_switch_stack_\@:
+ idtentry_body noist_\cfunc, has_error_code=0
-1:
- ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
- /* We entered an interrupt context - irqs are off: */
- TRACE_IRQS_OFF
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
+.endm
- ret
-SYM_CODE_END(interrupt_entry)
-_ASM_NOKPROBE(interrupt_entry)
+/*
+ * Double fault entry. Straight paranoid. No checks from which context
+ * this comes because for the espfix induced #DF this would do the wrong
+ * thing.
+ */
+.macro idtentry_df vector asmsym cfunc
+SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS offset=8
+ ASM_CLAC
+ /*
+ * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
+ * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
+ */
+ call paranoid_entry
+ UNWIND_HINT_REGS
-/* Interrupt entry/exit. */
+ movq %rsp, %rdi /* pt_regs pointer into first argument */
+ movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
+ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
+ call \cfunc
+
+ jmp paranoid_exit
+
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
+.endm
/*
- * The interrupt stubs push (~vector+0x80) onto the stack and
- * then jump to common_spurious/interrupt.
+ * Include the defines which emit the idt entries which are shared
+ * between 32 and 64 bit and emit the __irqentry_text_* markers
+ * so the stacktrace boundary checks work.
*/
-SYM_CODE_START_LOCAL(common_spurious)
- addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
- call interrupt_entry
- UNWIND_HINT_REGS indirect=1
- call smp_spurious_interrupt /* rdi points to pt_regs */
- jmp ret_from_intr
-SYM_CODE_END(common_spurious)
-_ASM_NOKPROBE(common_spurious)
-
-/* common_interrupt is a hotpath. Align it */
- .p2align CONFIG_X86_L1_CACHE_SHIFT
-SYM_CODE_START_LOCAL(common_interrupt)
- addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
- call interrupt_entry
- UNWIND_HINT_REGS indirect=1
- call do_IRQ /* rdi points to pt_regs */
- /* 0(%rsp): old RSP */
-ret_from_intr:
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF
-
- LEAVE_IRQ_STACK
+ .align 16
+ .globl __irqentry_text_start
+__irqentry_text_start:
- testb $3, CS(%rsp)
- jz retint_kernel
+#include <asm/idtentry.h>
- /* Interrupt came from user space */
-.Lretint_user:
- mov %rsp,%rdi
- call prepare_exit_to_usermode
- TRACE_IRQS_ON
+ .align 16
+ .globl __irqentry_text_end
+__irqentry_text_end:
+SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates user mode. */
@@ -662,23 +534,6 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
INTERRUPT_RETURN
-/* Returning to kernel space */
-retint_kernel:
-#ifdef CONFIG_PREEMPTION
- /* Interrupts are off */
- /* Check if we need preemption */
- btl $9, EFLAGS(%rsp) /* were interrupts off? */
- jnc 1f
- cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz 1f
- call preempt_schedule_irq
-1:
-#endif
- /*
- * The iretq could re-enable interrupts:
- */
- TRACE_IRQS_IRETQ
-
SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates kernel mode. */
@@ -710,7 +565,7 @@ SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
/*
* This may fault. Non-paranoid faults on return to userspace are
* handled by fixup_bad_iret. These include #SS, #GP, and #NP.
- * Double-faults due to espfix64 are handled in do_double_fault.
+ * Double-faults due to espfix64 are handled in exc_double_fault.
* Other faults here are fatal.
*/
iretq
@@ -788,280 +643,32 @@ native_irq_return_ldt:
*/
jmp native_irq_return_iret
#endif
-SYM_CODE_END(common_interrupt)
-_ASM_NOKPROBE(common_interrupt)
-
-/*
- * APIC interrupts.
- */
-.macro apicinterrupt3 num sym do_sym
-SYM_CODE_START(\sym)
- UNWIND_HINT_IRET_REGS
- pushq $~(\num)
-.Lcommon_\sym:
- call interrupt_entry
- UNWIND_HINT_REGS indirect=1
- call \do_sym /* rdi points to pt_regs */
- jmp ret_from_intr
-SYM_CODE_END(\sym)
-_ASM_NOKPROBE(\sym)
-.endm
-
-/* Make sure APIC interrupt handlers end up in the irqentry section: */
-#define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
-#define POP_SECTION_IRQENTRY .popsection
-
-.macro apicinterrupt num sym do_sym
-PUSH_SECTION_IRQENTRY
-apicinterrupt3 \num \sym \do_sym
-POP_SECTION_IRQENTRY
-.endm
-
-#ifdef CONFIG_SMP
-apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
-apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt
-#endif
-
-#ifdef CONFIG_X86_UV
-apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt
-#endif
-
-apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt
-apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi
-
-#ifdef CONFIG_HAVE_KVM
-apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
-apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
-apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi
-#endif
-
-#ifdef CONFIG_X86_MCE_THRESHOLD
-apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt
-#endif
-
-#ifdef CONFIG_X86_MCE_AMD
-apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt
-#endif
-
-#ifdef CONFIG_X86_THERMAL_VECTOR
-apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt
-#endif
-
-#ifdef CONFIG_SMP
-apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt
-apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt
-apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt
-#endif
-
-apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt
-apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt
-
-#ifdef CONFIG_IRQ_WORK
-apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
-#endif
+SYM_CODE_END(common_interrupt_return)
+_ASM_NOKPROBE(common_interrupt_return)
/*
- * Exception entry points.
- */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)
-
-.macro idtentry_part do_sym, has_error_code:req, read_cr2:req, paranoid:req, shift_ist=-1, ist_offset=0
-
- .if \paranoid
- call paranoid_entry
- /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
- .else
- call error_entry
- .endif
- UNWIND_HINT_REGS
-
- .if \read_cr2
- /*
- * Store CR2 early so subsequent faults cannot clobber it. Use R12 as
- * intermediate storage as RDX can be clobbered in enter_from_user_mode().
- * GET_CR2_INTO can clobber RAX.
- */
- GET_CR2_INTO(%r12);
- .endif
-
- .if \shift_ist != -1
- TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
- .else
- TRACE_IRQS_OFF
- .endif
-
- .if \paranoid == 0
- testb $3, CS(%rsp)
- jz .Lfrom_kernel_no_context_tracking_\@
- CALL_enter_from_user_mode
-.Lfrom_kernel_no_context_tracking_\@:
- .endif
-
- movq %rsp, %rdi /* pt_regs pointer */
-
- .if \has_error_code
- movq ORIG_RAX(%rsp), %rsi /* get error code */
- movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
- .else
- xorl %esi, %esi /* no error code */
- .endif
-
- .if \shift_ist != -1
- subq $\ist_offset, CPU_TSS_IST(\shift_ist)
- .endif
-
- .if \read_cr2
- movq %r12, %rdx /* Move CR2 into 3rd argument */
- .endif
-
- call \do_sym
-
- .if \shift_ist != -1
- addq $\ist_offset, CPU_TSS_IST(\shift_ist)
- .endif
-
- .if \paranoid
- /* this procedure expect "no swapgs" flag in ebx */
- jmp paranoid_exit
- .else
- jmp error_exit
- .endif
-
-.endm
-
-/**
- * idtentry - Generate an IDT entry stub
- * @sym: Name of the generated entry point
- * @do_sym: C function to be called
- * @has_error_code: True if this IDT vector has an error code on the stack
- * @paranoid: non-zero means that this vector may be invoked from
- * kernel mode with user GSBASE and/or user CR3.
- * 2 is special -- see below.
- * @shift_ist: Set to an IST index if entries from kernel mode should
- * decrement the IST stack so that nested entries get a
- * fresh stack. (This is for #DB, which has a nasty habit
- * of recursing.)
- * @create_gap: create a 6-word stack gap when coming from kernel mode.
- * @read_cr2: load CR2 into the 3rd argument; done before calling any C code
- *
- * idtentry generates an IDT stub that sets up a usable kernel context,
- * creates struct pt_regs, and calls @do_sym. The stub has the following
- * special behaviors:
- *
- * On an entry from user mode, the stub switches from the trampoline or
- * IST stack to the normal thread stack. On an exit to user mode, the
- * normal exit-to-usermode path is invoked.
- *
- * On an exit to kernel mode, if @paranoid == 0, we check for preemption,
- * whereas we omit the preemption check if @paranoid != 0. This is purely
- * because the implementation is simpler this way. The kernel only needs
- * to check for asynchronous kernel preemption when IRQ handlers return.
- *
- * If @paranoid == 0, then the stub will handle IRET faults by pretending
- * that the fault came from user mode. It will handle gs_change faults by
- * pretending that the fault happened with kernel GSBASE. Since this handling
- * is omitted for @paranoid != 0, the #GP, #SS, and #NP stubs must have
- * @paranoid == 0. This special handling will do the wrong thing for
- * espfix-induced #DF on IRET, so #DF must not use @paranoid == 0.
+ * Reload gs selector with exception handling
+ * edi: new selector
*
- * @paranoid == 2 is special: the stub will never switch stacks. This is for
- * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
+ * Is in entry.text as it shouldn't be instrumented.
*/
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0
-SYM_CODE_START(\sym)
- UNWIND_HINT_IRET_REGS offset=\has_error_code*8
-
- /* Sanity check */
- .if \shift_ist != -1 && \paranoid != 1
- .error "using shift_ist requires paranoid=1"
- .endif
-
- .if \create_gap && \paranoid
- .error "using create_gap requires paranoid=0"
- .endif
-
- ASM_CLAC
-
- .if \has_error_code == 0
- pushq $-1 /* ORIG_RAX: no syscall to restart */
- .endif
-
- .if \paranoid == 1
- testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */
- jnz .Lfrom_usermode_switch_stack_\@
- .endif
-
- .if \create_gap == 1
- /*
- * If coming from kernel space, create a 6-word gap to allow the
- * int3 handler to emulate a call instruction.
- */
- testb $3, CS-ORIG_RAX(%rsp)
- jnz .Lfrom_usermode_no_gap_\@
- .rept 6
- pushq 5*8(%rsp)
- .endr
- UNWIND_HINT_IRET_REGS offset=8
-.Lfrom_usermode_no_gap_\@:
- .endif
-
- idtentry_part \do_sym, \has_error_code, \read_cr2, \paranoid, \shift_ist, \ist_offset
-
- .if \paranoid == 1
- /*
- * Entry from userspace. Switch stacks and treat it
- * as a normal entry. This means that paranoid handlers
- * run in real process context if user_mode(regs).
- */
-.Lfrom_usermode_switch_stack_\@:
- idtentry_part \do_sym, \has_error_code, \read_cr2, paranoid=0
- .endif
-
-_ASM_NOKPROBE(\sym)
-SYM_CODE_END(\sym)
-.endm
-
-idtentry divide_error do_divide_error has_error_code=0
-idtentry overflow do_overflow has_error_code=0
-idtentry bounds do_bounds has_error_code=0
-idtentry invalid_op do_invalid_op has_error_code=0
-idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault do_double_fault has_error_code=1 paranoid=2 read_cr2=1
-idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
-idtentry invalid_TSS do_invalid_TSS has_error_code=1
-idtentry segment_not_present do_segment_not_present has_error_code=1
-idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0
-idtentry coprocessor_error do_coprocessor_error has_error_code=0
-idtentry alignment_check do_alignment_check has_error_code=1
-idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
-
-
- /*
- * Reload gs selector with exception handling
- * edi: new selector
- */
-SYM_FUNC_START(native_load_gs_index)
+SYM_FUNC_START(asm_load_gs_index)
FRAME_BEGIN
- pushfq
- DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
- TRACE_IRQS_OFF
- SWAPGS
+ swapgs
.Lgs_change:
movl %edi, %gs
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
- SWAPGS
- TRACE_IRQS_FLAGS (%rsp)
- popfq
+ swapgs
FRAME_END
ret
-SYM_FUNC_END(native_load_gs_index)
-EXPORT_SYMBOL(native_load_gs_index)
+SYM_FUNC_END(asm_load_gs_index)
+EXPORT_SYMBOL(asm_load_gs_index)
_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
.section .fixup, "ax"
/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
- SWAPGS /* switch back to user gs */
+ swapgs /* switch back to user gs */
.macro ZAP_GS
/* This can't be a string because the preprocessor needs to see it. */
movl $__USER_DS, %eax
@@ -1074,20 +681,46 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
SYM_CODE_END(.Lbad_gs)
.previous
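
With pushfq/popfq and the IRQ tracing dropped from the stub, disabling
interrupts around the GS reload is left to the C caller. A minimal sketch,
assuming a wrapper of this shape (the name is illustrative):

	static inline void load_gs_index_safe(unsigned int selector)
	{
		unsigned long flags;

		local_irq_save(flags);	/* stub no longer saves/restores IF */
		asm_load_gs_index(selector);
		local_irq_restore(flags);
	}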
-/* Call softirq on interrupt stack. Interrupts are off. */
-SYM_FUNC_START(do_softirq_own_stack)
- pushq %rbp
- mov %rsp, %rbp
- ENTER_IRQ_STACK regs=0 old_rsp=%r11
- call __do_softirq
- LEAVE_IRQ_STACK regs=0
+/*
+ * rdi: New stack pointer, pointing to the top word of the stack
+ * rsi: Function pointer
+ * rdx: Function argument (can be NULL if none)
+ */
+SYM_FUNC_START(asm_call_on_stack)
+ /*
+ * Save the frame pointer unconditionally. This allows the ORC
+ * unwinder to handle the stack switch.
+ */
+ pushq %rbp
+ mov %rsp, %rbp
+
+ /*
+ * The unwinder relies on the word at the top of the new stack
+ * page linking back to the previous RSP.
+ */
+ mov %rsp, (%rdi)
+ mov %rdi, %rsp
+ /* Move the argument to the right place */
+ mov %rdx, %rdi
+
+1:
+ .pushsection .discard.instr_begin
+ .long 1b - .
+ .popsection
+
+ CALL_NOSPEC rsi
+
+2:
+ .pushsection .discard.instr_end
+ .long 2b - .
+ .popsection
+
+ /* Restore the previous stack pointer from RBP. */
leaveq
ret
-SYM_FUNC_END(do_softirq_own_stack)
+SYM_FUNC_END(asm_call_on_stack)
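
A C-side sketch of the contract documented above, assuming this prototype
and a per-CPU hardirq_stack_ptr pointing just past the top of the IRQ
stack (both are assumptions for illustration):

	void asm_call_on_stack(void *stack_top, void (*func)(void), void *arg);

	static __always_inline void run_on_irqstack(void (*func)(void))
	{
		void *tos = __this_cpu_read(hardirq_stack_ptr);

		/* Pass the last usable word; the stub stores the old RSP there. */
		asm_call_on_stack(tos - 8, func, NULL);
	}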
#ifdef CONFIG_XEN_PV
-idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-
/*
* A note on the "critical region" in our callback handler.
* We want to avoid stacking callback handlers due to events occurring
@@ -1100,9 +733,10 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* So, on entry to the handler we detect whether we interrupted an
* existing activation in its critical region -- if so, we pop the current
* activation and restart the handler using the previous one.
+ *
+ * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
*/
-/* do_hypervisor_callback(struct *pt_regs) */
-SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
+SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)
/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
@@ -1112,15 +746,10 @@ SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
movq %rdi, %rsp /* we don't return, adjust the stack frame */
UNWIND_HINT_REGS
- ENTER_IRQ_STACK old_rsp=%r10
- call xen_evtchn_do_upcall
- LEAVE_IRQ_STACK
+ call xen_pv_evtchn_do_upcall
-#ifndef CONFIG_PREEMPTION
- call xen_maybe_preempt_hcall
-#endif
- jmp error_exit
-SYM_CODE_END(xen_do_hypervisor_callback)
+ jmp error_return
+SYM_CODE_END(exc_xen_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -1155,7 +784,7 @@ SYM_CODE_START(xen_failsafe_callback)
addq $0x30, %rsp
pushq $0 /* RIP */
UNWIND_HINT_IRET_REGS offset=8
- jmp general_protection
+ jmp asm_exc_general_protection
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp), %rcx
movq 8(%rsp), %r11
@@ -1164,48 +793,10 @@ SYM_CODE_START(xen_failsafe_callback)
pushq $-1 /* orig_ax = -1 => not a system call */
PUSH_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
- jmp error_exit
+ jmp error_return
SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
-#ifdef CONFIG_XEN_PVHVM
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
- xen_hvm_callback_vector xen_evtchn_do_upcall
-#endif
-
-
-#if IS_ENABLED(CONFIG_HYPERV)
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
- hyperv_callback_vector hyperv_vector_handler
-
-apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
- hyperv_reenlightenment_vector hyperv_reenlightenment_intr
-
-apicinterrupt3 HYPERV_STIMER0_VECTOR \
- hv_stimer0_callback_vector hv_stimer0_vector_handler
-#endif /* CONFIG_HYPERV */
-
-#if IS_ENABLED(CONFIG_ACRN_GUEST)
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
- acrn_hv_callback_vector acrn_hv_vector_handler
-#endif
-
-idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
-idtentry int3 do_int3 has_error_code=0 create_gap=1
-idtentry stack_segment do_stack_segment has_error_code=1
-
-#ifdef CONFIG_XEN_PV
-idtentry xennmi do_nmi has_error_code=0
-idtentry xendebug do_debug has_error_code=0
-#endif
-
-idtentry general_protection do_general_protection has_error_code=1
-idtentry page_fault do_page_fault has_error_code=1 read_cr2=1
-
-#ifdef CONFIG_X86_MCE
-idtentry machine_check do_mce has_error_code=0 paranoid=1
-#endif
-
/*
* Save all registers in pt_regs, and switch gs if needed.
* Use slow, but surefire "are we in kernel?" check.
@@ -1261,17 +852,13 @@ SYM_CODE_END(paranoid_entry)
*/
SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
- TRACE_IRQS_IRETQ
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp restore_regs_and_return_to_kernel
.Lparanoid_exit_no_swapgs:
- TRACE_IRQS_IRETQ_DEBUG
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
jmp restore_regs_and_return_to_kernel
@@ -1335,7 +922,6 @@ SYM_CODE_START_LOCAL(error_entry)
*/
SWAPGS
FENCE_SWAPGS_USER_ENTRY
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
jmp .Lerror_entry_done
.Lbstep_iret:
@@ -1362,14 +948,13 @@ SYM_CODE_START_LOCAL(error_entry)
jmp .Lerror_entry_from_usermode_after_swapgs
SYM_CODE_END(error_entry)
-SYM_CODE_START_LOCAL(error_exit)
+SYM_CODE_START_LOCAL(error_return)
UNWIND_HINT_REGS
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF
+ DEBUG_ENTRY_ASSERT_IRQS_OFF
testb $3, CS(%rsp)
- jz retint_kernel
- jmp .Lretint_user
-SYM_CODE_END(error_exit)
+ jz restore_regs_and_return_to_kernel
+ jmp swapgs_restore_regs_and_return_to_usermode
+SYM_CODE_END(error_return)
/*
* Runs on exception stack. Xen PV does not go through this path at all,
@@ -1379,7 +964,7 @@ SYM_CODE_END(error_exit)
* %r14: Used to save/restore the CR3 of the interrupted context
* when PAGE_TABLE_ISOLATION is in use. Do not clobber.
*/
-SYM_CODE_START(nmi)
+SYM_CODE_START(asm_exc_nmi)
UNWIND_HINT_IRET_REGS
/*
@@ -1464,7 +1049,7 @@ SYM_CODE_START(nmi)
movq %rsp, %rdi
movq $-1, %rsi
- call do_nmi
+ call exc_nmi
/*
* Return back to user mode. We must *not* do the normal exit
@@ -1521,7 +1106,7 @@ SYM_CODE_START(nmi)
* end_repeat_nmi, then we are a nested NMI. We must not
* modify the "iret" frame because it's being written by
* the outer NMI. That's okay; the outer NMI handler is
- * about to about to call do_nmi anyway, so we can just
+ * about to call exc_nmi() anyway, so we can just
* resume the outer NMI.
*/
@@ -1640,7 +1225,7 @@ repeat_nmi:
* RSP is pointing to "outermost RIP". gsbase is unknown, but, if
* we're repeating an NMI, gsbase has the same value that it had on
* the first iteration. paranoid_entry will load the kernel
- * gsbase if needed before we call do_nmi. "NMI executing"
+ * gsbase if needed before we call exc_nmi(). "NMI executing"
* is zero.
*/
movq $1, 10*8(%rsp) /* Set "NMI executing". */
@@ -1674,10 +1259,9 @@ end_repeat_nmi:
call paranoid_entry
UNWIND_HINT_REGS
- /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
movq $-1, %rsi
- call do_nmi
+ call exc_nmi
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
@@ -1714,7 +1298,7 @@ nmi_restore:
* about espfix64 on the way back to kernel mode.
*/
iretq
-SYM_CODE_END(nmi)
+SYM_CODE_END(asm_exc_nmi)
#ifndef CONFIG_IA32_EMULATION
/*
@@ -1728,6 +1312,7 @@ SYM_CODE_START(ignore_sysret)
SYM_CODE_END(ignore_sysret)
#endif
+.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
@@ -1739,3 +1324,4 @@ SYM_CODE_START(rewind_stack_do_exit)
call do_exit
SYM_CODE_END(rewind_stack_do_exit)
+.popsection
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index f1d3ccae5dd5..0f974ae01e62 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -46,12 +46,14 @@
* ebp user stack
* 0(%ebp) arg6
*/
-SYM_FUNC_START(entry_SYSENTER_compat)
+SYM_CODE_START(entry_SYSENTER_compat)
+ UNWIND_HINT_EMPTY
/* Interrupts are off on entry. */
SWAPGS
- /* We are about to clobber %rsp anyway, clobbering here is OK */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+ pushq %rax
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ popq %rax
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -104,6 +106,9 @@ SYM_FUNC_START(entry_SYSENTER_compat)
xorl %r14d, %r14d /* nospec r14 */
pushq $0 /* pt_regs->r15 = 0 */
xorl %r15d, %r15d /* nospec r15 */
+
+ UNWIND_HINT_REGS
+
cld
/*
@@ -129,17 +134,11 @@ SYM_FUNC_START(entry_SYSENTER_compat)
jnz .Lsysenter_fix_flags
.Lsysenter_flags_fixed:
- /*
- * User mode is traced as though IRQs are on, and SYSENTER
- * turned them off.
- */
- TRACE_IRQS_OFF
-
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
- ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
- "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+ ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
+ "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
jmp sysret32_from_system_call
.Lsysenter_fix_flags:
@@ -147,7 +146,7 @@ SYM_FUNC_START(entry_SYSENTER_compat)
popfq
jmp .Lsysenter_flags_fixed
SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
-SYM_FUNC_END(entry_SYSENTER_compat)
+SYM_CODE_END(entry_SYSENTER_compat)
/*
* 32-bit SYSCALL entry.
@@ -197,6 +196,7 @@ SYM_FUNC_END(entry_SYSENTER_compat)
* 0(%esp) arg6
*/
SYM_CODE_START(entry_SYSCALL_compat)
+ UNWIND_HINT_EMPTY
/* Interrupts are off on entry. */
swapgs
@@ -247,17 +247,13 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
pushq $0 /* pt_regs->r15 = 0 */
xorl %r15d, %r15d /* nospec r15 */
- /*
- * User mode is traced as though IRQs are on, and SYSENTER
- * turned them off.
- */
- TRACE_IRQS_OFF
+ UNWIND_HINT_REGS
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
- ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
- "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+ ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
+ "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
/* Opportunistic SYSRET */
sysret32_from_system_call:
@@ -266,7 +262,7 @@ sysret32_from_system_call:
* stack. So let's erase the thread stack right now.
*/
STACKLEAK_ERASE
- TRACE_IRQS_ON /* User mode traces as IRQs on. */
+
movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
@@ -340,6 +336,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
* ebp arg6
*/
SYM_CODE_START(entry_INT80_compat)
+ UNWIND_HINT_EMPTY
/*
* Interrupts are off on entry.
*/
@@ -361,8 +358,11 @@ SYM_CODE_START(entry_INT80_compat)
/* Need to switch before accessing the thread stack. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
/* In the Xen PV case we already run on the thread stack. */
- ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+ ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+
+ movq %rsp, %rdi
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
pushq 6*8(%rdi) /* regs->ss */
@@ -401,19 +401,12 @@ SYM_CODE_START(entry_INT80_compat)
xorl %r14d, %r14d /* nospec r14 */
pushq %r15 /* pt_regs->r15 */
xorl %r15d, %r15d /* nospec r15 */
- cld
- /*
- * User mode is traced as though IRQs are on, and the interrupt
- * gate turned them off.
- */
- TRACE_IRQS_OFF
+ UNWIND_HINT_REGS
+
+ cld
movq %rsp, %rdi
call do_int80_syscall_32
-.Lsyscall_32_done:
-
- /* Go back to user mode. */
- TRACE_IRQS_ON
jmp swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(entry_INT80_compat)
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index dbe4493b534e..ccd32877a3c4 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -3,7 +3,6 @@
* Save registers before calling assembly functions. This avoids
* disturbance of register allocation in some inline assembly constructs.
* Copyright 2001,2002 by Andi Kleen, SuSE Labs.
- * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
*/
#include <linux/linkage.h>
#include "calling.h"
@@ -37,15 +36,6 @@ SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
-#ifdef CONFIG_TRACE_IRQFLAGS
- THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
- THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
-#endif
-
#ifdef CONFIG_PREEMPTION
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
@@ -53,9 +43,7 @@ SYM_FUNC_END(\name)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
#endif
-#if defined(CONFIG_TRACE_IRQFLAGS) \
- || defined(CONFIG_DEBUG_LOCK_ALLOC) \
- || defined(CONFIG_PREEMPTION)
+#ifdef CONFIG_PREEMPTION
SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
popq %r11
popq %r10
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 54e03ab26ff3..04e65f0698f6 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -10,8 +10,11 @@ ARCH_REL_TYPE_ABS += R_386_GLOB_DAT|R_386_JMP_SLOT|R_386_RELATIVE
include $(srctree)/lib/vdso/Makefile
KBUILD_CFLAGS += $(DISABLE_LTO)
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
@@ -29,6 +32,9 @@ vobjs32-y += vdso32/vclock_gettime.o
# files to link into kernel
obj-y += vma.o
+KASAN_SANITIZE_vma.o := y
+UBSAN_SANITIZE_vma.o := y
+KCSAN_SANITIZE_vma.o := y
OBJECT_FILES_NON_STANDARD_vma.o := n
# vDSO images to build
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index e2137070386a..a54c6a401581 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -15,6 +15,7 @@
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
+#include <asm/idtentry.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
@@ -152,15 +153,11 @@ static inline bool hv_reenlightenment_available(void)
ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT;
}
-__visible void __irq_entry hyperv_reenlightenment_intr(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
- entering_ack_irq();
-
+ ack_APIC_irq();
inc_irq_stat(irq_hv_reenlightenment_count);
-
schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
-
- exiting_irq();
}
void set_hv_tscchange_cb(void (*cb)(void))
diff --git a/arch/x86/include/asm/acrn.h b/arch/x86/include/asm/acrn.h
deleted file mode 100644
index 4adb13f08af7..000000000000
--- a/arch/x86/include/asm/acrn.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_ACRN_H
-#define _ASM_X86_ACRN_H
-
-extern void acrn_hv_callback_vector(void);
-#ifdef CONFIG_TRACING
-#define trace_acrn_hv_callback_vector acrn_hv_callback_vector
-#endif
-
-extern void acrn_hv_vector_handler(struct pt_regs *regs);
-#endif /* _ASM_X86_ACRN_H */
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index c7df20e78b09..455066a06f60 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -57,6 +57,7 @@ struct threshold_bank {
/* initialized to the number of CPUs on the node sharing this bank */
refcount_t cpus;
+ unsigned int shared;
};
struct amd_northbridge {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 19e94af9cc5d..2cc44e957c31 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -519,39 +519,6 @@ static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
static inline void apic_smt_update(void) { }
#endif
-extern void irq_enter(void);
-extern void irq_exit(void);
-
-static inline void entering_irq(void)
-{
- irq_enter();
- kvm_set_cpu_l1tf_flush_l1d();
-}
-
-static inline void entering_ack_irq(void)
-{
- entering_irq();
- ack_APIC_irq();
-}
-
-static inline void ipi_entering_ack_irq(void)
-{
- irq_enter();
- ack_APIC_irq();
- kvm_set_cpu_l1tf_flush_l1d();
-}
-
-static inline void exiting_irq(void)
-{
- irq_exit();
-}
-
-static inline void exiting_ack_irq(void)
-{
- ack_APIC_irq();
- irq_exit();
-}
-
extern void ioapic_zap_locks(void);
#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 115127c7ad28..bf35e476a776 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -28,7 +28,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
* Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
* it's non-inlined function that increases binary size and stack usage.
*/
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -40,7 +40,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
*/
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -166,6 +166,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic_add_return arch_atomic_add_return
/**
* arch_atomic_sub_return - subtract integer and return
@@ -178,34 +179,39 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
+#define arch_atomic_sub_return arch_atomic_sub_return
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-static inline int arch_atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic_xchg arch_atomic_xchg
-static inline void arch_atomic_and(int i, atomic_t *v)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "andl %1,%0"
: "+m" (v->counter)
@@ -213,7 +219,7 @@ static inline void arch_atomic_and(int i, atomic_t *v)
: "memory");
}
-static inline int arch_atomic_fetch_and(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
@@ -221,8 +227,9 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
-static inline void arch_atomic_or(int i, atomic_t *v)
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "orl %1,%0"
: "+m" (v->counter)
@@ -230,7 +237,7 @@ static inline void arch_atomic_or(int i, atomic_t *v)
: "memory");
}
-static inline int arch_atomic_fetch_or(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
@@ -238,8 +245,9 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
-static inline void arch_atomic_xor(int i, atomic_t *v)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "xorl %1,%0"
: "+m" (v->counter)
@@ -247,7 +255,7 @@ static inline void arch_atomic_xor(int i, atomic_t *v)
: "memory");
}
-static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
@@ -255,6 +263,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
@@ -262,6 +271,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 52cfaecb13f9..5efd01b548d1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
return arch_cmpxchg64(&v->counter, o, n);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
/**
* arch_atomic64_xchg - xchg atomic64 variable
@@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
: "memory");
return o;
}
+#define arch_atomic64_xchg arch_atomic64_xchg
/**
* arch_atomic64_set - set atomic64 variable
@@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_add_return arch_atomic64_add_return
/*
* Other variants with different arithmetic operators:
@@ -149,6 +152,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
@@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
"S" (v) : "memory");
return (int)a;
}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
@@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
@@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 95c6ceac66b9..809bd010a751 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -19,7 +19,7 @@
*/
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -31,7 +31,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*/
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -159,37 +159,43 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic64_add_return arch_atomic64_add_return
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic64_xchg arch_atomic64_xchg
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
@@ -207,6 +213,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -224,6 +231,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -241,5 +249,6 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 0367efdc5b7a..35460fef39b8 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -201,8 +201,12 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
-static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
+ /*
+ * Because this is a plain access, we need to disable KCSAN here to
+ * avoid double instrumentation via instrumented bitops.
+ */
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index facba9bc30ca..fb34ff641e0a 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -70,14 +70,17 @@ do { \
#define HAVE_ARCH_BUG
#define BUG() \
do { \
+ instrumentation_begin(); \
_BUG_FLAGS(ASM_UD2, 0); \
unreachable(); \
} while (0)
#define __WARN_FLAGS(flags) \
do { \
+ instrumentation_begin(); \
_BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
annotate_reachable(); \
+ instrumentation_end(); \
} while (0)
#include <asm-generic/bug.h>
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 02c0078d3787..8902fdb7de13 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -11,15 +11,11 @@
#ifdef CONFIG_X86_64
/* Macro to enforce the same ordering and stack sizes */
-#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
+#define ESTACKS_MEMBERS(guardsize) \
char DF_stack_guard[guardsize]; \
char DF_stack[EXCEPTION_STKSZ]; \
char NMI_stack_guard[guardsize]; \
char NMI_stack[EXCEPTION_STKSZ]; \
- char DB2_stack_guard[guardsize]; \
- char DB2_stack[db2_holesize]; \
- char DB1_stack_guard[guardsize]; \
- char DB1_stack[EXCEPTION_STKSZ]; \
char DB_stack_guard[guardsize]; \
char DB_stack[EXCEPTION_STKSZ]; \
char MCE_stack_guard[guardsize]; \
@@ -28,12 +24,12 @@
/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
- ESTACKS_MEMBERS(0, 0)
+ ESTACKS_MEMBERS(0)
};
/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
- ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
+ ESTACKS_MEMBERS(PAGE_SIZE)
};
/*
@@ -42,8 +38,6 @@ struct cea_exception_stacks {
enum exception_stack_ordering {
ESTACK_DF,
ESTACK_NMI,
- ESTACK_DB2,
- ESTACK_DB1,
ESTACK_DB,
ESTACK_MCE,
N_EXCEPTION_STACKS
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 1a8609a15856..e89558a3fe4a 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_dr7);
native_set_debugreg(register, value)
#endif
-static inline unsigned long native_get_debugreg(int regno)
+static __always_inline unsigned long native_get_debugreg(int regno)
{
unsigned long val = 0; /* Damn you, gcc! */
@@ -47,7 +47,7 @@ static inline unsigned long native_get_debugreg(int regno)
return val;
}
-static inline void native_set_debugreg(int regno, unsigned long value)
+static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
switch (regno) {
case 0:
@@ -85,7 +85,7 @@ static inline void hw_breakpoint_disable(void)
set_debugreg(0UL, 3);
}
-static inline int hw_breakpoint_active(void)
+static __always_inline bool hw_breakpoint_active(void)
{
return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}
@@ -94,24 +94,38 @@ extern void aout_dump_debugregs(struct user *dump);
extern void hw_breakpoint_restore(void);
-#ifdef CONFIG_X86_64
-DECLARE_PER_CPU(int, debug_stack_usage);
-static inline void debug_stack_usage_inc(void)
+static __always_inline unsigned long local_db_save(void)
{
- __this_cpu_inc(debug_stack_usage);
+ unsigned long dr7;
+
+ if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
+ return 0;
+
+ get_debugreg(dr7, 7);
+ dr7 &= ~0x400; /* architecturally set bit */
+ if (dr7)
+ set_debugreg(0, 7);
+ /*
+ * Ensure the compiler doesn't lower the above statements into
+ * the critical section; disabling breakpoints late would not
+ * be good.
+ */
+ barrier();
+
+ return dr7;
}
-static inline void debug_stack_usage_dec(void)
+
+static __always_inline void local_db_restore(unsigned long dr7)
{
- __this_cpu_dec(debug_stack_usage);
+ /*
+ * Ensure the compiler doesn't raise this statement into
+ * the critical section; enabling breakpoints early would
+ * not be good.
+ */
+ barrier();
+ if (dr7)
+ set_debugreg(dr7, 7);
}
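
The two helpers above are meant to bracket code that must not trigger a
hardware breakpoint. A usage sketch (the surrounding function is
hypothetical):

	static void db_free_section(void)
	{
		unsigned long dr7 = local_db_save();	/* disarm breakpoints */

		/* ... work that must not recurse through #DB ... */

		local_db_restore(dr7);			/* re-arm what was saved */
	}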
-void debug_stack_set_zero(void);
-void debug_stack_reset(void);
-#else /* !X86_64 */
-static inline void debug_stack_set_zero(void) { }
-static inline void debug_stack_reset(void) { }
-static inline void debug_stack_usage_inc(void) { }
-static inline void debug_stack_usage_dec(void) { }
-#endif /* X86_64 */
#ifdef CONFIG_CPU_SUP_AMD
extern void set_dr_addr_mask(unsigned long mask, int dr);
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 68a99d2a5f33..1ced11d31932 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -40,11 +40,6 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
desc->l = 0;
}
-extern struct desc_ptr idt_descr;
-extern gate_desc idt_table[];
-extern const struct desc_ptr debug_idt_descr;
-extern gate_desc debug_idt_table[];
-
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
@@ -214,7 +209,7 @@ static inline void native_load_gdt(const struct desc_ptr *dtr)
asm volatile("lgdt %0"::"m" (*dtr));
}
-static inline void native_load_idt(const struct desc_ptr *dtr)
+static __always_inline void native_load_idt(const struct desc_ptr *dtr)
{
asm volatile("lidt %0"::"m" (*dtr));
}
@@ -386,64 +381,23 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
desc->limit1 = (limit >> 16) & 0xf;
}
-void update_intr_gate(unsigned int n, const void *addr);
void alloc_intr_gate(unsigned int n, const void *addr);
extern unsigned long system_vectors[];
-#ifdef CONFIG_X86_64
-DECLARE_PER_CPU(u32, debug_idt_ctr);
-static inline bool is_debug_idt_enabled(void)
-{
- if (this_cpu_read(debug_idt_ctr))
- return true;
-
- return false;
-}
-
-static inline void load_debug_idt(void)
-{
- load_idt((const struct desc_ptr *)&debug_idt_descr);
-}
-#else
-static inline bool is_debug_idt_enabled(void)
-{
- return false;
-}
-
-static inline void load_debug_idt(void)
-{
-}
-#endif
-
-/*
- * The load_current_idt() must be called with interrupts disabled
- * to avoid races. That way the IDT will always be set back to the expected
- * descriptor. It's also called when a CPU is being initialized, and
- * that doesn't need to disable interrupts, as nothing should be
- * bothering the CPU then.
- */
-static inline void load_current_idt(void)
-{
- if (is_debug_idt_enabled())
- load_debug_idt();
- else
- load_idt((const struct desc_ptr *)&idt_descr);
-}
-
+extern void load_current_idt(void);
extern void idt_setup_early_handler(void);
extern void idt_setup_early_traps(void);
extern void idt_setup_traps(void);
extern void idt_setup_apic_and_irq_gates(void);
+extern bool idt_is_f00f_address(unsigned long address);
#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
extern void idt_setup_ist_traps(void);
-extern void idt_setup_debugidt_traps(void);
#else
static inline void idt_setup_early_pf(void) { }
static inline void idt_setup_ist_traps(void) { }
-static inline void idt_setup_debugidt_traps(void) { }
#endif
extern void idt_invalidate(void *addr);
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
deleted file mode 100644
index 416422762845..000000000000
--- a/arch/x86/include/asm/entry_arch.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is designed to contain the BUILD_INTERRUPT specifications for
- * all of the extra named interrupt vectors used by the architecture.
- * Usually this is the Inter Process Interrupts (IPIs)
- */
-
-/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
- */
-#ifdef CONFIG_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-BUILD_INTERRUPT(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR)
-BUILD_INTERRUPT(reboot_interrupt, REBOOT_VECTOR)
-#endif
-
-#ifdef CONFIG_HAVE_KVM
-BUILD_INTERRUPT(kvm_posted_intr_ipi, POSTED_INTR_VECTOR)
-BUILD_INTERRUPT(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR)
-BUILD_INTERRUPT(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR)
-#endif
-
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
-
-#ifdef CONFIG_IRQ_WORK
-BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
-#endif
-
-#ifdef CONFIG_X86_THERMAL_VECTOR
-BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
-#endif
-
-#ifdef CONFIG_X86_MCE_THRESHOLD
-BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
-#endif
-
-#ifdef CONFIG_X86_MCE_AMD
-BUILD_INTERRUPT(deferred_error_interrupt, DEFERRED_ERROR_VECTOR)
-#endif
-#endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 4154bc5f6a4e..74c12437401e 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -28,28 +28,6 @@
#include <asm/irq.h>
#include <asm/sections.h>
-/* Interrupt handlers registered during init_IRQ */
-extern asmlinkage void apic_timer_interrupt(void);
-extern asmlinkage void x86_platform_ipi(void);
-extern asmlinkage void kvm_posted_intr_ipi(void);
-extern asmlinkage void kvm_posted_intr_wakeup_ipi(void);
-extern asmlinkage void kvm_posted_intr_nested_ipi(void);
-extern asmlinkage void error_interrupt(void);
-extern asmlinkage void irq_work_interrupt(void);
-extern asmlinkage void uv_bau_message_intr1(void);
-
-extern asmlinkage void spurious_interrupt(void);
-extern asmlinkage void thermal_interrupt(void);
-extern asmlinkage void reschedule_interrupt(void);
-
-extern asmlinkage void irq_move_cleanup_interrupt(void);
-extern asmlinkage void reboot_interrupt(void);
-extern asmlinkage void threshold_interrupt(void);
-extern asmlinkage void deferred_error_interrupt(void);
-
-extern asmlinkage void call_function_interrupt(void);
-extern asmlinkage void call_function_single_interrupt(void);
-
#ifdef CONFIG_X86_LOCAL_APIC
struct irq_data;
struct pci_dev;
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
new file mode 100644
index 000000000000..cf51c50eb356
--- /dev/null
+++ b/arch/x86/include/asm/idtentry.h
@@ -0,0 +1,652 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_IDTENTRY_H
+#define _ASM_X86_IDTENTRY_H
+
+/* Interrupts/Exceptions */
+#include <asm/trapnr.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/hardirq.h>
+
+#include <asm/irq_stack.h>
+
+void idtentry_enter_user(struct pt_regs *regs);
+void idtentry_exit_user(struct pt_regs *regs);
+
+bool idtentry_enter_cond_rcu(struct pt_regs *regs);
+void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
+
+/**
+ * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
+ * No error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_##func (maybe unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says it
+ * declares the entry points for usage in C code. There is an ASM variant
+ * as well which is used to emit the entry stubs in entry_32/64.S.
+ */
+#define DECLARE_IDTENTRY(vector, func) \
+ asmlinkage void asm_##func(void); \
+ asmlinkage void xen_asm_##func(void); \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_IDTENTRY - Emit code for simple IDT entry points
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code with interrupts disabled.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ *
+ * idtentry_enter() contains common code which has to be invoked before
+ * arbitrary code in the body. idtentry_exit() contains common code
+ * which has to run before returning to the low level assembly code.
+ */
+#define DEFINE_IDTENTRY(func) \
+static __always_inline void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ __##func (regs); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs)
+
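+
A usage sketch of the pattern described above. The handler name follows the
exc_* convention used elsewhere in this series; the body, including the
do_error_trap() call, is an assumption about how a simple trap handler ends
up looking once converted:

	DEFINE_IDTENTRY(exc_overflow)
	{
		/* Runs between the enter/exit helpers, interrupts disabled. */
		do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
	}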
+/* Special case for 32bit IRET 'trap' */
+#define DECLARE_IDTENTRY_SW DECLARE_IDTENTRY
+#define DEFINE_IDTENTRY_SW DEFINE_IDTENTRY
+
+/**
+ * DECLARE_IDTENTRY_ERRORCODE - Declare functions for simple IDT entry points
+ * Error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_##func (maybe unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Same as DECLARE_IDTENTRY, but has an extra error_code argument for the
+ * C-handler.
+ */
+#define DECLARE_IDTENTRY_ERRORCODE(vector, func) \
+ asmlinkage void asm_##func(void); \
+ asmlinkage void xen_asm_##func(void); \
+ __visible void func(struct pt_regs *regs, unsigned long error_code)
+
+/**
+ * DEFINE_IDTENTRY_ERRORCODE - Emit code for simple IDT entry points
+ * Error code pushed by hardware
+ * @func: Function name of the entry point
+ *
+ * Same as DEFINE_IDTENTRY, but has an extra error_code argument
+ */
+#define DEFINE_IDTENTRY_ERRORCODE(func) \
+static __always_inline void __##func(struct pt_regs *regs, \
+ unsigned long error_code); \
+ \
+__visible noinstr void func(struct pt_regs *regs, \
+ unsigned long error_code) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ __##func (regs, error_code); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs, \
+ unsigned long error_code)
+
+/**
+ * DECLARE_IDTENTRY_RAW - Declare functions for raw IDT entry points
+ * No error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY().
+ */
+#define DECLARE_IDTENTRY_RAW(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_RAW - Emit code for raw IDT entry points
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code with interrupts disabled.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ *
+ * Contrary to DEFINE_IDTENTRY() this does not invoke the
+ * idtentry_enter/exit() helpers before and after the body invocation. This
+ * needs to be done in the body itself if applicable. Use if extra work
+ * is required before the enter/exit() helpers are invoked.
+ */
+#define DEFINE_IDTENTRY_RAW(func) \
+__visible noinstr void func(struct pt_regs *regs)
+
+/**
+ * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
+ * Error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_ERRORCODE()
+ */
+#define DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func) \
+ DECLARE_IDTENTRY_ERRORCODE(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_RAW_ERRORCODE - Emit code for raw IDT entry points
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code with interrupts disabled.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ *
+ * Contrary to DEFINE_IDTENTRY_ERRORCODE() this does not invoke the
+ * idtentry_enter/exit() helpers before and after the body invocation. This
+ * needs to be done in the body itself if applicable. Use if extra work
+ * is required before the enter/exit() helpers are invoked.
+ */
+#define DEFINE_IDTENTRY_RAW_ERRORCODE(func) \
+__visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
+
+/**
+ * DECLARE_IDTENTRY_IRQ - Declare functions for device interrupt IDT entry
+ * points (common/spurious)
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_ERRORCODE()
+ */
+#define DECLARE_IDTENTRY_IRQ(vector, func) \
+ DECLARE_IDTENTRY_ERRORCODE(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points
+ * @func: Function name of the entry point
+ *
+ * The vector number is pushed by the low level entry stub and handed
+ * to the function as the error_code argument, which needs to be truncated
+ * to a u8 because the push is sign-extending.
+ *
+ * On 64-bit idtentry_enter/exit() are invoked in the ASM entry code before
+ * and after switching to the interrupt stack. On 32-bit this happens in C.
+ *
+ * irq_enter/exit_rcu() are invoked before the function body and the
+ * KVM L1D flush request is set.
+ */
+#define DEFINE_IDTENTRY_IRQ(func) \
+static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
+ \
+__visible noinstr void func(struct pt_regs *regs, \
+ unsigned long error_code) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ irq_enter_rcu(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ __##func (regs, (u8)error_code); \
+ irq_exit_rcu(); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+
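+
A sketch of a device-interrupt handler written against this macro. The
common_interrupt name matches the expected consumer; the dispatch helper is
a stand-in, not a real function:

	DEFINE_IDTENTRY_IRQ(common_interrupt)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		/* 'vector' is the u8-truncated value pushed by the entry stub. */
		dispatch_vector_irq(regs, vector);	/* hypothetical helper */

		set_irq_regs(old_regs);
	}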
+/**
+ * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_##func (maybe unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Maps to DECLARE_IDTENTRY().
+ */
+#define DECLARE_IDTENTRY_SYSVEC(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_SYSVEC - Emit code for system vector IDT entry points
+ * @func: Function name of the entry point
+ *
+ * idtentry_enter/exit() and irq_enter/exit_rcu() are invoked before the
+ * function body. KVM L1D flush request is set.
+ *
+ * Runs the function on the interrupt stack if the entry hit kernel mode
+ */
+#define DEFINE_IDTENTRY_SYSVEC(func) \
+static void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ irq_enter_rcu(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ run_on_irqstack_cond(__##func, regs, regs); \
+ irq_exit_rcu(); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static noinline void __##func(struct pt_regs *regs)
+
+/**
+ * DEFINE_IDTENTRY_SYSVEC_SIMPLE - Emit code for simple system vector IDT
+ * entry points
+ * @func: Function name of the entry point
+ *
+ * Runs the function on the interrupted stack. No switch to IRQ stack and
+ * only the minimal __irq_enter/exit() handling.
+ *
+ * Only use for 'empty' vectors like reschedule IPI and KVM posted
+ * interrupt vectors.
+ */
+#define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \
+static __always_inline void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ __irq_enter_raw(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ __##func (regs); \
+ __irq_exit_raw(); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs)
+
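As a concrete example of the 'simple' variant, the reschedule IPI conversion elsewhere in this series looks roughly like the sketch below (the exact body may differ; it is shown here only to make the macro usage tangible).

DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
}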
+/**
+ * DECLARE_IDTENTRY_XENCB - Declare functions for XEN HV callback entry point
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_##func (maybe unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Maps to DECLARE_IDTENTRY(). Distinct entry point to handle the 32/64-bit
+ * difference.
+ */
+#define DECLARE_IDTENTRY_XENCB(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+#ifdef CONFIG_X86_64
+/**
+ * DECLARE_IDTENTRY_IST - Declare functions for IST handling IDT entry points
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_RAW(), but also declares the NOIST C handler,
+ * which is called from the ASM entry point on user mode entry.
+ */
+#define DECLARE_IDTENTRY_IST(vector, func) \
+ DECLARE_IDTENTRY_RAW(vector, func); \
+ __visible void noist_##func(struct pt_regs *regs)
+
+/**
+ * DEFINE_IDTENTRY_IST - Emit code for IST entry points
+ * @func: Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW
+ */
+#define DEFINE_IDTENTRY_IST(func) \
+ DEFINE_IDTENTRY_RAW(func)
+
+/**
+ * DEFINE_IDTENTRY_NOIST - Emit code for NOIST entry points which
+ * belong to an IST entry point (MCE, DB)
+ * @func: Function name of the entry point. Must be the same as
+ * the function name of the corresponding IST variant
+ *
+ * Maps to DEFINE_IDTENTRY_RAW().
+ */
+#define DEFINE_IDTENTRY_NOIST(func) \
+ DEFINE_IDTENTRY_RAW(noist_##func)
+
+/**
+ * DECLARE_IDTENTRY_DF - Declare functions for double fault
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DECLARE_IDTENTRY_DF(vector, func) \
+ DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_DF - Emit code for double fault
+ * @func: Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_DF(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+
+#else /* CONFIG_X86_64 */
+
+/* Maps to a regular IDTENTRY on 32bit for now */
+# define DECLARE_IDTENTRY_IST DECLARE_IDTENTRY
+# define DEFINE_IDTENTRY_IST DEFINE_IDTENTRY
+
+/**
+ * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares two functions:
+ * - The ASM entry point: asm_##func
+ * - The C handler called from the C shim
+ */
+#define DECLARE_IDTENTRY_DF(vector, func) \
+ asmlinkage void asm_##func(void); \
+ __visible void func(struct pt_regs *regs, \
+ unsigned long error_code, \
+ unsigned long address)
+
+/**
+ * DEFINE_IDTENTRY_DF - Emit code for double fault on 32bit
+ * @func: Function name of the entry point
+ *
+ * This is called through the doublefault shim which already provides
+ * cr2 in the address argument.
+ */
+#define DEFINE_IDTENTRY_DF(func) \
+__visible noinstr void func(struct pt_regs *regs, \
+ unsigned long error_code, \
+ unsigned long address)
+
+#endif /* !CONFIG_X86_64 */
+
+/* C-Code mapping */
+#define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
+
+#define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
+#define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
+
+#define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
+
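To make the IST/NOIST pairing concrete, here is a hedged sketch of the #DB handlers in the style of the traps.c conversion in this series; exc_debug_kernel(), exc_debug_user() and debug_read_clear_dr6() are assumed helper names, not taken from this hunk.

/* Entry from kernel mode, runs on the IST stack */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());	/* assumed helpers */
}

/* Entry from user mode, runs on the regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());	/* assumed helpers */
}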
+/**
+ * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM
+ * indirection magic.
+ */
+#define DECLARE_IDTENTRY_XEN(vector, func) \
+ asmlinkage void xen_asm_exc_xen##func(void); \
+ asmlinkage void asm_exc_xen##func(void)
+
+#else /* !__ASSEMBLY__ */
+
+/*
+ * The ASM variants for DECLARE_IDTENTRY*() which emit the ASM entry stubs.
+ */
+#define DECLARE_IDTENTRY(vector, func) \
+ idtentry vector asm_##func func has_error_code=0
+
+#define DECLARE_IDTENTRY_ERRORCODE(vector, func) \
+ idtentry vector asm_##func func has_error_code=1
+
+/* Special case for 32bit IRET 'trap'. Do not emit ASM code */
+#define DECLARE_IDTENTRY_SW(vector, func)
+
+#define DECLARE_IDTENTRY_RAW(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+#define DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func) \
+ DECLARE_IDTENTRY_ERRORCODE(vector, func)
+
+/* Entries for common/spurious (device) interrupts */
+#define DECLARE_IDTENTRY_IRQ(vector, func) \
+ idtentry_irq vector func
+
+/* System vector entries */
+#define DECLARE_IDTENTRY_SYSVEC(vector, func) \
+ idtentry_sysvec vector func
+
+#ifdef CONFIG_X86_64
+# define DECLARE_IDTENTRY_MCE(vector, func) \
+ idtentry_mce_db vector asm_##func func
+
+# define DECLARE_IDTENTRY_DEBUG(vector, func) \
+ idtentry_mce_db vector asm_##func func
+
+# define DECLARE_IDTENTRY_DF(vector, func) \
+ idtentry_df vector asm_##func func
+
+# define DECLARE_IDTENTRY_XENCB(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+#else
+# define DECLARE_IDTENTRY_MCE(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+# define DECLARE_IDTENTRY_DEBUG(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+/* No ASM emitted for DF as this goes through a C shim */
+# define DECLARE_IDTENTRY_DF(vector, func)
+
+/* No ASM emitted for XEN hypervisor callback */
+# define DECLARE_IDTENTRY_XENCB(vector, func)
+
+#endif
+
+/* No ASM code emitted for NMI */
+#define DECLARE_IDTENTRY_NMI(vector, func)
+
+/* XEN NMI and DB wrapper */
+#define DECLARE_IDTENTRY_XEN(vector, func) \
+ idtentry vector asm_exc_xen##func exc_##func has_error_code=0
+
+/*
+ * ASM code to emit the common vector entry stubs where each stub is
+ * packed into 8 bytes.
+ *
+ * Note that the 'pushq imm8' is emitted via '.byte 0x6a, vector' because
+ * GCC treats the local vector variable as unsigned int and would expand
+ * all vectors above 0x7F to a 5-byte push. The original code adjusted the
+ * vector number into the signed byte range to avoid this. While clever,
+ * that is counterintuitive and requires the odd conversion back to a real
+ * vector number in the C entry points. Using .byte achieves the same thing
+ * and the only fixup needed in the C entry point is to mask off the bits
+ * above bit 7 because the push is sign extending.
+ */
+ .align 8
+SYM_CODE_START(irq_entries_start)
+ vector=FIRST_EXTERNAL_VECTOR
+ pos = .
+ .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ UNWIND_HINT_IRET_REGS
+ .byte 0x6a, vector
+ jmp asm_common_interrupt
+ nop
+ /* Ensure that the above is 8 bytes max */
+ . = pos + 8
+ pos=pos+8
+ vector=vector+1
+ .endr
+SYM_CODE_END(irq_entries_start)
+
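The fixup mentioned above is the (u8) cast in DEFINE_IDTENTRY_IRQ(). A standalone user space illustration (not kernel code) of the sign extension and the truncation that undoes it:

#include <stdio.h>

int main(void)
{
	/* 'pushq $0xec' stores the sign-extended byte 0xec ... */
	long error_code = (signed char)0xec;		/* -20 */
	/* ... and the C entry point recovers the vector by truncating */
	unsigned char vector = (unsigned char)error_code;

	printf("error_code=%ld vector=%u\n", error_code, (unsigned)vector);
	return 0;					/* prints -20 and 236 */
}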
+#ifdef CONFIG_X86_LOCAL_APIC
+ .align 8
+SYM_CODE_START(spurious_entries_start)
+ vector=FIRST_SYSTEM_VECTOR
+ pos = .
+ .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+ UNWIND_HINT_IRET_REGS
+ .byte 0x6a, vector
+ jmp asm_spurious_interrupt
+ nop
+ /* Ensure that the above is 8 bytes max */
+ . = pos + 8
+ pos=pos+8
+ vector=vector+1
+ .endr
+SYM_CODE_END(spurious_entries_start)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The actual entry points. Note that DECLARE_IDTENTRY*() serves two
+ * purposes:
+ * - provide the function declarations when included from C-Code
+ * - emit the ASM stubs when included from entry_32/64.S
+ *
+ * This avoids duplicate defines and ensures that everything is consistent.
+ */
+
+/*
+ * Dummy trap number so the low level ASM macro vector number checks do not
+ * match, which results in emitting plain IDTENTRY stubs without bells and
+ * whistles.
+ */
+#define X86_TRAP_OTHER 0xFFFF
+
+/* Simple exception entry points. No hardware error code */
+DECLARE_IDTENTRY(X86_TRAP_DE, exc_divide_error);
+DECLARE_IDTENTRY(X86_TRAP_OF, exc_overflow);
+DECLARE_IDTENTRY(X86_TRAP_BR, exc_bounds);
+DECLARE_IDTENTRY(X86_TRAP_NM, exc_device_not_available);
+DECLARE_IDTENTRY(X86_TRAP_OLD_MF, exc_coproc_segment_overrun);
+DECLARE_IDTENTRY(X86_TRAP_SPURIOUS, exc_spurious_interrupt_bug);
+DECLARE_IDTENTRY(X86_TRAP_MF, exc_coprocessor_error);
+DECLARE_IDTENTRY(X86_TRAP_XF, exc_simd_coprocessor_error);
+
+/* 32bit software IRET trap. Do not emit ASM code */
+DECLARE_IDTENTRY_SW(X86_TRAP_IRET, iret_error);
+
+/* Simple exception entries with error code pushed by hardware */
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_TS, exc_invalid_tss);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_NP, exc_segment_not_present);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_SS, exc_stack_segment);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_GP, exc_general_protection);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_AC, exc_alignment_check);
+
+/* Raw exception entries which need extra work */
+DECLARE_IDTENTRY_RAW(X86_TRAP_UD, exc_invalid_op);
+DECLARE_IDTENTRY_RAW(X86_TRAP_BP, exc_int3);
+DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault);
+
+#ifdef CONFIG_X86_MCE
+DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
+#endif
+
+/* NMI */
+DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
+DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi);
+
+/* #DB */
+DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug);
+DECLARE_IDTENTRY_XEN(X86_TRAP_DB, debug);
+
+/* #DF */
+DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault);
+
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+#endif
+
+/* Device interrupts common/spurious */
+DECLARE_IDTENTRY_IRQ(X86_TRAP_OTHER, common_interrupt);
+#ifdef CONFIG_X86_LOCAL_APIC
+DECLARE_IDTENTRY_IRQ(X86_TRAP_OTHER, spurious_interrupt);
+#endif
+
+/* System vector entry points */
+#ifdef CONFIG_X86_LOCAL_APIC
+DECLARE_IDTENTRY_SYSVEC(ERROR_APIC_VECTOR, sysvec_error_interrupt);
+DECLARE_IDTENTRY_SYSVEC(SPURIOUS_APIC_VECTOR, sysvec_spurious_apic_interrupt);
+DECLARE_IDTENTRY_SYSVEC(LOCAL_TIMER_VECTOR, sysvec_apic_timer_interrupt);
+DECLARE_IDTENTRY_SYSVEC(X86_PLATFORM_IPI_VECTOR, sysvec_x86_platform_ipi);
+#endif
+
+#ifdef CONFIG_SMP
+DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi);
+DECLARE_IDTENTRY_SYSVEC(IRQ_MOVE_CLEANUP_VECTOR, sysvec_irq_move_cleanup);
+DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot);
+DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single);
+DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+# ifdef CONFIG_X86_UV
+DECLARE_IDTENTRY_SYSVEC(UV_BAU_MESSAGE, sysvec_uv_bau_message);
+# endif
+
+# ifdef CONFIG_X86_MCE_THRESHOLD
+DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
+# endif
+
+# ifdef CONFIG_X86_MCE_AMD
+DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error);
+# endif
+
+# ifdef CONFIG_X86_THERMAL_VECTOR
+DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal);
+# endif
+
+# ifdef CONFIG_IRQ_WORK
+DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
+# endif
+#endif
+
+#ifdef CONFIG_HAVE_KVM
+DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi);
+DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi);
+DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi);
+#endif
+
+#if IS_ENABLED(CONFIG_HYPERV)
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_STIMER0_VECTOR, sysvec_hyperv_stimer0);
+#endif
+
+#if IS_ENABLED(CONFIG_ACRN_GUEST)
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_acrn_hv_callback);
+#endif
+
+#ifdef CONFIG_XEN_PVHVM
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_xen_hvm_callback);
+#endif
+
+#undef X86_TRAP_OTHER
+
+#endif
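
For context, the asm_##func stubs declared above are what the IDT setup tables reference. A hedged sketch in the style of arch/x86/kernel/idt.c (INTG() is the file-local gate helper used there; the table below is illustrative, not the real def_idts[]):

static const __initconst struct idt_data example_idts[] = {
	INTG(X86_TRAP_DE,	asm_exc_divide_error),
	INTG(X86_TRAP_GP,	asm_exc_general_protection),
	INTG(X86_TRAP_PF,	asm_exc_page_fault),
};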
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 8f1e94f29a16..a338a6deb950 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -89,6 +89,8 @@
#define INTEL_FAM6_COMETLAKE 0xA5
#define INTEL_FAM6_COMETLAKE_L 0xA6
+#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 72fba0eeeb30..528c8a71fe7f 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -11,6 +11,13 @@
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>
+/*
+ * The irq entry code is in the noinstr section and the start/end of
+ * __irqentry_text is emitted via labels. Make the build fail if
+ * something moves a C function into the __irq_entry section.
+ */
+#define __irq_entry __invalid_section
+
static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
@@ -26,17 +33,14 @@ extern void fixup_irqs(void);
#ifdef CONFIG_HAVE_KVM
extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
-extern __visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs);
-extern __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs);
-extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs);
#endif
extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);
-extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+extern void __handle_irq(struct irq_desc *desc, struct pt_regs *regs);
-extern __visible void do_IRQ(struct pt_regs *regs);
+extern __visible void do_IRQ(struct pt_regs *regs, unsigned long vector);
extern void init_ISA_irqs(void);
@@ -46,7 +50,6 @@ extern void __init init_IRQ(void);
void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
bool exclude_self);
-extern __visible void smp_x86_platform_ipi(struct pt_regs *regs);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
deleted file mode 100644
index 187ce59aea28..000000000000
--- a/arch/x86/include/asm/irq_regs.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack, stored in the per-cpu area.
- *
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-#ifndef _ASM_X86_IRQ_REGS_H
-#define _ASM_X86_IRQ_REGS_H
-
-#include <asm/percpu.h>
-
-#define ARCH_HAS_OWN_IRQ_REGS
-
-DECLARE_PER_CPU(struct pt_regs *, irq_regs);
-
-static inline struct pt_regs *get_irq_regs(void)
-{
- return __this_cpu_read(irq_regs);
-}
-
-static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
-{
- struct pt_regs *old_regs;
-
- old_regs = get_irq_regs();
- __this_cpu_write(irq_regs, new_regs);
-
- return old_regs;
-}
-
-#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
new file mode 100644
index 000000000000..4ae66f097101
--- /dev/null
+++ b/arch/x86/include/asm/irq_stack.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_IRQ_STACK_H
+#define _ASM_X86_IRQ_STACK_H
+
+#include <linux/ptrace.h>
+
+#include <asm/processor.h>
+
+#ifdef CONFIG_X86_64
+static __always_inline bool irqstack_active(void)
+{
+ return __this_cpu_read(irq_count) != -1;
+}
+
+void asm_call_on_stack(void *sp, void *func, void *arg);
+
+static __always_inline void __run_on_irqstack(void *func, void *arg)
+{
+ void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+ __this_cpu_add(irq_count, 1);
+ asm_call_on_stack(tos - 8, func, arg);
+ __this_cpu_sub(irq_count, 1);
+}
+
+#else /* CONFIG_X86_64 */
+static inline bool irqstack_active(void) { return false; }
+static inline void __run_on_irqstack(void *func, void *arg) { }
+#endif /* !CONFIG_X86_64 */
+
+static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
+ if (!regs)
+ return !irqstack_active();
+ return !user_mode(regs) && !irqstack_active();
+}
+
+static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+ struct pt_regs *regs)
+{
+ void (*__func)(void *arg) = func;
+
+ lockdep_assert_irqs_disabled();
+
+ if (irq_needs_irq_stack(regs))
+ __run_on_irqstack(__func, arg);
+ else
+ __func(arg);
+}
+
+#endif
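
A usage sketch for the new helper. The real caller in this series is the DEFINE_IDTENTRY_SYSVEC() expansion earlier in this diff, which passes regs as both the argument and the entry regs; the functions below are illustrative only.

static void demo_handler_body(struct pt_regs *regs)
{
	/* real work would go here */
}

static void demo_dispatch(struct pt_regs *regs)
{
	/*
	 * Switches to the hard IRQ stack unless the entry came from user
	 * mode or the IRQ stack is already in use; same calling pattern
	 * as the sysvec wrapper.
	 */
	run_on_irqstack_cond(demo_handler_body, regs, regs);
}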
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index 80b35e3adf03..800ffce0db29 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -10,7 +10,6 @@ static inline bool arch_irq_work_has_interrupt(void)
return boot_cpu_has(X86_FEATURE_APIC);
}
extern void arch_irq_work_raise(void);
-extern __visible void smp_irq_work_interrupt(struct pt_regs *regs);
#else
static inline bool arch_irq_work_has_interrupt(void)
{
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 8a0e56e1dcc9..02a0cf547d7b 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -17,7 +17,7 @@
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
-extern inline unsigned long native_save_fl(void)
+extern __always_inline unsigned long native_save_fl(void)
{
unsigned long flags;
@@ -44,12 +44,12 @@ extern inline void native_restore_fl(unsigned long flags)
:"memory", "cc");
}
-static inline void native_irq_disable(void)
+static __always_inline void native_irq_disable(void)
{
asm volatile("cli": : :"memory");
}
-static inline void native_irq_enable(void)
+static __always_inline void native_irq_enable(void)
{
asm volatile("sti": : :"memory");
}
@@ -74,22 +74,22 @@ static inline __cpuidle void native_halt(void)
#ifndef __ASSEMBLY__
#include <linux/types.h>
-static inline notrace unsigned long arch_local_save_flags(void)
+static __always_inline unsigned long arch_local_save_flags(void)
{
return native_save_fl();
}
-static inline notrace void arch_local_irq_restore(unsigned long flags)
+static __always_inline void arch_local_irq_restore(unsigned long flags)
{
native_restore_fl(flags);
}
-static inline notrace void arch_local_irq_disable(void)
+static __always_inline void arch_local_irq_disable(void)
{
native_irq_disable();
}
-static inline notrace void arch_local_irq_enable(void)
+static __always_inline void arch_local_irq_enable(void)
{
native_irq_enable();
}
@@ -115,7 +115,7 @@ static inline __cpuidle void halt(void)
/*
* For spinlocks, etc:
*/
-static inline notrace unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long arch_local_irq_save(void)
{
unsigned long flags = arch_local_save_flags();
arch_local_irq_disable();
@@ -159,12 +159,12 @@ static inline notrace unsigned long arch_local_irq_save(void)
#endif /* CONFIG_PARAVIRT_XXL */
#ifndef __ASSEMBLY__
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
}
-static inline int arch_irqs_disabled(void)
+static __always_inline int arch_irqs_disabled(void)
{
unsigned long flags = arch_local_save_flags();
@@ -172,38 +172,4 @@ static inline int arch_irqs_disabled(void)
}
#endif /* !__ASSEMBLY__ */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_TRACE_IRQFLAGS
-# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
-# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
-#else
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_X86_64
-# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
-# define LOCKDEP_SYS_EXIT_IRQ \
- TRACE_IRQS_ON; \
- sti; \
- call lockdep_sys_exit_thunk; \
- cli; \
- TRACE_IRQS_OFF;
-# else
-# define LOCKDEP_SYS_EXIT \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call lockdep_sys_exit; \
- popl %edx; \
- popl %ecx; \
- popl %eax;
-# define LOCKDEP_SYS_EXIT_IRQ
-# endif
-#else
-# define LOCKDEP_SYS_EXIT
-# define LOCKDEP_SYS_EXIT_IRQ
-#endif
-#endif /* __ASSEMBLY__ */
-
#endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1da5858501ca..f8998e97457f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1306,7 +1306,6 @@ struct kvm_arch_async_pf {
extern u64 __read_mostly host_efer;
extern struct kvm_x86_ops kvm_x86_ops;
-extern struct kmem_cache *x86_fpu_cache;
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
@@ -1671,7 +1670,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
unsigned long *vcpu_bitmap);
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 57fd1966c4ea..49d3a9edb06f 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -141,7 +141,7 @@ static inline void kvm_disable_steal_time(void)
return;
}
-static inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
return false;
}
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index f9cea081c05b..cf503824529c 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -127,6 +127,17 @@
#define MSR_AMD64_SMCA_MCx_DEADDR(x) (MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISCy(x, y) ((MSR_AMD64_SMCA_MC0_MISC1 + y) + (0x10*(x)))
+#define XEC(x, mask) (((x) >> 16) & mask)
+
+/* mce.kflags flag bits for logging etc. */
+#define MCE_HANDLED_CEC BIT_ULL(0)
+#define MCE_HANDLED_UC BIT_ULL(1)
+#define MCE_HANDLED_EXTLOG BIT_ULL(2)
+#define MCE_HANDLED_NFIT BIT_ULL(3)
+#define MCE_HANDLED_EDAC BIT_ULL(4)
+#define MCE_HANDLED_MCELOG BIT_ULL(5)
+#define MCE_IN_KERNEL_RECOV BIT_ULL(6)
+
/*
* This structure contains all data related to the MCE log. Also
* carries a signature to make it easier to find from external
@@ -142,14 +153,16 @@ struct mce_log_buffer {
struct mce entry[];
};
+/* Highest last */
enum mce_notifier_prios {
- MCE_PRIO_FIRST = INT_MAX,
- MCE_PRIO_UC = INT_MAX - 1,
- MCE_PRIO_EXTLOG = INT_MAX - 2,
- MCE_PRIO_NFIT = INT_MAX - 3,
- MCE_PRIO_EDAC = INT_MAX - 4,
- MCE_PRIO_MCELOG = 1,
- MCE_PRIO_LOWEST = 0,
+ MCE_PRIO_LOWEST,
+ MCE_PRIO_MCELOG,
+ MCE_PRIO_EDAC,
+ MCE_PRIO_NFIT,
+ MCE_PRIO_EXTLOG,
+ MCE_PRIO_UC,
+ MCE_PRIO_EARLY,
+ MCE_PRIO_CEC
};
struct notifier_block;
@@ -238,7 +251,7 @@ extern void mce_disable_bank(int bank);
/*
* Exception handler
*/
-void do_machine_check(struct pt_regs *, long);
+void do_machine_check(struct pt_regs *pt_regs);
/*
* Threshold handler
@@ -347,5 +360,4 @@ umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return
#endif
static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
-
#endif /* _ASM_X86_MCE_H */
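
A hedged sketch of how a consumer on the MCE decode chain would use the new pieces: register at one of the reordered priorities and set a MCE_HANDLED_* bit in mce.kflags to claim a record. The notifier below is illustrative, not from this patch; registration would go through mce_register_decode_chain().

static int demo_mce_handler(struct notifier_block *nb, unsigned long val,
			    void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	/* Mark the record as handled so the default notifier stays quiet */
	m->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}

static struct notifier_block demo_mce_nb = {
	.notifier_call	= demo_mce_handler,
	.priority	= MCE_PRIO_EDAC,
};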
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index d30805ed323e..60b944dd2df1 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -54,20 +54,8 @@ typedef int (*hyperv_fill_flush_list_func)(
vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
#define hv_get_raw_timer() rdtsc_ordered()
-void hyperv_callback_vector(void);
-void hyperv_reenlightenment_vector(void);
-#ifdef CONFIG_TRACING
-#define trace_hyperv_callback_vector hyperv_callback_vector
-#endif
void hyperv_vector_handler(struct pt_regs *regs);
-/*
- * Routines for stimer0 Direct Mode handling.
- * On x86/x64, there are no percpu actions to take.
- */
-void hv_stimer0_vector_handler(struct pt_regs *regs);
-void hv_stimer0_callback_vector(void);
-
static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}
@@ -226,7 +214,6 @@ void hyperv_setup_mmu_ops(void);
void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);
-void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index d52d1aacdd97..e7752b4038ff 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -262,7 +262,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
* combination with microcode which triggers a CPU buffer flush when the
* instruction is executed.
*/
-static inline void mds_clear_cpu_buffers(void)
+static __always_inline void mds_clear_cpu_buffers(void)
{
static const u16 ds = __KERNEL_DS;
@@ -283,7 +283,7 @@ static inline void mds_clear_cpu_buffers(void)
*
* Clear CPU buffers if the corresponding static key is enabled
*/
-static inline void mds_user_clear_cpu_buffers(void)
+static __always_inline void mds_user_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_user_clear))
mds_clear_cpu_buffers();
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index ef76a04b4daf..d7acae4120d5 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,13 +32,6 @@ extern pmd_t initial_pg_pmd[];
void paging_init(void);
void sync_initial_page_table(void);
-/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok( ..)'
- */
-#undef TEST_ACCESS_OK
-
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 29ee0c088009..42cd333616c4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -823,7 +823,7 @@ static inline void prefetch(const void *x)
* Useful for spinlocks to avoid one state transition in the
* cache coherency protocol:
*/
-static inline void prefetchw(const void *x)
+static __always_inline void prefetchw(const void *x)
{
alternative_input(BASE_PREFETCH, "prefetchw %P1",
X86_FEATURE_3DNOWPREFETCH,
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6d6475fdd327..ebedeab48704 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
* On x86_64, vm86 mode is mercifully nonexistent, and we don't need
* the extra check.
*/
-static inline int user_mode(struct pt_regs *regs)
+static __always_inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index ec2c0a094b5d..5948218f35c5 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -86,28 +86,35 @@ int set_direct_map_default_noflush(struct page *page);
extern int kernel_set_to_readonly;
#ifdef CONFIG_X86_64
-static inline int set_mce_nospec(unsigned long pfn)
+/*
+ * Prevent speculative access to the page by either unmapping
+ * it (if we do not require access to any part of the page) or
+ * marking it uncacheable (if we want to try to retrieve data
+ * from non-poisoned lines in the page).
+ */
+static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
unsigned long decoy_addr;
int rc;
/*
- * Mark the linear address as UC to make sure we don't log more
- * errors because of speculative access to the page.
* We would like to just call:
- * set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
+ * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
* but doing that would radically increase the odds of a
* speculative access to the poison page because we'd have
* the virtual address of the kernel 1:1 mapping sitting
* around in registers.
* Instead we get tricky. We create a non-canonical address
* that looks just like the one we want, but has bit 63 flipped.
- * This relies on set_memory_uc() properly sanitizing any __pa()
+ * This relies on set_memory_XX() properly sanitizing any __pa()
* results with __PHYSICAL_MASK or PTE_PFN_MASK.
*/
decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
- rc = set_memory_uc(decoy_addr, 1);
+ if (unmap)
+ rc = set_memory_np(decoy_addr, 1);
+ else
+ rc = set_memory_uc(decoy_addr, 1);
if (rc)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;
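
The decoy-address trick in the comment above can be checked in isolation. A standalone user space sketch (the PAGE_OFFSET value is an assumption, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT		12
#define DEMO_PAGE_OFFSET	0xffff888000000000ULL	/* assumed 4-level base */
#define DEMO_PHYS_MASK		((1ULL << 52) - 1)

int main(void)
{
	uint64_t pfn = 0x12345;
	/* Same arithmetic as set_mce_nospec(): flip bit 63 of the 1:1 base */
	uint64_t decoy = (pfn << DEMO_PAGE_SHIFT) +
			 (DEMO_PAGE_OFFSET ^ (1ULL << 63));
	/* What __pa() computes for a direct-map address ... */
	uint64_t phys = decoy - DEMO_PAGE_OFFSET;

	/* ... still carries bit 63, which the physical mask strips again */
	printf("decoy=%#llx pfn=%#llx\n",
	       (unsigned long long)decoy,
	       (unsigned long long)((phys & DEMO_PHYS_MASK) >> DEMO_PAGE_SHIFT));
	return 0;
}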
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 6d37b8fcfc77..eb8e781c4353 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -7,6 +7,7 @@
#include <asm/nops.h>
#include <asm/processor-flags.h>
+#include <linux/irqflags.h>
#include <linux/jump_label.h>
/*
@@ -27,14 +28,14 @@ static inline unsigned long native_read_cr0(void)
return val;
}
-static inline unsigned long native_read_cr2(void)
+static __always_inline unsigned long native_read_cr2(void)
{
unsigned long val;
asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
-static inline void native_write_cr2(unsigned long val)
+static __always_inline void native_write_cr2(unsigned long val)
{
asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
@@ -129,7 +130,16 @@ static inline void native_wbinvd(void)
asm volatile("wbinvd": : :"memory");
}
-extern asmlinkage void native_load_gs_index(unsigned);
+extern asmlinkage void asm_load_gs_index(unsigned int selector);
+
+static inline void native_load_gs_index(unsigned int selector)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ asm_load_gs_index(selector);
+ local_irq_restore(flags);
+}
static inline unsigned long __read_cr4(void)
{
@@ -150,12 +160,12 @@ static inline void write_cr0(unsigned long x)
native_write_cr0(x);
}
-static inline unsigned long read_cr2(void)
+static __always_inline unsigned long read_cr2(void)
{
return native_read_cr2();
}
-static inline void write_cr2(unsigned long x)
+static __always_inline void write_cr2(unsigned long x)
{
native_write_cr2(x);
}
@@ -186,7 +196,7 @@ static inline void wbinvd(void)
#ifdef CONFIG_X86_64
-static inline void load_gs_index(unsigned selector)
+static inline void load_gs_index(unsigned int selector)
{
native_load_gs_index(selector);
}
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 67315fa3956a..6593b42cb379 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -64,7 +64,7 @@ extern void text_poke_finish(void);
#define DISP32_SIZE 4
-static inline int text_opcode_size(u8 opcode)
+static __always_inline int text_opcode_size(u8 opcode)
{
int size = 0;
@@ -118,12 +118,14 @@ extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;
#ifndef CONFIG_UML_X86
-static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+static __always_inline
+void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
}
-static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+static __always_inline
+void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
/*
* The int3 handler in entry_64.S adds a gap between the
@@ -138,7 +140,8 @@ static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
*(unsigned long *)regs->sp = val;
}
-static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+static __always_inline
+void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
int3_emulate_jmp(regs, func);
diff --git a/arch/x86/include/asm/trace/common.h b/arch/x86/include/asm/trace/common.h
index 57c8da027d99..f0f9bcdb74d9 100644
--- a/arch/x86/include/asm/trace/common.h
+++ b/arch/x86/include/asm/trace/common.h
@@ -5,12 +5,8 @@
DECLARE_STATIC_KEY_FALSE(trace_pagefault_key);
#define trace_pagefault_enabled() \
static_branch_unlikely(&trace_pagefault_key)
-DECLARE_STATIC_KEY_FALSE(trace_resched_ipi_key);
-#define trace_resched_ipi_enabled() \
- static_branch_unlikely(&trace_resched_ipi_key)
#else
static inline bool trace_pagefault_enabled(void) { return false; }
-static inline bool trace_resched_ipi_enabled(void) { return false; }
#endif
#endif
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 33b9d0f0aafe..88e7f0f3bf62 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -10,9 +10,6 @@
#ifdef CONFIG_X86_LOCAL_APIC
-extern int trace_resched_ipi_reg(void);
-extern void trace_resched_ipi_unreg(void);
-
DECLARE_EVENT_CLASS(x86_irq_vector,
TP_PROTO(int vector),
@@ -37,18 +34,6 @@ DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
TP_PROTO(int vector), \
TP_ARGS(vector), NULL, NULL);
-#define DEFINE_RESCHED_IPI_EVENT(name) \
-DEFINE_EVENT_FN(x86_irq_vector, name##_entry, \
- TP_PROTO(int vector), \
- TP_ARGS(vector), \
- trace_resched_ipi_reg, \
- trace_resched_ipi_unreg); \
-DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
- TP_PROTO(int vector), \
- TP_ARGS(vector), \
- trace_resched_ipi_reg, \
- trace_resched_ipi_unreg);
-
/*
* local_timer - called when entering/exiting a local timer interrupt
* vector handler
@@ -99,7 +84,7 @@ TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
/*
* reschedule - called when entering/exiting a reschedule vector handler
*/
-DEFINE_RESCHED_IPI_EVENT(reschedule);
+DEFINE_IRQ_VECTOR_EVENT(reschedule);
/*
* call_function - called when entering/exiting a call function interrupt
diff --git a/arch/x86/include/asm/trapnr.h b/arch/x86/include/asm/trapnr.h
new file mode 100644
index 000000000000..082f45631fa9
--- /dev/null
+++ b/arch/x86/include/asm/trapnr.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_TRAPNR_H
+#define _ASM_X86_TRAPNR_H
+
+/* Interrupts/Exceptions */
+
+#define X86_TRAP_DE 0 /* Divide-by-zero */
+#define X86_TRAP_DB 1 /* Debug */
+#define X86_TRAP_NMI 2 /* Non-maskable Interrupt */
+#define X86_TRAP_BP 3 /* Breakpoint */
+#define X86_TRAP_OF 4 /* Overflow */
+#define X86_TRAP_BR 5 /* Bound Range Exceeded */
+#define X86_TRAP_UD 6 /* Invalid Opcode */
+#define X86_TRAP_NM 7 /* Device Not Available */
+#define X86_TRAP_DF 8 /* Double Fault */
+#define X86_TRAP_OLD_MF 9 /* Coprocessor Segment Overrun */
+#define X86_TRAP_TS 10 /* Invalid TSS */
+#define X86_TRAP_NP 11 /* Segment Not Present */
+#define X86_TRAP_SS 12 /* Stack Segment Fault */
+#define X86_TRAP_GP 13 /* General Protection Fault */
+#define X86_TRAP_PF 14 /* Page Fault */
+#define X86_TRAP_SPURIOUS 15 /* Spurious Interrupt */
+#define X86_TRAP_MF 16 /* x87 Floating-Point Exception */
+#define X86_TRAP_AC 17 /* Alignment Check */
+#define X86_TRAP_MC 18 /* Machine Check */
+#define X86_TRAP_XF 19 /* SIMD Floating-Point Exception */
+#define X86_TRAP_VE 20 /* Virtualization Exception */
+#define X86_TRAP_CP 21 /* Control Protection Exception */
+#define X86_TRAP_IRET 32 /* IRET Exception */
+
+#endif
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 2ae904bf25e4..714b1a30e7b0 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -6,85 +6,9 @@
#include <linux/kprobes.h>
#include <asm/debugreg.h>
+#include <asm/idtentry.h>
#include <asm/siginfo.h> /* TRAP_TRACE, ... */
-#define dotraplinkage __visible
-
-asmlinkage void divide_error(void);
-asmlinkage void debug(void);
-asmlinkage void nmi(void);
-asmlinkage void int3(void);
-asmlinkage void overflow(void);
-asmlinkage void bounds(void);
-asmlinkage void invalid_op(void);
-asmlinkage void device_not_available(void);
-#ifdef CONFIG_X86_64
-asmlinkage void double_fault(void);
-#endif
-asmlinkage void coprocessor_segment_overrun(void);
-asmlinkage void invalid_TSS(void);
-asmlinkage void segment_not_present(void);
-asmlinkage void stack_segment(void);
-asmlinkage void general_protection(void);
-asmlinkage void page_fault(void);
-asmlinkage void async_page_fault(void);
-asmlinkage void spurious_interrupt_bug(void);
-asmlinkage void coprocessor_error(void);
-asmlinkage void alignment_check(void);
-#ifdef CONFIG_X86_MCE
-asmlinkage void machine_check(void);
-#endif /* CONFIG_X86_MCE */
-asmlinkage void simd_coprocessor_error(void);
-
-#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
-asmlinkage void xen_divide_error(void);
-asmlinkage void xen_xennmi(void);
-asmlinkage void xen_xendebug(void);
-asmlinkage void xen_int3(void);
-asmlinkage void xen_overflow(void);
-asmlinkage void xen_bounds(void);
-asmlinkage void xen_invalid_op(void);
-asmlinkage void xen_device_not_available(void);
-asmlinkage void xen_double_fault(void);
-asmlinkage void xen_coprocessor_segment_overrun(void);
-asmlinkage void xen_invalid_TSS(void);
-asmlinkage void xen_segment_not_present(void);
-asmlinkage void xen_stack_segment(void);
-asmlinkage void xen_general_protection(void);
-asmlinkage void xen_page_fault(void);
-asmlinkage void xen_spurious_interrupt_bug(void);
-asmlinkage void xen_coprocessor_error(void);
-asmlinkage void xen_alignment_check(void);
-#ifdef CONFIG_X86_MCE
-asmlinkage void xen_machine_check(void);
-#endif /* CONFIG_X86_MCE */
-asmlinkage void xen_simd_coprocessor_error(void);
-#endif
-
-dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code);
-dotraplinkage void do_debug(struct pt_regs *regs, long error_code);
-dotraplinkage void do_nmi(struct pt_regs *regs, long error_code);
-dotraplinkage void do_int3(struct pt_regs *regs, long error_code);
-dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
-dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
-dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
-dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
-dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
-dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
-dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
-dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code);
-dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code);
-dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
-dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code);
-dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code);
-dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code);
-dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code);
-#ifdef CONFIG_X86_32
-dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code);
-#endif
-dotraplinkage void do_mce(struct pt_regs *regs, long error_code);
-
#ifdef CONFIG_X86_64
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
asmlinkage __visible notrace
@@ -92,6 +16,11 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
void __init trap_init(void);
#endif
+#ifdef CONFIG_X86_F00F_BUG
+/* For handling the F00F bug */
+void handle_invalid_op(struct pt_regs *regs);
+#endif
+
static inline int get_si_code(unsigned long condition)
{
if (condition & DR_STEP)
@@ -105,16 +34,6 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
void math_emulate(struct math_emu_info *);
-#ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
-asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
-asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
-#endif
-
-void smp_apic_timer_interrupt(struct pt_regs *regs);
-void smp_spurious_interrupt(struct pt_regs *regs);
-void smp_error_interrupt(struct pt_regs *regs);
-asmlinkage void smp_irq_move_cleanup_interrupt(void);
#ifdef CONFIG_VMAP_STACK
void __noreturn handle_stack_overflow(const char *message,
@@ -122,31 +41,6 @@ void __noreturn handle_stack_overflow(const char *message,
unsigned long fault_address);
#endif
-/* Interrupts/Exceptions */
-enum {
- X86_TRAP_DE = 0, /* 0, Divide-by-zero */
- X86_TRAP_DB, /* 1, Debug */
- X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
- X86_TRAP_BP, /* 3, Breakpoint */
- X86_TRAP_OF, /* 4, Overflow */
- X86_TRAP_BR, /* 5, Bound Range Exceeded */
- X86_TRAP_UD, /* 6, Invalid Opcode */
- X86_TRAP_NM, /* 7, Device Not Available */
- X86_TRAP_DF, /* 8, Double Fault */
- X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
- X86_TRAP_TS, /* 10, Invalid TSS */
- X86_TRAP_NP, /* 11, Segment Not Present */
- X86_TRAP_SS, /* 12, Stack Segment Fault */
- X86_TRAP_GP, /* 13, General Protection Fault */
- X86_TRAP_PF, /* 14, Page Fault */
- X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
- X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
- X86_TRAP_AC, /* 17, Alignment Check */
- X86_TRAP_MC, /* 18, Machine Check */
- X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
- X86_TRAP_IRET = 32, /* 32, IRET Exception */
-};
-
/*
* Page fault error code bits:
*
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 13687bf0e0a9..f1188bd47658 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -12,6 +12,8 @@
#define _ASM_X86_UV_UV_BAU_H
#include <linux/bitmap.h>
+#include <asm/idtentry.h>
+
#define BITSPERBYTE 8
/*
@@ -799,12 +801,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
bitmap_zero(&dstp->bits, nbits);
}
-extern void uv_bau_message_intr1(void);
-#ifdef CONFIG_TRACING
-#define trace_uv_bau_message_intr1 uv_bau_message_intr1
-#endif
-extern void uv_bau_timeout_intr1(void);
-
struct atomic_short {
short counter;
};
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 9a6dc9b4ec99..fb81fea99093 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -271,6 +271,24 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
return __vdso_data;
}
+static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return true;
+}
+#define vdso_clocksource_ok arch_vdso_clocksource_ok
+
+/*
+ * Clocksource read value validation to handle PV and HyperV clocksources
+ * which can be invalidated asynchronously. Invalidation is indicated by
+ * returning U64_MAX, which can be tested efficiently by checking for a
+ * negative value after casting it to s64.
+ */
+static inline bool arch_vdso_cycles_ok(u64 cycles)
+{
+ return (s64)cycles >= 0;
+}
+#define vdso_cycles_ok arch_vdso_cycles_ok
+
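A standalone sanity check of the invalidation test described above (user space, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int demo_vdso_cycles_ok(uint64_t cycles)
{
	/* Mirrors arch_vdso_cycles_ok(): U64_MAX turns negative as s64 */
	return (int64_t)cycles >= 0;
}

int main(void)
{
	printf("%d %d\n", demo_vdso_cycles_ok(0x123456789ULL),
	       demo_vdso_cycles_ok(UINT64_MAX));	/* prints "1 0" */
	return 0;
}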
/*
* x86 specific delta calculation.
*
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 955c2a2e1cf9..db9adc081c5a 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -35,6 +35,7 @@ struct mce {
__u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
__u64 ppin; /* Protected Processor Inventory Number */
__u32 microcode; /* Microcode revision */
+ __u64 kflags; /* Internal kernel use */
};
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 8ef4369a4f06..e77261db2391 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,6 +28,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
+# With some compiler versions the generated code results in boot hangs, caused
+# by several compilation units. To be safe, disable all instrumentation.
+KCSAN_SANITIZE := n
+
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a9195ce8265d..8fd39ff74a49 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1011,28 +1011,29 @@ struct bp_patching_desc {
static struct bp_patching_desc *bp_desc;
-static inline struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
+static __always_inline
+struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
- struct bp_patching_desc *desc = READ_ONCE(*descp); /* rcu_dereference */
+ struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
- if (!desc || !atomic_inc_not_zero(&desc->refs))
+ if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
return NULL;
return desc;
}
-static inline void put_desc(struct bp_patching_desc *desc)
+static __always_inline void put_desc(struct bp_patching_desc *desc)
{
smp_mb__before_atomic();
- atomic_dec(&desc->refs);
+ arch_atomic_dec(&desc->refs);
}
-static inline void *text_poke_addr(struct text_poke_loc *tp)
+static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
return _stext + tp->rel_addr;
}
-static int notrace patch_cmp(const void *key, const void *elt)
+static __always_inline int patch_cmp(const void *key, const void *elt)
{
struct text_poke_loc *tp = (struct text_poke_loc *) elt;
@@ -1042,9 +1043,8 @@ static int notrace patch_cmp(const void *key, const void *elt)
return 1;
return 0;
}
-NOKPROBE_SYMBOL(patch_cmp);
-int notrace poke_int3_handler(struct pt_regs *regs)
+int noinstr poke_int3_handler(struct pt_regs *regs)
{
struct bp_patching_desc *desc;
struct text_poke_loc *tp;
@@ -1077,9 +1077,9 @@ int notrace poke_int3_handler(struct pt_regs *regs)
* Skip the binary search if there is a single member in the vector.
*/
if (unlikely(desc->nr_entries > 1)) {
- tp = bsearch(ip, desc->vec, desc->nr_entries,
- sizeof(struct text_poke_loc),
- patch_cmp);
+ tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
+ sizeof(struct text_poke_loc),
+ patch_cmp);
if (!tp)
goto out_put;
} else {
@@ -1118,7 +1118,6 @@ out_put:
put_desc(desc);
return ret;
}
-NOKPROBE_SYMBOL(poke_int3_handler);
#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index b6b3297851f3..18f6b7c4bd79 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -18,9 +18,11 @@
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
+#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
@@ -33,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
{}
};
@@ -50,6 +53,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
@@ -65,6 +69,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4b1d31be50b4..e0e2f020ec02 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1088,23 +1088,14 @@ static void local_apic_timer_interrupt(void)
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
-__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- /*
- * NOTE! We'd better ACK the irq immediately,
- * because timer handling can be slow.
- *
- * update_process_times() expects us to have done irq_enter().
- * Besides, if we don't timer interrupts ignore the global
- * interrupt lock, which is the WrongThing (tm) to do.
- */
- entering_ack_irq();
+ ack_APIC_irq();
trace_local_timer_entry(LOCAL_TIMER_VECTOR);
local_apic_timer_interrupt();
trace_local_timer_exit(LOCAL_TIMER_VECTOR);
- exiting_irq();
set_irq_regs(old_regs);
}
@@ -2060,7 +2051,7 @@ void __init init_apic_mappings(void)
unsigned int new_apicid;
if (apic_validate_deadline_timer())
- pr_debug("TSC deadline timer available\n");
+ pr_info("TSC deadline timer available\n");
if (x2apic_mode) {
boot_cpu_physical_apicid = read_apic_id();
@@ -2120,15 +2111,21 @@ void __init register_lapic_address(unsigned long address)
* Local APIC interrupts
*/
-/*
- * This interrupt should _never_ happen with our APIC/SMP architecture
+/**
+ * spurious_interrupt - Catch all for interrupts raised on unused vectors
+ * @regs: Pointer to pt_regs on stack
+ * @vector: The vector number
+ *
+ * This is invoked from the ASM entry code to catch all interrupts which
+ * trigger on an entry that is routed to the common_spurious idtentry
+ * point.
+ *
+ * Also called from sysvec_spurious_apic_interrupt().
*/
-__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_IRQ(spurious_interrupt)
{
- u8 vector = ~regs->orig_ax;
u32 v;
- entering_irq();
trace_spurious_apic_entry(vector);
inc_irq_stat(irq_spurious_count);
@@ -2158,13 +2155,17 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
}
out:
trace_spurious_apic_exit(vector);
- exiting_irq();
+}
+
+DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
+{
+ __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
}
/*
* This interrupt should never happen with our APIC/SMP architecture
*/
-__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
{
static const char * const error_interrupt_reason[] = {
"Send CS error", /* APIC Error Bit 0 */
@@ -2178,7 +2179,6 @@ __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
};
u32 v, i = 0;
- entering_irq();
trace_error_apic_entry(ERROR_APIC_VECTOR);
/* First tickle the hardware, only then report what went on. -- REW */
@@ -2202,7 +2202,6 @@ __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
apic_printk(APIC_DEBUG, KERN_CONT "\n");
trace_error_apic_exit(ERROR_APIC_VECTOR);
- exiting_irq();
}
/**
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 159bd0cb8548..5cbaca58af95 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -115,7 +115,8 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
* denote it as spurious which is no harm as this is a rare event
* and interrupt handlers have to cope with spurious interrupts
* anyway. If the vector is unused, then it is marked so it won't
- * trigger the 'No irq handler for vector' warning in do_IRQ().
+ * trigger the 'No irq handler for vector' warning in
+ * common_interrupt().
*
* This requires to hold vector lock to prevent concurrent updates to
* the affected vector.
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 67768e54438b..c48be6e1f676 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -861,13 +861,13 @@ static void free_moved_vector(struct apic_chip_data *apicd)
apicd->move_in_progress = 0;
}
-asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
{
struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
struct apic_chip_data *apicd;
struct hlist_node *tmp;
- entering_ack_irq();
+ ack_APIC_irq();
/* Prevent vectors vanishing under us */
raw_spin_lock(&vector_lock);
@@ -892,7 +892,6 @@ asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
}
raw_spin_unlock(&vector_lock);
- exiting_irq();
}
static void __send_cleanup_vector(struct apic_chip_data *apicd)
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index c2a47016f243..828be792231e 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -57,9 +57,6 @@ int main(void)
BLANK();
#undef ENTRY
- OFFSET(TSS_ist, tss_struct, x86_tss.ist);
- DEFINE(DB_STACK_OFFSET, offsetof(struct cea_exception_stacks, DB_stack) -
- offsetof(struct cea_exception_stacks, DB1_stack));
BLANK();
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7dc4ad68eb41..dba6a83bc349 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -13,6 +13,9 @@ endif
KCOV_INSTRUMENT_common.o := n
KCOV_INSTRUMENT_perf_event.o := n
+# As above, instrumenting secondary CPU boot code causes boot hangs.
+KCSAN_SANITIZE_common.o := n
+
# Make sure load_percpu_segment has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_common.o := $(nostackp)
diff --git a/arch/x86/kernel/cpu/acrn.c b/arch/x86/kernel/cpu/acrn.c
index 676022e71791..1da9b1c9a2db 100644
--- a/arch/x86/kernel/cpu/acrn.c
+++ b/arch/x86/kernel/cpu/acrn.c
@@ -10,10 +10,10 @@
*/
#include <linux/interrupt.h>
-#include <asm/acrn.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hypervisor.h>
+#include <asm/idtentry.h>
#include <asm/irq_regs.h>
static uint32_t __init acrn_detect(void)
@@ -24,7 +24,7 @@ static uint32_t __init acrn_detect(void)
static void __init acrn_init_platform(void)
{
/* Setup the IDT for ACRN hypervisor callback */
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, acrn_hv_callback_vector);
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_acrn_hv_callback);
}
static bool acrn_x2apic_available(void)
@@ -39,7 +39,7 @@ static bool acrn_x2apic_available(void)
static void (*acrn_intr_handler)(void);
-__visible void __irq_entry acrn_hv_vector_handler(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -50,13 +50,12 @@ __visible void __irq_entry acrn_hv_vector_handler(struct pt_regs *regs)
* will block the interrupt whose vector is lower than
* HYPERVISOR_CALLBACK_VECTOR.
*/
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
if (acrn_intr_handler)
acrn_intr_handler();
- exiting_irq();
set_irq_regs(old_regs);
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index b6f887be440c..0b71970d2d3d 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -588,7 +588,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
@@ -734,15 +736,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
break;
}
- /*
- * At this point, an STIBP mode other than "off" has been set.
- * If STIBP support is not being forced, check if STIBP always-on
- * is preferred.
- */
- if (mode != SPECTRE_V2_USER_STRICT &&
- boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
-
/* Initialize Indirect Branch Prediction Barrier */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
@@ -765,23 +758,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
+
+ spectre_v2_user_ibpb = mode;
}
- /* If enhanced IBRS is enabled no STIBP required */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ /*
+ * If enhanced IBRS is enabled or SMT impossible, STIBP is not
+ * required.
+ */
+ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;
/*
- * If SMT is not possible or STIBP is not available clear the STIBP
- * mode.
+ * At this point, an STIBP mode other than "off" has been set.
+ * If STIBP support is not being forced, check if STIBP always-on
+ * is preferred.
*/
- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+ if (mode != SPECTRE_V2_USER_STRICT &&
+ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+ mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+ /*
+ * If STIBP is not available, clear the STIBP mode.
+ */
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
mode = SPECTRE_V2_USER_NONE;
+
+ spectre_v2_user_stibp = mode;
+
set_mode:
- spectre_v2_user = mode;
- /* Only print the STIBP mode when SMT possible */
- if (smt_possible)
- pr_info("%s\n", spectre_v2_user_strings[mode]);
+ pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
@@ -1014,7 +1020,7 @@ void cpu_bugs_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
@@ -1257,14 +1263,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
case PR_SPEC_ENABLE:
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
/*
* Indirect branch speculation is always disabled in strict
- * mode.
+		 * mode. It also cannot be enabled if it was force-disabled
+		 * by a previous prctl call.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+ task_spec_ib_force_disable(task))
return -EPERM;
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
@@ -1275,10 +1286,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always allowed when
* mitigation is force disabled.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return 0;
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
@@ -1309,7 +1322,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@@ -1340,22 +1354,24 @@ static int ib_prctl_get(struct task_struct *task)
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return PR_SPEC_NOT_AFFECTED;
- switch (spectre_v2_user) {
- case SPECTRE_V2_USER_NONE:
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_PRCTL:
- case SPECTRE_V2_USER_SECCOMP:
+ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+ return PR_SPEC_DISABLE;
+ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_STRICT:
- case SPECTRE_V2_USER_STRICT_PREFERRED:
- return PR_SPEC_DISABLE;
- default:
+ } else
return PR_SPEC_NOT_AFFECTED;
- }
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@ -1594,7 +1610,7 @@ static char *stibp_state(void)
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return "";
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
return ", STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
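The bugs.c changes above split the single spectre_v2_user mode into spectre_v2_user_stibp and spectre_v2_user_ibpb, and ib_prctl_get()/ib_prctl_set() now consult both. What user space observes through that path is the speculation-control prctl(2) interface; the stand-alone sketch below (not part of the patch, assuming a kernel with PR_SPEC_INDIRECT_BRANCH support; the file name ibspec.c is illustrative) queries the calling task's indirect-branch state and then force-disables it, exercising the PR_SPEC_FORCE_DISABLE path handled in ib_prctl_set() above.

/*
 * Stand-alone user-space sketch (not part of the patch): exercise the
 * speculation-control prctl interface that ib_prctl_get()/ib_prctl_set()
 * above implement.  Assumes a kernel with PR_SPEC_INDIRECT_BRANCH support.
 * Build: cc -o ibspec ibspec.c
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

static void print_ib_state(long v)
{
	if (v & PR_SPEC_FORCE_DISABLE)
		puts("indirect branch speculation: force-disabled");
	else if (v & PR_SPEC_DISABLE)
		puts("indirect branch speculation: disabled");
	else if (v & PR_SPEC_ENABLE)
		puts("indirect branch speculation: enabled");
	else
		printf("indirect branch speculation: raw state %#lx\n", v);
}

int main(void)
{
	long v = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

	if (v < 0) {
		perror("PR_GET_SPECULATION_CTRL");
		return 1;
	}
	print_ib_state(v);

	/* Takes the PR_SPEC_FORCE_DISABLE path in ib_prctl_set() above. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_FORCE_DISABLE, 0, 0) < 0)
		perror("PR_SET_SPECULATION_CTRL");

	v = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
	if (v >= 0)
		print_ib_state(v);
	return 0;
}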
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8be042df12c3..043d93cdcaad 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1706,25 +1706,6 @@ void syscall_init(void)
X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}
-DEFINE_PER_CPU(int, debug_stack_usage);
-DEFINE_PER_CPU(u32, debug_idt_ctr);
-
-void debug_stack_set_zero(void)
-{
- this_cpu_inc(debug_idt_ctr);
- load_current_idt();
-}
-NOKPROBE_SYMBOL(debug_stack_set_zero);
-
-void debug_stack_reset(void)
-{
- if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
- return;
- if (this_cpu_dec_return(debug_idt_ctr) == 0)
- load_current_idt();
-}
-NOKPROBE_SYMBOL(debug_stack_reset);
-
#else /* CONFIG_X86_64 */
DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 63926c94eb5f..c25a67a34bd3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1142,9 +1142,12 @@ void switch_to_sld(unsigned long tifn)
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
{}
};
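split_lock_cpu_ids above is a {}-terminated x86_cpu_id match table; the second column is driver_data, which the lookup code uses (roughly: whether split-lock support still has to be confirmed by feature enumeration, or can simply be assumed for that model). The sketch below is a simplified, self-contained illustration of that terminator-walk lookup pattern; the struct, the family/model numbers and the driver_data meanings are hypothetical stand-ins, not the kernel's X86_MATCH_INTEL_FAM6_MODEL() expansion.

/*
 * Simplified, self-contained illustration of a {}-terminated match-table
 * lookup like the one consuming split_lock_cpu_ids above.  Family/model
 * numbers and the meaning of driver_data are hypothetical stand-ins.
 */
#include <stdio.h>

struct cpu_id {
	unsigned int family;
	unsigned int model;
	unsigned long driver_data;
};

static const struct cpu_id split_lock_ids[] = {
	{ 6, 0x6a, 0 },	/* hypothetical: confirm via feature enumeration */
	{ 6, 0x8c, 1 },	/* hypothetical: assume support, no extra check  */
	{ }		/* all-zero terminator, like the {} entry above  */
};

static const struct cpu_id *match_cpu(unsigned int family, unsigned int model)
{
	const struct cpu_id *m;

	for (m = split_lock_ids; m->family; m++)
		if (m->family == family && m->model == model)
			return m;
	return NULL;
}

int main(void)
{
	const struct cpu_id *m = match_cpu(6, 0x8c);

	if (m)
		printf("matched model 0x%x, driver_data=%lu\n",
		       m->model, m->driver_data);
	else
		puts("no match");
	return 0;
}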
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 52de616a8065..99be063fcb1b 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -192,7 +192,12 @@ EXPORT_SYMBOL_GPL(smca_banks);
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];
static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
-static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */
+
+/*
+ * A list of the banks enabled on each logical CPU. Controls which respective
+ * descriptors to initialize later in mce_threshold_create_device().
+ */
+static DEFINE_PER_CPU(unsigned int, bank_map);
/* Map of banks that have more than MCA_MISC0 available. */
static DEFINE_PER_CPU(u32, smca_misc_banks_map);
@@ -381,6 +386,10 @@ static void threshold_restart_bank(void *_tr)
struct thresh_restart *tr = _tr;
u32 hi, lo;
+ /* sysfs write might race against an offline operation */
+ if (this_cpu_read(threshold_banks))
+ return;
+
rdmsr(tr->b->address, lo, hi);
if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
@@ -568,14 +577,19 @@ bool amd_filter_mce(struct mce *m)
{
enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
struct cpuinfo_x86 *c = &boot_cpu_data;
- u8 xec = (m->status >> 16) & 0x3F;
/* See Family 17h Models 10h-2Fh Erratum #1114. */
if (c->x86 == 0x17 &&
c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
- bank_type == SMCA_IF && xec == 10)
+ bank_type == SMCA_IF && XEC(m->status, 0x3f) == 10)
return true;
+ /* NB GART TLB error reporting is disabled by default. */
+ if (c->x86 < 0x17) {
+ if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5)
+ return true;
+ }
+
return false;
}
@@ -907,14 +921,13 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
mce_log(&m);
}
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
{
- entering_irq();
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
inc_irq_stat(irq_deferred_error_count);
deferred_error_int_vector();
trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
- exiting_ack_irq();
+ ack_APIC_irq();
}
/*
@@ -1016,13 +1029,22 @@ static void log_and_reset_block(struct threshold_block *block)
static void amd_threshold_interrupt(void)
{
struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
+ struct threshold_bank **bp = this_cpu_read(threshold_banks);
unsigned int bank, cpu = smp_processor_id();
+ /*
+ * Validate that the threshold bank has been initialized already. The
+ * handler is installed at boot time, but on a hotplug event the
+ * interrupt might fire before the data has been initialized.
+ */
+ if (!bp)
+ return;
+
for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
- first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
+ first_block = bp[bank]->blocks;
if (!first_block)
continue;
@@ -1071,7 +1093,8 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
memset(&tr, 0, sizeof(tr));
tr.b = b;
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
+ if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1))
+ return -ENODEV;
return size;
}
@@ -1095,7 +1118,8 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
b->threshold_limit = new;
tr.b = b;
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
+ if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1))
+ return -ENODEV;
return size;
}
@@ -1104,7 +1128,9 @@ static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
u32 lo, hi;
- rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);
+ /* CPU might be offline by now */
+ if (rdmsr_on_cpu(b->cpu, b->address, &lo, &hi))
+ return -ENODEV;
return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
(THRESHOLD_MAX - b->threshold_limit)));
@@ -1209,10 +1235,10 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
u32 low, high;
int err;
- if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
+ if ((bank >= this_cpu_read(mce_num_banks)) || (block >= NR_BLOCKS))
return 0;
- if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
+ if (rdmsr_safe(address, &low, &high))
return 0;
if (!(high & MASK_VALID_HI)) {
@@ -1247,6 +1273,7 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
INIT_LIST_HEAD(&b->miscj);
+ /* This is safe as @tb is not visible yet */
if (tb->blocks)
list_add(&b->miscj, &tb->blocks->miscj);
else
@@ -1267,13 +1294,12 @@ recurse:
if (b)
kobject_uevent(&b->kobj, KOBJ_ADD);
- return err;
+ return 0;
out_free:
if (b) {
- kobject_put(&b->kobj);
list_del(&b->miscj);
- kfree(b);
+ kobject_put(&b->kobj);
}
return err;
}
@@ -1302,9 +1328,10 @@ static int __threshold_add_blocks(struct threshold_bank *b)
return err;
}
-static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
+ unsigned int bank)
{
- struct device *dev = per_cpu(mce_device, cpu);
+ struct device *dev = this_cpu_read(mce_device);
struct amd_northbridge *nb = NULL;
struct threshold_bank *b = NULL;
const char *name = get_name(bank, NULL);
@@ -1324,7 +1351,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
- per_cpu(threshold_banks, cpu)[bank] = b;
+ bp[bank] = b;
refcount_inc(&b->cpus);
err = __threshold_add_blocks(b);
@@ -1339,6 +1366,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
}
+ /* Associate the bank with the per-CPU MCE device */
b->kobj = kobject_create_and_add(name, &dev->kobj);
if (!b->kobj) {
err = -EINVAL;
@@ -1346,6 +1374,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
}
if (is_shared_bank(bank)) {
+ b->shared = 1;
refcount_set(&b->cpus, 1);
/* nb is already initialized, see above */
@@ -1357,16 +1386,16 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
if (err)
- goto out_free;
-
- per_cpu(threshold_banks, cpu)[bank] = b;
+ goto out_kobj;
+ bp[bank] = b;
return 0;
- out_free:
+out_kobj:
+ kobject_put(b->kobj);
+out_free:
kfree(b);
-
- out:
+out:
return err;
}
@@ -1375,21 +1404,16 @@ static void threshold_block_release(struct kobject *kobj)
kfree(to_block(kobj));
}
-static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+static void deallocate_threshold_blocks(struct threshold_bank *bank)
{
- struct threshold_block *pos = NULL;
- struct threshold_block *tmp = NULL;
- struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
+ struct threshold_block *pos, *tmp;
- if (!head)
- return;
-
- list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+ list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) {
list_del(&pos->miscj);
kobject_put(&pos->kobj);
}
- kobject_put(&head->blocks->kobj);
+ kobject_put(&bank->blocks->kobj);
}
static void __threshold_remove_blocks(struct threshold_bank *b)
@@ -1403,122 +1427,102 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
kobject_del(&pos->kobj);
}
-static void threshold_remove_bank(unsigned int cpu, int bank)
+static void threshold_remove_bank(struct threshold_bank *bank)
{
struct amd_northbridge *nb;
- struct threshold_bank *b;
- b = per_cpu(threshold_banks, cpu)[bank];
- if (!b)
- return;
+ if (!bank->blocks)
+ goto out_free;
- if (!b->blocks)
- goto free_out;
+ if (!bank->shared)
+ goto out_dealloc;
- if (is_shared_bank(bank)) {
- if (!refcount_dec_and_test(&b->cpus)) {
- __threshold_remove_blocks(b);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
- return;
- } else {
- /*
- * the last CPU on this node using the shared bank is
- * going away, remove that bank now.
- */
- nb = node_to_amd_nb(amd_get_nb_id(cpu));
- nb->bank4 = NULL;
- }
+ if (!refcount_dec_and_test(&bank->cpus)) {
+ __threshold_remove_blocks(bank);
+ return;
+ } else {
+ /*
+ * The last CPU on this node using the shared bank is going
+ * away, remove that bank now.
+ */
+ nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id()));
+ nb->bank4 = NULL;
}
- deallocate_threshold_block(cpu, bank);
+out_dealloc:
+ deallocate_threshold_blocks(bank);
-free_out:
- kobject_del(b->kobj);
- kobject_put(b->kobj);
- kfree(b);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
+out_free:
+ kobject_put(bank->kobj);
+ kfree(bank);
}
int mce_threshold_remove_device(unsigned int cpu)
{
- unsigned int bank;
+ struct threshold_bank **bp = this_cpu_read(threshold_banks);
+ unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
- for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- continue;
- threshold_remove_bank(cpu, bank);
+ if (!bp)
+ return 0;
+
+	 * Clear the pointer before cleaning up, so that the interrupt
+	 * handler won't touch any of it.
+ * touch anything of this.
+ */
+ this_cpu_write(threshold_banks, NULL);
+
+ for (bank = 0; bank < numbanks; bank++) {
+ if (bp[bank]) {
+ threshold_remove_bank(bp[bank]);
+ bp[bank] = NULL;
+ }
}
- kfree(per_cpu(threshold_banks, cpu));
- per_cpu(threshold_banks, cpu) = NULL;
+ kfree(bp);
return 0;
}
-/* create dir/files for all valid threshold banks */
+/**
+ * mce_threshold_create_device - Create the per-CPU MCE threshold device
+ * @cpu: The plugged in CPU
+ *
+ * Create directories and files for all valid threshold banks.
+ *
+ * This is invoked from the CPU hotplug callback which was installed in
+ * mcheck_init_device(). The invocation happens in context of the hotplug
+ * thread running on @cpu. The callback is invoked on all CPUs which are
+ * online when the callback is installed or during a real hotplug event.
+ */
int mce_threshold_create_device(unsigned int cpu)
{
- unsigned int bank;
+ unsigned int numbanks, bank;
struct threshold_bank **bp;
- int err = 0;
+ int err;
- bp = per_cpu(threshold_banks, cpu);
+ if (!mce_flags.amd_threshold)
+ return 0;
+
+ bp = this_cpu_read(threshold_banks);
if (bp)
return 0;
- bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *),
- GFP_KERNEL);
+ numbanks = this_cpu_read(mce_num_banks);
+ bp = kcalloc(numbanks, sizeof(*bp), GFP_KERNEL);
if (!bp)
return -ENOMEM;
- per_cpu(threshold_banks, cpu) = bp;
-
- for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ for (bank = 0; bank < numbanks; ++bank) {
+ if (!(this_cpu_read(bank_map) & (1 << bank)))
continue;
- err = threshold_create_bank(cpu, bank);
+ err = threshold_create_bank(bp, cpu, bank);
if (err)
- goto err;
- }
- return err;
-err:
- mce_threshold_remove_device(cpu);
- return err;
-}
-
-static __init int threshold_init_device(void)
-{
- unsigned lcpu = 0;
-
- /* to hit CPUs online before the notifier is up */
- for_each_online_cpu(lcpu) {
- int err = mce_threshold_create_device(lcpu);
-
- if (err)
- return err;
+ goto out_err;
}
+ this_cpu_write(threshold_banks, bp);
if (thresholding_irq_en)
mce_threshold_vector = amd_threshold_interrupt;
-
return 0;
+out_err:
+ mce_threshold_remove_device(cpu);
+ return err;
}
-/*
- * there are 3 funcs which need to be _initcalled in a logic sequence:
- * 1. xen_late_init_mcelog
- * 2. mcheck_init_device
- * 3. threshold_init_device
- *
- * xen_late_init_mcelog must register xen_mce_chrdev_device before
- * native mce_chrdev_device registration if running under xen platform;
- *
- * mcheck_init_device should be inited before threshold_init_device to
- * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
- *
- * so we use following _initcalls
- * 1. device_initcall(xen_late_init_mcelog);
- * 2. device_initcall_sync(mcheck_init_device);
- * 3. late_initcall(threshold_init_device);
- *
- * when running under xen, the initcall order is 1,2,3;
- * on baremetal, we skip 1 and we do only 2 and 3.
- */
-late_initcall(threshold_init_device);
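Two details in the amd.c changes above lend themselves to a quick stand-alone check: the open-coded '(m->status >> 16) & 0x3F' becomes the XEC() helper, and a new filter drops the default-off northbridge GART TLB error (bank 4, extended error code 0x5) on pre-SMCA parts. The sketch below re-declares XEC() locally to mirror that helper and feeds it a synthetic MCA_STATUS value; treat the macro and the status layout as illustrative rather than the kernel's definitions.

/*
 * Stand-alone illustration of the filter added above.  XEC() is a local
 * re-declaration mirroring the helper the patch switches to (extract the
 * extended error code starting at bit 16 of MCA_STATUS); the status value
 * below is synthetic.
 */
#include <stdio.h>
#include <stdint.h>

#define XEC(x, mask)	(((x) >> 16) & (mask))

/* Pre-SMCA northbridge GART TLB error: bank 4, extended error code 0x5. */
static int is_gart_tlb_error(unsigned int bank, uint64_t status)
{
	return bank == 4 && XEC(status, 0x1f) == 0x5;
}

int main(void)
{
	uint64_t status = (uint64_t)0x5 << 16;	/* xec = 5, everything else 0 */

	printf("xec = %llu, filtered = %s\n",
	       (unsigned long long)XEC(status, 0x1f),
	       is_gart_tlb_error(4, status) ? "yes" : "no");
	return 0;
}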
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index e9265e2f28c9..ce9120c4f740 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -130,7 +130,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
/* Do initial initialization of a struct mce */
-void mce_setup(struct mce *m)
+noinstr void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
@@ -140,12 +140,12 @@ void mce_setup(struct mce *m)
m->cpuid = cpuid_eax(1);
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
- rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+ m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
- rdmsrl(MSR_PPIN, m->ppin);
+ m->ppin = __rdmsr(MSR_PPIN);
else if (this_cpu_has(X86_FEATURE_AMD_PPIN))
- rdmsrl(MSR_AMD_PPIN, m->ppin);
+ m->ppin = __rdmsr(MSR_AMD_PPIN);
m->microcode = boot_cpu_data.microcode;
}
@@ -160,29 +160,17 @@ void mce_log(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_log);
-/*
- * We run the default notifier if we have only the UC, the first and the
- * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS
- * notifiers registered on the chain.
- */
-#define NUM_DEFAULT_NOTIFIERS 3
-static atomic_t num_notifiers;
-
void mce_register_decode_chain(struct notifier_block *nb)
{
if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
return;
- atomic_inc(&num_notifiers);
-
blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);
void mce_unregister_decode_chain(struct notifier_block *nb)
{
- atomic_dec(&num_notifiers);
-
blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
@@ -265,6 +253,7 @@ static void __print_mce(struct mce *m)
}
pr_cont("\n");
+
/*
* Note this output is parsed by external tools and old fields
* should not be changed.
@@ -531,6 +520,14 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
+static bool whole_page(struct mce *m)
+{
+ if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
+ return true;
+
+ return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
+}
+
bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
@@ -546,22 +543,7 @@ bool mce_is_correctable(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_correctable);
-static bool cec_add_mce(struct mce *m)
-{
- if (!m)
- return false;
-
- /* We eat only correctable DRAM errors with usable addresses. */
- if (mce_is_memory_error(m) &&
- mce_is_correctable(m) &&
- mce_usable_address(m))
- if (!cec_add_elem(m->addr >> PAGE_SHIFT))
- return true;
-
- return false;
-}
-
-static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
+static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *m = (struct mce *)data;
@@ -569,9 +551,6 @@ static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
if (!m)
return NOTIFY_DONE;
- if (cec_add_mce(m))
- return NOTIFY_STOP;
-
/* Emit the trace record: */
trace_mce_record(m);
@@ -582,9 +561,9 @@ static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
return NOTIFY_DONE;
}
-static struct notifier_block first_nb = {
- .notifier_call = mce_first_notifier,
- .priority = MCE_PRIO_FIRST,
+static struct notifier_block early_nb = {
+ .notifier_call = mce_early_notifier,
+ .priority = MCE_PRIO_EARLY,
};
static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
@@ -601,8 +580,10 @@ static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
return NOTIFY_DONE;
pfn = mce->addr >> PAGE_SHIFT;
- if (!memory_failure(pfn, 0))
- set_mce_nospec(pfn);
+ if (!memory_failure(pfn, 0)) {
+ set_mce_nospec(pfn, whole_page(mce));
+ mce->kflags |= MCE_HANDLED_UC;
+ }
return NOTIFY_OK;
}
@@ -620,10 +601,8 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
if (!m)
return NOTIFY_DONE;
- if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS)
- return NOTIFY_DONE;
-
- __print_mce(m);
+ if (mca_cfg.print_all || !m->kflags)
+ __print_mce(m);
return NOTIFY_DONE;
}
@@ -1100,13 +1079,15 @@ static void mce_clear_state(unsigned long *toclear)
* kdump kernel establishing a new #MC handler where a broadcasted MCE
* might not get handled properly.
*/
-static bool __mc_check_crashing_cpu(int cpu)
+static noinstr bool mce_check_crashing_cpu(void)
{
+ unsigned int cpu = smp_processor_id();
+
if (cpu_is_offline(cpu) ||
(crashing_cpu != -1 && crashing_cpu != cpu)) {
u64 mcgstatus;
- mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+ mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
if (mcgstatus & MCG_STATUS_LMCES)
@@ -1114,7 +1095,7 @@ static bool __mc_check_crashing_cpu(int cpu)
}
if (mcgstatus & MCG_STATUS_RIPV) {
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
return true;
}
}
@@ -1200,11 +1181,12 @@ static void kill_me_maybe(struct callback_head *cb)
int flags = MF_ACTION_REQUIRED;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
- if (!(p->mce_status & MCG_STATUS_RIPV))
+
+ if (!p->mce_ripv)
flags |= MF_MUST_KILL;
if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags)) {
- set_mce_nospec(p->mce_addr >> PAGE_SHIFT);
+ set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
return;
}
@@ -1230,12 +1212,11 @@ static void kill_me_maybe(struct callback_head *cb)
* backing the user stack, tracing that reads the user stack will cause
* potentially infinite recursion.
*/
-void noinstr do_machine_check(struct pt_regs *regs, long error_code)
+void noinstr do_machine_check(struct pt_regs *regs)
{
DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
struct mca_config *cfg = &mca_cfg;
- int cpu = smp_processor_id();
struct mce m, *final;
char *msg = NULL;
int worst = 0;
@@ -1264,11 +1245,6 @@ void noinstr do_machine_check(struct pt_regs *regs, long error_code)
*/
int lmce = 1;
- if (__mc_check_crashing_cpu(cpu))
- return;
-
- nmi_enter();
-
this_cpu_inc(mce_exception_count);
mce_gather_info(&m, regs);
@@ -1356,7 +1332,7 @@ void noinstr do_machine_check(struct pt_regs *regs, long error_code)
sync_core();
if (worst != MCE_AR_SEVERITY && !kill_it)
- goto out_ist;
+ return;
/* Fault was in user mode and we need to take some action */
if ((m.cs & 3) == 3) {
@@ -1364,18 +1340,27 @@ void noinstr do_machine_check(struct pt_regs *regs, long error_code)
BUG_ON(!on_thread_stack() || !user_mode(regs));
current->mce_addr = m.addr;
- current->mce_status = m.mcgstatus;
+ current->mce_ripv = !!(m.mcgstatus & MCG_STATUS_RIPV);
+ current->mce_whole_page = whole_page(&m);
current->mce_kill_me.func = kill_me_maybe;
if (kill_it)
current->mce_kill_me.func = kill_me_now;
task_work_add(current, &current->mce_kill_me, true);
} else {
- if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
- mce_panic("Failed kernel mode recovery", &m, msg);
+ /*
+ * Handle an MCE which has happened in kernel space but from
+ * which the kernel can recover: ex_has_fault_handler() has
+ * already verified that the rIP at which the error happened is
+ * a rIP from which the kernel can recover (by jumping to
+ * recovery code specified in _ASM_EXTABLE_FAULT()) and the
+ * corresponding exception handler which would do that is the
+ * proper one.
+ */
+ if (m.kflags & MCE_IN_KERNEL_RECOV) {
+ if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
+ mce_panic("Failed kernel mode recovery", &m, msg);
+ }
}
-
-out_ist:
- nmi_exit();
}
EXPORT_SYMBOL_GPL(do_machine_check);
@@ -1765,6 +1750,7 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
+ mce_flags.amd_threshold = 1;
if (mce_flags.smca) {
msr_ops.ctl = smca_ctl_reg;
@@ -1902,21 +1888,84 @@ bool filter_mce(struct mce *m)
}
/* Handle unconfigured int18 (should never happen) */
-static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+static noinstr void unexpected_machine_check(struct pt_regs *regs)
{
+ instrumentation_begin();
pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
smp_processor_id());
+ instrumentation_end();
}
/* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *, long error_code) =
- unexpected_machine_check;
+void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
-dotraplinkage notrace void do_mce(struct pt_regs *regs, long error_code)
+static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
- machine_check_vector(regs, error_code);
+ /*
+ * Only required when from kernel mode. See
+ * mce_check_crashing_cpu() for details.
+ */
+ if (machine_check_vector == do_machine_check &&
+ mce_check_crashing_cpu())
+ return;
+
+ nmi_enter();
+ /*
+ * The call targets are marked noinstr, but objtool can't figure
+ * that out because it's an indirect call. Annotate it.
+ */
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ machine_check_vector(regs);
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
+ nmi_exit();
+}
+
+static __always_inline void exc_machine_check_user(struct pt_regs *regs)
+{
+ idtentry_enter_user(regs);
+ instrumentation_begin();
+ machine_check_vector(regs);
+ instrumentation_end();
+ idtentry_exit_user(regs);
}
-NOKPROBE_SYMBOL(do_mce);
+
+#ifdef CONFIG_X86_64
+/* MCE hit kernel mode */
+DEFINE_IDTENTRY_MCE(exc_machine_check)
+{
+ unsigned long dr7;
+
+ dr7 = local_db_save();
+ exc_machine_check_kernel(regs);
+ local_db_restore(dr7);
+}
+
+/* The user mode variant. */
+DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
+{
+ unsigned long dr7;
+
+ dr7 = local_db_save();
+ exc_machine_check_user(regs);
+ local_db_restore(dr7);
+}
+#else
+/* 32bit unified entry point */
+DEFINE_IDTENTRY_MCE(exc_machine_check)
+{
+ unsigned long dr7;
+
+ dr7 = local_db_save();
+ if (user_mode(regs))
+ exc_machine_check_user(regs);
+ else
+ exc_machine_check_kernel(regs);
+ local_db_restore(dr7);
+}
+#endif
/*
* Called for each booted CPU to set up machine checks.
@@ -1999,6 +2048,7 @@ void mce_disable_bank(int bank)
* mce=no_cmci Disables CMCI
* mce=no_lmce Disables LMCE
* mce=dont_log_ce Clears corrected events silently, no log created for CEs.
+ * mce=print_all Print all machine check logs to console
* mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
* mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
* monarchtimeout is how long to wait for other CPUs on machine
@@ -2027,6 +2077,8 @@ static int __init mcheck_enable(char *str)
cfg->lmce_disabled = 1;
else if (!strcmp(str, "dont_log_ce"))
cfg->dont_log_ce = true;
+ else if (!strcmp(str, "print_all"))
+ cfg->print_all = true;
else if (!strcmp(str, "ignore_ce"))
cfg->ignore_ce = true;
else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
@@ -2049,7 +2101,7 @@ __setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
mcheck_intel_therm_init();
- mce_register_decode_chain(&first_nb);
+ mce_register_decode_chain(&early_nb);
mce_register_decode_chain(&mce_uc_nb);
mce_register_decode_chain(&mce_default_nb);
mcheck_vendor_init_severity();
@@ -2293,6 +2345,7 @@ static ssize_t store_int_with_restart(struct device *s,
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
+static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
static struct dev_ext_attribute dev_attr_check_interval = {
__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
@@ -2317,6 +2370,7 @@ static struct device_attribute *mce_device_attrs[] = {
#endif
&dev_attr_monarch_timeout.attr,
&dev_attr_dont_log_ce.attr,
+ &dev_attr_print_all.attr,
&dev_attr_ignore_ce.attr,
&dev_attr_cmci_disabled.attr,
NULL
@@ -2489,6 +2543,13 @@ static __init void mce_init_banks(void)
}
}
+/*
+ * When running on XEN, this initcall is ordered against the XEN mcelog
+ * initcall:
+ *
+ * device_initcall(xen_late_init_mcelog);
+ * device_initcall_sync(mcheck_init_device);
+ */
static __init int mcheck_init_device(void)
{
int err;
@@ -2520,6 +2581,10 @@ static __init int mcheck_init_device(void)
if (err)
goto err_out_mem;
+ /*
+ * Invokes mce_cpu_online() on all CPUs which are online when
+ * the state is installed.
+ */
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
mce_cpu_online, mce_cpu_pre_down);
if (err < 0)
@@ -2609,7 +2674,6 @@ static int __init mcheck_late_init(void)
static_branch_inc(&mcsafe_key);
mcheck_debugfs_init();
- cec_init();
/*
* Flush out everything that has been logged during early boot, now that
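Among the core.c changes above, whole_page() decides how much memory to write off as poisoned: with software error recovery (SER) disabled or no valid MCi_MISC register the whole page is assumed bad, otherwise the recorded least-significant-valid-address bit is compared against PAGE_SHIFT. The runnable sketch below mirrors that decision with local stand-ins for the kernel constants; the bit positions and macro shapes are illustrative, not copied from <asm/mce.h>.

/*
 * Runnable sketch of the whole_page() decision added above, with local
 * stand-ins for the kernel constants (bit positions are illustrative).
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT		12
#define MCI_STATUS_MISCV	(1ULL << 59)	/* MCi_MISC register valid        */
#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)	/* least significant valid bit    */

struct mce_sample {
	uint64_t status;
	uint64_t misc;
};

/* If recovery (SER) is off or MISC is invalid, assume the whole page is bad. */
static bool whole_page(const struct mce_sample *m, bool ser)
{
	if (!ser || !(m->status & MCI_STATUS_MISCV))
		return true;

	return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
}

int main(void)
{
	struct mce_sample cacheline = { .status = MCI_STATUS_MISCV, .misc = 6 };
	struct mce_sample pagewide  = { .status = MCI_STATUS_MISCV, .misc = 12 };

	printf("lsb=6  -> whole page? %d\n", whole_page(&cacheline, true));
	printf("lsb=12 -> whole page? %d\n", whole_page(&pagewide, true));
	return 0;
}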
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index d089567a9ce8..43c466020ed5 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -39,6 +39,9 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
struct mce *mce = (struct mce *)data;
unsigned int entry;
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
mutex_lock(&mce_chrdev_read_mutex);
entry = mcelog->next;
@@ -56,6 +59,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
memcpy(mcelog->entry + entry, mce, sizeof(struct mce));
mcelog->entry[entry].finished = 1;
+ mcelog->entry[entry].kflags = 0;
/* wake processes polling /dev/mcelog */
wake_up_interruptible(&mce_chrdev_wait);
@@ -63,6 +67,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
unlock:
mutex_unlock(&mce_chrdev_read_mutex);
+ mce->kflags |= MCE_HANDLED_MCELOG;
return NOTIFY_OK;
}
@@ -324,6 +329,7 @@ static const struct file_operations mce_chrdev_ops = {
.write = mce_chrdev_write,
.poll = mce_chrdev_poll,
.unlocked_ioctl = mce_chrdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
@@ -343,7 +349,7 @@ static __init int dev_mcelog_init_device(void)
if (!mcelog)
return -ENOMEM;
- strncpy(mcelog->signature, MCE_LOG_SIGNATURE, sizeof(mcelog->signature));
+ memcpy(mcelog->signature, MCE_LOG_SIGNATURE, sizeof(mcelog->signature));
mcelog->len = mce_log_len;
mcelog->recordlen = sizeof(struct mce);
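The dev-mcelog.c hunks above plug /dev/mcelog into the new kflags handshake: records already absorbed by the CEC are skipped, and records that do get queued are marked MCE_HANDLED_MCELOG, so the default notifier (gated on '!m->kflags' in the earlier core.c change) stays quiet about them. A compressed stand-alone sketch of that handshake follows; the flag bit values are illustrative, not the kernel's definitions.

/*
 * Sketch of the kflags handshake used by the hunks above: each notifier
 * marks the record it consumed, and the default notifier only prints
 * records nobody claimed.  Bit values here are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define MCE_HANDLED_CEC		(1ULL << 0)	/* illustrative */
#define MCE_HANDLED_MCELOG	(1ULL << 1)	/* illustrative */

struct record { uint64_t kflags; };

static void cec_notifier(struct record *r, int correctable_dram)
{
	if (correctable_dram)
		r->kflags |= MCE_HANDLED_CEC;
}

static void mcelog_notifier(struct record *r)
{
	if (r->kflags & MCE_HANDLED_CEC)
		return;				/* already absorbed by the CEC  */
	r->kflags |= MCE_HANDLED_MCELOG;	/* queued for /dev/mcelog       */
}

static void default_notifier(const struct record *r, int print_all)
{
	if (print_all || !r->kflags)
		puts("__print_mce(): unhandled record, print it");
	else
		puts("record was handled, stay quiet");
}

int main(void)
{
	struct record r = { 0 };

	cec_notifier(&r, 1);
	mcelog_notifier(&r);
	default_notifier(&r, 0);
	return 0;
}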
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 3413b41b8d55..0593b192eb8f 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -146,9 +146,9 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
regs.cs = m->cs;
pregs = &regs;
}
- /* in mcheck exeception handler, irq will be disabled */
+ /* do_machine_check() expects interrupts disabled -- at least */
local_irq_save(flags);
- do_machine_check(pregs, 0);
+ do_machine_check(pregs);
local_irq_restore(flags);
m->finished = 0;
}
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 3b008172ad73..6473070b5da4 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -9,7 +9,7 @@
#include <asm/mce.h>
/* Pointer to the installed machine check handler for this CPU setup. */
-extern void (*machine_check_vector)(struct pt_regs *, long error_code);
+extern void (*machine_check_vector)(struct pt_regs *);
enum severity_level {
MCE_NO_SEVERITY,
@@ -119,6 +119,7 @@ struct mca_config {
bool dont_log_ce;
bool cmci_disabled;
bool ignore_ce;
+ bool print_all;
__u64 lmce_disabled : 1,
disabled : 1,
@@ -148,7 +149,7 @@ struct mce_vendor_flags {
* Recovery. It indicates support for data poisoning in HW and deferred
* error interrupts.
*/
- succor : 1,
+ succor : 1,
/*
* (AMD) SMCA: This bit indicates support for Scalable MCA which expands
@@ -156,9 +157,12 @@ struct mce_vendor_flags {
* banks. Also, to accommodate the new banks and registers, the MCA
* register space is moved to a new MSR range.
*/
- smca : 1,
+ smca : 1,
- __reserved_0 : 61;
+ /* AMD-style error thresholding banks present. */
+ amd_threshold : 1,
+
+ __reserved_0 : 60;
};
extern struct mce_vendor_flags mce_flags;
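The internal.h hunk above makes room for the new amd_threshold bit by shrinking __reserved_0 from 61 to 60 bits, so struct mce_vendor_flags keeps packing into a single 64-bit word. A minimal compile-time sketch of that layout; the leading overflow_recov bit is assumed from the part of the struct not shown in the hunk.

/*
 * Compile-time sanity sketch for the bitfield change above: with the new
 * amd_threshold bit the widths still sum to 64, so the vendor flags keep
 * fitting in one 8-byte word.  overflow_recov is assumed from context.
 */
#include <assert.h>
#include <stdio.h>

struct vendor_flags {
	unsigned long long overflow_recov	: 1,	/* assumed leading member */
			   succor		: 1,
			   smca			: 1,
			   amd_threshold	: 1,	/* new in this hunk       */
			   __reserved_0		: 60;	/* was 61                 */
};

static_assert(sizeof(struct vendor_flags) == 8, "flags must stay one u64");

int main(void)
{
	struct vendor_flags f = { .amd_threshold = 1 };

	printf("sizeof = %zu, amd_threshold = %llu\n",
	       sizeof(f), (unsigned long long)f.amd_threshold);
	return 0;
}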
diff --git a/arch/x86/kernel/cpu/mce/p5.c b/arch/x86/kernel/cpu/mce/p5.c
index 5ee94aa1b766..19e90cae8e97 100644
--- a/arch/x86/kernel/cpu/mce/p5.c
+++ b/arch/x86/kernel/cpu/mce/p5.c
@@ -21,12 +21,11 @@
int mce_p5_enabled __read_mostly;
/* Machine check handler for Pentium class Intel CPUs: */
-static void pentium_machine_check(struct pt_regs *regs, long error_code)
+static noinstr void pentium_machine_check(struct pt_regs *regs)
{
u32 loaddr, hi, lotype;
- nmi_enter();
-
+ instrumentation_begin();
rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
@@ -39,8 +38,7 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
}
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
- nmi_exit();
+ instrumentation_end();
}
/* Set up machine check reporting for processors with Intel style MCE: */
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index 87bcdc6dc2f0..e1da619add19 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -213,8 +213,12 @@ static int error_context(struct mce *m)
{
if ((m->cs & 3) == 3)
return IN_USER;
- if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip))
+
+ if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) {
+ m->kflags |= MCE_IN_KERNEL_RECOV;
return IN_KERNEL_RECOV;
+ }
+
return IN_KERNEL;
}
diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index f36dc0742085..a7cd2d203ced 100644
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -614,14 +614,13 @@ static void unexpected_thermal_interrupt(void)
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
- entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
inc_irq_stat(irq_thermal_count);
smp_thermal_vector();
trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
- exiting_ack_irq();
+ ack_APIC_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
diff --git a/arch/x86/kernel/cpu/mce/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c
index 28812cc15300..6a059a035021 100644
--- a/arch/x86/kernel/cpu/mce/threshold.c
+++ b/arch/x86/kernel/cpu/mce/threshold.c
@@ -21,12 +21,11 @@ static void default_threshold_interrupt(void)
void (*mce_threshold_vector)(void) = default_threshold_interrupt;
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_threshold)
{
- entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
inc_irq_stat(irq_threshold_count);
mce_threshold_vector();
trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
- exiting_ack_irq();
+ ack_APIC_irq();
}
diff --git a/arch/x86/kernel/cpu/mce/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c
index b3938c195365..9c9f0abd2d7f 100644
--- a/arch/x86/kernel/cpu/mce/winchip.c
+++ b/arch/x86/kernel/cpu/mce/winchip.c
@@ -17,14 +17,12 @@
#include "internal.h"
/* Machine check handler for WinChip C6: */
-static void winchip_machine_check(struct pt_regs *regs, long error_code)
+static noinstr void winchip_machine_check(struct pt_regs *regs)
{
- nmi_enter();
-
+ instrumentation_begin();
pr_emerg("CPU0: Machine Check Exception.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
- nmi_exit();
+ instrumentation_end();
}
/* Set up machine check reporting on the Winchip C6 series */
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index ebf34c7bc8bc..af94f05a5c66 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -23,6 +23,7 @@
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
+#include <asm/idtentry.h>
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
@@ -40,11 +41,10 @@ static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
static void (*hv_crash_handler)(struct pt_regs *regs);
-__visible void __irq_entry hyperv_vector_handler(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- entering_irq();
inc_irq_stat(irq_hv_callback_count);
if (vmbus_handler)
vmbus_handler();
@@ -52,7 +52,6 @@ __visible void __irq_entry hyperv_vector_handler(struct pt_regs *regs)
if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
ack_APIC_irq();
- exiting_irq();
set_irq_regs(old_regs);
}
@@ -73,19 +72,16 @@ EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
* Routines to do per-architecture handling of stimer0
* interrupts when in Direct Mode
*/
-
-__visible void __irq_entry hv_stimer0_vector_handler(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- entering_irq();
inc_irq_stat(hyperv_stimer0_count);
if (hv_stimer0_handler)
hv_stimer0_handler();
add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
ack_APIC_irq();
- exiting_irq();
set_irq_regs(old_regs);
}
@@ -331,17 +327,19 @@ static void __init ms_hyperv_init_platform(void)
x86_platform.apic_post_init = hyperv_init;
hyperv_setup_mmu_ops();
/* Setup the IDT for hypervisor callback */
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_hyperv_callback);
/* Setup the IDT for reenlightenment notifications */
- if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT)
+ if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT) {
alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
- hyperv_reenlightenment_vector);
+ asm_sysvec_hyperv_reenlightenment);
+ }
/* Setup the IDT for stimer0 */
- if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
+ if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) {
alloc_intr_gate(HYPERV_STIMER0_VECTOR,
- hv_stimer0_callback_vector);
+ asm_sysvec_hyperv_stimer0);
+ }
# ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index 2ccc57f152a4..759d392cbe9f 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -10,7 +10,6 @@
#include <asm/desc.h>
#include <asm/traps.h>
-extern void double_fault(void);
#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
#define TSS(x) this_cpu_read(cpu_tss_rw.x86_tss.x)
@@ -21,7 +20,7 @@ static void set_df_gdt_entry(unsigned int cpu);
* Called by double_fault with CR0.TS and EFLAGS.NT cleared. The CPU thinks
* we're running the doublefault task. Cannot return.
*/
-asmlinkage notrace void __noreturn doublefault_shim(void)
+asmlinkage noinstr void __noreturn doublefault_shim(void)
{
unsigned long cr2;
struct pt_regs regs;
@@ -40,7 +39,7 @@ asmlinkage notrace void __noreturn doublefault_shim(void)
* Fill in pt_regs. A downside of doing this in C is that the unwinder
* won't see it (no ENCODE_FRAME_POINTER), so a nested stack dump
* won't successfully unwind to the source of the double fault.
- * The main dump from do_double_fault() is fine, though, since it
+ * The main dump from exc_double_fault() is fine, though, since it
* uses these regs directly.
*
* If anyone ever cares, this could be moved to asm.
@@ -70,7 +69,7 @@ asmlinkage notrace void __noreturn doublefault_shim(void)
regs.cx = TSS(cx);
regs.bx = TSS(bx);
- do_double_fault(&regs, 0, cr2);
+ exc_double_fault(&regs, 0, cr2);
/*
* x86_32 does not save the original CR3 anywhere on a task switch.
@@ -84,7 +83,6 @@ asmlinkage notrace void __noreturn doublefault_shim(void)
*/
panic("cannot return from double fault\n");
}
-NOKPROBE_SYMBOL(doublefault_shim);
DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
.tss = {
@@ -95,7 +93,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
.ldt = 0,
.io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
- .ip = (unsigned long) double_fault,
+ .ip = (unsigned long) asm_exc_double_fault,
.flags = X86_EFLAGS_FIXED,
.es = __USER_DS,
.cs = __KERNEL_CS,
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 460ae7f66818..4a94d38cd141 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -22,15 +22,13 @@
static const char * const exception_stack_names[] = {
[ ESTACK_DF ] = "#DF",
[ ESTACK_NMI ] = "NMI",
- [ ESTACK_DB2 ] = "#DB2",
- [ ESTACK_DB1 ] = "#DB1",
[ ESTACK_DB ] = "#DB",
[ ESTACK_MCE ] = "#MC",
};
const char *stack_type_name(enum stack_type type)
{
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
if (type == STACK_TYPE_IRQ)
return "IRQ";
@@ -79,7 +77,6 @@ static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
EPAGERANGE(DF),
EPAGERANGE(NMI),
- EPAGERANGE(DB1),
EPAGERANGE(DB),
EPAGERANGE(MCE),
};
@@ -91,7 +88,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
struct pt_regs *regs;
unsigned int k;
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
/*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 4d13c57f370a..983cd53ed4c9 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -991,7 +991,15 @@ void __init e820__reserve_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+ /*
+ * SETUP_EFI is supplied by kexec and does not need to be
+ * reserved.
+ */
+ if (data->type != SETUP_EFI)
+ e820__range_update_kexec(pa_data,
+ sizeof(*data) + data->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
if (data->type == SETUP_INDIRECT &&
((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index aa5d28aeb31e..083a3da7bb73 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -12,7 +12,7 @@
#include <asm/frame.h>
.code64
- .section .entry.text, "ax"
+ .section .text, "ax"
#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 4fc33fdf0f16..16da4ac01597 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -29,15 +29,16 @@
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
+#define GET_CR2_INTO(reg) GET_CR2_INTO_AX ; _ASM_MOV %_ASM_AX, reg
#else
#define INTERRUPT_RETURN iretq
+#define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg
#endif
-/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
+/*
+ * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
* because we need identity-mapped pages.
- *
*/
-
#define l4_index(x) (((x) >> 39) & 511)
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 4d8d53ed02c9..8cdf29ffd95f 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -32,6 +32,8 @@
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/user.h>
+#include <asm/desc.h>
+#include <asm/tlbflush.h>
/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
@@ -97,6 +99,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
unsigned long *dr7;
int i;
+ lockdep_assert_irqs_disabled();
+
for (i = 0; i < HBP_NUM; i++) {
struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
@@ -115,6 +119,12 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
dr7 = this_cpu_ptr(&cpu_dr7);
*dr7 |= encode_dr7(i, info->len, info->type);
+ /*
+ * Ensure we first write cpu_dr7 before we set the DR7 register.
+	 * This ensures an NMI never sees a zero cpu_dr7 when DR7 is not zero.
+ */
+ barrier();
+
set_debugreg(*dr7, 7);
if (info->mask)
set_dr_addr_mask(info->mask, i);
@@ -134,9 +144,11 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- unsigned long *dr7;
+ unsigned long dr7;
int i;
+ lockdep_assert_irqs_disabled();
+
for (i = 0; i < HBP_NUM; i++) {
struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
@@ -149,12 +161,20 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
return;
- dr7 = this_cpu_ptr(&cpu_dr7);
- *dr7 &= ~__encode_dr7(i, info->len, info->type);
+ dr7 = this_cpu_read(cpu_dr7);
+ dr7 &= ~__encode_dr7(i, info->len, info->type);
- set_debugreg(*dr7, 7);
+ set_debugreg(dr7, 7);
if (info->mask)
set_dr_addr_mask(0, i);
+
+ /*
+ * Ensure the write to cpu_dr7 is after we've set the DR7 register.
+	 * This ensures an NMI never sees a zero cpu_dr7 when DR7 is not zero.
+ */
+ barrier();
+
+ this_cpu_write(cpu_dr7, dr7);
}
static int arch_bp_generic_len(int x86_len)
@@ -227,10 +247,76 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
}
+/*
+ * Checks whether the range [addr, end] overlaps the area [base, base + size).
+ */
+static inline bool within_area(unsigned long addr, unsigned long end,
+ unsigned long base, unsigned long size)
+{
+ return end >= base && addr < (base + size);
+}
+
+/*
+ * Checks whether the range from addr to end, inclusive, overlaps the fixed
+ * mapped CPU entry area range or other ranges used for CPU entry.
+ */
+static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
+{
+ int cpu;
+
+	/* The CPU entry area is always used for CPU entry */
+ if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
+ CPU_ENTRY_AREA_TOTAL_SIZE))
+ return true;
+
+ for_each_possible_cpu(cpu) {
+ /* The original rw GDT is being used after load_direct_gdt() */
+ if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
+ GDT_SIZE))
+ return true;
+
+ /*
+		 * cpu_tss_rw is not directly referenced by hardware, but it is
+		 * also used in the CPU entry code.
+ */
+ if (within_area(addr, end,
+ (unsigned long)&per_cpu(cpu_tss_rw, cpu),
+ sizeof(struct tss_struct)))
+ return true;
+
+ /*
+ * cpu_tlbstate.user_pcid_flush_mask is used for CPU entry.
+		 * A data breakpoint on it would cause an unwanted #DB.
+ * Protect the full cpu_tlbstate structure to be sure.
+ */
+ if (within_area(addr, end,
+ (unsigned long)&per_cpu(cpu_tlbstate, cpu),
+ sizeof(struct tlb_state)))
+ return true;
+ }
+
+ return false;
+}
+
static int arch_build_bp_info(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
+ unsigned long bp_end;
+
+ bp_end = attr->bp_addr + attr->bp_len - 1;
+ if (bp_end < attr->bp_addr)
+ return -EINVAL;
+
+ /*
+ * Prevent any breakpoint of any type that overlaps the CPU
+ * entry area and data. This protects the IST stacks and also
+ * reduces the chance that we ever find out what happens if
+ * there's a data breakpoint on the GDT, IDT, or TSS.
+ */
+ if (within_cpu_entry(attr->bp_addr, bp_end))
+ return -EINVAL;
+
hw->address = attr->bp_addr;
hw->mask = 0;
@@ -439,7 +525,7 @@ static int hw_breakpoint_handler(struct die_args *args)
{
int i, cpu, rc = NOTIFY_STOP;
struct perf_event *bp;
- unsigned long dr7, dr6;
+ unsigned long dr6;
unsigned long *dr6_p;
/* The DR6 value is pointed by args->err */
@@ -454,9 +540,6 @@ static int hw_breakpoint_handler(struct die_args *args)
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
- get_debugreg(dr7, 7);
- /* Disable breakpoints during exception handling */
- set_debugreg(0UL, 7);
/*
* Assert that local interrupts are disabled
* Reset the DRn bits in the virtualized register value.
@@ -513,7 +596,6 @@ static int hw_breakpoint_handler(struct die_args *args)
(dr6 & (~DR_TRAP_BITS)))
rc = NOTIFY_DONE;
- set_debugreg(dr7, 7);
put_cpu();
return rc;
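The hw_breakpoint.c additions above hinge on two small range checks: arch_build_bp_info() rejects a breakpoint whose length wraps the address space, and within_area() tests a closed range [addr, end] against a half-open area [base, base + size). A stand-alone version of both checks, with synthetic addresses:

/*
 * Stand-alone version of the two range checks added above: the overflow
 * guard on [bp_addr, bp_addr + bp_len - 1] and the overlap test between a
 * closed range [addr, end] and a half-open area [base, base + size).
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool within_area(uint64_t addr, uint64_t end, uint64_t base, uint64_t size)
{
	return end >= base && addr < (base + size);
}

static int check_bp(uint64_t bp_addr, uint64_t bp_len,
		    uint64_t prot_base, uint64_t prot_size)
{
	uint64_t bp_end = bp_addr + bp_len - 1;

	if (bp_end < bp_addr)		/* length wrapped the address space */
		return -1;

	if (within_area(bp_addr, bp_end, prot_base, prot_size))
		return -1;		/* overlaps a protected area */

	return 0;
}

int main(void)
{
	/* Protected area: 0x1000 .. 0x1fff */
	printf("straddles the area : %d\n", check_bp(0x0ffc, 8, 0x1000, 0x1000));
	printf("just past the area : %d\n", check_bp(0x2000, 8, 0x1000, 0x1000));
	printf("length wraps around: %d\n",
	       check_bp(UINT64_MAX - 3, 16, 0x1000, 0x1000));
	return 0;
}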
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 87ef69a72c52..0db21206f2f3 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -4,6 +4,8 @@
*/
#include <linux/interrupt.h>
+#include <asm/cpu_entry_area.h>
+#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/proto.h>
#include <asm/desc.h>
@@ -51,15 +53,23 @@ struct idt_data {
#define TSKG(_vector, _gdt) \
G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3)
+#define IDT_TABLE_SIZE (IDT_ENTRIES * sizeof(gate_desc))
+
+static bool idt_setup_done __initdata;
+
/*
* Early traps running on the DEFAULT_STACK because the other interrupt
* stacks work only after cpu_init().
*/
static const __initconst struct idt_data early_idts[] = {
- INTG(X86_TRAP_DB, debug),
- SYSG(X86_TRAP_BP, int3),
+ INTG(X86_TRAP_DB, asm_exc_debug),
+ SYSG(X86_TRAP_BP, asm_exc_int3),
+
#ifdef CONFIG_X86_32
- INTG(X86_TRAP_PF, page_fault),
+ /*
+ * Not possible on 64-bit. See idt_setup_early_pf() for details.
+ */
+ INTG(X86_TRAP_PF, asm_exc_page_fault),
#endif
};
@@ -70,33 +80,33 @@ static const __initconst struct idt_data early_idts[] = {
* set up TSS.
*/
static const __initconst struct idt_data def_idts[] = {
- INTG(X86_TRAP_DE, divide_error),
- INTG(X86_TRAP_NMI, nmi),
- INTG(X86_TRAP_BR, bounds),
- INTG(X86_TRAP_UD, invalid_op),
- INTG(X86_TRAP_NM, device_not_available),
- INTG(X86_TRAP_OLD_MF, coprocessor_segment_overrun),
- INTG(X86_TRAP_TS, invalid_TSS),
- INTG(X86_TRAP_NP, segment_not_present),
- INTG(X86_TRAP_SS, stack_segment),
- INTG(X86_TRAP_GP, general_protection),
- INTG(X86_TRAP_SPURIOUS, spurious_interrupt_bug),
- INTG(X86_TRAP_MF, coprocessor_error),
- INTG(X86_TRAP_AC, alignment_check),
- INTG(X86_TRAP_XF, simd_coprocessor_error),
+ INTG(X86_TRAP_DE, asm_exc_divide_error),
+ INTG(X86_TRAP_NMI, asm_exc_nmi),
+ INTG(X86_TRAP_BR, asm_exc_bounds),
+ INTG(X86_TRAP_UD, asm_exc_invalid_op),
+ INTG(X86_TRAP_NM, asm_exc_device_not_available),
+ INTG(X86_TRAP_OLD_MF, asm_exc_coproc_segment_overrun),
+ INTG(X86_TRAP_TS, asm_exc_invalid_tss),
+ INTG(X86_TRAP_NP, asm_exc_segment_not_present),
+ INTG(X86_TRAP_SS, asm_exc_stack_segment),
+ INTG(X86_TRAP_GP, asm_exc_general_protection),
+ INTG(X86_TRAP_SPURIOUS, asm_exc_spurious_interrupt_bug),
+ INTG(X86_TRAP_MF, asm_exc_coprocessor_error),
+ INTG(X86_TRAP_AC, asm_exc_alignment_check),
+ INTG(X86_TRAP_XF, asm_exc_simd_coprocessor_error),
#ifdef CONFIG_X86_32
TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS),
#else
- INTG(X86_TRAP_DF, double_fault),
+ INTG(X86_TRAP_DF, asm_exc_double_fault),
#endif
- INTG(X86_TRAP_DB, debug),
+ INTG(X86_TRAP_DB, asm_exc_debug),
#ifdef CONFIG_X86_MCE
- INTG(X86_TRAP_MC, &machine_check),
+ INTG(X86_TRAP_MC, asm_exc_machine_check),
#endif
- SYSG(X86_TRAP_OF, overflow),
+ SYSG(X86_TRAP_OF, asm_exc_overflow),
#if defined(CONFIG_IA32_EMULATION)
SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat),
#elif defined(CONFIG_X86_32)
@@ -109,95 +119,63 @@ static const __initconst struct idt_data def_idts[] = {
*/
static const __initconst struct idt_data apic_idts[] = {
#ifdef CONFIG_SMP
- INTG(RESCHEDULE_VECTOR, reschedule_interrupt),
- INTG(CALL_FUNCTION_VECTOR, call_function_interrupt),
- INTG(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt),
- INTG(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt),
- INTG(REBOOT_VECTOR, reboot_interrupt),
+ INTG(RESCHEDULE_VECTOR, asm_sysvec_reschedule_ipi),
+ INTG(CALL_FUNCTION_VECTOR, asm_sysvec_call_function),
+ INTG(CALL_FUNCTION_SINGLE_VECTOR, asm_sysvec_call_function_single),
+ INTG(IRQ_MOVE_CLEANUP_VECTOR, asm_sysvec_irq_move_cleanup),
+ INTG(REBOOT_VECTOR, asm_sysvec_reboot),
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
- INTG(THERMAL_APIC_VECTOR, thermal_interrupt),
+ INTG(THERMAL_APIC_VECTOR, asm_sysvec_thermal),
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
- INTG(THRESHOLD_APIC_VECTOR, threshold_interrupt),
+ INTG(THRESHOLD_APIC_VECTOR, asm_sysvec_threshold),
#endif
#ifdef CONFIG_X86_MCE_AMD
- INTG(DEFERRED_ERROR_VECTOR, deferred_error_interrupt),
+ INTG(DEFERRED_ERROR_VECTOR, asm_sysvec_deferred_error),
#endif
#ifdef CONFIG_X86_LOCAL_APIC
- INTG(LOCAL_TIMER_VECTOR, apic_timer_interrupt),
- INTG(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi),
+ INTG(LOCAL_TIMER_VECTOR, asm_sysvec_apic_timer_interrupt),
+ INTG(X86_PLATFORM_IPI_VECTOR, asm_sysvec_x86_platform_ipi),
# ifdef CONFIG_HAVE_KVM
- INTG(POSTED_INTR_VECTOR, kvm_posted_intr_ipi),
- INTG(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi),
- INTG(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi),
+ INTG(POSTED_INTR_VECTOR, asm_sysvec_kvm_posted_intr_ipi),
+ INTG(POSTED_INTR_WAKEUP_VECTOR, asm_sysvec_kvm_posted_intr_wakeup_ipi),
+ INTG(POSTED_INTR_NESTED_VECTOR, asm_sysvec_kvm_posted_intr_nested_ipi),
# endif
# ifdef CONFIG_IRQ_WORK
- INTG(IRQ_WORK_VECTOR, irq_work_interrupt),
+ INTG(IRQ_WORK_VECTOR, asm_sysvec_irq_work),
# endif
-#ifdef CONFIG_X86_UV
- INTG(UV_BAU_MESSAGE, uv_bau_message_intr1),
-#endif
- INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt),
- INTG(ERROR_APIC_VECTOR, error_interrupt),
+# ifdef CONFIG_X86_UV
+ INTG(UV_BAU_MESSAGE, asm_sysvec_uv_bau_message),
+# endif
+ INTG(SPURIOUS_APIC_VECTOR, asm_sysvec_spurious_apic_interrupt),
+ INTG(ERROR_APIC_VECTOR, asm_sysvec_error_interrupt),
#endif
};
-#ifdef CONFIG_X86_64
-/*
- * Early traps running on the DEFAULT_STACK because the other interrupt
- * stacks work only after cpu_init().
- */
-static const __initconst struct idt_data early_pf_idts[] = {
- INTG(X86_TRAP_PF, page_fault),
-};
-
-/*
- * Override for the debug_idt. Same as the default, but with interrupt
- * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
- */
-static const __initconst struct idt_data dbg_idts[] = {
- INTG(X86_TRAP_DB, debug),
-};
-#endif
-
-/* Must be page-aligned because the real IDT is used in a fixmap. */
-gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;
+/* Must be page-aligned because the real IDT is used in the cpu entry area */
+static gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;
struct desc_ptr idt_descr __ro_after_init = {
- .size = (IDT_ENTRIES * 2 * sizeof(unsigned long)) - 1,
+ .size = IDT_TABLE_SIZE - 1,
.address = (unsigned long) idt_table,
};
-#ifdef CONFIG_X86_64
-/* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
-
-/*
- * The exceptions which use Interrupt stacks. They are setup after
- * cpu_init() when the TSS has been initialized.
- */
-static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, debug, IST_INDEX_DB),
- ISTG(X86_TRAP_NMI, nmi, IST_INDEX_NMI),
- ISTG(X86_TRAP_DF, double_fault, IST_INDEX_DF),
-#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, &machine_check, IST_INDEX_MCE),
-#endif
-};
+void load_current_idt(void)
+{
+ lockdep_assert_irqs_disabled();
+ load_idt(&idt_descr);
+}
-/*
- * Override for the debug_idt. Same as the default, but with interrupt
- * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
- */
-const struct desc_ptr debug_idt_descr = {
- .size = IDT_ENTRIES * 16 - 1,
- .address = (unsigned long) debug_idt_table,
-};
+#ifdef CONFIG_X86_F00F_BUG
+bool idt_is_f00f_address(unsigned long address)
+{
+ return ((address - idt_descr.address) >> 3) == 6;
+}
#endif
static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
@@ -214,7 +192,7 @@ static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
#endif
}
-static void
+static __init void
idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
{
gate_desc desc;
@@ -227,7 +205,7 @@ idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sy
}
}
-static void set_intr_gate(unsigned int n, const void *addr)
+static __init void set_intr_gate(unsigned int n, const void *addr)
{
struct idt_data data;
@@ -266,6 +244,27 @@ void __init idt_setup_traps(void)
}
#ifdef CONFIG_X86_64
+/*
+ * Early traps running on the DEFAULT_STACK because the other interrupt
+ * stacks work only after cpu_init().
+ */
+static const __initconst struct idt_data early_pf_idts[] = {
+ INTG(X86_TRAP_PF, asm_exc_page_fault),
+};
+
+/*
+ * The exceptions which use Interrupt stacks. They are set up after
+ * cpu_init() when the TSS has been initialized.
+ */
+static const __initconst struct idt_data ist_idts[] = {
+ ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
+ ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
+ ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
+#ifdef CONFIG_X86_MCE
+ ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
+#endif
+};
+
/**
* idt_setup_early_pf - Initialize the idt table with early pagefault handler
*
@@ -273,8 +272,10 @@ void __init idt_setup_traps(void)
* cpu_init() is invoked and sets up TSS. The IST variant is installed
* after that.
*
- * FIXME: Why is 32bit and 64bit installing the PF handler at different
- * places in the early setup code?
+ * Note that X86_64 cannot install the real #PF handler in
+ * idt_setup_early_traps() because the memory initialization needs the #PF
+ * handler from the early_idt_handler_array to initialize the early page
+ * tables.
*/
void __init idt_setup_early_pf(void)
{
@@ -289,17 +290,20 @@ void __init idt_setup_ist_traps(void)
{
idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true);
}
+#endif
-/**
- * idt_setup_debugidt_traps - Initialize the debug idt table with debug traps
- */
-void __init idt_setup_debugidt_traps(void)
+static void __init idt_map_in_cea(void)
{
- memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
-
- idt_setup_from_table(debug_idt_table, dbg_idts, ARRAY_SIZE(dbg_idts), false);
+ /*
+ * Set the IDT descriptor to a fixed read-only location in the cpu
+ * entry area, so that the "sidt" instruction will not leak the
+ * location of the kernel, and to defend the IDT against arbitrary
+ * memory write vulnerabilities.
+ */
+ cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
+ PAGE_KERNEL_RO);
+ idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
}
-#endif
/**
* idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates
@@ -318,11 +322,23 @@ void __init idt_setup_apic_and_irq_gates(void)
#ifdef CONFIG_X86_LOCAL_APIC
for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
- set_bit(i, system_vectors);
+ /*
+		 * Don't set the non-assigned system vectors in the
+ * system_vectors bitmap. Otherwise they show up in
+ * /proc/interrupts.
+ */
entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
set_intr_gate(i, entry);
}
#endif
+ /* Map IDT into CPU entry area and reload it. */
+ idt_map_in_cea();
+ load_idt(&idt_descr);
+
+ /* Make the IDT table read only */
+ set_memory_ro((unsigned long)&idt_table, 1);
+
+ idt_setup_done = true;
}
/**
@@ -352,16 +368,14 @@ void idt_invalidate(void *addr)
load_idt(&idt);
}
-void __init update_intr_gate(unsigned int n, const void *addr)
+void __init alloc_intr_gate(unsigned int n, const void *addr)
{
- if (WARN_ON_ONCE(!test_bit(n, system_vectors)))
+ if (WARN_ON(n < FIRST_SYSTEM_VECTOR))
return;
- set_intr_gate(n, addr);
-}
-void alloc_intr_gate(unsigned int n, const void *addr)
-{
- BUG_ON(n < FIRST_SYSTEM_VECTOR);
- if (!test_and_set_bit(n, system_vectors))
+ if (WARN_ON(idt_setup_done))
+ return;
+
+ if (!WARN_ON(test_and_set_bit(n, system_vectors)))
set_intr_gate(n, addr);
}
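
The reworked alloc_intr_gate() above refuses vectors below FIRST_SYSTEM_VECTOR, rejects requests arriving after idt_setup_done is set, and catches double allocation with test_and_set_bit(). A rough user-space sketch of that gatekeeping follows; the vector constants, the plain boolean array standing in for the system_vectors bitmap, and the helper name are made up for illustration, not the kernel's implementation.

#include <stdbool.h>
#include <stdio.h>

#define NR_VECTORS          256
#define FIRST_SYSTEM_VECTOR 236	/* illustrative value */

static bool system_vectors[NR_VECTORS];	/* stands in for the kernel bitmap */
static bool idt_setup_done;

/* Returns true only when the vector was newly reserved. */
static bool reserve_system_vector(unsigned int n)
{
	if (n < FIRST_SYSTEM_VECTOR || n >= NR_VECTORS)
		return false;		/* not in the system vector range */
	if (idt_setup_done)
		return false;		/* too late: the IDT is sealed read-only */
	if (system_vectors[n])
		return false;		/* already claimed */
	system_vectors[n] = true;	/* the kernel uses test_and_set_bit() */
	return true;
}

int main(void)
{
	printf("first claim:  %d\n", reserve_system_vector(240));	/* 1 */
	printf("double claim: %d\n", reserve_system_vector(240));	/* 0 */
	idt_setup_done = true;
	printf("late claim:   %d\n", reserve_system_vector(241));	/* 0 */
	return 0;
}
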
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index c7965ff429c5..181060247e3c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -13,12 +13,14 @@
#include <linux/export.h>
#include <linux/irq.h>
+#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
+#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
@@ -26,9 +28,6 @@
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
-DEFINE_PER_CPU(struct pt_regs *, irq_regs);
-EXPORT_PER_CPU_SYMBOL(irq_regs);
-
atomic_t irq_err_count;
/*
@@ -224,35 +223,35 @@ u64 arch_irq_stat(void)
return sum;
}
+static __always_inline void handle_irq(struct irq_desc *desc,
+ struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_X86_64))
+ run_on_irqstack_cond(desc->handle_irq, desc, regs);
+ else
+ __handle_irq(desc, regs);
+}
/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
+ * common_interrupt() handles all normal device IRQs (the special SMP
+ * cross-CPU interrupts have their own entry points).
*/
-__visible void __irq_entry do_IRQ(struct pt_regs *regs)
+DEFINE_IDTENTRY_IRQ(common_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct irq_desc * desc;
- /* high bit used in ret_from_ code */
- unsigned vector = ~regs->orig_ax;
-
- entering_irq();
+ struct irq_desc *desc;
- /* entering_irq() tells RCU that we're not quiescent. Check it. */
+ /* entry code tells RCU that we're not quiescent. Check it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
desc = __this_cpu_read(vector_irq[vector]);
if (likely(!IS_ERR_OR_NULL(desc))) {
- if (IS_ENABLED(CONFIG_X86_32))
- handle_irq(desc, regs);
- else
- generic_handle_irq_desc(desc);
+ handle_irq(desc, regs);
} else {
ack_APIC_irq();
if (desc == VECTOR_UNUSED) {
- pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
+ pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
__func__, smp_processor_id(),
vector);
} else {
@@ -260,8 +259,6 @@ __visible void __irq_entry do_IRQ(struct pt_regs *regs)
}
}
- exiting_irq();
-
set_irq_regs(old_regs);
}
@@ -271,17 +268,16 @@ void (*x86_platform_ipi_callback)(void) = NULL;
/*
* Handler for X86_PLATFORM_IPI_VECTOR.
*/
-__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- entering_ack_irq();
+ ack_APIC_irq();
trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
inc_irq_stat(x86_platform_ipis);
if (x86_platform_ipi_callback)
x86_platform_ipi_callback();
trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
- exiting_irq();
set_irq_regs(old_regs);
}
#endif
@@ -302,41 +298,29 @@ EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
/*
* Handler for POSTED_INTERRUPT_VECTOR.
*/
-__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(kvm_posted_intr_ipis);
- exiting_irq();
- set_irq_regs(old_regs);
}
/*
* Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
*/
-__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(kvm_posted_intr_wakeup_ipis);
kvm_posted_intr_wakeup_handler();
- exiting_irq();
- set_irq_regs(old_regs);
}
/*
* Handler for POSTED_INTERRUPT_NESTED_VECTOR.
*/
-__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(kvm_posted_intr_nested_ipis);
- exiting_irq();
- set_irq_regs(old_regs);
}
#endif
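
common_interrupt() above looks up the per-CPU vector_irq[] entry and treats the ERR_PTR-style sentinels (VECTOR_UNUSED, VECTOR_SHUTDOWN) as spurious interrupts that still get acknowledged. A small stand-alone sketch of that dispatch shape; the sentinel values, the table, and the messages are invented for illustration and do not model the kernel's per-CPU machinery.

#include <stdio.h>

struct irq_desc { const char *name; };

#define VEC_UNUSED	((struct irq_desc *)0)
#define VEC_SHUTDOWN	((struct irq_desc *)-1)

#define NR_VECTORS 256
static struct irq_desc *vector_table[NR_VECTORS];	/* zero == unused */

static void dispatch(unsigned int vector)
{
	struct irq_desc *desc = vector_table[vector];

	if (desc != VEC_UNUSED && desc != VEC_SHUTDOWN) {
		printf("vector %u -> %s\n", vector, desc->name);
		return;
	}
	/* Spurious: the interrupt is still acknowledged, then reported. */
	if (desc == VEC_UNUSED)
		printf("vector %u: no handler installed\n", vector);
	else
		printf("vector %u: device raced with shutdown\n", vector);
}

int main(void)
{
	static struct irq_desc timer = { "timer" };

	vector_table[32] = &timer;
	vector_table[33] = VEC_SHUTDOWN;
	dispatch(32);
	dispatch(33);
	dispatch(34);
	return 0;
}
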
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a759ca97cd01..0b79efc87be5 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -148,7 +148,7 @@ void do_softirq_own_stack(void)
call_on_stack(__do_softirq, isp);
}
-void handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
int overflow = check_stack_overflow();
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 6b32ab009c19..1b4fe93a86c5 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -20,6 +20,7 @@
#include <linux/sched/task_stack.h>
#include <asm/cpu_entry_area.h>
+#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -70,3 +71,8 @@ int irq_init_percpu_irqstack(unsigned int cpu)
return 0;
return map_irq_stack(cpu);
}
+
+void do_softirq_own_stack(void)
+{
+ run_on_irqstack_cond(__do_softirq, NULL, NULL);
+}
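
do_softirq_own_stack() on 64-bit now funnels through run_on_irqstack_cond(): call the function directly when already on the IRQ stack, otherwise switch to that stack first. The shape of the decision can be sketched in user space with POSIX ucontext; the alternate stack, trampoline, and flag below are purely illustrative and have nothing to do with the kernel's actual stack switching.

#include <stdbool.h>
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, alt_ctx;
static char alt_stack[64 * 1024];
static bool on_alt_stack;
static void (*pending_fn)(void);

static void trampoline(void)
{
	on_alt_stack = true;
	pending_fn();
	on_alt_stack = false;
	/* Falling off the end resumes main_ctx via uc_link. */
}

/* Run fn on the alternate stack unless we are already on it. */
static void run_on_alt_stack_cond(void (*fn)(void))
{
	if (on_alt_stack) {
		fn();
		return;
	}
	pending_fn = fn;
	getcontext(&alt_ctx);
	alt_ctx.uc_stack.ss_sp = alt_stack;
	alt_ctx.uc_stack.ss_size = sizeof(alt_stack);
	alt_ctx.uc_link = &main_ctx;
	makecontext(&alt_ctx, trampoline, 0);
	swapcontext(&main_ctx, &alt_ctx);
}

static void payload(void)
{
	int probe;

	printf("payload at %p, on_alt_stack=%d\n", (void *)&probe, on_alt_stack);
}

int main(void)
{
	payload();			/* runs on the normal stack */
	run_on_alt_stack_cond(payload);	/* runs on the alternate stack */
	return 0;
}
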
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 80bee7695a20..890d4778cd35 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -9,18 +9,18 @@
#include <linux/irq_work.h>
#include <linux/hardirq.h>
#include <asm/apic.h>
+#include <asm/idtentry.h>
#include <asm/trace/irq_vectors.h>
#include <linux/interrupt.h>
#ifdef CONFIG_X86_LOCAL_APIC
-__visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_irq_work)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
trace_irq_work_entry(IRQ_WORK_VECTOR);
inc_irq_stat(apic_irq_work_irqs);
irq_work_run();
trace_irq_work_exit(IRQ_WORK_VECTOR);
- exiting_irq();
}
void arch_irq_work_raise(void)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 85de8fa69b24..3bafe1bd4dc7 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1073,13 +1073,6 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
int __init arch_populate_kprobe_blacklist(void)
{
- int ret;
-
- ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
- (unsigned long)__irqentry_text_end);
- if (ret)
- return ret;
-
return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
(unsigned long)__entry_text_end);
}
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 234f58e0fe8c..321c19950285 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -286,9 +286,7 @@ static int can_optimize(unsigned long paddr)
* stack handling and registers setup.
*/
if (((paddr >= (unsigned long)__entry_text_start) &&
- (paddr < (unsigned long)__entry_text_end)) ||
- ((paddr >= (unsigned long)__irqentry_text_start) &&
- (paddr < (unsigned long)__irqentry_text_end)))
+ (paddr < (unsigned long)__entry_text_end)))
return 0;
/* Check there is enough space for a relative jump. */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d6f22a3a1f7d..df63786e7bfa 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
-#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
@@ -218,7 +217,7 @@ again:
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
-u32 kvm_read_and_reset_apf_flags(void)
+noinstr u32 kvm_read_and_reset_apf_flags(void)
{
u32 flags = 0;
@@ -230,11 +229,11 @@ u32 kvm_read_and_reset_apf_flags(void)
return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
-NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);
-bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
u32 reason = kvm_read_and_reset_apf_flags();
+ bool rcu_exit;
switch (reason) {
case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -244,6 +243,9 @@ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
return false;
}
+ rcu_exit = idtentry_enter_cond_rcu(regs);
+ instrumentation_begin();
+
/*
* If the host managed to inject an async #PF into an interrupt
* disabled region, then die hard as this is not going to end well
@@ -258,13 +260,13 @@ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
/* Page is swapped out by the host. */
kvm_async_pf_task_wait_schedule(token);
} else {
- rcu_irq_enter();
kvm_async_pf_task_wake(token);
- rcu_irq_exit();
}
+
+ instrumentation_end();
+ idtentry_exit_cond_rcu(regs, rcu_exit);
return true;
}
-NOKPROBE_SYMBOL(__kvm_handle_async_pf);
static void __init paravirt_ops_setup(void)
{
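
__kvm_handle_async_pf() above consumes the async-#PF reason through a read-and-reset helper so the same event cannot be handled twice, and then dispatches on that reason. A minimal user-space sketch of the consume-then-dispatch pattern, using a C11 atomic exchange and invented flag values rather than the real KVM_PV_REASON_* protocol.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_PAGE_NOT_PRESENT	(1u << 0)	/* illustrative values */
#define FLAG_PAGE_READY		(1u << 1)

static _Atomic unsigned int apf_flags;

/* Read the pending flags and clear them in one step. */
static unsigned int read_and_reset_flags(void)
{
	return atomic_exchange(&apf_flags, 0);
}

static void handle_event(void)
{
	switch (read_and_reset_flags()) {
	case FLAG_PAGE_NOT_PRESENT:
		printf("page not present: schedule and wait\n");
		break;
	case FLAG_PAGE_READY:
		printf("page ready: wake the waiter\n");
		break;
	default:
		printf("not ours: fall back to the normal #PF path\n");
		break;
	}
}

int main(void)
{
	atomic_store(&apf_flags, FLAG_PAGE_NOT_PRESENT);
	handle_event();
	handle_event();		/* flags already consumed: default case */
	return 0;
}
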
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index bdcc5146de96..2de365f15684 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -303,7 +303,7 @@ NOKPROBE_SYMBOL(unknown_nmi_error);
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
-static void default_do_nmi(struct pt_regs *regs)
+static noinstr void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
int handled;
@@ -329,6 +329,9 @@ static void default_do_nmi(struct pt_regs *regs)
__this_cpu_write(last_nmi_rip, regs->ip);
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {
@@ -342,7 +345,7 @@ static void default_do_nmi(struct pt_regs *regs)
*/
if (handled > 1)
__this_cpu_write(swallow_nmi, true);
- return;
+ goto out;
}
/*
@@ -374,7 +377,7 @@ static void default_do_nmi(struct pt_regs *regs)
#endif
__this_cpu_add(nmi_stats.external, 1);
raw_spin_unlock(&nmi_reason_lock);
- return;
+ goto out;
}
raw_spin_unlock(&nmi_reason_lock);
@@ -412,8 +415,12 @@ static void default_do_nmi(struct pt_regs *regs)
__this_cpu_add(nmi_stats.swallow, 1);
else
unknown_nmi_error(reason, regs);
+
+out:
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
}
-NOKPROBE_SYMBOL(default_do_nmi);
/*
* NMIs can page fault or hit breakpoints which will cause it to lose
@@ -467,44 +474,9 @@ enum nmi_states {
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
+static DEFINE_PER_CPU(unsigned long, nmi_dr7);
-#ifdef CONFIG_X86_64
-/*
- * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
- * some care, the inner breakpoint will clobber the outer breakpoint's
- * stack.
- *
- * If a breakpoint is being processed, and the debug stack is being
- * used, if an NMI comes in and also hits a breakpoint, the stack
- * pointer will be set to the same fixed address as the breakpoint that
- * was interrupted, causing that stack to be corrupted. To handle this
- * case, check if the stack that was interrupted is the debug stack, and
- * if so, change the IDT so that new breakpoints will use the current
- * stack and not switch to the fixed address. On return of the NMI,
- * switch back to the original IDT.
- */
-static DEFINE_PER_CPU(int, update_debug_stack);
-
-static bool notrace is_debug_stack(unsigned long addr)
-{
- struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
- unsigned long top = CEA_ESTACK_TOP(cs, DB);
- unsigned long bot = CEA_ESTACK_BOT(cs, DB1);
-
- if (__this_cpu_read(debug_stack_usage))
- return true;
- /*
- * Note, this covers the guard page between DB and DB1 as well to
- * avoid two checks. But by all means @addr can never point into
- * the guard page.
- */
- return addr >= bot && addr < top;
-}
-NOKPROBE_SYMBOL(is_debug_stack);
-#endif
-
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY_RAW(exc_nmi)
{
if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
return;
@@ -517,18 +489,7 @@ do_nmi(struct pt_regs *regs, long error_code)
this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:
-#ifdef CONFIG_X86_64
- /*
- * If we interrupted a breakpoint, it is possible that
- * the nmi handler will have breakpoints too. We need to
- * change the IDT such that breakpoints that happen here
- * continue to use the NMI stack.
- */
- if (unlikely(is_debug_stack(regs->sp))) {
- debug_stack_set_zero();
- this_cpu_write(update_debug_stack, 1);
- }
-#endif
+ this_cpu_write(nmi_dr7, local_db_save());
nmi_enter();
@@ -539,12 +500,7 @@ nmi_restart:
nmi_exit();
-#ifdef CONFIG_X86_64
- if (unlikely(this_cpu_read(update_debug_stack))) {
- debug_stack_reset();
- this_cpu_write(update_debug_stack, 0);
- }
-#endif
+ local_db_restore(this_cpu_read(nmi_dr7));
if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
write_cr2(this_cpu_read(nmi_cr2));
@@ -554,7 +510,6 @@ nmi_restart:
if (user_mode(regs))
mds_user_clear_cpu_buffers();
}
-NOKPROBE_SYMBOL(do_nmi);
void stop_nmi(void)
{
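
The NMI entry above saves CR2 up front and writes it back afterwards if it changed, because a page fault taken inside the NMI would otherwise clobber the interrupted context's fault address; DR7 gets the same treatment via local_db_save()/local_db_restore(). A toy sketch of that save/compare/restore pattern, with a plain variable standing in for the register.

#include <stdio.h>

static unsigned long fake_cr2;		/* stand-in for the real register */

static unsigned long read_cr2_demo(void)	{ return fake_cr2; }
static void write_cr2_demo(unsigned long v)	{ fake_cr2 = v; }

static void nmi_body(void)
{
	/* Pretend the handler took a page fault of its own. */
	write_cr2_demo(0xdeadbeef);
}

static void nmi_demo(void)
{
	unsigned long saved_cr2 = read_cr2_demo();

	nmi_body();

	/* Only write the register back if the handler really clobbered it. */
	if (read_cr2_demo() != saved_cr2)
		write_cr2_demo(saved_cr2);
}

int main(void)
{
	write_cr2_demo(0x1000);		/* interrupted context's fault address */
	nmi_demo();
	printf("cr2 after NMI: %#lx\n", fake_cr2);	/* 0x1000 */
	return 0;
}
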
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8e3d0347b664..f362ce0d5ac0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -545,28 +545,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
lockdep_assert_irqs_disabled();
- /*
- * If TIF_SSBD is different, select the proper mitigation
- * method. Note that if SSBD mitigation is disabled or permanentely
- * enabled this branch can't be taken because nothing can set
- * TIF_SSBD.
- */
- if (tif_diff & _TIF_SSBD) {
- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ /* Handle change of TIF_SSBD depending on the mitigation method. */
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_ssb_virt_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_core_ssb_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- static_cpu_has(X86_FEATURE_AMD_SSBD)) {
- msr |= ssbd_tif_to_spec_ctrl(tifn);
- updmsr = true;
- }
+ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ updmsr |= !!(tif_diff & _TIF_SSBD);
+ msr |= ssbd_tif_to_spec_ctrl(tifn);
}
- /*
- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
- * otherwise avoid the MSR write.
- */
+ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
if (IS_ENABLED(CONFIG_SMP) &&
static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);
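
The __speculation_ctrl_update() rework keys the expensive MSR write off tif_diff, i.e. only off the flag bits that actually changed between the outgoing and incoming task, while the MSR image itself is always recomputed from the new flags. A stand-alone sketch of that idea; the flag and MSR bit values are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define TIF_SSBD	(1u << 0)
#define TIF_SPEC_IB	(1u << 1)

static unsigned int fake_msr;
static unsigned int msr_writes;

static void wrmsr_demo(unsigned int val)
{
	fake_msr = val;
	msr_writes++;
}

/* Recompute the MSR image from the *new* flags, but only pay for the
 * write when one of the relevant flags actually changed. */
static void switch_flags(unsigned int tifp, unsigned int tifn)
{
	unsigned int tif_diff = tifp ^ tifn;
	unsigned int msr = 0;
	bool updmsr = false;

	updmsr |= !!(tif_diff & TIF_SSBD);
	if (tifn & TIF_SSBD)
		msr |= 0x4;		/* illustrative SSBD bit */

	updmsr |= !!(tif_diff & TIF_SPEC_IB);
	if (tifn & TIF_SPEC_IB)
		msr |= 0x2;		/* illustrative STIBP bit */

	if (updmsr)
		wrmsr_demo(msr);
}

int main(void)
{
	switch_flags(0, TIF_SSBD);		/* changed -> one write  */
	switch_flags(TIF_SSBD, TIF_SSBD);	/* unchanged -> no write */
	switch_flags(TIF_SSBD, TIF_SPEC_IB);	/* both changed -> write */
	printf("MSR writes: %u, final image: %#x\n", msr_writes, fake_msr);
	return 0;
}
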
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e040ba6be27b..0ec7ced727fe 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
+ { /* Handle problems with rebooting on Apple MacBook6,1 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBook6,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+ },
+ },
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index b8d4e9c3c070..eff4ce3b10da 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
+#include <asm/idtentry.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
@@ -130,13 +131,11 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
/*
* this function calls the 'stop' function on all other CPUs in the system.
*/
-
-asmlinkage __visible void smp_reboot_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
cpu_emergency_vmxoff();
stop_this_cpu(NULL);
- irq_exit();
}
static int register_stop_handler(void)
@@ -221,47 +220,33 @@ static void native_stop_other_cpus(int wait)
/*
* Reschedule call back. KVM uses this interrupt to force a cpu out of
- * guest mode
+ * guest mode.
*/
-__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
{
ack_APIC_irq();
+ trace_reschedule_entry(RESCHEDULE_VECTOR);
inc_irq_stat(irq_resched_count);
- kvm_set_cpu_l1tf_flush_l1d();
-
- if (trace_resched_ipi_enabled()) {
- /*
- * scheduler_ipi() might call irq_enter() as well, but
- * nested calls are fine.
- */
- irq_enter();
- trace_reschedule_entry(RESCHEDULE_VECTOR);
- scheduler_ipi();
- trace_reschedule_exit(RESCHEDULE_VECTOR);
- irq_exit();
- return;
- }
scheduler_ipi();
+ trace_reschedule_exit(RESCHEDULE_VECTOR);
}
-__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
trace_call_function_entry(CALL_FUNCTION_VECTOR);
inc_irq_stat(irq_call_count);
generic_smp_call_function_interrupt();
trace_call_function_exit(CALL_FUNCTION_VECTOR);
- exiting_irq();
}
-__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
inc_irq_stat(irq_call_count);
generic_smp_call_function_single_interrupt();
trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
- exiting_irq();
}
static int __init nonmi_ipi_setup(char *str)
diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c
index ab03fede1422..f8d65c99feb8 100644
--- a/arch/x86/kernel/sys_ia32.c
+++ b/arch/x86/kernel/sys_ia32.c
@@ -135,26 +135,30 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
typeof(ubuf->st_gid) gid = 0;
SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
- if (!access_ok(ubuf, sizeof(struct stat64)) ||
- __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
- __put_user(stat->ino, &ubuf->__st_ino) ||
- __put_user(stat->ino, &ubuf->st_ino) ||
- __put_user(stat->mode, &ubuf->st_mode) ||
- __put_user(stat->nlink, &ubuf->st_nlink) ||
- __put_user(uid, &ubuf->st_uid) ||
- __put_user(gid, &ubuf->st_gid) ||
- __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
- __put_user(stat->size, &ubuf->st_size) ||
- __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
- __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
- __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
- __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
- __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
- __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
- __put_user(stat->blksize, &ubuf->st_blksize) ||
- __put_user(stat->blocks, &ubuf->st_blocks))
+ if (!user_write_access_begin(ubuf, sizeof(struct stat64)))
return -EFAULT;
+ unsafe_put_user(huge_encode_dev(stat->dev), &ubuf->st_dev, Efault);
+ unsafe_put_user(stat->ino, &ubuf->__st_ino, Efault);
+ unsafe_put_user(stat->ino, &ubuf->st_ino, Efault);
+ unsafe_put_user(stat->mode, &ubuf->st_mode, Efault);
+ unsafe_put_user(stat->nlink, &ubuf->st_nlink, Efault);
+ unsafe_put_user(uid, &ubuf->st_uid, Efault);
+ unsafe_put_user(gid, &ubuf->st_gid, Efault);
+ unsafe_put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev, Efault);
+ unsafe_put_user(stat->size, &ubuf->st_size, Efault);
+ unsafe_put_user(stat->atime.tv_sec, &ubuf->st_atime, Efault);
+ unsafe_put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec, Efault);
+ unsafe_put_user(stat->mtime.tv_sec, &ubuf->st_mtime, Efault);
+ unsafe_put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec, Efault);
+ unsafe_put_user(stat->ctime.tv_sec, &ubuf->st_ctime, Efault);
+ unsafe_put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec, Efault);
+ unsafe_put_user(stat->blksize, &ubuf->st_blksize, Efault);
+ unsafe_put_user(stat->blocks, &ubuf->st_blocks, Efault);
+ user_access_end();
return 0;
+Efault:
+ user_write_access_end();
+ return -EFAULT;
}
COMPAT_SYSCALL_DEFINE2(ia32_stat64, const char __user *, filename,
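
The cp_stat64() conversion above replaces a chain of ||'ed __put_user() calls with user_write_access_begin()/unsafe_put_user() stores that all bail out to a single Efault label, so the fault path is written once. A user-space analogue of that single-exit style, using a bounds-checked store macro instead of real user accessors; the names and field values are made up.

#include <stdio.h>
#include <string.h>

/* Stand-in for unsafe_put_user(): one check per store, jump to a single
 * fault label on the first failure. */
#define PUT_FIELD(val, buf, off, len, label)				\
	do {								\
		if ((off) + sizeof(val) > (len))			\
			goto label;					\
		memcpy((buf) + (off), &(val), sizeof(val));		\
		(off) += sizeof(val);					\
	} while (0)

static int pack_stat(char *buf, size_t len)
{
	size_t off = 0;
	unsigned long dev = 8, ino = 42;
	unsigned int mode = 0644;

	PUT_FIELD(dev,  buf, off, len, Efault);
	PUT_FIELD(ino,  buf, off, len, Efault);
	PUT_FIELD(mode, buf, off, len, Efault);
	return 0;
Efault:
	return -1;	/* mirrors the single -EFAULT exit */
}

int main(void)
{
	char big[64], small[4];

	printf("big buffer:   %d\n", pack_stat(big, sizeof(big)));	/*  0 */
	printf("small buffer: %d\n", pack_stat(small, sizeof(small)));	/* -1 */
	return 0;
}
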
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 371a6b348e44..e42faa792c07 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -25,10 +25,6 @@
#include <asm/hpet.h>
#include <asm/time.h>
-#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-#endif
-
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 496748ed266a..fcfc077afe2d 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -25,20 +25,3 @@ void trace_pagefault_unreg(void)
{
static_branch_dec(&trace_pagefault_key);
}
-
-#ifdef CONFIG_SMP
-
-DEFINE_STATIC_KEY_FALSE(trace_resched_ipi_key);
-
-int trace_resched_ipi_reg(void)
-{
- static_branch_inc(&trace_resched_ipi_key);
- return 0;
-}
-
-void trace_resched_ipi_unreg(void)
-{
- static_branch_dec(&trace_resched_ipi_key);
-}
-
-#endif
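
The remaining pagefault tracepoint register/unregister pair above relies on static_branch_inc()/static_branch_dec(), i.e. a refcounted key that keeps the tracepoint branch enabled while at least one consumer is registered. A trivial sketch of that counting behaviour with an ordinary integer in place of a patched static key.

#include <stdio.h>

static int trace_key_count;	/* stand-in for a static key */

static void trace_reg(void)	{ trace_key_count++; }
static void trace_unreg(void)	{ trace_key_count--; }

static void hot_path(void)
{
	if (trace_key_count > 0)	/* the kernel patches this branch away */
		printf("tracepoint fired\n");
}

int main(void)
{
	hot_path();		/* silent */
	trace_reg();
	trace_reg();
	trace_unreg();
	hot_path();		/* still fires: one consumer left */
	trace_unreg();
	hot_path();		/* silent again */
	return 0;
}
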
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4cc541051994..af75109485c2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -97,24 +97,6 @@ int is_valid_bugaddr(unsigned long addr)
return ud == INSN_UD0 || ud == INSN_UD2;
}
-int fixup_bug(struct pt_regs *regs, int trapnr)
-{
- if (trapnr != X86_TRAP_UD)
- return 0;
-
- switch (report_bug(regs->ip, regs)) {
- case BUG_TRAP_TYPE_NONE:
- case BUG_TRAP_TYPE_BUG:
- break;
-
- case BUG_TRAP_TYPE_WARN:
- regs->ip += LEN_UD2;
- return 1;
- }
-
- return 0;
-}
-
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
struct pt_regs *regs, long error_code)
@@ -145,7 +127,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
* process no chance to handle the signal and notice the
* kernel fault information, so that won't result in polluting
* the information about previously queued, but not yet
- * delivered, faults. See also do_general_protection below.
+ * delivered, faults. See also exc_general_protection below.
*/
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
@@ -190,41 +172,119 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
{
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- /*
- * WARN*()s end up here; fix them up before we call the
- * notifier chain.
- */
- if (!user_mode(regs) && fixup_bug(regs, trapnr))
- return;
-
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
cond_local_irq_enable(regs);
do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
+ cond_local_irq_disable(regs);
}
}
-#define IP ((void __user *)uprobe_get_trap_addr(regs))
-#define DO_ERROR(trapnr, signr, sicode, addr, str, name) \
-dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
-{ \
- do_error_trap(regs, error_code, str, trapnr, signr, sicode, addr); \
+/*
+ * POSIX requires the address of the faulting instruction to be provided for
+ * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
+ *
+ * This address is usually regs->ip, but when an uprobe moved the code out
+ * of line then regs->ip points to the XOL code which would confuse
+ * anything which analyzes the fault address vs. the unmodified binary. If
+ * a trap happened in XOL code then uprobe maps regs->ip back to the
+ * original instruction address.
+ */
+static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
+{
+ return (void __user *)uprobe_get_trap_addr(regs);
}
-DO_ERROR(X86_TRAP_DE, SIGFPE, FPE_INTDIV, IP, "divide error", divide_error)
-DO_ERROR(X86_TRAP_OF, SIGSEGV, 0, NULL, "overflow", overflow)
-DO_ERROR(X86_TRAP_UD, SIGILL, ILL_ILLOPN, IP, "invalid opcode", invalid_op)
-DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, 0, NULL, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(X86_TRAP_TS, SIGSEGV, 0, NULL, "invalid TSS", invalid_TSS)
-DO_ERROR(X86_TRAP_NP, SIGBUS, 0, NULL, "segment not present", segment_not_present)
-DO_ERROR(X86_TRAP_SS, SIGBUS, 0, NULL, "stack segment", stack_segment)
-#undef IP
+DEFINE_IDTENTRY(exc_divide_error)
+{
+ do_error_trap(regs, 0, "divide_error", X86_TRAP_DE, SIGFPE,
+ FPE_INTDIV, error_get_trap_addr(regs));
+}
-dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_overflow)
{
- char *str = "alignment check";
+ do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
+}
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+#ifdef CONFIG_X86_F00F_BUG
+void handle_invalid_op(struct pt_regs *regs)
+#else
+static inline void handle_invalid_op(struct pt_regs *regs)
+#endif
+{
+ do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
+ ILL_ILLOPN, error_get_trap_addr(regs));
+}
+
+DEFINE_IDTENTRY_RAW(exc_invalid_op)
+{
+ bool rcu_exit;
+
+ /*
+ * Handle BUG/WARN like NMIs instead of like normal idtentries:
+ * if we bugged/warned in a bad RCU context, for example, the last
+ * thing we want is to BUG/WARN again in the idtentry code, ad
+ * infinitum.
+ */
+ if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) {
+ enum bug_trap_type type;
+
+ nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ type = report_bug(regs->ip, regs);
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
+ nmi_exit();
+
+ if (type == BUG_TRAP_TYPE_WARN) {
+ /* Skip the ud2. */
+ regs->ip += LEN_UD2;
+ return;
+ }
+
+ /*
+ * Else, if this was a BUG and report_bug returns or if this
+ * was just a normal #UD, we want to continue onward and
+ * crash.
+ */
+ }
+
+ rcu_exit = idtentry_enter_cond_rcu(regs);
+ instrumentation_begin();
+ handle_invalid_op(regs);
+ instrumentation_end();
+ idtentry_exit_cond_rcu(regs, rcu_exit);
+}
+
+DEFINE_IDTENTRY(exc_coproc_segment_overrun)
+{
+ do_error_trap(regs, 0, "coprocessor segment overrun",
+ X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
+{
+ do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
+ 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
+{
+ do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
+ SIGBUS, 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
+{
+ do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
+ 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
+{
+ char *str = "alignment check";
if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
return;
@@ -271,12 +331,19 @@ __visible void __noreturn handle_stack_overflow(const char *message,
* from the TSS. Returning is, in principle, okay, but changes to regs will
* be lost. If, for some reason, we need to return to a context with modified
* regs, the shim code could be adjusted to synchronize the registers.
+ *
+ * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
+ * to be read before doing anything else.
*/
-dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2)
+DEFINE_IDTENTRY_DF(exc_double_fault)
{
static const char str[] = "double fault";
struct task_struct *tsk = current;
+#ifdef CONFIG_VMAP_STACK
+ unsigned long address = read_cr2();
+#endif
+
#ifdef CONFIG_X86_ESPFIX64
extern unsigned char native_irq_return_iret[];
@@ -299,6 +366,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
regs->ip == (unsigned long)native_irq_return_iret)
{
struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
+ unsigned long *p = (unsigned long *)regs->sp;
/*
* regs->sp points to the failing IRET frame on the
@@ -306,7 +374,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
* in gpregs->ss through gpregs->ip.
*
*/
- memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+ gpregs->ip = p[0];
+ gpregs->cs = p[1];
+ gpregs->flags = p[2];
+ gpregs->sp = p[3];
+ gpregs->ss = p[4];
gpregs->orig_ax = 0; /* Missing (lost) #GP error code */
/*
@@ -320,7 +392,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
* which is what the stub expects, given that the faulting
* RIP will be the IRET instruction.
*/
- regs->ip = (unsigned long)general_protection;
+ regs->ip = (unsigned long)asm_exc_general_protection;
regs->sp = (unsigned long)&gpregs->orig_ax;
return;
@@ -328,6 +400,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
#endif
nmi_enter();
+ instrumentation_begin();
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
tsk->thread.error_code = error_code;
@@ -371,27 +444,31 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
* stack even if the actual trigger for the double fault was
* something else.
*/
- if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
- handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
+ if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
+ handle_stack_overflow("kernel stack overflow (double-fault)",
+ regs, address);
+ }
#endif
pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
die("double fault", regs, error_code);
panic("Machine halted.");
+ instrumentation_end();
}
-dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_bounds)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- if (notify_die(DIE_TRAP, "bounds", regs, error_code,
+ if (notify_die(DIE_TRAP, "bounds", regs, 0,
X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
return;
cond_local_irq_enable(regs);
if (!user_mode(regs))
- die("bounds", regs, error_code);
+ die("bounds", regs, 0);
+
+ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);
- do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
+ cond_local_irq_disable(regs);
}
enum kernel_gp_hint {
@@ -438,7 +515,7 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
#define GPFSTR "general protection fault"
-dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
enum kernel_gp_hint hint = GP_NO_HINT;
@@ -446,17 +523,17 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
unsigned long gp_addr;
int ret;
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
cond_local_irq_enable(regs);
if (static_cpu_has(X86_FEATURE_UMIP)) {
if (user_mode(regs) && fixup_umip_exception(regs))
- return;
+ goto exit;
}
if (v8086_mode(regs)) {
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+ local_irq_disable();
return;
}
@@ -468,12 +545,11 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
force_sig(SIGSEGV);
-
- return;
+ goto exit;
}
if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
- return;
+ goto exit;
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
@@ -485,11 +561,11 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
if (!preemptible() &&
kprobe_running() &&
kprobe_fault_handler(regs, X86_TRAP_GP))
- return;
+ goto exit;
ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
if (ret == NOTIFY_STOP)
- return;
+ goto exit;
if (error_code)
snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
@@ -511,47 +587,74 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
die_addr(desc, regs, error_code, gp_addr);
+exit:
+ cond_local_irq_disable(regs);
}
-NOKPROBE_SYMBOL(do_general_protection);
-dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
+static bool do_int3(struct pt_regs *regs)
{
- if (poke_int3_handler(regs))
- return;
-
- /*
- * Unlike any other non-IST entry, we can be called from pretty much
- * any location in the kernel through kprobes -- text_poke() will most
- * likely be handled by poke_int3_handler() above. This means this
- * handler is effectively NMI-like.
- */
- if (!user_mode(regs))
- nmi_enter();
+ int res;
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
- if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
- SIGTRAP) == NOTIFY_STOP)
- goto exit;
+ if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
+ SIGTRAP) == NOTIFY_STOP)
+ return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (kprobe_int3_handler(regs))
- goto exit;
+ return true;
#endif
+ res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);
- if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
- SIGTRAP) == NOTIFY_STOP)
- goto exit;
+ return res == NOTIFY_STOP;
+}
+
+static void do_int3_user(struct pt_regs *regs)
+{
+ if (do_int3(regs))
+ return;
cond_local_irq_enable(regs);
- do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, 0, NULL);
+ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
cond_local_irq_disable(regs);
+}
-exit:
- if (!user_mode(regs))
+DEFINE_IDTENTRY_RAW(exc_int3)
+{
+ /*
+ * poke_int3_handler() is completely self contained code; it does (and
+ * must) *NOT* call out to anything, lest it hits upon yet another
+ * INT3.
+ */
+ if (poke_int3_handler(regs))
+ return;
+
+ /*
+ * idtentry_enter_user() uses static_branch_{,un}likely() and therefore
+ * can trigger INT3, hence poke_int3_handler() must be done
+ * before. If the entry came from kernel mode, then use nmi_enter()
+ * because the INT3 could have been hit in any context including
+ * NMI.
+ */
+ if (user_mode(regs)) {
+ idtentry_enter_user(regs);
+ instrumentation_begin();
+ do_int3_user(regs);
+ instrumentation_end();
+ idtentry_exit_user(regs);
+ } else {
+ nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ if (!do_int3(regs))
+ die("int3", regs, 0);
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
nmi_exit();
+ }
}
-NOKPROBE_SYMBOL(do_int3);
#ifdef CONFIG_X86_64
/*
@@ -559,21 +662,20 @@ NOKPROBE_SYMBOL(do_int3);
* to switch to the normal thread stack if the interrupted code was in
* user mode. The actual stack switch is done in entry_64.S
*/
-asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
if (regs != eregs)
*regs = *eregs;
return regs;
}
-NOKPROBE_SYMBOL(sync_regs);
struct bad_iret_stack {
void *error_entry_ret;
struct pt_regs regs;
};
-asmlinkage __visible notrace
+asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
/*
@@ -584,19 +686,21 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
* just below the IRET frame) and we want to pretend that the
* exception came from the IRET target.
*/
- struct bad_iret_stack *new_stack =
- (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
+ struct bad_iret_stack tmp, *new_stack =
+ (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
- /* Copy the IRET target to the new stack. */
- memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+ /* Copy the IRET target to the temporary storage. */
+ memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
/* Copy the remainder of the stack from the current stack. */
- memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+ memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+
+ /* Update the entry stack */
+ memcpy(new_stack, &tmp, sizeof(tmp));
BUG_ON(!user_mode(&new_stack->regs));
return new_stack;
}
-NOKPROBE_SYMBOL(fixup_bad_iret);
#endif
static bool is_sysenter_singlestep(struct pt_regs *regs)
@@ -622,6 +726,43 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
+static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+{
+ /*
+ * Disable breakpoints during exception handling; recursive exceptions
+ * are exceedingly 'fun'.
+ *
+ * Since this function is NOKPROBE, and that also applies to
+ * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+ * HW_BREAKPOINT_W on our stack)
+ *
+	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
+	 * includes the entry stack, is excluded for everything.
+ */
+ *dr7 = local_db_save();
+
+ /*
+ * The Intel SDM says:
+ *
+ * Certain debug exceptions may clear bits 0-3. The remaining
+ * contents of the DR6 register are never cleared by the
+ * processor. To avoid confusion in identifying debug
+ * exceptions, debug handlers should clear the register before
+ * returning to the interrupted task.
+ *
+ * Keep it simple: clear DR6 immediately.
+ */
+ get_debugreg(*dr6, 6);
+ set_debugreg(0, 6);
+ /* Filter out all the reserved bits which are preset to 1 */
+ *dr6 &= ~DR6_RESERVED;
+}
+
+static __always_inline void debug_exit(unsigned long dr7)
+{
+ local_db_restore(dr7);
+}
+
/*
* Our handling of the processor debug registers is non-trivial.
* We do not clear them on entry and exit from the kernel. Therefore
@@ -646,86 +787,54 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
*
* May run on IST stack.
*/
-dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
+static void handle_debug(struct pt_regs *regs, unsigned long dr6, bool user)
{
struct task_struct *tsk = current;
- int user_icebp = 0;
- unsigned long dr6;
+ bool user_icebp;
int si_code;
- nmi_enter();
-
- get_debugreg(dr6, 6);
- /*
- * The Intel SDM says:
- *
- * Certain debug exceptions may clear bits 0-3. The remaining
- * contents of the DR6 register are never cleared by the
- * processor. To avoid confusion in identifying debug
- * exceptions, debug handlers should clear the register before
- * returning to the interrupted task.
- *
- * Keep it simple: clear DR6 immediately.
- */
- set_debugreg(0, 6);
-
- /* Filter out all the reserved bits which are preset to 1 */
- dr6 &= ~DR6_RESERVED;
-
/*
* The SDM says "The processor clears the BTF flag when it
* generates a debug exception." Clear TIF_BLOCKSTEP to keep
* TIF_BLOCKSTEP in sync with the hardware BTF flag.
*/
- clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
+ clear_thread_flag(TIF_BLOCKSTEP);
- if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
- is_sysenter_singlestep(regs))) {
- dr6 &= ~DR_STEP;
- if (!dr6)
- goto exit;
- /*
- * else we might have gotten a single-step trap and hit a
- * watchpoint at the same time, in which case we should fall
- * through and handle the watchpoint.
- */
- }
+ /*
+ * If DR6 is zero, no point in trying to handle it. The kernel is
+ * not using INT1.
+ */
+ if (!user && !dr6)
+ return;
/*
* If dr6 has no reason to give us about the origin of this trap,
* then it's very likely the result of an icebp/int01 trap.
* User wants a sigtrap for that.
*/
- if (!dr6 && user_mode(regs))
- user_icebp = 1;
+ user_icebp = user && !dr6;
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
#ifdef CONFIG_KPROBES
- if (kprobe_debug_handler(regs))
- goto exit;
+ if (kprobe_debug_handler(regs)) {
+ return;
+ }
#endif
- if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
- SIGTRAP) == NOTIFY_STOP)
- goto exit;
-
- /*
- * Let others (NMI) know that the debug stack is in use
- * as we may switch to the interrupt stack.
- */
- debug_stack_usage_inc();
+ if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, 0,
+ SIGTRAP) == NOTIFY_STOP) {
+ return;
+ }
/* It's safe to allow irq's after DR6 has been saved */
cond_local_irq_enable(regs);
if (v8086_mode(regs)) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
- X86_TRAP_DB);
- cond_local_irq_disable(regs);
- debug_stack_usage_dec();
- goto exit;
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, 0,
+ X86_TRAP_DB);
+ goto out;
}
if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
@@ -739,23 +848,91 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->flags &= ~X86_EFLAGS_TF;
}
+
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
- send_sigtrap(regs, error_code, si_code);
+ send_sigtrap(regs, 0, si_code);
+
+out:
cond_local_irq_disable(regs);
- debug_stack_usage_dec();
+}
-exit:
+static __always_inline void exc_debug_kernel(struct pt_regs *regs,
+ unsigned long dr6)
+{
+ nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+
+ /*
+ * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
+ * watchpoint at the same time then that will still be handled.
+ */
+ if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
+ dr6 &= ~DR_STEP;
+
+ handle_debug(regs, dr6, false);
+
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
nmi_exit();
}
-NOKPROBE_SYMBOL(do_debug);
+
+static __always_inline void exc_debug_user(struct pt_regs *regs,
+ unsigned long dr6)
+{
+ idtentry_enter_user(regs);
+ instrumentation_begin();
+
+ handle_debug(regs, dr6, true);
+ instrumentation_end();
+ idtentry_exit_user(regs);
+}
+
+#ifdef CONFIG_X86_64
+/* IST stack entry */
+DEFINE_IDTENTRY_DEBUG(exc_debug)
+{
+ unsigned long dr6, dr7;
+
+ debug_enter(&dr6, &dr7);
+ exc_debug_kernel(regs, dr6);
+ debug_exit(dr7);
+}
+
+/* User entry, runs on regular task stack */
+DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
+{
+ unsigned long dr6, dr7;
+
+ debug_enter(&dr6, &dr7);
+ exc_debug_user(regs, dr6);
+ debug_exit(dr7);
+}
+#else
+/* 32 bit does not have separate entry points. */
+DEFINE_IDTENTRY_DEBUG(exc_debug)
+{
+ unsigned long dr6, dr7;
+
+ debug_enter(&dr6, &dr7);
+
+ if (user_mode(regs))
+ exc_debug_user(regs, dr6);
+ else
+ exc_debug_kernel(regs, dr6);
+
+ debug_exit(dr7);
+}
+#endif
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
* IRQ13 behaviour
*/
-static void math_error(struct pt_regs *regs, int error_code, int trapnr)
+static void math_error(struct pt_regs *regs, int trapnr)
{
struct task_struct *task = current;
struct fpu *fpu = &task->thread.fpu;
@@ -766,16 +943,16 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
- if (fixup_exception(regs, trapnr, error_code, 0))
- return;
+ if (fixup_exception(regs, trapnr, 0, 0))
+ goto exit;
- task->thread.error_code = error_code;
+ task->thread.error_code = 0;
task->thread.trap_nr = trapnr;
- if (notify_die(DIE_TRAP, str, regs, error_code,
- trapnr, SIGFPE) != NOTIFY_STOP)
- die(str, regs, error_code);
- return;
+ if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
+ SIGFPE) != NOTIFY_STOP)
+ die(str, regs, 0);
+ goto exit;
}
/*
@@ -784,32 +961,37 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
fpu__save(fpu);
task->thread.trap_nr = trapnr;
- task->thread.error_code = error_code;
+ task->thread.error_code = 0;
si_code = fpu__exception_code(fpu, trapnr);
/* Retry when we get spurious exceptions: */
if (!si_code)
- return;
+ goto exit;
force_sig_fault(SIGFPE, si_code,
(void __user *)uprobe_get_trap_addr(regs));
+exit:
+ cond_local_irq_disable(regs);
}
-dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_coprocessor_error)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- math_error(regs, error_code, X86_TRAP_MF);
+ math_error(regs, X86_TRAP_MF);
}
-dotraplinkage void
-do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- math_error(regs, error_code, X86_TRAP_XF);
+ if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
+ /* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
+ if (!static_cpu_has(X86_FEATURE_XMM)) {
+ __exc_general_protection(regs, 0);
+ return;
+ }
+ }
+ math_error(regs, X86_TRAP_XF);
}
-dotraplinkage void
-do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
/*
* This addresses a Pentium Pro Erratum:
@@ -832,13 +1014,10 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
*/
}
-dotraplinkage void
-do_device_not_available(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_device_not_available)
{
unsigned long cr0 = read_cr0();
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-
#ifdef CONFIG_MATH_EMULATION
if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
struct math_emu_info info = { };
@@ -847,6 +1026,8 @@ do_device_not_available(struct pt_regs *regs, long error_code)
info.regs = regs;
math_emulate(&info);
+
+ cond_local_irq_disable(regs);
return;
}
#endif
@@ -861,22 +1042,20 @@ do_device_not_available(struct pt_regs *regs, long error_code)
* to kill the task than getting stuck in a never-ending
* loop of #NM faults.
*/
- die("unexpected #NM exception", regs, error_code);
+ die("unexpected #NM exception", regs, 0);
}
}
-NOKPROBE_SYMBOL(do_device_not_available);
#ifdef CONFIG_X86_32
-dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY_SW(iret_error)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
local_irq_enable();
-
- if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
+ if (notify_die(DIE_TRAP, "iret exception", regs, 0,
X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
- do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
+ do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
ILL_BADSTK, (void __user *)NULL);
}
+ local_irq_disable();
}
#endif
@@ -888,20 +1067,9 @@ void __init trap_init(void)
idt_setup_traps();
/*
- * Set the IDT descriptor to a fixed read-only location, so that the
- * "sidt" instruction will not leak the location of the kernel, and
- * to defend the IDT against arbitrary memory write vulnerabilities.
- * It will be reloaded in cpu_init() */
- cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
- PAGE_KERNEL_RO);
- idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
-
- /*
* Should be a barrier for any external CPU state:
*/
cpu_init();
idt_setup_ist_traps();
-
- idt_setup_debugidt_traps();
}
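
handle_debug() and debug_enter() above strip the reserved DR6 bits, treat a zero DR6 from user mode as an icebp/INT1 that still deserves a SIGTRAP, and otherwise look at DR_STEP and the breakpoint bits. A stand-alone decoding sketch; the masks are spelled out here only for illustration and the SDM remains the authority on the DR6 layout.

#include <stdbool.h>
#include <stdio.h>

#define DR_TRAP_BITS	0x0fUL		/* breakpoints 0-3 */
#define DR_STEP		(1UL << 14)	/* single step */
#define DR6_RESERVED	0xffff0ff0UL	/* reserved bits, read as 1 */

static void classify_debug(unsigned long dr6, bool user)
{
	dr6 &= ~DR6_RESERVED;		/* strip the always-set reserved bits */

	if (!dr6 && user) {
		printf("no cause recorded: treat as icebp/int1 -> SIGTRAP\n");
		return;
	}
	if (dr6 & DR_STEP)
		printf("single step trap\n");
	if (dr6 & DR_TRAP_BITS)
		printf("hardware breakpoint bits %#lx hit\n", dr6 & DR_TRAP_BITS);
}

int main(void)
{
	classify_debug(0xffff0ff0UL, true);		/* icebp        */
	classify_debug(0xffff0ff0UL | DR_STEP, false);	/* single step  */
	classify_debug(0xffff0ff0UL | 0x2, true);	/* breakpoint 1 */
	return 0;
}
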
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 54226110bc7f..722a85f3b2dd 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -74,13 +74,7 @@ static bool in_entry_code(unsigned long ip)
{
char *addr = (char *)ip;
- if (addr >= __entry_text_start && addr < __entry_text_end)
- return true;
-
- if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
- return true;
-
- return false;
+ return addr >= __entry_text_start && addr < __entry_text_end;
}
static inline unsigned long *last_frame(struct unwind_state *state)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1bf7e312361f..3bfc8dd8a43d 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -40,13 +40,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
-jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
-jiffies_64 = jiffies;
#endif
+jiffies = jiffies_64;
+
#if defined(CONFIG_X86_64)
/*
* On 64-bit, align RODATA to 2MB so we retain large page mappings for
@@ -134,7 +134,6 @@ SECTIONS
KPROBES_TEXT
ALIGN_ENTRY_TEXT_BEGIN
ENTRY_TEXT
- IRQENTRY_TEXT
ALIGN_ENTRY_TEXT_END
SOFTIRQENTRY_TEXT
*(.fixup)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 253b8e875ccd..8a294f9747aa 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -181,17 +181,14 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
- r = -ENOMEM;
if (cpuid->nent) {
- cpuid_entries =
- vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
- cpuid->nent));
- if (!cpuid_entries)
- goto out;
- r = -EFAULT;
- if (copy_from_user(cpuid_entries, entries,
- cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+ cpuid_entries = vmemdup_user(entries,
+ array_size(sizeof(struct kvm_cpuid_entry),
+ cpuid->nent));
+ if (IS_ERR(cpuid_entries)) {
+ r = PTR_ERR(cpuid_entries);
goto out;
+ }
}
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
@@ -211,8 +208,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
kvm_x86_ops.cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
+ kvfree(cpuid_entries);
out:
- vfree(cpuid_entries);
return r;
}
@@ -325,7 +322,7 @@ void kvm_set_cpu_caps(void)
);
kvm_cpu_cap_mask(CPUID_7_ECX,
- F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
+ F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/
@@ -334,6 +331,13 @@ void kvm_set_cpu_caps(void)
if (cpuid_ecx(7) & F(LA57))
kvm_cpu_cap_set(X86_FEATURE_LA57);
+ /*
+ * PKU not yet implemented for shadow paging and requires OSPKE
+ * to be set on the host. Clear it if that is not the case
+ */
+ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
+ kvm_cpu_cap_clear(X86_FEATURE_PKU);
+
kvm_cpu_cap_mask(CPUID_7_EDX,
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
@@ -426,7 +430,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
struct kvm_cpuid_array {
struct kvm_cpuid_entry2 *entries;
- const int maxnent;
+ int maxnent;
int nent;
};
@@ -870,7 +874,6 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_array array = {
.nent = 0,
- .maxnent = cpuid->nent,
};
int r, i;
@@ -887,6 +890,8 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
if (!array.entries)
return -ENOMEM;
+ array.maxnent = cpuid->nent;
+
for (i = 0; i < ARRAY_SIZE(funcs); i++) {
r = get_cpuid_func(&array, funcs[i], type);
if (r)
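
kvm_vcpu_ioctl_set_cpuid() now uses vmemdup_user(), which returns either the duplicated buffer or an ERR_PTR-encoded errno in the same pointer. A user-space sketch of that convention with local err_ptr()/is_err() helpers and an invented size limit; it is not the kernel's implementation.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)		{ return (void *)(intptr_t)err; }
static bool is_err(const void *p)	{ return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long ptr_err(const void *p)	{ return (long)(intptr_t)p; }

/* Duplicate a buffer, folding the failure reason into the return value. */
static void *memdup(const void *src, size_t len)
{
	void *p;

	if (len > 1024)			/* illustrative sanity limit */
		return err_ptr(-E2BIG);
	p = malloc(len);
	if (!p)
		return err_ptr(-ENOMEM);
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char msg[] = "cpuid entries";
	void *copy = memdup(msg, sizeof(msg));

	if (is_err(copy)) {
		printf("failed: %ld\n", ptr_err(copy));
		return 1;
	}
	printf("copied: %s\n", (char *)copy);
	free(copy);
	return 0;
}
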
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index 018aebce33ff..7e818d64bb4d 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -43,22 +43,22 @@ static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n");
-void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
- debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu,
+ debugfs_create_file("tsc-offset", 0444, debugfs_dentry, vcpu,
&vcpu_tsc_offset_fops);
if (lapic_in_kernel(vcpu))
debugfs_create_file("lapic_timer_advance_ns", 0444,
- vcpu->debugfs_dentry, vcpu,
+ debugfs_dentry, vcpu,
&vcpu_timer_advance_ns_fops);
if (kvm_has_tsc_control) {
debugfs_create_file("tsc-scaling-ratio", 0444,
- vcpu->debugfs_dentry, vcpu,
+ debugfs_dentry, vcpu,
&vcpu_tsc_scaling_fops);
debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
- vcpu->debugfs_dentry, vcpu,
+ debugfs_dentry, vcpu,
&vcpu_tsc_scaling_frac_fops);
}
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index de5476f8683e..d0e2825ae617 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4800,8 +4800,12 @@ static const struct opcode twobyte_table[256] = {
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
N, N, N, N, N, N,
- D(ImplicitOps | ModRM | SrcMem | NoAccess),
- N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 238b78e069fe..af9cdb426dd2 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1252,7 +1252,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
* only, there can be valuable data in the rest which needs
* to be preserved e.g. on migration.
*/
- if (__clear_user((void __user *)addr, sizeof(u32)))
+ if (__put_user(0, (u32 __user *)addr))
return 1;
hv_vcpu->hv_vapic = data;
kvm_vcpu_mark_page_dirty(vcpu, gfn);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index febca334c320..a6e218c6140d 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -462,7 +462,6 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
if (channel == 3) {
/* Read-Back Command. */
for (channel = 0; channel < 3; channel++) {
- s = &pit_state->channels[channel];
if (val & (2 << channel)) {
if (!(val & 0x20))
pit_latch_count(pit, channel);
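
The removed assignment was dead: the Read-Back path only needs the command byte itself. For reference, here is a standalone sketch of the same bit decoding (bits 1-3 select the counters, a clear bit 5 latches the count, a clear bit 4 latches the status, per the 8254 command format); it is illustrative only and not KVM code.

#include <stdio.h>

/* Decode an 8254 Read-Back command byte the same way the loop above does. */
static void decode_read_back(unsigned char val)
{
	int channel;

	for (channel = 0; channel < 3; channel++) {
		if (!(val & (2 << channel)))
			continue;
		printf("channel %d:%s%s\n", channel,
		       !(val & 0x20) ? " latch-count" : "",
		       !(val & 0x10) ? " latch-status" : "");
	}
}

int main(void)
{
	decode_read_back(0xc2); /* 11000010b: latch count and status of channel 0 */
	return 0;
}
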
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 8a6db11dcb43..6bceafb19108 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -258,7 +258,7 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm)
/* Only a few fields of int_ctl are written by the processor. */
mask = V_IRQ_MASK | V_TPR_MASK;
if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
- is_intercept(svm, SVM_EXIT_VINTR)) {
+ is_intercept(svm, INTERCEPT_VINTR)) {
/*
* In order to request an interrupt window, L0 is usurping
* svm->vmcb->control.int_ctl and possibly setting V_IRQ
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9e333b91ff78..8ccfa4197d9c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1378,6 +1378,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
/* Drop int_ctl fields related to VINTR injection. */
svm->vmcb->control.int_ctl &= mask;
if (is_guest_mode(&svm->vcpu)) {
+ svm->nested.hsave->control.int_ctl &= mask;
+
WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
(svm->nested.ctl.int_ctl & V_TPR_MASK));
svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
@@ -1837,7 +1839,7 @@ static void kvm_machine_check(void)
.flags = X86_EFLAGS_IF,
};
- do_machine_check(&regs, 0);
+ do_machine_check(&regs);
#endif
}
@@ -1999,7 +2001,7 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
*/
if (vgif_enabled(svm))
clr_intercept(svm, INTERCEPT_STGI);
- if (is_intercept(svm, SVM_EXIT_VINTR))
+ if (is_intercept(svm, INTERCEPT_VINTR))
svm_clear_vintr(svm);
enable_gif(svm);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9c74a732b08d..d1af20b050a8 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3087,9 +3087,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
/*
* VMExit clears RFLAGS.IF and DR7, even on a consistency check.
*/
- local_irq_enable();
if (hw_breakpoint_active())
set_debugreg(__this_cpu_read(cpu_dr7), 7);
+ local_irq_enable();
preempt_enable();
/*
@@ -4624,19 +4624,24 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
}
}
-static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
+ int *ret)
{
gva_t gva;
struct x86_exception e;
+ int r;
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmcs_read32(VMX_INSTRUCTION_INFO), false,
- sizeof(*vmpointer), &gva))
- return 1;
+ sizeof(*vmpointer), &gva)) {
+ *ret = 1;
+ return -EINVAL;
+ }
- if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
+ r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
+ if (r != X86EMUL_CONTINUE) {
+ *ret = vmx_handle_memory_failure(vcpu, r, &e);
+ return -EINVAL;
}
return 0;
@@ -4764,8 +4769,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
+ return ret;
/*
* SDM 3: 24.11.5
@@ -4838,12 +4843,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
u32 zero = 0;
gpa_t vmptr;
u64 evmcs_gpa;
+ int r;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+ return r;
if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
@@ -4902,7 +4908,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
u64 value;
gva_t gva = 0;
short offset;
- int len;
+ int len, r;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -4943,10 +4949,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
instr_info, true, len, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
}
return nested_vmx_succeed(vcpu);
@@ -4987,7 +4992,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
unsigned long field;
short offset;
gva_t gva;
- int len;
+ int len, r;
/*
* The value to write might be 32 or 64 bits, depending on L1's long
@@ -5017,10 +5022,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
instr_info, false, len, &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
}
field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5103,12 +5107,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t vmptr;
+ int r;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+ return r;
if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
@@ -5170,6 +5175,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
struct x86_exception e;
gva_t gva;
+ int r;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -5181,11 +5187,11 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
true, sizeof(gpa_t), &gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
- sizeof(gpa_t), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+ sizeof(gpa_t), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
+
return nested_vmx_succeed(vcpu);
}
@@ -5209,7 +5215,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
struct {
u64 eptp, gpa;
} operand;
- int i;
+ int i, r;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_EPT) ||
@@ -5236,10 +5242,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false, sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
/*
* Nested EPT roots are always held through guest_mmu,
@@ -5291,6 +5296,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
u64 gla;
} operand;
u16 vpid02;
+ int r;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
@@ -5318,10 +5324,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false, sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
+
if (operand.vpid >> 16)
return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
@@ -5666,7 +5672,7 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
u32 intr_info;
- switch (exit_reason) {
+ switch ((u16)exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
@@ -5727,7 +5733,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 intr_info;
- switch (exit_reason) {
+ switch ((u16)exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index d33d890b605f..bdcce65c7a1d 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -181,7 +181,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
ret = pmu->version > 1;
break;
case MSR_IA32_PERF_CAPABILITIES:
- ret = guest_cpuid_has(vcpu, X86_FEATURE_PDCM);
+ ret = 1;
break;
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 170cc76a581f..36c771728c8c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1600,6 +1600,32 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
return 1;
}
+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+ struct x86_exception *e)
+{
+ if (r == X86EMUL_PROPAGATE_FAULT) {
+ kvm_inject_emulated_page_fault(vcpu, e);
+ return 1;
+ }
+
+ /*
+ * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+ * while handling a VMX instruction KVM could've handled the request
+ * correctly by exiting to userspace and performing I/O but there
+ * doesn't seem to be a real use-case behind such requests, just return
+ * KVM_EXIT_INTERNAL_ERROR for now.
+ */
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+
+ return 0;
+}
/*
* Recognizes a pending MTF VM-exit and records the nested state for later
@@ -4683,7 +4709,7 @@ static void kvm_machine_check(void)
.flags = X86_EFLAGS_IF,
};
- do_machine_check(&regs, 0);
+ do_machine_check(&regs);
#endif
}
@@ -5486,6 +5512,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
u64 pcid;
u64 gla;
} operand;
+ int r;
if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5508,10 +5535,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
if (operand.pcid >> 12 != 0) {
kvm_inject_gp(vcpu, 0);
@@ -7282,10 +7308,6 @@ static __init void vmx_set_cpu_caps(void)
if (vmx_pt_mode_is_host_guest())
kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
- /* PKU is not yet implemented for shadow paging. */
- if (enable_ept && boot_cpu_has(X86_FEATURE_OSPKE))
- kvm_cpu_cap_check_and_set(X86_FEATURE_PKU);
-
if (vmx_umip_emulated())
kvm_cpu_cap_set(X86_FEATURE_UMIP);
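
vmx_handle_memory_failure() returns 1 when the failure was converted into an injected #PF (the vCPU keeps running) and 0 when it filled in KVM_EXIT_INTERNAL_ERROR and the caller must exit to user space. A hedged sketch of how a user-space VMM might consume that exit follows; the surrounding KVM_RUN loop and VM setup are assumed and not shown.

#include <linux/kvm.h>
#include <stdio.h>

/* Returns 0 to keep running the vCPU, -1 to stop the VM. */
static int handle_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_INTERNAL_ERROR:
		if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION)
			fprintf(stderr, "emulation failed, %u data words\n",
				run->internal.ndata);
		return -1;
	default:
		return 0;
	}
}

int main(void)
{
	/* Fake exit record, just to exercise the handler standalone. */
	struct kvm_run run = { .exit_reason = KVM_EXIT_INTERNAL_ERROR };

	run.internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
	return handle_exit(&run) ? 0 : 1;
}
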
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 672c28f17e49..8a83b5edc820 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -355,6 +355,8 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+ struct x86_exception *e);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9e41b5135340..00c88c2f34e4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -239,8 +239,7 @@ u64 __read_mostly host_xcr0;
u64 __read_mostly supported_xcr0;
EXPORT_SYMBOL_GPL(supported_xcr0);
-struct kmem_cache *x86_fpu_cache;
-EXPORT_SYMBOL_GPL(x86_fpu_cache);
+static struct kmem_cache *x86_fpu_cache;
static struct kmem_cache *x86_emulator_cache;
@@ -5647,13 +5646,6 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;
- /*
- * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
- * is returned, but our callers are not ready for that and they blindly
- * call kvm_inject_page_fault. Ensure that they at least do not leak
- * uninitialized kernel stack memory into cr2 and error code.
- */
- memset(exception, 0, sizeof(*exception));
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
@@ -7018,7 +7010,7 @@ restart:
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
kvm_rip_write(vcpu, ctxt->eip);
- if (r && ctxt->tf)
+ if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
r = kvm_vcpu_do_singlestep(vcpu);
if (kvm_x86_ops.update_emulated_instruction)
kvm_x86_ops.update_emulated_instruction(vcpu);
@@ -8277,9 +8269,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end,
- bool blockable)
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
{
unsigned long apic_address;
@@ -8290,8 +8281,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (start <= apic_address && apic_address < end)
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-
- return 0;
}
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
@@ -9962,13 +9951,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
if (!slot || !slot->npages)
return 0;
- /*
- * Stuff a non-canonical value to catch use-after-delete. This
- * ends up being 0 on 32-bit KVM, but there's no better
- * alternative.
- */
- hva = (unsigned long)(0xdeadull << 48);
old_npages = slot->npages;
+ hva = 0;
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
@@ -10140,43 +10124,65 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
}
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
- struct kvm_memory_slot *new)
+ struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
- /* Still write protect RO slot */
- if (new->flags & KVM_MEM_READONLY) {
- kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
+ /*
+ * Nothing to do for RO slots or CREATE/MOVE/DELETE of a slot.
+ * See comments below.
+ */
+ if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY))
return;
- }
/*
- * Call kvm_x86_ops dirty logging hooks when they are valid.
- *
- * kvm_x86_ops.slot_disable_log_dirty is called when:
- *
- * - KVM_MR_CREATE with dirty logging is disabled
- * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
- *
- * The reason is, in case of PML, we need to set D-bit for any slots
- * with dirty logging disabled in order to eliminate unnecessary GPA
- * logging in PML buffer (and potential PML buffer full VMEXIT). This
- * guarantees leaving PML enabled during guest's lifetime won't have
- * any additional overhead from PML when guest is running with dirty
- * logging disabled for memory slots.
+ * Dirty logging tracks sptes in 4k granularity, meaning that large
+ * sptes have to be split. If live migration is successful, the guest
+ * in the source machine will be destroyed and large sptes will be
+ * created in the destination. However, if the guest continues to run
+ * in the source machine (for example if live migration fails), small
+ * sptes will remain around and cause bad performance.
*
- * kvm_x86_ops.slot_enable_log_dirty is called when switching new slot
- * to dirty logging mode.
+ * Scan sptes if dirty logging has been stopped, dropping those
+ * which can be collapsed into a single large-page spte. Later
+ * page faults will create the large-page sptes.
*
- * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
+ * There is no need to do this in any of the following cases:
+ * CREATE: No dirty mappings will already exist.
+ * MOVE/DELETE: The old mappings will already have been cleaned up by
+ * kvm_arch_flush_shadow_memslot()
+ */
+ if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+ !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ kvm_mmu_zap_collapsible_sptes(kvm, new);
+
+ /*
+ * Enable or disable dirty logging for the slot.
*
- * In case of write protect:
+ * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old
+ * slot have been zapped so no dirty logging updates are needed for
+ * the old slot.
+ * For KVM_MR_CREATE and KVM_MR_MOVE, once the new slot is visible
+ * any mappings that might be created in it will consume the
+ * properties of the new slot and do not need to be updated here.
*
- * Write protect all pages for dirty logging.
+ * When PML is enabled, the kvm_x86_ops dirty logging hooks are
+ * called to enable/disable dirty logging.
*
- * All the sptes including the large sptes which point to this
- * slot are set to readonly. We can not create any new large
- * spte on this slot until the end of the logging.
+ * When disabling dirty logging with PML enabled, the D-bit is set
+ * for sptes in the slot in order to prevent unnecessary GPA
+ * logging in the PML buffer (and potential PML buffer full VMEXIT).
+ * This guarantees leaving PML enabled for the guest's lifetime
+ * won't have any additional overhead from PML when the guest is
+ * running with dirty logging disabled.
*
+ * When enabling dirty logging, large sptes are write-protected
+ * so they can be split on first write. New large sptes cannot
+ * be created for this slot until the end of the logging.
* See the comments in fast_page_fault().
+ * For small sptes, nothing is done if the dirty log is in the
+ * initial-all-set state. Otherwise, depending on whether pml
+ * is enabled the D-bit or the W-bit will be cleared.
*/
if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
if (kvm_x86_ops.slot_enable_log_dirty) {
@@ -10213,39 +10219,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_mmu_calculate_default_mmu_pages(kvm));
/*
- * Dirty logging tracks sptes in 4k granularity, meaning that large
- * sptes have to be split. If live migration is successful, the guest
- * in the source machine will be destroyed and large sptes will be
- * created in the destination. However, if the guest continues to run
- * in the source machine (for example if live migration fails), small
- * sptes will remain around and cause bad performance.
- *
- * Scan sptes if dirty logging has been stopped, dropping those
- * which can be collapsed into a single large-page spte. Later
- * page faults will create the large-page sptes.
- *
- * There is no need to do this in any of the following cases:
- * CREATE: No dirty mappings will already exist.
- * MOVE/DELETE: The old mappings will already have been cleaned up by
- * kvm_arch_flush_shadow_memslot()
- */
- if (change == KVM_MR_FLAGS_ONLY &&
- (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
- !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
- kvm_mmu_zap_collapsible_sptes(kvm, new);
-
- /*
- * Set up write protection and/or dirty logging for the new slot.
- *
- * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
- * been zapped so no dirty logging staff is needed for old slot. For
- * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
- * new and it's also covered when dealing with the new slot.
- *
* FIXME: const-ify all uses of struct kvm_memory_slot.
*/
- if (change != KVM_MR_DELETE)
- kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
+ kvm_mmu_slot_apply_flags(kvm, old, (struct kvm_memory_slot *) new, change);
/* Free the arrays associated with the old memslot. */
if (change == KVM_MR_MOVE)
@@ -10530,7 +10506,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
return kvm_arch_interrupt_allowed(vcpu);
}
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
struct x86_exception fault;
@@ -10547,6 +10523,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
fault.address = work->arch.token;
fault.async_page_fault = true;
kvm_inject_page_fault(vcpu, &fault);
+ return true;
} else {
/*
* It is not possible to deliver a paravirtualized asynchronous
@@ -10557,6 +10534,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
* fault is retried, hopefully the page will be ready in the host.
*/
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+ return false;
}
}
@@ -10574,7 +10552,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
- if (kvm_pv_async_pf_enabled(vcpu) &&
+ if ((work->wakeup_all || work->notpresent_injected) &&
+ kvm_pv_async_pf_enabled(vcpu) &&
!apf_put_user_ready(vcpu, work->arch.token)) {
vcpu->arch.apf.pageready_pending = true;
kvm_apic_set_irq(vcpu, &irq, NULL);
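
The kvm_mmu_slot_apply_flags() rework above only acts on KVM_MR_FLAGS_ONLY updates, i.e. when user space flips flags such as KVM_MEM_LOG_DIRTY_PAGES on an existing slot. Below is a minimal sketch of that user-space side, assuming the vm fd, slot number and backing memory were set up elsewhere; disabling logging through this path is what drives the kvm_mmu_zap_collapsible_sptes() case described in the new comment.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Flip dirty logging on an already-registered memslot (KVM_MR_FLAGS_ONLY). */
static int set_dirty_logging(int vm_fd, __u32 slot, __u64 gpa, __u64 size,
			     void *host_mem, int enable)
{
	struct kvm_userspace_memory_region region = {
		.slot            = slot,
		.flags           = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0,
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = (__u64)(unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
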
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 5246db42de45..6110bce7237b 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -6,10 +6,19 @@
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
+# KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
+KCSAN_SANITIZE_delay.o := n
+ifdef CONFIG_KCSAN
+# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
+# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
+CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
+endif
+
# Early boot use of cmdline; don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KCOV_INSTRUMENT_cmdline.o := n
KASAN_SANITIZE_cmdline.o := n
+KCSAN_SANITIZE_cmdline.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 98f7c6fa2eaa..f7fd0e868c9c 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -7,6 +7,10 @@ KCOV_INSTRUMENT_mem_encrypt_identity.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_identity.o := n
+# Disable KCSAN entirely, because otherwise we get warnings that some functions
+# reference __initdata sections.
+KCSAN_SANITIZE := n
+
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 6f8b48f545f4..770b613790b3 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -107,7 +107,6 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
*/
cea_map_stack(DF);
cea_map_stack(NMI);
- cea_map_stack(DB1);
cea_map_stack(DB);
cea_map_stack(MCE);
}
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index b991aa4bdfae..1d6cb07f4f86 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -204,8 +204,19 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
return;
- if (fixup_bug(regs, trapnr))
- return;
+ if (trapnr == X86_TRAP_UD) {
+ if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+ /* Skip the ud2. */
+ regs->ip += LEN_UD2;
+ return;
+ }
+
+ /*
+ * If this was a BUG and report_bug returns or if this
+ * was just a normal #UD, we want to continue onward and
+ * crash.
+ */
+ }
fail:
early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 0b03ae8c39cd..66be9bd60307 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -414,21 +414,13 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
return 0;
}
+/* Pentium F0 0F C7 C8 bug workaround: */
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
- unsigned long nr;
-
- /*
- * Pentium F0 0F C7 C8 bug workaround:
- */
- if (boot_cpu_has_bug(X86_BUG_F00F)) {
- nr = (address - idt_descr.address) >> 3;
-
- if (nr == 6) {
- do_invalid_op(regs, 0);
- return 1;
- }
+ if (boot_cpu_has_bug(X86_BUG_F00F) && idt_is_f00f_address(address)) {
+ handle_invalid_op(regs);
+ return 1;
}
#endif
return 0;
@@ -786,6 +778,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ local_irq_disable();
+
return;
}
@@ -1355,11 +1349,38 @@ trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
trace_page_fault_kernel(address, regs, error_code);
}
-dotraplinkage void
-do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
- unsigned long address)
+static __always_inline void
+handle_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
+ trace_page_fault_entries(regs, error_code, address);
+
+ if (unlikely(kmmio_fault(regs, address)))
+ return;
+
+ /* Was the fault on kernel-controlled part of the address space? */
+ if (unlikely(fault_in_kernel_space(address))) {
+ do_kern_addr_fault(regs, error_code, address);
+ } else {
+ do_user_addr_fault(regs, error_code, address);
+ /*
+ * User address page fault handling might have reenabled
+ * interrupts. Fixing up all potential exit points of
+ * do_user_addr_fault() and its leaf functions is just not
+ * doable w/o creating an unholy mess or turning the code
+ * upside down.
+ */
+ local_irq_disable();
+ }
+}
+
+DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
+{
+ unsigned long address = read_cr2();
+ bool rcu_exit;
+
prefetchw(&current->mm->mmap_lock);
+
/*
* KVM has two types of events that are, logically, interrupts, but
* are unfortunately delivered using the #PF vector. These events are
@@ -1374,19 +1395,28 @@ do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
* getting values from real and async page faults mixed up.
*
* Fingers crossed.
+ *
+ * The async #PF handling code takes care of idtentry handling
+ * itself.
*/
if (kvm_handle_async_pf(regs, (u32)address))
return;
- trace_page_fault_entries(regs, hw_error_code, address);
+ /*
+ * Entry handling for valid #PF from kernel mode is slightly
+ * different: RCU is already watching and rcu_irq_enter() must not
+ * be invoked because a kernel fault on a user space address might
+ * sleep.
+ *
+ * In case the fault hit a RCU idle region the conditional entry
+ * code reenabled RCU to avoid subsequent wreckage which helps
+ * debugability.
+ */
+ rcu_exit = idtentry_enter_cond_rcu(regs);
- if (unlikely(kmmio_fault(regs, address)))
- return;
+ instrumentation_begin();
+ handle_page_fault(regs, error_code, address);
+ instrumentation_end();
- /* Was the fault on kernel-controlled part of the address space? */
- if (unlikely(fault_in_kernel_space(address)))
- do_kern_addr_fault(regs, hw_error_code, address);
- else
- do_user_addr_fault(regs, hw_error_code, address);
+ idtentry_exit_cond_rcu(regs, rcu_exit);
}
-NOKPROBE_SYMBOL(do_page_fault);
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index a3c6757a65c7..a8a924b3c335 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -492,12 +492,12 @@ static void __init pti_setup_espfix64(void)
}
/*
- * Clone the populated PMDs of the entry and irqentry text and force it RO.
+ * Clone the populated PMDs of the entry text and force it RO.
*/
static void pti_clone_entry_text(void)
{
pti_clone_pgtable((unsigned long) __entry_text_start,
- (unsigned long) __irqentry_text_end,
+ (unsigned long) __entry_text_end,
PTI_CLONE_PMD);
}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 4ea69690c3e4..0ac96ca304c7 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1272,7 +1272,7 @@ static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
* (the resource will not be freed until noninterruptable cpus see this
* interrupt; hardware may timeout the s/w ack and reply ERROR)
*/
-void uv_bau_message_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
{
int count = 0;
cycles_t time_start;
diff --git a/arch/x86/purgatory/.gitignore b/arch/x86/purgatory/.gitignore
new file mode 100644
index 000000000000..d2be1500671d
--- /dev/null
+++ b/arch/x86/purgatory/.gitignore
@@ -0,0 +1 @@
+purgatory.chk
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index fb4ee5444379..b04e6e72a592 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -14,10 +14,18 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
CFLAGS_sha256.o := -D__DISABLE_EXPORTS
-LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
-targets += purgatory.ro
-
+# When linking purgatory.ro with -r unresolved symbols are not checked,
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
+LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
+LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
+targets += purgatory.ro purgatory.chk
+
+# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
+GCOV_PROFILE := n
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# These are adjustments to the compiler flags used for objects that
@@ -25,7 +33,7 @@ KCOV_INSTRUMENT := n
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
@@ -58,12 +66,15 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
+$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
+ $(call if_changed,ld)
+
targets += kexec-purgatory.c
quiet_cmd_bin2c = BIN2C $@
cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
$(call if_changed,bin2c)
obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 682c895753d9..6b1f3a4eeb44 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b11ec5d8f8ac..83f1b6a56449 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index e138f7de52d2..3e89b0067ff0 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -13,6 +13,7 @@
#include <asm/smp.h>
#include <asm/reboot.h>
#include <asm/setup.h>
+#include <asm/idtentry.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/early_ioremap.h>
@@ -118,6 +119,17 @@ static void __init init_hvm_pv_info(void)
this_cpu_write(xen_vcpu_id, smp_processor_id());
}
+DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ inc_irq_stat(irq_hv_callback_count);
+
+ xen_hvm_evtchn_do_upcall();
+
+ set_irq_regs(old_regs);
+}
+
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index c2c97faaf004..33b309d65955 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -604,32 +604,42 @@ struct trap_array_entry {
bool ist_okay;
};
+#define TRAP_ENTRY(func, ist_ok) { \
+ .orig = asm_##func, \
+ .xen = xen_asm_##func, \
+ .ist_okay = ist_ok }
+
+#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \
+ .orig = asm_##func, \
+ .xen = xen_asm_##xenfunc, \
+ .ist_okay = ist_ok }
+
static struct trap_array_entry trap_array[] = {
- { debug, xen_xendebug, true },
- { double_fault, xen_double_fault, true },
+ TRAP_ENTRY_REDIR(exc_debug, exc_xendebug, true ),
+ TRAP_ENTRY(exc_double_fault, true ),
#ifdef CONFIG_X86_MCE
- { machine_check, xen_machine_check, true },
+ TRAP_ENTRY(exc_machine_check, true ),
#endif
- { nmi, xen_xennmi, true },
- { int3, xen_int3, false },
- { overflow, xen_overflow, false },
+ TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true ),
+ TRAP_ENTRY(exc_int3, false ),
+ TRAP_ENTRY(exc_overflow, false ),
#ifdef CONFIG_IA32_EMULATION
{ entry_INT80_compat, xen_entry_INT80_compat, false },
#endif
- { page_fault, xen_page_fault, false },
- { divide_error, xen_divide_error, false },
- { bounds, xen_bounds, false },
- { invalid_op, xen_invalid_op, false },
- { device_not_available, xen_device_not_available, false },
- { coprocessor_segment_overrun, xen_coprocessor_segment_overrun, false },
- { invalid_TSS, xen_invalid_TSS, false },
- { segment_not_present, xen_segment_not_present, false },
- { stack_segment, xen_stack_segment, false },
- { general_protection, xen_general_protection, false },
- { spurious_interrupt_bug, xen_spurious_interrupt_bug, false },
- { coprocessor_error, xen_coprocessor_error, false },
- { alignment_check, xen_alignment_check, false },
- { simd_coprocessor_error, xen_simd_coprocessor_error, false },
+ TRAP_ENTRY(exc_page_fault, false ),
+ TRAP_ENTRY(exc_divide_error, false ),
+ TRAP_ENTRY(exc_bounds, false ),
+ TRAP_ENTRY(exc_invalid_op, false ),
+ TRAP_ENTRY(exc_device_not_available, false ),
+ TRAP_ENTRY(exc_coproc_segment_overrun, false ),
+ TRAP_ENTRY(exc_invalid_tss, false ),
+ TRAP_ENTRY(exc_segment_not_present, false ),
+ TRAP_ENTRY(exc_stack_segment, false ),
+ TRAP_ENTRY(exc_general_protection, false ),
+ TRAP_ENTRY(exc_spurious_interrupt_bug, false ),
+ TRAP_ENTRY(exc_coprocessor_error, false ),
+ TRAP_ENTRY(exc_alignment_check, false ),
+ TRAP_ENTRY(exc_simd_coprocessor_error, false ),
};
static bool __ref get_trap_addr(void **addr, unsigned int ist)
@@ -641,7 +651,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
* Replace trap handler addresses by Xen specific ones.
* Check for known traps using IST and whitelist them.
* The debugger ones are the only ones we care about.
- * Xen will handle faults like double_fault, * so we should never see
+ * Xen will handle faults like double_fault, so we should never see
* them. Warn if there's an unexpected IST-using fault handler.
*/
for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) {
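
The TRAP_ENTRY()/TRAP_ENTRY_REDIR() macros rely on token pasting to pair each native asm_exc_* entry point with its xen_asm_* counterpart. The standalone sketch below reproduces the same expansion with dummy handlers purely to make the pasting visible; none of these symbols are the real kernel stubs.

#include <stdbool.h>
#include <stdio.h>

/* Dummy stand-ins for the real asm_exc_* / xen_asm_* entry stubs. */
static void asm_exc_int3(void)         { }
static void xen_asm_exc_int3(void)     { }
static void asm_exc_debug(void)        { }
static void xen_asm_exc_xendebug(void) { }

struct trap_array_entry {
	void (*orig)(void);
	void (*xen)(void);
	bool ist_okay;
};

#define TRAP_ENTRY(func, ist_ok) { \
	.orig = asm_##func, \
	.xen = xen_asm_##func, \
	.ist_okay = ist_ok }

#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \
	.orig = asm_##func, \
	.xen = xen_asm_##xenfunc, \
	.ist_okay = ist_ok }

static const struct trap_array_entry demo[] = {
	TRAP_ENTRY(exc_int3, false),                     /* pastes asm_exc_int3 / xen_asm_exc_int3 */
	TRAP_ENTRY_REDIR(exc_debug, exc_xendebug, true), /* native exc_debug, Xen-specific xendebug */
};

int main(void)
{
	printf("%zu entries, first ist_okay=%d\n",
	       sizeof(demo) / sizeof(demo[0]), demo[0].ist_okay);
	return 0;
}
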
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1a2d8a50dac4..3566e37241d7 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -20,6 +20,7 @@
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
+#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -993,7 +994,8 @@ static void __init xen_pvmmu_arch_setup(void)
HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_pae_extended_cr3);
- if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
+ if (register_callback(CALLBACKTYPE_event,
+ xen_asm_exc_xen_hypervisor_callback) ||
register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
BUG();
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 8fa01c545460..171aff1b11f2 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -26,6 +26,7 @@
#include <linux/pgtable.h>
#include <asm/paravirt.h>
+#include <asm/idtentry.h>
#include <asm/desc.h>
#include <asm/cpu.h>
@@ -348,7 +349,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
ctxt->event_callback_eip =
- (unsigned long)xen_hypervisor_callback;
+ (unsigned long)xen_asm_exc_xen_hypervisor_callback;
ctxt->failsafe_callback_eip =
(unsigned long)xen_failsafe_callback;
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
diff --git a/arch/x86/xen/suspend_hvm.c b/arch/x86/xen/suspend_hvm.c
index e666b614cf6d..9d548b0c772f 100644
--- a/arch/x86/xen/suspend_hvm.c
+++ b/arch/x86/xen/suspend_hvm.c
@@ -2,6 +2,7 @@
#include <linux/types.h>
#include <xen/xen.h>
+#include <xen/hvm.h>
#include <xen/features.h>
#include <xen/interface/features.h>
@@ -13,6 +14,6 @@ void xen_hvm_post_suspend(int suspend_cancelled)
xen_hvm_init_shared_info();
xen_vcpu_restore();
}
- xen_callback_vector();
+ xen_setup_callback_vector();
xen_unplug_emulated_devices();
}
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 2712e9155306..4757cec33abe 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -93,7 +93,7 @@ xen_iret_start_crit:
/*
* If there's something pending, mask events again so we can
- * jump back into xen_hypervisor_callback. Otherwise do not
+ * jump back into exc_xen_hypervisor_callback. Otherwise do not
* touch XEN_vcpu_info_mask.
*/
jne 1f
@@ -113,11 +113,11 @@ iret_restore_end:
* Events are masked, so jumping out of the critical region is
* OK.
*/
- je xen_hypervisor_callback
+ je xen_asm_exc_xen_hypervisor_callback
1: iret
xen_iret_end_crit:
- _ASM_EXTABLE(1b, iret_exc)
+ _ASM_EXTABLE(1b, asm_iret_error)
hyper_iret:
/* put this out of line since its very rarely used */
@@ -127,7 +127,7 @@ SYM_CODE_END(xen_iret)
.globl xen_iret_start_crit, xen_iret_end_crit
/*
- * This is called by xen_hypervisor_callback in entry_32.S when it sees
+ * This is called by xen_asm_exc_xen_hypervisor_callback in entry_32.S when it sees
* that the EIP at the time of interrupt was between
* xen_iret_start_crit and xen_iret_end_crit.
*
@@ -144,7 +144,7 @@ SYM_CODE_END(xen_iret)
* eflags }
* cs } nested exception info
* eip }
- * return address : (into xen_hypervisor_callback)
+ * return address : (into xen_asm_exc_xen_hypervisor_callback)
*
* In order to deliver the nested exception properly, we need to discard the
* nested exception frame such that when we handle the exception, we do it
@@ -152,7 +152,8 @@ SYM_CODE_END(xen_iret)
*
* The only caveat is that if the outer eax hasn't been restored yet (i.e.
* it's still on stack), we need to restore its value here.
- */
+ */
+.pushsection .noinstr.text, "ax"
SYM_CODE_START(xen_iret_crit_fixup)
/*
* Paranoia: Make sure we're really coming from kernel space.
@@ -181,3 +182,4 @@ SYM_CODE_START(xen_iret_crit_fixup)
2:
ret
SYM_CODE_END(xen_iret_crit_fixup)
+.popsection
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 0a0fd168683a..5d252aaeade8 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -28,33 +28,33 @@ SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
-xen_pv_trap divide_error
-xen_pv_trap debug
-xen_pv_trap xendebug
-xen_pv_trap int3
-xen_pv_trap xennmi
-xen_pv_trap overflow
-xen_pv_trap bounds
-xen_pv_trap invalid_op
-xen_pv_trap device_not_available
-xen_pv_trap double_fault
-xen_pv_trap coprocessor_segment_overrun
-xen_pv_trap invalid_TSS
-xen_pv_trap segment_not_present
-xen_pv_trap stack_segment
-xen_pv_trap general_protection
-xen_pv_trap page_fault
-xen_pv_trap spurious_interrupt_bug
-xen_pv_trap coprocessor_error
-xen_pv_trap alignment_check
+xen_pv_trap asm_exc_divide_error
+xen_pv_trap asm_exc_debug
+xen_pv_trap asm_exc_xendebug
+xen_pv_trap asm_exc_int3
+xen_pv_trap asm_exc_xennmi
+xen_pv_trap asm_exc_overflow
+xen_pv_trap asm_exc_bounds
+xen_pv_trap asm_exc_invalid_op
+xen_pv_trap asm_exc_device_not_available
+xen_pv_trap asm_exc_double_fault
+xen_pv_trap asm_exc_coproc_segment_overrun
+xen_pv_trap asm_exc_invalid_tss
+xen_pv_trap asm_exc_segment_not_present
+xen_pv_trap asm_exc_stack_segment
+xen_pv_trap asm_exc_general_protection
+xen_pv_trap asm_exc_page_fault
+xen_pv_trap asm_exc_spurious_interrupt_bug
+xen_pv_trap asm_exc_coprocessor_error
+xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
-xen_pv_trap machine_check
+xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
-xen_pv_trap simd_coprocessor_error
+xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
-xen_pv_trap hypervisor_callback
+xen_pv_trap asm_exc_xen_hypervisor_callback
__INIT
SYM_CODE_START(xen_early_idt_handler_array)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 45a441c33d6d..53b224fd6177 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -8,7 +8,6 @@
#include <xen/xen-ops.h>
/* These are code, but not functions. Defined in entry.S */
-extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
void xen_sysenter_target(void);
@@ -55,7 +54,6 @@ void xen_enable_sysenter(void);
void xen_enable_syscall(void);
void xen_vcpu_restore(void);
-void xen_callback_vector(void);
void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);