Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/armada-38x.dtsi             |  3
-rw-r--r--  arch/arm/boot/dts/imx6qdl-icore.dtsi          |  3
-rw-r--r--  arch/arm/boot/dts/imx6sx-sabreauto.dts        |  2
-rw-r--r--  arch/arm/boot/dts/imx6sx-sdb.dtsi             |  2
-rw-r--r--  arch/arm/boot/dts/keystone-k2g-evm.dts        |  2
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi              |  2
-rw-r--r--  arch/arm/boot/dts/sun5i.dtsi                  |  2
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi              |  2
-rw-r--r--  arch/arm/include/asm/percpu.h                 |  2
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c               | 27
-rw-r--r--  arch/arm/kernel/vdso.c                        |  1
-rw-r--r--  arch/arm/mm/mmu.c                             |  2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi  |  1
-rw-r--r--  arch/arm64/include/asm/alternative.h          |  4
-rw-r--r--  arch/arm64/include/asm/checksum.h             |  5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h             | 11
-rw-r--r--  arch/arm64/include/asm/smp.h                  |  1
-rw-r--r--  arch/arm64/kvm/mmu.c                          | 11
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S          | 14
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c         | 25
-rw-r--r--  arch/powerpc/perf/core-book3s.c               |  6
-rw-r--r--  arch/sh/include/asm/pgalloc.h                 | 10
-rw-r--r--  arch/sh/kernel/entry-common.S                 |  6
-rw-r--r--  arch/x86/kvm/lapic.c                          |  2
-rw-r--r--  arch/x86/kvm/svm/svm.c                        |  9
-rw-r--r--  arch/x86/kvm/vmx/nested.c                     | 16
-rw-r--r--  arch/x86/kvm/vmx/nested.h                     |  5

27 files changed, 127 insertions(+), 49 deletions(-)
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 348116501aa2..9b1a24cc5e91 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -342,7 +342,8 @@
comphy: phy@18300 {
compatible = "marvell,armada-380-comphy";
- reg = <0x18300 0x100>;
+ reg-names = "comphy", "conf";
+ reg = <0x18300 0x100>, <0x18460 4>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
index 756f3a9f1b4f..12997dae35d9 100644
--- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
@@ -397,7 +397,7 @@
pinctrl_usbotg: usbotggrp {
fsl,pins = <
- MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
>;
};
@@ -409,6 +409,7 @@
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
>;
};
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index 825924448ab4..14fd1de52a68 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -99,7 +99,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 3e5fb72f21fc..c99aa273c296 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -213,7 +213,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts
index db640bab8c1d..8b3d64c913d8 100644
--- a/arch/arm/boot/dts/keystone-k2g-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2g-evm.dts
@@ -402,7 +402,7 @@
&gbe0 {
phy-handle = <&ethphy0>;
- phy-mode = "rgmii-id";
+ phy-mode = "rgmii-rxid";
status = "okay";
};
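A note on the three phy-mode changes above: the RGMII variants encode which side inserts the clock skew the RGMII spec requires. "rgmii" means the board or MAC provides the delays, "rgmii-id" asks the PHY to delay both RX and TX clocks internally, and "rgmii-rxid" asks it to delay only the receive clock. A minimal sketch of how a MAC driver consumes the property (the wrapper function is illustrative; of_get_phy_mode() is the stock helper):

	#include <linux/of_net.h>
	#include <linux/phy.h>
	#include <linux/platform_device.h>

	/* Sketch: resolve "phy-mode" from the device tree.
	 * "rgmii"      -> PHY_INTERFACE_MODE_RGMII      (no PHY-internal delay)
	 * "rgmii-id"   -> PHY_INTERFACE_MODE_RGMII_ID   (PHY delays RX and TX)
	 * "rgmii-rxid" -> PHY_INTERFACE_MODE_RGMII_RXID (PHY delays RX only)
	 */
	static int example_get_phy_mode(struct platform_device *pdev,
					phy_interface_t *iface)
	{
		return of_get_phy_mode(pdev->dev.of_node, iface);
	}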
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index bf531efc0610..0f95a6ef8543 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -198,7 +198,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
- alloc-ranges = <0x4a000000 0x6000000>;
+ alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index e6b036734a64..c2b4fbf552a3 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -117,7 +117,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
- alloc-ranges = <0x4a000000 0x6000000>;
+ alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index ffe1d10a1a84..6d6a37940db2 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -181,7 +181,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
- alloc-ranges = <0x4a000000 0x6000000>;
+ alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};
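The three sunxi changes above are the same fix: the old alloc-ranges window (0x4a000000 + 0x6000000 = 0x50000000) was exactly pool-sized, leaving the 96 MiB default pool a single legal placement, while the new window spans the first 256 MiB of DRAM (0x40000000-0x50000000) and lets CMA place the pool wherever it fits. Consumers are unaffected: with linux,cma-default they reach the pool through the ordinary DMA API. A minimal sketch, with the device and size illustrative:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	/* Sketch: allocate a buffer that lands in the default CMA pool when
	 * the device has no dedicated pool of its own. */
	static int example_alloc(struct device *dev, dma_addr_t *dma)
	{
		void *buf = dma_alloc_coherent(dev, SZ_4M, dma, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* ... use buf, then dma_free_coherent(dev, SZ_4M, buf, *dma) */
		return 0;
	}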
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index f44f448537f2..1a3eedbac4a2 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -5,6 +5,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+#include <asm/thread_info.h>
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 02ca7adf5375..7fff88e61252 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
+static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ struct arch_hw_breakpoint *info)
+{
+ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
+}
+
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
@@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine,
+ * then handle the stepping ourselves since userspace really
+ * can't help us with this.
+ */
+ if (watchpoint_fault_on_uaccess(regs, info))
+ goto step;
+
perf_bp_event(wp, regs);
/*
- * If no overflow handler is present, insert a temporary
- * mismatch breakpoint so we can single-step over the
- * watchpoint trigger.
+ * Defer stepping to the overflow handler if one is installed.
+ * Otherwise, insert a temporary mismatch breakpoint so that
+ * we can single-step over the watchpoint trigger.
*/
- if (is_default_overflow_handler(wp))
- enable_single_step(wp, instruction_pointer(regs));
+ if (!is_default_overflow_handler(wp))
+ goto unlock;
+step:
+ enable_single_step(wp, instruction_pointer(regs));
unlock:
rcu_read_unlock();
}
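The case this hunk handles is a userspace watchpoint that fires in kernel mode, i.e. while a uaccess routine touches the watched address. A hedged userspace sketch that provokes exactly that path, assuming an ARM system with hardware watchpoints (the watched variable and the read(2) target are illustrative):

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int buf;	/* watched; written by the kernel via copy_to_user() */

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_BREAKPOINT;
		attr.size = sizeof(attr);
		attr.bp_type = HW_BREAKPOINT_W;		/* write watchpoint */
		attr.bp_addr = (unsigned long)&buf;
		attr.bp_len = HW_BREAKPOINT_LEN_4;

		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		/* read(2) stores into buf with copy_to_user(): the watchpoint
		 * fires with !user_mode(regs), which is the case the new
		 * watchpoint_fault_on_uaccess() check catches. */
		read(0, &buf, sizeof(buf));
		return 0;
	}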
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 6bfdca4769a7..fddd08a6e063 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
if (!cntvct_ok) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+ vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
}
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 628028bfbb92..bcd82614c25d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
pud_t *pud;
p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
- if (!WARN_ON(!p4d))
+ if (WARN_ON(!p4d))
return;
pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 78b1361dfbb9..9ce78a7b117d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -161,6 +161,7 @@
resets = <&ccu RST_BUS_VE>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
allwinner,sram = <&ve_sram 1>;
+ iommus = <&iommu 3>;
};
gpu: gpu@1800000 {
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 12f0eb56a1cc..619db9b4c9d5 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
- ".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
- ".org . - (662b-661b) + (664b-663b)\n" \
+ ".org . - (662b-661b) + (664b-663b)\n\t" \
+ ".previous\n" \
".endif\n"
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
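The two .org directives act as a build-time length check: each rewinds the location counter by one sequence's length and advances it by the other's, and gas rejects any .org that moves '.' backwards, so the pair only assembles when (664b-663b) == (662b-661b). The reordering matters because the check must run while '.' is still inside the replacement section; after ".previous" it would be evaluated against the section being returned to. A standalone sketch of the idiom, assuming GNU as (the section name and labels are illustrative):

	/* Sketch: assert at build time that two instruction sequences have
	 * equal length. Assembly fails if the lengths differ. */
	asm(
	"661:	nop\n"
	"662:\n"
	"	.pushsection .discard.example, \"a\"\n"
	"663:	nop\n"
	"664:\n"
	/* '.' may only move forward, so together these fail on any mismatch */
	"	.org . - (664b-663b) + (662b-661b)\n"
	"	.org . - (662b-661b) + (664b-663b)\n"
	"	.popsection\n"
	);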
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index b6f7bc6da5fb..93a161b3bf3f 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__uint128_t tmp;
u64 sum;
+ int n = ihl; /* we want it signed */
tmp = *(const __uint128_t *)iph;
iph += 16;
- ihl -= 4;
+ n -= 4;
tmp += ((tmp >> 64) | (tmp << 64));
sum = tmp >> 64;
do {
sum += *(const u32 *)iph;
iph += 4;
- } while (--ihl);
+ } while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force u32)(sum >> 32));
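The signed copy exists because ihl is taken from the packet: a malformed header with ihl <= 4 drives the old "while (--ihl)" guard through an unsigned wraparound, and the loop walks roughly 2^32 words past the header. A self-contained sketch of the difference between the two guards:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ihl = 4;	/* malformed: minimum legal IPv4 IHL is 5 */
		int n = ihl;		/* the fix: operate on a signed copy */

		ihl -= 4;		/* 0 */
		n -= 4;			/* 0 */

		/* old guard: 0 - 1 wraps, so the loop would keep running */
		printf("--ihl   = %u\n", --ihl);	/* 4294967295 */

		/* new guard: -1 > 0 is false, so the loop stops immediately */
		printf("--n > 0 = %d\n", --n > 0);	/* 0 */
		return 0;
	}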
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc664b1..e21d4a01372f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
-#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
- system_supports_generic_auth()) && \
- ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define vcpu_has_ptrauth(vcpu) \
+ ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
+ cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
+ (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+#else
+#define vcpu_has_ptrauth(vcpu) false
+#endif
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index ea268d88b6f7..a0c8a0b65259 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -30,7 +30,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#include <asm/pointer_auth.h>
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8c0035cab6b6..31058e6e7c2a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
return true;
}
-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
{
pud_t *pudp;
pmd_t *pmdp;
@@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
return false;
if (pudp)
- return kvm_s2pud_exec(pudp);
+ return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
else if (pmdp)
- return kvm_s2pmd_exec(pmdp);
+ return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
else
- return kvm_s2pte_exec(ptep);
+ return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* execute permissions, and we preserve whatever we have.
*/
needs_exec = exec_fault ||
- (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+ (fault_status == FSC_PERM &&
+ stage2_is_exec(kvm, fault_ipa, vma_pagesize));
if (vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0fc8bad878b2..446e54c3f71e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -3072,10 +3072,18 @@ do_hash_page:
ori r0,r0,DSISR_BAD_FAULT_64S@l
and. r0,r5,r0 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
+
+ /*
+ * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+ * don't call hash_page, just fail the fault. This is required to
+ * prevent re-entrancy problems in the hash code, namely perf
+ * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+ * hash fault. See the comment in hash_preload().
+ */
ld r11, PACA_THREAD_INFO(r13)
- lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
- andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
- bne 77f /* then don't call hash_page now */
+ lwz r0,TI_PREEMPT(r11)
+ andis. r0,r0,NMI_MASK@h
+ bne 77f
/*
* r3 contains the trap number
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 468169e33c86..9b9f92ad0e7a 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
pgd_t *pgdir;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+ unsigned long flags;
BUG_ON(get_region_id(ea) != USER_REGION_ID);
@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
return;
#endif /* CONFIG_PPC_64K_PAGES */
+ /*
+ * __hash_page_* must run with interrupts off, as it sets the
+ * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+ * time, and they may take a hash fault reading the user stack; see
+ * read_user_stack_slow() in the powerpc/perf code.
+ *
+ * If that takes a hash fault on the same page as we lock here, it
+ * will bail out when seeing H_PAGE_BUSY set, and retry the access
+ * leading to an infinite loop.
+ *
+ * Disabling interrupts here does not prevent perf interrupts, but it
+ * will prevent them taking hash faults (see the NMI test in
+ * do_hash_page), then read_user_stack's copy_from_user_nofault will
+ * fail and perf will fall back to read_user_stack_slow(), which
+ * walks the Linux page tables.
+ *
+ * Interrupts must also be off for the duration of the
+ * mm_is_thread_local test and update, to prevent preempt running the
+ * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+ */
+ local_irq_save(flags);
+
/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;
@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
+
+ local_irq_restore(flags);
}
/*
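The comment above describes a three-party protocol: hash_preload() holds H_PAGE_BUSY with interrupts off, do_hash_page refuses hash faults in NMI context, and the perf callchain reader degrades to a software page-table walk when its nofault copy fails. A hedged sketch of that last piece, loosely modeled on the powerpc perf code (names approximate):

	/* Sketch (names approximate): the perf side of the protocol. The
	 * nofault copy returns an error instead of spinning on H_PAGE_BUSY,
	 * because do_hash_page fails hash faults taken in NMI context; the
	 * slow path then walks the Linux page tables directly. */
	static int read_user_stack(const unsigned long __user *ptr,
				   unsigned long *ret)
	{
		if (!copy_from_user_nofault(ret, ptr, sizeof(*ret)))
			return 0;		/* fast path: no hash fault */

		return read_user_stack_slow(ptr, ret, sizeof(*ret));
	}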
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index cd6a742ac6ef..01d70280d287 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
perf_read_regs(regs);
+ /*
+ * If perf interrupts hit in a local_irq_disable (soft-masked) region,
+ * we consider them as NMIs. This is required to prevent hash faults on
+ * user addresses when reading callchains. See the NMI test in
+ * do_hash_page.
+ */
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 22d968bfe9bb..d770da3f8b6f 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
+#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -33,13 +34,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
-#if CONFIG_PGTABLE_LEVELS > 2
-#define __pmd_free_tlb(tlb, pmdp, addr) \
-do { \
- struct page *page = virt_to_page(pmdp); \
- pgtable_pmd_page_dtor(page); \
- tlb_remove_page((tlb), page); \
-} while (0);
-#endif
-
#endif /* __ASM_SH_PGALLOC_H */
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 956a7a03b0c8..9bac5bbb67f3 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -199,7 +199,7 @@ syscall_trace_entry:
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
- mov.l 2f, r10 ! Number of syscalls
+ mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3
bf syscall_call
mov #-ENOSYS, r0
@@ -353,7 +353,7 @@ ENTRY(system_call)
tst r9, r8
bf syscall_trace_entry
!
- mov.l 2f, r8 ! Number of syscalls
+ mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!
@@ -392,7 +392,7 @@ syscall_exit:
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
-2: .long NR_syscalls
+6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5bf72fc86a8e..4ce2ddd26c0b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
+ if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c0da4dd78ac5..5bbf76189afa 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
- if (pause_filter_count) {
+ if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
control->pause_filter_count = pause_filter_count;
if (pause_filter_thresh)
control->pause_filter_thresh = pause_filter_thresh;
@@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
bool in_kernel = (svm_get_cpl(vcpu) == 0);
- if (pause_filter_thresh)
+ if (!kvm_pause_in_guest(vcpu->kvm))
grow_ple_window(vcpu);
kvm_vcpu_on_spin(vcpu, in_kernel);
@@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
- if (pause_filter_thresh)
+ if (!kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
}
@@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
static int svm_vm_init(struct kvm *kvm)
{
+ if (!pause_filter_count || !pause_filter_thresh)
+ kvm->arch.pause_in_guest = true;
+
if (avic) {
int ret = avic_vm_init(kvm);
if (ret)
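All three svm.c call sites switch from re-testing module parameters to a per-VM decision made once in svm_vm_init(). A sketch of the accessor this relies on, assuming the usual definition in KVM's x86 headers:

	/* Sketch: per-VM flag read by the call sites above; set at VM
	 * creation when pause filtering is unavailable or disabled, so
	 * PAUSE never causes an exit for this VM. */
	static inline bool kvm_pause_in_guest(struct kvm *kvm)
	{
		return kvm->arch.pause_in_guest;
	}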
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d4a4cec034d0..11e4df560018 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
+ if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
+ return -EINVAL;
+
/*
* SMM temporarily disables VMX, so we cannot be in guest mode,
* nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
@@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (ret)
return ret;
- /* Empty 'VMXON' state is permitted */
- if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
- return 0;
+ /* Empty 'VMXON' state is permitted if no VMCS loaded */
+ if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
+ /* See vmx_has_valid_vmcs12. */
+ if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
+ (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
+ (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
+ return -EINVAL;
+ else
+ return 0;
+ }
if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
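For the nested.c checks above, the userspace contract is: an "empty" state (too small to carry a vmcs12) is accepted only when nothing claims a loaded VMCS. A hedged userspace sketch of the minimal legal VMXON-only state (the fd and GPA are illustrative; field names follow the kvm_nested_state UAPI as I understand it):

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Sketch: restore "VMXON executed, no VMCS loaded" into a vCPU. */
	static int set_vmxon_only(int vcpu_fd, unsigned long long vmxon_gpa)
	{
		struct kvm_nested_state state;

		memset(&state, 0, sizeof(state));
		state.format = KVM_STATE_NESTED_FORMAT_VMX;
		state.size = sizeof(state);	/* "empty": no vmcs12 payload */
		state.hdr.vmx.vmxon_pa = vmxon_gpa;
		state.hdr.vmx.vmcs12_pa = -1ull;	/* no VMCS loaded */
		/* state.flags == 0: not in guest mode, no eVMCS */

		return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &state);
	}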
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 758bccc26cf9..197148d76b8f 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}
+/*
+ * Note: the same condition is checked against the state provided by userspace
+ * in vmx_set_nested_state; if it is satisfied, the nested state must include
+ * the VMCS12.
+ */
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);