author    Linus Torvalds <torvalds@linux-foundation.org>  2020-07-06 12:48:04 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-07-06 12:48:04 -0700
commit    bfe91da29bfad9941d5d703d45e29f0812a20724 (patch)
tree      e5f04b57e30de099ada784d05c7beefa4b678418 /arch/arm64
parent    Merge tag 's390-5.8-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux (diff)
parent    Merge tag 'kvmarm-fixes-5.8-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "Bugfixes and a one-liner patch to silence a sparse warning"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: arm64: Stop clobbering x0 for HVC_SOFT_RESTART
  KVM: arm64: PMU: Fix per-CPU access in preemptible context
  KVM: VMX: Use KVM_POSSIBLE_CR*_GUEST_BITS to initialize guest/host masks
  KVM: x86: Mark CR4.TSD as being possibly owned by the guest
  KVM: x86: Inject #GP if guest attempts to toggle CR4.LA57 in 64-bit mode
  kvm: use more precise cast and do not drop __user
  KVM: x86: bit 8 of non-leaf PDPEs is not reserved
  KVM: X86: Fix async pf caused null-ptr-deref
  KVM: arm64: vgic-v4: Plug race between non-residency and v4.1 doorbell
  KVM: arm64: pvtime: Ensure task delay accounting is enabled
  KVM: arm64: Fix kvm_reset_vcpu() return code being incorrect with SVE
  KVM: arm64: Annotate hyp NMI-related functions as __always_inline
  KVM: s390: reduce number of IO pins to 1
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h  |  2 +-
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  |  2 +-
-rw-r--r--  arch/arm64/kvm/hyp-init.S            | 11 +++++++----
-rw-r--r--  arch/arm64/kvm/pmu.c                 |  7 ++++++-
-rw-r--r--  arch/arm64/kvm/pvtime.c              | 15 ++++++++++++---
-rw-r--r--  arch/arm64/kvm/reset.c               | 10 +++++++---
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c        |  8 ++++++++
7 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index a358e97572c1..6647ae4f0231 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
return read_sysreg_s(SYS_ICC_PMR_EL1);
}
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
{
write_sysreg_s(val, SYS_ICC_PMR_EL1);
}
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 5d1f4ae42799..f7c3d1ff091d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 6e6ed5581eed..e76c0e89d48e 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
1: cmp x0, #HVC_RESET_VECTORS
b.ne 1f
-reset:
+
/*
- * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
- * case we coming via HVC_SOFT_RESTART.
+ * Set the HVC_RESET_VECTORS return code before entering the common
+ * path so that we do not clobber x0-x2 in case we are coming via
+ * HVC_SOFT_RESTART.
*/
+ mov x0, xzr
+reset:
+ /* Reset kvm back to the hyp stub. */
mrs x5, sctlr_el2
mov_q x6, SCTLR_ELx_FLAGS
bic x5, x5, x6 // Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
/* Install stub vectors */
adr_l x5, __hyp_stub_vectors
msr vbar_el2, x5
- mov x0, xzr
eret
1: /* Bad stub call */
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index b5ae3a5d509e..3c224162b3dd 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
}
/*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;
+ preempt_disable();
host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
kvm_vcpu_pmu_enable_el0(events_guest);
kvm_vcpu_pmu_disable_el0(events_host);
+ preempt_enable();
}
/*
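The hunk above is the whole PMU fix: per the new comment, kvm_vcpu_pmu_restore_guest() is reached not only from vcpu_{load,put} but also from the sysreg handling path, which runs with preemption enabled, so the per-CPU access has to pin the task to the current CPU itself. A minimal sketch of that idiom follows; the names (example_pmu_state and friends) are illustrative and not the real kvm_host_data layout.

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/types.h>

    /* Illustrative per-CPU state, standing in for kvm_host_data.pmu_events. */
    struct example_pmu_state {
    	u64 events_guest;
    	u64 events_host;
    };

    static DEFINE_PER_CPU(struct example_pmu_state, example_pmu_state);

    static void example_restore_guest_events(void)
    {
    	struct example_pmu_state *st;

    	/*
    	 * this_cpu_ptr() is only safe while the task cannot migrate;
    	 * a preemptible caller could otherwise be moved to another CPU
    	 * between taking the pointer and using it, updating the wrong
    	 * CPU's state.
    	 */
    	preempt_disable();
    	st = this_cpu_ptr(&example_pmu_state);
    	/* ... program EL0 counting from st->events_guest / st->events_host ... */
    	preempt_enable();
    }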
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index 1e0f4c284888..f7b52ce1557e 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -3,6 +3,7 @@
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
+static bool kvm_arm_pvtime_supported(void)
+{
+ return !!sched_info_on();
+}
+
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
int ret = 0;
int idx;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
u64 __user *user = (u64 __user *)attr->addr;
u64 ipa;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
{
switch (attr->attr) {
case KVM_ARM_VCPU_PVTIME_IPA:
- return 0;
+ if (kvm_arm_pvtime_supported())
+ return 0;
}
return -ENXIO;
}
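With this change the stolen-time attribute is only advertised when task delay accounting (sched_info_on()) is available, since stolen time is derived from the task's run-delay statistics; all three attribute handlers now return -ENXIO otherwise, which userspace can probe for. Below is a minimal userspace sketch of that probe, assuming vcpu_fd is an already-created KVM vcpu file descriptor and ipa is a guest IPA set aside for the stolen-time structure.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Probe for PV time support and, if present, enable it for one vcpu. */
    static int enable_pvtime(int vcpu_fd, uint64_t ipa)
    {
    	struct kvm_device_attr attr = {
    		.group = KVM_ARM_VCPU_PVTIME_CTRL,
    		.attr  = KVM_ARM_VCPU_PVTIME_IPA,
    		.addr  = (uint64_t)&ipa,
    	};

    	/* After this fix, ENXIO here also means delay accounting is off. */
    	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr)) {
    		fprintf(stderr, "pvtime unsupported: %s\n", strerror(errno));
    		return -1;
    	}
    	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }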
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index d3b209023727..6ed36be51b4b 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
- int ret = -EINVAL;
+ int ret;
bool loaded;
u32 pstate;
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
- if (kvm_vcpu_enable_ptrauth(vcpu))
+ if (kvm_vcpu_enable_ptrauth(vcpu)) {
+ ret = -EINVAL;
goto out;
+ }
}
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+ ret = -EINVAL;
goto out;
+ }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
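The return-code change above removes a fragile pattern: ret used to start out as -EINVAL and the later bail-out paths relied on that default, but the shortlog entry ("Fix kvm_reset_vcpu() return code being incorrect with SVE") points at the SVE setup earlier in the function overwriting that default with 0, after which those paths silently reported success. A compact sketch of the failure mode, with hypothetical helpers standing in for the SVE setup and the feature checks:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-ins, not the real kvm_reset_vcpu() helpers. */
    static int do_optional_setup(void)   { return 0; }  /* step succeeds */
    static int check_feature_combo(void) { return 1; }  /* combination is invalid */

    static int fragile_reset(void)
    {
    	int ret = -EINVAL;		/* default relied on by later goto out paths */

    	ret = do_optional_setup();	/* success overwrites the default with 0 ... */
    	if (ret)
    		goto out;

    	if (check_feature_combo())	/* ... so this failure now returns 0 */
    		goto out;
    out:
    	return ret;
    }

    int main(void)
    {
    	printf("fragile_reset() = %d (expected %d)\n", fragile_reset(), -EINVAL);
    	return 0;
    }

The fix applies the safer convention of assigning the error code explicitly at each failure site instead of leaning on a pre-initialised default.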
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 27ac833e5ec7..b5fa73c9fd35 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);
+ /*
+ * The v4.1 doorbell can fire concurrently with the vPE being
+ * made non-resident. Ensure we only update pending_last
+ * *after* the non-residency sequence has completed.
+ */
+ raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+ raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
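The new critical section closes the window described in the comment: with GICv4.1 the doorbell interrupt can fire while another CPU is making the vPE non-resident, so pending_last must only be updated once that sequence has completed. A generic sketch of the pattern follows; example_vpe is illustrative and not the real its_vpe layout.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Simplified stand-in for the vPE state protected by vpe_lock. */
    struct example_vpe {
    	raw_spinlock_t	lock;
    	bool		resident;
    	bool		pending_last;
    };

    /* Runs in hard-irq context when the doorbell fires. */
    static void example_doorbell(struct example_vpe *vpe)
    {
    	/*
    	 * Serialise against example_make_nonresident() so the flag is
    	 * only set after the non-residency sequence has completed.
    	 */
    	raw_spin_lock(&vpe->lock);
    	vpe->pending_last = true;
    	raw_spin_unlock(&vpe->lock);
    }

    /* Runs in task context when the vCPU stops running the vPE. */
    static void example_make_nonresident(struct example_vpe *vpe)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&vpe->lock, flags);
    	vpe->resident = false;
    	/* ... complete the non-residency sequence under the lock ... */
    	raw_spin_unlock_irqrestore(&vpe->lock, flags);
    }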