Diffstat (limited to 'arch/arm64/kvm/psci.c')
 -rw-r--r--  arch/arm64/kvm/psci.c | 317
 1 file changed, 102 insertions(+), 215 deletions(-)
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 74c47d420253..7fbc4c1b9df0 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -46,19 +46,11 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
* specification (ARM DEN 0022A). This means all suspend states
* for KVM will preserve the register state.
*/
- kvm_vcpu_block(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ kvm_vcpu_wfi(vcpu);
return PSCI_RET_SUCCESS;
}
-static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.power_off = true;
- kvm_make_request(KVM_REQ_SLEEP, vcpu);
- kvm_vcpu_kick(vcpu);
-}
-
static inline bool kvm_psci_valid_affinity(struct kvm_vcpu *vcpu,
unsigned long affinity)
{
@@ -84,8 +76,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
- if (!vcpu->arch.power_off) {
- if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
+ if (!kvm_arm_vcpu_stopped(vcpu)) {
+ if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
@@ -108,12 +100,12 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
/*
- * Make sure the reset request is observed if the change to
- * power_state is observed.
+ * Make sure the reset request is observed if the RUNNABLE mp_state is
+ * observed.
*/
smp_wmb();
- vcpu->arch.power_off = false;
+ vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
kvm_vcpu_wake_up(vcpu);
return PSCI_RET_SUCCESS;
@@ -121,8 +113,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
- int i, matching_cpus = 0;
- unsigned long mpidr;
+ int matching_cpus = 0;
+ unsigned long i, mpidr;
unsigned long target_affinity;
unsigned long target_affinity_mask;
unsigned long lowest_affinity_level;
@@ -151,7 +143,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
mpidr = kvm_vcpu_get_mpidr_aff(tmp);
if ((mpidr & target_affinity_mask) == target_affinity) {
matching_cpus++;
- if (!tmp->arch.power_off)
+ if (!kvm_arm_vcpu_stopped(tmp))
return PSCI_0_2_AFFINITY_LEVEL_ON;
}
}
@@ -162,9 +154,9 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
-static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
+static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *tmp;
/*
@@ -177,22 +169,39 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
* re-initialized.
*/
kvm_for_each_vcpu(i, tmp, vcpu->kvm)
- tmp->arch.power_off = true;
+ tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
+ vcpu->run->system_event.ndata = 1;
+ vcpu->run->system_event.data[0] = flags;
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
- kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+ kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN, 0);
}
static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
- kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
+ kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET, 0);
+}
+
+static void kvm_psci_system_reset2(struct kvm_vcpu *vcpu)
+{
+ kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET,
+ KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2);
+}
+
+static void kvm_psci_system_suspend(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+
+ memset(&run->system_event, 0, sizeof(vcpu->run->system_event));
+ run->system_event.type = KVM_SYSTEM_EVENT_SUSPEND;
+ run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
@@ -209,15 +218,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
- switch(fn) {
- case PSCI_0_2_FN64_CPU_SUSPEND:
- case PSCI_0_2_FN64_CPU_ON:
- case PSCI_0_2_FN64_AFFINITY_INFO:
- /* Disallow these functions for 32bit guests */
- if (vcpu_mode_is_32bit(vcpu))
- return PSCI_RET_NOT_SUPPORTED;
- break;
- }
+ /*
+ * Prevent 32 bit guests from calling 64 bit PSCI functions.
+ */
+ if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
+ return PSCI_RET_NOT_SUPPORTED;
return 0;
}
@@ -229,10 +234,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
unsigned long val;
int ret = 1;
- val = kvm_psci_check_allowed_function(vcpu, psci_fn);
- if (val)
- goto out;
-
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
/*
@@ -246,7 +247,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
val = kvm_psci_vcpu_suspend(vcpu);
break;
case PSCI_0_2_FN_CPU_OFF:
- kvm_psci_vcpu_off(vcpu);
+ kvm_arm_vcpu_power_off(vcpu);
val = PSCI_RET_SUCCESS;
break;
case PSCI_0_2_FN_CPU_ON:
@@ -300,29 +301,31 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
-out:
smccc_set_retval(vcpu, val, 0, 0, 0);
return ret;
}
-static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
+static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
{
+ unsigned long val = PSCI_RET_NOT_SUPPORTED;
u32 psci_fn = smccc_get_function(vcpu);
- u32 feature;
- unsigned long val;
+ struct kvm *kvm = vcpu->kvm;
+ u32 arg;
int ret = 1;
switch(psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
- val = KVM_ARM_PSCI_1_0;
+ val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
break;
case PSCI_1_0_FN_PSCI_FEATURES:
- feature = smccc_get_arg1(vcpu);
- val = kvm_psci_check_allowed_function(vcpu, feature);
+ arg = smccc_get_arg1(vcpu);
+ val = kvm_psci_check_allowed_function(vcpu, arg);
if (val)
break;
- switch(feature) {
+ val = PSCI_RET_NOT_SUPPORTED;
+
+ switch(arg) {
case PSCI_0_2_FN_PSCI_VERSION:
case PSCI_0_2_FN_CPU_SUSPEND:
case PSCI_0_2_FN64_CPU_SUSPEND:
@@ -338,8 +341,47 @@ static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
case ARM_SMCCC_VERSION_FUNC_ID:
val = 0;
break;
- default:
- val = PSCI_RET_NOT_SUPPORTED;
+ case PSCI_1_0_FN_SYSTEM_SUSPEND:
+ case PSCI_1_0_FN64_SYSTEM_SUSPEND:
+ if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags))
+ val = 0;
+ break;
+ case PSCI_1_1_FN_SYSTEM_RESET2:
+ case PSCI_1_1_FN64_SYSTEM_RESET2:
+ if (minor >= 1)
+ val = 0;
+ break;
+ }
+ break;
+ case PSCI_1_0_FN_SYSTEM_SUSPEND:
+ kvm_psci_narrow_to_32bit(vcpu);
+ fallthrough;
+ case PSCI_1_0_FN64_SYSTEM_SUSPEND:
+ /*
+ * Return directly to userspace without changing the vCPU's
+ * registers. Userspace depends on reading the SMCCC parameters
+ * to implement SYSTEM_SUSPEND.
+ */
+ if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags)) {
+ kvm_psci_system_suspend(vcpu);
+ return 0;
+ }
+ break;
+ case PSCI_1_1_FN_SYSTEM_RESET2:
+ kvm_psci_narrow_to_32bit(vcpu);
+ fallthrough;
+ case PSCI_1_1_FN64_SYSTEM_RESET2:
+ if (minor >= 1) {
+ arg = smccc_get_arg1(vcpu);
+
+ if (arg <= PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET ||
+ arg >= PSCI_1_1_RESET_TYPE_VENDOR_START) {
+ kvm_psci_system_reset2(vcpu);
+ vcpu_set_reg(vcpu, 0, PSCI_RET_INTERNAL_FAILURE);
+ return 0;
+ }
+
+ val = PSCI_RET_INVALID_PARAMS;
break;
}
break;
@@ -359,7 +401,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
switch (psci_fn) {
case KVM_PSCI_FN_CPU_OFF:
- kvm_psci_vcpu_off(vcpu);
+ kvm_arm_vcpu_power_off(vcpu);
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
@@ -392,180 +434,25 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
*/
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
- switch (kvm_psci_version(vcpu, vcpu->kvm)) {
+ u32 psci_fn = smccc_get_function(vcpu);
+ unsigned long val;
+
+ val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+ if (val) {
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+ }
+
+ switch (kvm_psci_version(vcpu)) {
+ case KVM_ARM_PSCI_1_1:
+ return kvm_psci_1_x_call(vcpu, 1);
case KVM_ARM_PSCI_1_0:
- return kvm_psci_1_0_call(vcpu);
+ return kvm_psci_1_x_call(vcpu, 0);
case KVM_ARM_PSCI_0_2:
return kvm_psci_0_2_call(vcpu);
case KVM_ARM_PSCI_0_1:
return kvm_psci_0_1_call(vcpu);
default:
return -EINVAL;
- };
-}
-
-int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
-{
- return 3; /* PSCI version and two workaround registers */
-}
-
-int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
- return -EFAULT;
-
- if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
- return -EFAULT;
-
- if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
- return -EFAULT;
-
- return 0;
-}
-
-#define KVM_REG_FEATURE_LEVEL_WIDTH 4
-#define KVM_REG_FEATURE_LEVEL_MASK (BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)
-
-/*
- * Convert the workaround level into an easy-to-compare number, where higher
- * values mean better protection.
- */
-static int get_kernel_wa_level(u64 regid)
-{
- switch (regid) {
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (arm64_get_spectre_v2_state()) {
- case SPECTRE_VULNERABLE:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
- case SPECTRE_MITIGATED:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
- case SPECTRE_UNAFFECTED:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
- }
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (arm64_get_spectre_v4_state()) {
- case SPECTRE_MITIGATED:
- /*
- * As for the hypercall discovery, we pretend we
- * don't have any FW mitigation if SSBS is there at
- * all times.
- */
- if (cpus_have_final_cap(ARM64_SSBS))
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
- fallthrough;
- case SPECTRE_UNAFFECTED:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
- case SPECTRE_VULNERABLE:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
- }
- }
-
- return -EINVAL;
-}
-
-int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- void __user *uaddr = (void __user *)(long)reg->addr;
- u64 val;
-
- switch (reg->id) {
- case KVM_REG_ARM_PSCI_VERSION:
- val = kvm_psci_version(vcpu, vcpu->kvm);
- break;
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
- break;
- default:
- return -ENOENT;
}
-
- if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- void __user *uaddr = (void __user *)(long)reg->addr;
- u64 val;
- int wa_level;
-
- if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- switch (reg->id) {
- case KVM_REG_ARM_PSCI_VERSION:
- {
- bool wants_02;
-
- wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
-
- switch (val) {
- case KVM_ARM_PSCI_0_1:
- if (wants_02)
- return -EINVAL;
- vcpu->kvm->arch.psci_version = val;
- return 0;
- case KVM_ARM_PSCI_0_2:
- case KVM_ARM_PSCI_1_0:
- if (!wants_02)
- return -EINVAL;
- vcpu->kvm->arch.psci_version = val;
- return 0;
- }
- break;
- }
-
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
- return -EINVAL;
-
- if (get_kernel_wa_level(reg->id) < val)
- return -EINVAL;
-
- return 0;
-
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
- return -EINVAL;
-
- /* The enabled bit must not be set unless the level is AVAIL. */
- if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
- (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
- return -EINVAL;
-
- /*
- * Map all the possible incoming states to the only two we
- * really want to deal with.
- */
- switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
- wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
- break;
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
- wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
- * other way around.
- */
- if (get_kernel_wa_level(reg->id) < wa_level)
- return -EINVAL;
-
- return 0;
- default:
- return -ENOENT;
- }
-
- return -EINVAL;
}
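
For context only (not part of the patch): below is a minimal sketch of how a VMM's run loop might consume the exits this change produces. It assumes a linux/kvm.h recent enough to define the ndata/data[] layout of the system_event union, the KVM_SYSTEM_EVENT_SUSPEND and KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 constants, and the KVM_CAP_ARM_SYSTEM_SUSPEND capability introduced alongside this series; handle_system_event(), enable_system_suspend() and their return conventions are invented for illustration.

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Illustrative sketch, not part of the patch: opt in to SYSTEM_SUSPEND
     * exits on the VM fd. Without this, the guest's SYSTEM_SUSPEND call is
     * reported as NOT_SUPPORTED by PSCI_FEATURES above.
     */
    static int enable_system_suspend(int vm_fd)
    {
            struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_SYSTEM_SUSPEND };

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

    /* Handle a KVM_EXIT_SYSTEM_EVENT exit after KVM_RUN returns. */
    static int handle_system_event(struct kvm_run *run)
    {
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                    /* PSCI SYSTEM_OFF: tear the VM down. */
                    return 0;
            case KVM_SYSTEM_EVENT_RESET:
                    /*
                     * data[0] carries the flags set by
                     * kvm_prepare_system_event(); bit 0 distinguishes
                     * SYSTEM_RESET2 from a plain SYSTEM_RESET.
                     */
                    if (run->system_event.ndata >= 1 &&
                        (run->system_event.data[0] &
                         KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2))
                            printf("guest used SYSTEM_RESET2\n");
                    return 1;       /* reset vCPU state, re-enter the guest */
            case KVM_SYSTEM_EVENT_SUSPEND:
                    /*
                     * The SMCCC arguments are still in the vCPU registers
                     * (the kernel returns to userspace without modifying
                     * them), so the VMM reads them and implements the
                     * suspend itself.
                     */
                    return 1;
            default:
                    return -1;
            }
    }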