author	Marc Zyngier <maz@kernel.org>	2022-05-28 12:38:18 +0100
committer	Marc Zyngier <maz@kernel.org>	2022-06-10 09:54:34 +0100
commit	699bb2e0c6f3796549dabac329501df7ffd99439 (patch)
tree	9d95b4a64ab124d14f768fbf1da97c02d6f3f94f /arch/arm64/kvm/hyp
parent	KVM: arm64: Move vcpu configuration flags into their own set (diff)
KVM: arm64: Move vcpu PC/Exception flags to the input flag set
The PC update flags (which also deal with exception injection) are one of the most complicated uses of the flags we have. Make them more foolproof by:

- moving them over to the new accessors and assigning them to the input flag set

- turning the combination of generic ELx flags with another flag indicating the target EL itself into an explicit set of flags for each EL and vector combination

- adding a new accessor to pend the exception

This is otherwise a pretty straightforward conversion.

Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
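[Editor's note] For readers unfamiliar with the flag infrastructure this series introduces, the sketch below illustrates the accessor pattern the message refers to: each flag names a (value, mask) pair inside a per-vcpu flag word, reads are always masked, a multi-bit field such as the exception target/type gets one explicit value per EL/vector combination, and unpack_vcpu_flag() yields a constant usable as a switch case label. The accessor and flag names (vcpu_get_flag, vcpu_set_flag, vcpu_clear_flag, unpack_vcpu_flag, PENDING_EXCEPTION, INCREMENT_PC, EXCEPT_MASK, EXCEPT_AA32_*, EXCEPT_AA64_EL1_SYNC) come from the patch itself; the bit positions, the struct layout, and the macro internals are simplifications for illustration only, not the in-tree definitions (those live in arch/arm64/include/asm/kvm_host.h).

/*
 * Simplified, standalone illustration of the flag accessor pattern.
 * NOT the kernel's definitions: the real macros carry the flag-set
 * name, build-time checks, etc.
 */
#include <stdio.h>

struct vcpu { unsigned int iflags; };	/* stand-in for the "input" flag set */

/* A flag is a (value, mask) pair; single-bit flags have value == mask. */
#define PENDING_EXCEPTION	(1u << 0), (1u << 0)
#define INCREMENT_PC		(1u << 1), (1u << 1)

/* Multi-bit field holding the target EL/vector of a pended exception. */
#define __EXCEPT_MASK		(7u << 2)
#define EXCEPT_MASK		__EXCEPT_MASK, __EXCEPT_MASK
#define EXCEPT_AA64_EL1_SYNC	(0u << 2), __EXCEPT_MASK
#define EXCEPT_AA32_UND		(1u << 2), __EXCEPT_MASK
#define EXCEPT_AA32_IABT	(2u << 2), __EXCEPT_MASK
#define EXCEPT_AA32_DABT	(3u << 2), __EXCEPT_MASK

/* Masked accessors: a read returns the whole field and nothing else,
 * a write clears the field before or-ing in the new value. */
#define __get(fl, v, m)		((fl) & (m))
#define __set(fl, v, m)		((fl) = (((fl) & ~(m)) | (v)))
#define __clear(fl, v, m)	((fl) &= ~(m))

#define vcpu_get_flag(vcpu, ...)	__get((vcpu)->iflags, __VA_ARGS__)
#define vcpu_set_flag(vcpu, ...)	__set((vcpu)->iflags, __VA_ARGS__)
#define vcpu_clear_flag(vcpu, ...)	__clear((vcpu)->iflags, __VA_ARGS__)

/* Strip the mask so the value can be used as a case label. */
#define __unpack(v, m)			(v)
#define unpack_vcpu_flag(...)		__unpack(__VA_ARGS__)

int main(void)
{
	struct vcpu vcpu = { 0 };

	/* Pend an AArch32 undef: raise the pending bit and the target field. */
	vcpu_set_flag(&vcpu, PENDING_EXCEPTION);
	vcpu_set_flag(&vcpu, EXCEPT_AA32_UND);

	/* Dispatch on the masked field, as kvm_inject_exception() now does. */
	switch (vcpu_get_flag(&vcpu, EXCEPT_MASK)) {
	case unpack_vcpu_flag(EXCEPT_AA32_UND):
		printf("AArch32 UNDEF pended\n");
		break;
	default:
		printf("unexpected target\n");
		break;
	}

	vcpu_clear_flag(&vcpu, PENDING_EXCEPTION);
	vcpu_clear_flag(&vcpu, EXCEPT_MASK);
	return 0;
}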
Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--	arch/arm64/kvm/hyp/exception.c	23
-rw-r--r--	arch/arm64/kvm/hyp/nvhe/sys_regs.c	4
2 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index c5d009715402..b7557b25ed56 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -303,14 +303,14 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_el1_is_32bit(vcpu)) {
-		switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
-		case KVM_ARM64_EXCEPT_AA32_UND:
+		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+		case unpack_vcpu_flag(EXCEPT_AA32_UND):
 			enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
 			break;
-		case KVM_ARM64_EXCEPT_AA32_IABT:
+		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
 			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
 			break;
-		case KVM_ARM64_EXCEPT_AA32_DABT:
+		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
 			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
 			break;
 		default:
@@ -318,9 +318,8 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 			break;
 		}
 	} else {
-		switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
-		case (KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-		      KVM_ARM64_EXCEPT_AA64_EL1):
+		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
 			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 			break;
 		default:
@@ -340,12 +339,12 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
  */
 void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+	if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
 		kvm_inject_exception(vcpu);
-		vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
-				      KVM_ARM64_EXCEPT_MASK);
-	} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+		vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
+		vcpu_clear_flag(vcpu, EXCEPT_MASK);
+	} else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
 		kvm_skip_instr(vcpu);
-		vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+		vcpu_clear_flag(vcpu, INCREMENT_PC);
 	}
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index b6d86e423319..edd3eabf520f 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -38,9 +38,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
-	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
-			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-			     KVM_ARM64_PENDING_EXCEPTION);
+	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 	__kvm_adjust_pc(vcpu);
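[Editor's note] The removed lines above show exactly what the new "pend" accessor has to encode: raising PENDING_EXCEPTION together with the per-EL/vector value on the input flag set, in one call. A minimal sketch of such a helper, reusing the simplified accessors from the earlier illustration; this is not the in-tree kvm_pend_exception() (defined in arch/arm64/include/asm/kvm_host.h), which may also carry sanity checks such as warning if an exception is already pending.

/*
 * Sketch of a "pend exception" helper in the style of the new call
 * site above, built on the simplified flag accessors shown earlier.
 * NOT the in-tree definition.
 */
#define kvm_pend_exception(v, e)				\
	do {							\
		vcpu_set_flag((v), PENDING_EXCEPTION);		\
		vcpu_set_flag((v), e);				\
	} while (0)

/* Usage mirrors the hunk above: kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); */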