author		2022-02-10 07:33:26 -0500
committer	2022-04-29 12:49:55 -0400
commit		f417e1459a1299bb4984cbffc03d8746cab9f8a8 (patch)
tree		2b3f80de3b519e4cd336752bb789e5929bae917a
parent		KVM: x86/mmu: cleanup computation of MMU roles for two-dimensional paging (diff)
download	linux-dev-f417e1459a1299bb4984cbffc03d8746cab9f8a8.tar.xz
		linux-dev-f417e1459a1299bb4984cbffc03d8746cab9f8a8.zip
KVM: x86/mmu: cleanup computation of MMU roles for shadow paging
Pass the already-computed CPU role, instead of redoing it.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/mmu/mmu.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0b97aee6b735..3f4777849040 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4846,15 +4846,14 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				   const struct kvm_mmu_role_regs *regs)
+				   union kvm_mmu_role cpu_role)
 {
-	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
 	union kvm_mmu_role role;
 
 	role = cpu_role;
-	if (!____is_efer_lma(regs))
+	if (!cpu_role.ext.efer_lma)
 		role.base.level = PT32E_ROOT_LEVEL;
-	else if (____is_cr4_la57(regs))
+	else if (cpu_role.ext.cr4_la57)
 		role.base.level = PT64_ROOT_5LEVEL;
 	else
 		role.base.level = PT64_ROOT_4LEVEL;
@@ -4893,16 +4892,15 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
 	union kvm_mmu_role mmu_role =
-		kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
+		kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_role);
 
 	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
 }
 
 static union kvm_mmu_role
 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
-				   const struct kvm_mmu_role_regs *regs)
+				   union kvm_mmu_role cpu_role)
 {
-	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
 	union kvm_mmu_role role;
 
 	WARN_ON_ONCE(cpu_role.base.direct);
@@ -4922,7 +4920,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 		.efer = efer,
 	};
 	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
-	union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
+	union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);
 
 	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
 	kvm_mmu_new_pgd(vcpu, nested_cr3);
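
The pattern applied by this patch is general: hoist a computation into the caller and pass the result down, rather than having each helper recompute it from the raw register state. Below is a minimal, self-contained C sketch of that pattern, not KVM code; all names here (regs_t, role_t, compute_cpu_role, calc_root_page_role) are hypothetical stand-ins for the real identifiers.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for kvm_mmu_role_regs / kvm_mmu_role. */
typedef struct { bool efer_lma, cr4_la57; } regs_t;
typedef struct { bool efer_lma, cr4_la57; int level; } role_t;

/* Computed once, in the caller (analogous to kvm_calc_cpu_role()). */
static role_t compute_cpu_role(const regs_t *regs)
{
	role_t r = { .efer_lma = regs->efer_lma, .cr4_la57 = regs->cr4_la57 };
	return r;
}

/* Before the patch, the helper took *regs and recomputed the CPU role
 * itself; after it, the helper takes the already-computed role and only
 * derives the root level from it. */
static role_t calc_root_page_role(role_t cpu_role)
{
	role_t role = cpu_role;

	if (!cpu_role.efer_lma)
		role.level = 3;		/* cf. PT32E_ROOT_LEVEL */
	else if (cpu_role.cr4_la57)
		role.level = 5;		/* cf. PT64_ROOT_5LEVEL */
	else
		role.level = 4;		/* cf. PT64_ROOT_4LEVEL */
	return role;
}

int main(void)
{
	regs_t regs = { .efer_lma = true, .cr4_la57 = false };
	role_t cpu_role = compute_cpu_role(&regs);	/* computed once */
	role_t mmu_role = calc_root_page_role(cpu_role);/* reused, not redone */

	printf("root level: %d\n", mmu_role.level);	/* prints 4 */
	return 0;
}

Besides saving a redundant computation, threading the value through as a parameter makes the data flow explicit: callers like kvm_init_shadow_mmu() already hold the CPU role, and the helpers can no longer diverge by deriving it differently.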