From e8c22266e68f0db2a7e11b0a9f29fd88ec0cfd4a Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Mon, 15 Jun 2020 14:13:34 +0200
Subject: KVM: async_pf: change kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type to bool

Unlike normal 'int' functions returning '0' on success,
kvm_setup_async_pf()/kvm_arch_setup_async_pf() return '1' when a job to
handle the page fault asynchronously was scheduled and '0' otherwise. To
avoid the confusion, change the return type to 'bool'.

No functional change intended.

Suggested-by: Sean Christopherson
Signed-off-by: Vitaly Kuznetsov
Message-Id: <20200615121334.91300-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 virt/kvm/async_pf.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 45799606bb3e..390f758d5a27 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -156,17 +156,21 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-		       unsigned long hva, struct kvm_arch_async_pf *arch)
+/*
+ * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
+ * success, 'false' on failure (page fault has to be handled synchronously).
+ */
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			unsigned long hva, struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
 
 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
-		return 0;
+		return false;
 
 	/* Arch specific code should not do async PF in this case */
 	if (unlikely(kvm_is_error_hva(hva)))
-		return 0;
+		return false;
 
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
@@ -174,7 +178,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	 */
 	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
 	if (!work)
-		return 0;
+		return false;
 
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
@@ -193,7 +197,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	schedule_work(&work->work);
 
-	return 1;
+	return true;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
--
cgit v1.2.3-59-g8ed1b
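The convention change above is easiest to see in isolation. Below is a
minimal, standalone userspace C sketch (not kernel code; names such as
fake_vcpu and try_setup_async_pf are hypothetical) of the bool "try"
pattern the patch adopts, where a caller can no longer confuse
1-means-scheduled with the usual kernel 0-means-success convention:

/*
 * Standalone sketch of the "try" calling convention: the helper returns
 * bool, so the call site reads naturally at a glance.
 */
#include <stdbool.h>
#include <stdio.h>

#define ASYNC_PF_PER_VCPU 64

struct fake_vcpu {
	int queued;	/* async jobs already in flight */
};

/* Mirrors kvm_setup_async_pf(): true = job scheduled, false = the
 * caller must handle the fault synchronously. */
static bool try_setup_async_pf(struct fake_vcpu *vcpu)
{
	if (vcpu->queued >= ASYNC_PF_PER_VCPU)
		return false;

	vcpu->queued++;	/* pretend a work item was queued */
	return true;
}

int main(void)
{
	struct fake_vcpu vcpu = { .queued = 0 };

	if (try_setup_async_pf(&vcpu))
		printf("fault will be handled asynchronously\n");
	else
		printf("falling back to synchronous handling\n");
	return 0;
}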
From 995decb6c43e1d6e9d6a7d590471f2eea74600f4 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Wed, 8 Jul 2020 16:00:23 +0200
Subject: KVM: x86: take as_id into account when checking PGD

An OVMF-booted guest running on shadow pages crashes with a TRIPLE FAULT
after enabling paging from SMM. The crash is triggered from
mmu_check_root() and is caused by kvm_is_visible_gfn() searching through
memslots with as_id = 0 while the vCPU may be in a different context
(address space).

Introduce kvm_vcpu_is_visible_gfn() and use it from mmu_check_root().

Signed-off-by: Vitaly Kuznetsov
Message-Id: <20200708140023.1476020-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c   | 2 +-
 include/linux/kvm_host.h | 1 +
 virt/kvm/kvm_main.c      | 8 ++++++++
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0011b2c97f65..231beb6d9cf7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3693,7 +3693,7 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 {
 	int ret = 0;
 
-	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
+	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
 		ret = 1;
 	}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9edc6fc71a89..87140e79648b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -774,6 +774,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0a68c9d3d3ab..b528a59b0a84 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1626,6 +1626,14 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
+bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	return kvm_is_visible_memslot(memslot);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
+
 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct vm_area_struct *vma;
--
cgit v1.2.3-59-g8ed1b
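The bug class fixed above is easier to reason about with a toy model.
KVM on x86 keeps one set of memslots per address space (as_id 0 for
normal mode, 1 for SMM); a lookup hard-coded to as_id 0 can miss a slot
that is visible only in the vCPU's current address space. The following
standalone C sketch (not kernel code; all types and names such as
fake_kvm and vcpu_visible are hypothetical) contrasts the buggy and
fixed lookups:

/*
 * Toy model of per-address-space memslots: the "kvm_visible" lookup
 * always searches space 0 and misses the SMM-only slot, while the
 * vCPU-aware lookup finds it.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_AS 2		/* normal + SMM, as on x86 */

struct fake_slot { unsigned long base_gfn, npages; };

struct fake_kvm {
	struct fake_slot slots[NR_AS];	/* one slot per space, for brevity */
};

struct fake_vcpu {
	struct fake_kvm *kvm;
	int as_id;	/* current address space of this vCPU */
};

static bool gfn_in_slot(const struct fake_slot *s, unsigned long gfn)
{
	return s->npages && gfn >= s->base_gfn && gfn < s->base_gfn + s->npages;
}

/* Buggy analogue of kvm_is_visible_gfn(): always searches as_id 0. */
static bool kvm_visible(struct fake_kvm *kvm, unsigned long gfn)
{
	return gfn_in_slot(&kvm->slots[0], gfn);
}

/* Fixed analogue of kvm_vcpu_is_visible_gfn(): uses the vCPU's as_id. */
static bool vcpu_visible(struct fake_vcpu *vcpu, unsigned long gfn)
{
	return gfn_in_slot(&vcpu->kvm->slots[vcpu->as_id], gfn);
}

int main(void)
{
	struct fake_kvm kvm = {
		.slots = { { 0x100, 0x10 }, { 0x200, 0x10 } },
	};
	struct fake_vcpu vcpu = { .kvm = &kvm, .as_id = 1 };	/* in SMM */
	unsigned long root_gfn = 0x205;	/* PGD lives in an SMM-only slot */

	printf("kvm_visible:  %d (misses the slot -> spurious TRIPLE FAULT)\n",
	       kvm_visible(&kvm, root_gfn));
	printf("vcpu_visible: %d (slot found in the vCPU's address space)\n",
	       vcpu_visible(&vcpu, root_gfn));
	return 0;
}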
From 6926f95accee3f8ceb5f69dbecd880687028ae70 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 2 Jul 2020 19:35:39 -0700
Subject: KVM: Move x86's MMU memory cache helpers to common KVM code

Move x86's memory cache helpers to common KVM code so that they can be
reused by arm64 and MIPS in future patches.

Suggested-by: Christoffer Dall
Reviewed-by: Ben Gardon
Signed-off-by: Sean Christopherson
Message-Id: <20200703023545.8771-16-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c   | 53 ----------------------------------------------
 include/linux/kvm_host.h |  7 ++++++
 virt/kvm/kvm_main.c      | 55 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 53 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 942b6a90cb17..fa506aaaf019 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1061,47 +1061,6 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 }
 
-static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
-					       gfp_t gfp_flags)
-{
-	gfp_flags |= mc->gfp_zero;
-
-	if (mc->kmem_cache)
-		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
-	else
-		return (void *)__get_free_page(gfp_flags);
-}
-
-static int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
-{
-	void *obj;
-
-	if (mc->nobjs >= min)
-		return 0;
-	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
-		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
-		if (!obj)
-			return mc->nobjs >= min ? 0 : -ENOMEM;
-		mc->objects[mc->nobjs++] = obj;
-	}
-	return 0;
-}
-
-static int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
-{
-	return mc->nobjs;
-}
-
-static void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-	while (mc->nobjs) {
-		if (mc->kmem_cache)
-			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
-		else
-			free_page((unsigned long)mc->objects[--mc->nobjs]);
-	}
-}
-
 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
 	int r;
@@ -1133,18 +1092,6 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-	void *p;
-
-	if (WARN_ON(!mc->nobjs))
-		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
-	else
-		p = mc->objects[--mc->nobjs];
-	BUG_ON(!p);
-	return p;
-}
-
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 87140e79648b..989afcbe642f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -817,6 +817,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+#endif
+
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 				 struct kvm_vcpu *except,
 				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b528a59b0a84..2c2c0254c2d8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -340,6 +340,61 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
+					       gfp_t gfp_flags)
+{
+	gfp_flags |= mc->gfp_zero;
+
+	if (mc->kmem_cache)
+		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
+	else
+		return (void *)__get_free_page(gfp_flags);
+}
+
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+{
+	void *obj;
+
+	if (mc->nobjs >= min)
+		return 0;
+	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
+		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
+		if (!obj)
+			return mc->nobjs >= min ? 0 : -ENOMEM;
+		mc->objects[mc->nobjs++] = obj;
+	}
+	return 0;
+}
+
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
+{
+	return mc->nobjs;
+}
+
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs) {
+		if (mc->kmem_cache)
+			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
+		else
+			free_page((unsigned long)mc->objects[--mc->nobjs]);
+	}
+}
+
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+	void *p;
+
+	if (WARN_ON(!mc->nobjs))
+		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
+	else
+		p = mc->objects[--mc->nobjs];
+	BUG_ON(!p);
+	return p;
+}
+#endif
+
 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	mutex_init(&vcpu->mutex);
--
cgit v1.2.3-59-g8ed1b
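The helpers moved above implement a two-phase pattern: top up a small
per-vCPU object cache while sleeping is still allowed, then pop objects
from it later under a lock, where allocation must neither sleep nor
fail. A standalone userspace C sketch of that pattern (not kernel code;
malloc/free stand in for the kernel allocators, and all names such as
cache_topup are hypothetical) follows:

/*
 * Two-phase allocation: fill the cache in a sleepable context, consume
 * it later in a context where no allocator may be called.
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_CAPACITY 40	/* analogue of the per-arch cache size */
#define OBJ_SIZE 4096

struct memory_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Analogue of kvm_mmu_topup_memory_cache(): may "sleep", may fail. */
static int cache_topup(struct memory_cache *mc, int min)
{
	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < CACHE_CAPACITY) {
		void *obj = calloc(1, OBJ_SIZE);
		if (!obj)
			return mc->nobjs >= min ? 0 : -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Analogue of kvm_mmu_memory_cache_alloc(): in the kernel this runs
 * with a lock held, so it only pops a pre-allocated object. */
static void *cache_alloc(struct memory_cache *mc)
{
	if (!mc->nobjs)
		return NULL;	/* the kernel version WARNs and falls back */
	return mc->objects[--mc->nobjs];
}

static void cache_free_all(struct memory_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
}

int main(void)
{
	struct memory_cache mc = { 0 };

	if (cache_topup(&mc, 5))	/* sleepable context */
		return 1;

	void *pt = cache_alloc(&mc);	/* "atomic" context: no allocation */
	printf("got object %p, %d left in cache\n", pt, mc.nobjs);

	cache_free_all(&mc);
	return 0;
}

Sizing the cache for the worst-case number of objects one operation can
consume is what lets the second phase be infallible, which is the
property arm64 and MIPS reuse once these helpers live in common code.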