author		David Matlack <dmatlack@google.com>	2022-06-22 15:27:08 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-06-24 04:52:00 -0400
commit		837f66c71207542283831d0762c5dca3db5b397a (patch)
tree		3676dcfc4354048b5eb61133a5bf72081b7dc2d8 /virt/kvm
parent		KVM: x86/mmu: pull call to drop_large_spte() into __link_shadow_page() (diff)
KVM: Allow for different capacities in kvm_mmu_memory_cache structs
Allow the capacity of the kvm_mmu_memory_cache struct to be chosen at declaration time rather than being fixed for all declarations. This will be used in a follow-up commit to declare a cache in x86 with a capacity of 512+ objects without having to increase the capacity of all caches in KVM.

This change requires that each cache now specify its capacity at runtime, since the cache struct itself no longer has a fixed capacity known at compile time. To protect against someone accidentally defining a kvm_mmu_memory_cache struct directly (without the extra storage), this commit includes a WARN_ON() in kvm_mmu_topup_memory_cache().

In order to support different capacities, this commit changes the objects pointer array to be dynamically allocated the first time the cache is topped up.

While here, opportunistically clean up the stack-allocated kvm_mmu_memory_cache structs in riscv and arm64 to use designated initializers.

No functional change intended.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-22-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
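As a hedged sketch of what the message above describes (the capacity value here is illustrative, echoing the "512+ objects" x86 case, and is not a value from this diff): a stack-allocated cache is declared with a designated initializer, so its objects pointer starts out NULL, and the first topup sizes the backing array.

	/* Illustrative only: mirrors the riscv/arm64 cleanup described above. */
	struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };

	/*
	 * The first topup allocates cache.objects with the requested
	 * capacity; 512 is a hypothetical value, not taken from this patch.
	 */
	if (__kvm_mmu_topup_memory_cache(&cache, 512, 512))
		return -ENOMEM;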
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/kvm_main.c	33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5b8ae83e09d7..45188d11812c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -396,14 +396,31 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
return (void *)__get_free_page(gfp_flags);
}
-int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
+ gfp_t gfp = GFP_KERNEL_ACCOUNT;
void *obj;
if (mc->nobjs >= min)
return 0;
- while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
- obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
+
+ if (unlikely(!mc->objects)) {
+ if (WARN_ON_ONCE(!capacity))
+ return -EIO;
+
+ mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
+ if (!mc->objects)
+ return -ENOMEM;
+
+ mc->capacity = capacity;
+ }
+
+ /* It is illegal to request a different capacity across topups. */
+ if (WARN_ON_ONCE(mc->capacity != capacity))
+ return -EIO;
+
+ while (mc->nobjs < mc->capacity) {
+ obj = mmu_memory_cache_alloc_obj(mc, gfp);
if (!obj)
return mc->nobjs >= min ? 0 : -ENOMEM;
mc->objects[mc->nobjs++] = obj;
@@ -411,6 +428,11 @@ int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
return 0;
}
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+{
+ return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
+}
+
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
return mc->nobjs;
@@ -424,6 +446,11 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
else
free_page((unsigned long)mc->objects[--mc->nobjs]);
}
+
+ kvfree(mc->objects);
+
+ mc->objects = NULL;
+ mc->capacity = 0;
}
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
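For reference, a minimal end-to-end sketch of the API as it stands after this patch (error handling trimmed; the capacity of 40 and the caller shape are assumptions, not code from this commit):

	struct kvm_mmu_memory_cache mc = { .gfp_zero = __GFP_ZERO };
	void *obj;

	/* The first topup allocates mc->objects and fixes mc->capacity. */
	if (__kvm_mmu_topup_memory_cache(&mc, 40, 40))
		return;

	/* Pops one pre-allocated object from the cache. */
	obj = kvm_mmu_memory_cache_alloc(&mc);

	/* Frees any remaining objects, then the objects array itself. */
	kvm_mmu_free_memory_cache(&mc);

Note that a later topup requesting a different capacity trips the WARN_ON_ONCE() and returns -EIO, which is why the kvm_mmu_topup_memory_cache() wrapper always passes KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE.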