Diffstat (limited to 'arch/x86/kvm/mmu/tdp_mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu/tdp_mmu.c | 1169
1 file changed, 773 insertions, 396 deletions
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 1db8496259ad..672f0432d777 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -14,28 +14,36 @@ static bool __read_mostly tdp_mmu_enabled = true; module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); /* Initializes the TDP MMU for the VM, if enabled. */ -bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) +int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { + struct workqueue_struct *wq; + if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled)) - return false; + return 0; + + wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0); + if (!wq) + return -ENOMEM; /* This should not be changed for the lifetime of the VM. */ kvm->arch.tdp_mmu_enabled = true; - INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); - - return true; + kvm->arch.tdp_mmu_zap_wq = wq; + return 1; } -static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, +/* Arbitrarily returns true so that this may be used in if statements. */ +static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, bool shared) { if (shared) lockdep_assert_held_read(&kvm->mmu_lock); else lockdep_assert_held_write(&kvm->mmu_lock); + + return true; } void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) @@ -43,20 +51,20 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) if (!kvm->arch.tdp_mmu_enabled) return; + /* Also waits for any queued work items. */ + destroy_workqueue(kvm->arch.tdp_mmu_zap_wq); + WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages)); WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); /* * Ensure that all the outstanding RCU callbacks to free shadow pages - * can run before the VM is torn down. + * can run before the VM is torn down. Work items on tdp_mmu_zap_wq + * can call kvm_tdp_mmu_put_root and create new callbacks. */ rcu_barrier(); } -static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end, bool can_yield, bool flush, - bool shared); - static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) { free_page((unsigned long)sp->spt); @@ -79,6 +87,56 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) tdp_mmu_free_sp(sp); } +static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, + bool shared); + +static void tdp_mmu_zap_root_work(struct work_struct *work) +{ + struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page, + tdp_mmu_async_work); + struct kvm *kvm = root->tdp_mmu_async_data; + + read_lock(&kvm->mmu_lock); + + /* + * A TLB flush is not necessary as KVM performs a local TLB flush when + * allocating a new root (see kvm_mmu_load()), and when migrating vCPU + * to a different pCPU. Note, the local TLB flush on reuse also + * invalidates any paging-structure-cache entries, i.e. TLB entries for + * intermediate paging structures, that may be zapped, as such entries + * are associated with the ASID on both VMX and SVM. + */ + tdp_mmu_zap_root(kvm, root, true); + + /* + * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for + * avoiding an infinite loop. By design, the root is reachable while + * it's being asynchronously zapped, thus a different task can put its + * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an + * asynchronously zapped root is unavoidable. 
+ */ + kvm_tdp_mmu_put_root(kvm, root, true); + + read_unlock(&kvm->mmu_lock); +} + +static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root) +{ + root->tdp_mmu_async_data = kvm; + INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work); + queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work); +} + +static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page) +{ + union kvm_mmu_page_role role = page->role; + role.invalid = true; + + /* No need to use cmpxchg, only the invalid bit can change. */ + role.word = xchg(&page->role.word, role.word); + return role.invalid; +} + void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, bool shared) { @@ -89,25 +147,63 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, WARN_ON(!root->tdp_mmu_page); + /* + * The root now has refcount=0. It is valid, but readers already + * cannot acquire a reference to it because kvm_tdp_mmu_get_root() + * rejects it. This remains true for the rest of the execution + * of this function, because readers visit valid roots only + * (except for tdp_mmu_zap_root_work(), which however + * does not acquire any reference itself). + * + * Even though there are flows that need to visit all roots for + * correctness, they all take mmu_lock for write, so they cannot yet + * run concurrently. The same is true after kvm_tdp_root_mark_invalid, + * since the root still has refcount=0. + * + * However, tdp_mmu_zap_root can yield, and writers do not expect to + * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()). + * So the root temporarily gets an extra reference, going to refcount=1 + * while staying invalid. Readers still cannot acquire any reference; + * but writers are now allowed to run if tdp_mmu_zap_root yields and + * they might take an extra reference if they themselves yield. + * Therefore, when the reference is given back by the worker, + * there is no guarantee that the refcount is still 1. If not, whoever + * puts the last reference will free the page, but they will not have to + * zap the root because a root cannot go from invalid to valid. + */ + if (!kvm_tdp_root_mark_invalid(root)) { + refcount_set(&root->tdp_mmu_root_count, 1); + + /* + * Zapping the root in a worker is not just "nice to have"; + * it is required because kvm_tdp_mmu_invalidate_all_roots() + * skips already-invalid roots. If kvm_tdp_mmu_put_root() did + * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast() + * might return with some roots not zapped yet. + */ + tdp_mmu_schedule_zap_root(kvm, root); + return; + } + spin_lock(&kvm->arch.tdp_mmu_pages_lock); list_del_rcu(&root->link); spin_unlock(&kvm->arch.tdp_mmu_pages_lock); - - zap_gfn_range(kvm, root, 0, -1ull, false, false, shared); - call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback); } /* - * Finds the next valid root after root (or the first valid root if root - * is NULL), takes a reference on it, and returns that next root. If root - * is not NULL, this thread should have already taken a reference on it, and - * that reference will be dropped. If no valid root is found, this - * function will return NULL. + * Returns the next root after @prev_root (or the first root if @prev_root is + * NULL). A reference to the returned root is acquired, and the reference to + * @prev_root is released (the caller obviously must hold a reference to + * @prev_root if it's non-NULL). + * + * If @only_valid is true, invalid roots are skipped. 
+ * + * Returns NULL if the end of tdp_mmu_roots was reached. */ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, struct kvm_mmu_page *prev_root, - bool shared) + bool shared, bool only_valid) { struct kvm_mmu_page *next_root; @@ -121,9 +217,14 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, typeof(*next_root), link); - while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root)) + while (next_root) { + if ((!only_valid || !next_root->role.invalid) && + kvm_tdp_mmu_get_root(next_root)) + break; + next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, &next_root->link, typeof(*next_root), link); + } rcu_read_unlock(); @@ -143,71 +244,91 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, * mode. In the unlikely event that this thread must free a root, the lock * will be temporarily dropped and reacquired in write mode. */ -#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \ - for (_root = tdp_mmu_next_root(_kvm, NULL, _shared); \ - _root; \ - _root = tdp_mmu_next_root(_kvm, _root, _shared)) \ - if (kvm_mmu_page_as_id(_root) != _as_id) { \ - } else - -#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \ - list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link, \ - lockdep_is_held_type(&kvm->mmu_lock, 0) || \ - lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock)) \ - if (kvm_mmu_page_as_id(_root) != _as_id) { \ +#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\ + for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \ + _root; \ + _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \ + if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \ + kvm_mmu_page_as_id(_root) != _as_id) { \ } else -static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu, - int level) -{ - union kvm_mmu_page_role role; +#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \ + __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true) - role = vcpu->arch.mmu->mmu_role.base; - role.level = level; - role.direct = true; - role.gpte_is_8_bytes = true; - role.access = ACC_ALL; - role.ad_disabled = !shadow_accessed_mask; +#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \ + __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false) - return role; -} +/* + * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write, + * the implication being that any flow that holds mmu_lock for read is + * inherently yield-friendly and should use the yield-safe variant above. + * Holding mmu_lock for write obviates the need for RCU protection as the list + * is guaranteed to be stable. 
+ */ +#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \ + list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \ + if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \ + kvm_mmu_page_as_id(_root) != _as_id) { \ + } else -static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn, - int level) +static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache); + + return sp; +} + +static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, + gfn_t gfn, union kvm_mmu_page_role role) +{ set_page_private(virt_to_page(sp->spt), (unsigned long)sp); - sp->role.word = page_role_for_level(vcpu, level).word; + sp->role = role; sp->gfn = gfn; + sp->ptep = sptep; sp->tdp_mmu_page = true; trace_kvm_mmu_get_page(sp, true); +} - return sp; +static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp, + struct tdp_iter *iter) +{ + struct kvm_mmu_page *parent_sp; + union kvm_mmu_page_role role; + + parent_sp = sptep_to_sp(rcu_dereference(iter->sptep)); + + role = parent_sp->role; + role.level--; + + tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); } hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) { - union kvm_mmu_page_role role; + union kvm_mmu_page_role role = vcpu->arch.mmu->root_role; struct kvm *kvm = vcpu->kvm; struct kvm_mmu_page *root; lockdep_assert_held_write(&kvm->mmu_lock); - role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level); - - /* Check for an existing root before allocating a new one. */ + /* + * Check for an existing root before allocating a new one. Note, the + * role check prevents consuming an invalid root. + */ for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) { if (root->role.word == role.word && - kvm_tdp_mmu_get_root(kvm, root)) + kvm_tdp_mmu_get_root(root)) goto out; } - root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level); + root = tdp_mmu_alloc_sp(vcpu); + tdp_mmu_init_sp(root, NULL, 0, role); + refcount_set(&root->tdp_mmu_root_count, 1); spin_lock(&kvm->arch.tdp_mmu_pages_lock); @@ -251,26 +372,18 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, } } -/** - * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU - * - * @kvm: kvm instance - * @sp: the new page - * @account_nx: This page replaces a NX large page and should be marked for - * eventual reclaim. - */ -static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp, - bool account_nx) +static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) { - spin_lock(&kvm->arch.tdp_mmu_pages_lock); - list_add(&sp->link, &kvm->arch.tdp_mmu_pages); - if (account_nx) - account_huge_nx_page(kvm, sp); - spin_unlock(&kvm->arch.tdp_mmu_pages_lock); + kvm_account_pgtable_pages((void *)sp->spt, +1); +} + +static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + kvm_account_pgtable_pages((void *)sp->spt, -1); } /** - * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU + * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages * * @kvm: kvm instance * @sp: the page to be removed @@ -278,9 +391,10 @@ static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp, * the MMU lock and the operation must synchronize with other * threads that might be adding or removing pages. 
*/ -static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp, - bool shared) +static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp, + bool shared) { + tdp_unaccount_mmu_page(kvm, sp); if (shared) spin_lock(&kvm->arch.tdp_mmu_pages_lock); else @@ -295,7 +409,7 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp, } /** - * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure + * handle_removed_pt() - handle a page table removed from the TDP structure * * @kvm: kvm instance * @pt: the page removed from the paging structure @@ -311,8 +425,7 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp, * this thread will be responsible for ensuring the page is freed. Hence the * early rcu_dereferences in the function. */ -static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, - bool shared) +static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) { struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); int level = sp->role.level; @@ -321,12 +434,12 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, trace_kvm_mmu_prepare_zap_page(sp); - tdp_mmu_unlink_page(kvm, sp, shared); + tdp_mmu_unlink_sp(kvm, sp, shared); - for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - u64 *sptep = rcu_dereference(pt) + i; + for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { + tdp_ptep_t sptep = pt + i; gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); - u64 old_child_spte; + u64 old_spte; if (shared) { /* @@ -338,8 +451,8 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, * value to the removed SPTE value. */ for (;;) { - old_child_spte = xchg(sptep, REMOVED_SPTE); - if (!is_removed_spte(old_child_spte)) + old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE); + if (!is_removed_spte(old_spte)) break; cpu_relax(); } @@ -353,28 +466,45 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, * are guarded by the memslots generation, not by being * unreachable. */ - old_child_spte = READ_ONCE(*sptep); - if (!is_shadow_present_pte(old_child_spte)) + old_spte = kvm_tdp_mmu_read_spte(sptep); + if (!is_shadow_present_pte(old_spte)) continue; /* - * Marking the SPTE as a removed SPTE is not - * strictly necessary here as the MMU lock will - * stop other threads from concurrently modifying - * this SPTE. Using the removed SPTE value keeps - * the two branches consistent and simplifies - * the function. + * Use the common helper instead of a raw WRITE_ONCE as + * the SPTE needs to be updated atomically if it can be + * modified by a different vCPU outside of mmu_lock. + * Even though the parent SPTE is !PRESENT, the TLB + * hasn't yet been flushed, and both Intel and AMD + * document that A/D assists can use upper-level PxE + * entries that are cached in the TLB, i.e. the CPU can + * still access the page and mark it dirty. + * + * No retry is needed in the atomic update path as the + * sole concern is dropping a Dirty bit, i.e. no other + * task can zap/remove the SPTE as mmu_lock is held for + * write. Marking the SPTE as a removed SPTE is not + * strictly necessary for the same reason, but using + * the remove SPTE value keeps the shared/exclusive + * paths consistent and allows the handle_changed_spte() + * call below to hardcode the new value to REMOVED_SPTE. 
+ * + * Note, even though dropping a Dirty bit is the only + * scenario where a non-atomic update could result in a + * functional bug, simply checking the Dirty bit isn't + * sufficient as a fast page fault could read the upper + * level SPTE before it is zapped, and then make this + * target SPTE writable, resume the guest, and set the + * Dirty bit between reading the SPTE above and writing + * it here. */ - WRITE_ONCE(*sptep, REMOVED_SPTE); + old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, + REMOVED_SPTE, level); } handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, - old_child_spte, REMOVED_SPTE, level, - shared); + old_spte, REMOVED_SPTE, level, shared); } - kvm_flush_remote_tlbs_with_address(kvm, base_gfn, - KVM_PAGES_PER_HPAGE(level + 1)); - call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback); } @@ -435,6 +565,9 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte); + if (is_leaf) + check_spte_writable_invariants(new_spte); + /* * The only times a SPTE should be changed from a non-present to * non-present state is when an MMIO entry is installed/modified/ @@ -469,11 +602,13 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, /* * Recursively handle child PTs if the change removed a subtree from - * the paging structure. + * the paging structure. Note the WARN on the PFN changing without the + * SPTE being converted to a hugepage (leaf) or being zapped. Shadow + * pages are kernel allocations and should never be migrated. */ - if (was_present && !was_leaf && (pfn_changed || !is_present)) - handle_removed_tdp_mmu_page(kvm, - spte_to_child_pt(old_spte, level), shared); + if (was_present && !was_leaf && + (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed))) + handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared); } static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, @@ -492,74 +627,88 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, * and handle the associated bookkeeping. Do not mark the page dirty * in KVM's dirty bitmaps. * + * If setting the SPTE fails because it has changed, iter->old_spte will be + * refreshed to the current value of the spte. + * * @kvm: kvm instance * @iter: a tdp_iter instance currently on the SPTE that should be set * @new_spte: The value the SPTE should be set to - * Returns: true if the SPTE was set, false if it was not. If false is returned, - * this function will have no side-effects. + * Return: + * * 0 - If the SPTE was set. + * * -EBUSY - If the SPTE cannot be set. In this case this function will have + * no side-effects other than setting iter->old_spte to the last + * known value of the spte. */ -static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, - struct tdp_iter *iter, - u64 new_spte) +static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter, + u64 new_spte) { - lockdep_assert_held_read(&kvm->mmu_lock); + u64 *sptep = rcu_dereference(iter->sptep); /* - * Do not change removed SPTEs. Only the thread that froze the SPTE - * may modify it. + * The caller is responsible for ensuring the old SPTE is not a REMOVED + * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE, + * and pre-checking before inserting a new SPTE is advantageous as it + * avoids unnecessary work. 
*/ - if (is_removed_spte(iter->old_spte)) - return false; + WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte)); + + lockdep_assert_held_read(&kvm->mmu_lock); /* * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and * does not hold the mmu_lock. */ - if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte, - new_spte) != iter->old_spte) - return false; + if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte)) + return -EBUSY; __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, new_spte, iter->level, true); handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level); - return true; + return 0; } -static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, - struct tdp_iter *iter) +static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter) { + int ret; + /* * Freeze the SPTE by setting it to a special, * non-present value. This will stop other threads from * immediately installing a present entry in its place * before the TLBs are flushed. */ - if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE)) - return false; + ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE); + if (ret) + return ret; kvm_flush_remote_tlbs_with_address(kvm, iter->gfn, KVM_PAGES_PER_HPAGE(iter->level)); /* - * No other thread can overwrite the removed SPTE as they - * must either wait on the MMU lock or use - * tdp_mmu_set_spte_atomic which will not overwrite the - * special removed SPTE value. No bookkeeping is needed - * here since the SPTE is going from non-present - * to non-present. + * No other thread can overwrite the removed SPTE as they must either + * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not + * overwrite the special removed SPTE value. No bookkeeping is needed + * here since the SPTE is going from non-present to non-present. Use + * the raw write helper to avoid an unnecessary check on volatile bits. */ - WRITE_ONCE(*rcu_dereference(iter->sptep), 0); + __kvm_tdp_mmu_write_spte(iter->sptep, 0); - return true; + return 0; } /* * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping - * @kvm: kvm instance - * @iter: a tdp_iter instance currently on the SPTE that should be set - * @new_spte: The value the SPTE should be set to + * @kvm: KVM instance + * @as_id: Address space ID, i.e. regular vs. SMM + * @sptep: Pointer to the SPTE + * @old_spte: The current value of the SPTE + * @new_spte: The new value that will be set for the SPTE + * @gfn: The base GFN that was (or will be) mapped by the SPTE + * @level: The level _containing_ the SPTE (its parent PT's level) * @record_acc_track: Notify the MM subsystem of changes to the accessed state * of the page. Should be set unless handling an MMU * notifier for access tracking. Leaving record_acc_track @@ -570,57 +719,71 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, * unless performing certain dirty logging operations. * Leaving record_dirty_log unset in that case prevents page * writes from being double counted. + * + * Returns the old SPTE value, which _may_ be different than @old_spte if the + * SPTE had voldatile bits. 
*/ -static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, - u64 new_spte, bool record_acc_track, - bool record_dirty_log) +static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, + u64 old_spte, u64 new_spte, gfn_t gfn, int level, + bool record_acc_track, bool record_dirty_log) { lockdep_assert_held_write(&kvm->mmu_lock); /* - * No thread should be using this function to set SPTEs to the + * No thread should be using this function to set SPTEs to or from the * temporary removed SPTE value. * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic * should be used. If operating under the MMU lock in write mode, the * use of the removed SPTE should not be necessary. */ - WARN_ON(is_removed_spte(iter->old_spte)); + WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte)); - WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); + old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level); + + __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); - __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, - new_spte, iter->level, false); if (record_acc_track) - handle_changed_spte_acc_track(iter->old_spte, new_spte, - iter->level); + handle_changed_spte_acc_track(old_spte, new_spte, level); if (record_dirty_log) - handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn, - iter->old_spte, new_spte, - iter->level); + handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, + new_spte, level); + return old_spte; +} + +static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, + u64 new_spte, bool record_acc_track, + bool record_dirty_log) +{ + WARN_ON_ONCE(iter->yielded); + + iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, + iter->old_spte, new_spte, + iter->gfn, iter->level, + record_acc_track, record_dirty_log); } static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte) { - __tdp_mmu_set_spte(kvm, iter, new_spte, true, true); + _tdp_mmu_set_spte(kvm, iter, new_spte, true, true); } static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte) { - __tdp_mmu_set_spte(kvm, iter, new_spte, false, true); + _tdp_mmu_set_spte(kvm, iter, new_spte, false, true); } static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte) { - __tdp_mmu_set_spte(kvm, iter, new_spte, true, false); + _tdp_mmu_set_spte(kvm, iter, new_spte, true, false); } #define tdp_root_for_each_pte(_iter, _root, _start, _end) \ - for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end) + for_each_tdp_pte(_iter, _root, _start, _end) #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \ tdp_root_for_each_pte(_iter, _root, _start, _end) \ @@ -630,8 +793,7 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, else #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \ - for_each_tdp_pte(_iter, __va(_mmu->root_hpa), \ - _mmu->shadow_root_level, _start, _end) + for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end) /* * Yield if the MMU lock is contended or this thread needs to return control @@ -640,28 +802,29 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, * If this function should yield and flush is set, it will perform a remote * TLB flush before yielding. 
* - * If this function yields, it will also reset the tdp_iter's walk over the - * paging structure and the calling function should skip to the next - * iteration to allow the iterator to continue its traversal from the - * paging structure root. + * If this function yields, iter->yielded is set and the caller must skip to + * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk + * over the paging structures to allow the iterator to continue its traversal + * from the paging structure root. * - * Return true if this function yielded and the iterator's traversal was reset. - * Return false if a yield was not needed. + * Returns true if this function yielded. */ -static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, - struct tdp_iter *iter, bool flush, - bool shared) +static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, + struct tdp_iter *iter, + bool flush, bool shared) { + WARN_ON(iter->yielded); + /* Ensure forward progress has been made before yielding. */ if (iter->next_last_level_gfn == iter->yielded_gfn) return false; if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { - rcu_read_unlock(); - if (flush) kvm_flush_remote_tlbs(kvm); + rcu_read_unlock(); + if (shared) cond_resched_rwlock_read(&kvm->mmu_lock); else @@ -671,208 +834,209 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, WARN_ON(iter->gfn > iter->next_last_level_gfn); - tdp_iter_restart(iter); - - return true; + iter->yielded = true; } - return false; + return iter->yielded; } -/* - * Tears down the mappings for the range of gfns, [start, end), and frees the - * non-root pages mapping GFNs strictly within that range. Returns true if - * SPTEs have been cleared and a TLB flush is needed before releasing the - * MMU lock. - * - * If can_yield is true, will release the MMU lock and reschedule if the - * scheduler needs the CPU or there is contention on the MMU lock. If this - * function cannot yield, it will not release the MMU lock or reschedule and - * the caller must ensure it does not supply too large a GFN range, or the - * operation can cause a soft lockup. - * - * If shared is true, this thread holds the MMU lock in read mode and must - * account for the possibility that other threads are modifying the paging - * structures concurrently. If shared is false, this thread should hold the - * MMU lock in write mode. - */ -static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end, bool can_yield, bool flush, - bool shared) +static inline gfn_t tdp_mmu_max_gfn_exclusive(void) { - gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT); - bool zap_all = (start == 0 && end >= max_gfn_host); - struct tdp_iter iter; - - /* - * No need to try to step down in the iterator when zapping all SPTEs, - * zapping the top-level non-leaf SPTEs will recurse on their children. - */ - int min_level = zap_all ? root->role.level : PG_LEVEL_4K; - /* - * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will - * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF, - * and so KVM will never install a SPTE for such addresses. + * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with + * a gpa range that would exceed the max gfn, and KVM does not create + * MMIO SPTEs for "impossible" gfns, instead sending such accesses down + * the slow emulation path every time. 
*/ - end = min(end, max_gfn_host); + return kvm_mmu_max_gfn() + 1; +} - kvm_lockdep_assert_mmu_lock_held(kvm, shared); +static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, + bool shared, int zap_level) +{ + struct tdp_iter iter; - rcu_read_lock(); + gfn_t end = tdp_mmu_max_gfn_exclusive(); + gfn_t start = 0; - for_each_tdp_pte_min_level(iter, root->spt, root->role.level, - min_level, start, end) { + for_each_tdp_pte_min_level(iter, root, zap_level, start, end) { retry: - if (can_yield && - tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) { - flush = false; + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) continue; - } if (!is_shadow_present_pte(iter.old_spte)) continue; - /* - * If this is a non-last-level SPTE that covers a larger range - * than should be zapped, continue, and zap the mappings at a - * lower level, except when zapping all SPTEs. - */ - if (!zap_all && - (iter.gfn < start || - iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) && - !is_last_spte(iter.old_spte, iter.level)) + if (iter.level > zap_level) continue; - if (!shared) { + if (!shared) tdp_mmu_set_spte(kvm, &iter, 0); - flush = true; - } else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { - /* - * The iter must explicitly re-read the SPTE because - * the atomic cmpxchg failed. - */ - iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) goto retry; - } } - - rcu_read_unlock(); - return flush; } -/* - * Tears down the mappings for the range of gfns, [start, end), and frees the - * non-root pages mapping GFNs strictly within that range. Returns true if - * SPTEs have been cleared and a TLB flush is needed before releasing the - * MMU lock. - */ -bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start, - gfn_t end, bool can_yield, bool flush) +static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, + bool shared) { - struct kvm_mmu_page *root; - for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false) - flush = zap_gfn_range(kvm, root, start, end, can_yield, flush, - false); + /* + * The root must have an elevated refcount so that it's reachable via + * mmu_notifier callbacks, which allows this path to yield and drop + * mmu_lock. When handling an unmap/release mmu_notifier command, KVM + * must drop all references to relevant pages prior to completing the + * callback. Dropping mmu_lock with an unreachable root would result + * in zapping SPTEs after a relevant mmu_notifier callback completes + * and lead to use-after-free as zapping a SPTE triggers "writeback" of + * dirty accessed bits to the SPTE's associated struct page. + */ + WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count)); - return flush; -} + kvm_lockdep_assert_mmu_lock_held(kvm, shared); -void kvm_tdp_mmu_zap_all(struct kvm *kvm) -{ - bool flush = false; - int i; + rcu_read_lock(); - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush); + /* + * To avoid RCU stalls due to recursively removing huge swaths of SPs, + * split the zap into two passes. On the first pass, zap at the 1gb + * level, and then zap top-level SPs on the second pass. "1gb" is not + * arbitrary, as KVM must be able to zap a 1gb shadow page without + * inducing a stall to allow in-place replacement with a 1gb hugepage. + * + * Because zapping a SP recurses on its children, stepping down to + * PG_LEVEL_4K in the iterator itself is unnecessary. 
+ */ + __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G); + __tdp_mmu_zap_root(kvm, root, shared, root->role.level); - if (flush) - kvm_flush_remote_tlbs(kvm); + rcu_read_unlock(); } -static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm, - struct kvm_mmu_page *prev_root) +bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) { - struct kvm_mmu_page *next_root; + u64 old_spte; - if (prev_root) - next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, - &prev_root->link, - typeof(*prev_root), link); - else - next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, - typeof(*next_root), link); + /* + * This helper intentionally doesn't allow zapping a root shadow page, + * which doesn't have a parent page table and thus no associated entry. + */ + if (WARN_ON_ONCE(!sp->ptep)) + return false; - while (next_root && !(next_root->role.invalid && - refcount_read(&next_root->tdp_mmu_root_count))) - next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, - &next_root->link, - typeof(*next_root), link); + old_spte = kvm_tdp_mmu_read_spte(sp->ptep); + if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) + return false; - return next_root; + __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, + sp->gfn, sp->role.level + 1, true, true); + + return true; } /* - * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each - * invalidated root, they will not be freed until this function drops the - * reference. Before dropping that reference, tear down the paging - * structure so that whichever thread does drop the last reference - * only has to do a trivial amount of work. Since the roots are invalid, - * no new SPTEs should be created under them. + * If can_yield is true, will release the MMU lock and reschedule if the + * scheduler needs the CPU or there is contention on the MMU lock. If this + * function cannot yield, it will not release the MMU lock or reschedule and + * the caller must ensure it does not supply too large a GFN range, or the + * operation can cause a soft lockup. */ -void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) +static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, + gfn_t start, gfn_t end, bool can_yield, bool flush) { - struct kvm_mmu_page *next_root; - struct kvm_mmu_page *root; - bool flush = false; + struct tdp_iter iter; - lockdep_assert_held_read(&kvm->mmu_lock); + end = min(end, tdp_mmu_max_gfn_exclusive()); + + lockdep_assert_held_write(&kvm->mmu_lock); rcu_read_lock(); - root = next_invalidated_root(kvm, NULL); + for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) { + if (can_yield && + tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) { + flush = false; + continue; + } - while (root) { - next_root = next_invalidated_root(kvm, root); + if (!is_shadow_present_pte(iter.old_spte) || + !is_last_spte(iter.old_spte, iter.level)) + continue; - rcu_read_unlock(); + tdp_mmu_set_spte(kvm, &iter, 0); + flush = true; + } - flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true); + rcu_read_unlock(); - /* - * Put the reference acquired in - * kvm_tdp_mmu_invalidate_roots - */ - kvm_tdp_mmu_put_root(kvm, root, true); + /* + * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need + * to provide RCU protection as no 'struct kvm_mmu_page' will be freed. + */ + return flush; +} - root = next_root; +/* + * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns + * true if a TLB flush is needed before releasing the MMU lock, i.e. 
if one or + * more SPTEs were zapped since the MMU lock was last acquired. + */ +bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end, + bool can_yield, bool flush) +{ + struct kvm_mmu_page *root; - rcu_read_lock(); - } + for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) + flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush); - rcu_read_unlock(); + return flush; +} + +void kvm_tdp_mmu_zap_all(struct kvm *kvm) +{ + struct kvm_mmu_page *root; + int i; + + /* + * Zap all roots, including invalid roots, as all SPTEs must be dropped + * before returning to the caller. Zap directly even if the root is + * also being zapped by a worker. Walking zapped top-level SPTEs isn't + * all that expensive and mmu_lock is already held, which means the + * worker has yielded, i.e. flushing the work instead of zapping here + * isn't guaranteed to be any faster. + * + * A TLB flush is unnecessary, KVM zaps everything if and only the VM + * is being destroyed or the userspace VMM has exited. In both cases, + * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request. + */ + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + for_each_tdp_mmu_root_yield_safe(kvm, root, i) + tdp_mmu_zap_root(kvm, root, false); + } +} - if (flush) - kvm_flush_remote_tlbs(kvm); +/* + * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast + * zap" completes. + */ +void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) +{ + flush_workqueue(kvm->arch.tdp_mmu_zap_wq); } /* - * Mark each TDP MMU root as invalid so that other threads - * will drop their references and allow the root count to - * go to 0. - * - * Also take a reference on all roots so that this thread - * can do the bulk of the work required to free the roots - * once they are invalidated. Without this reference, a - * vCPU thread might drop the last reference to a root and - * get stuck with tearing down the entire paging structure. + * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that + * is about to be zapped, e.g. in response to a memslots update. The actual + * zapping is performed asynchronously, so a reference is taken on all roots. + * Using a separate workqueue makes it easy to ensure that the destruction is + * performed before the "fast zap" completes, without keeping a separate list + * of invalidated roots; the list is effectively the list of work items in + * the workqueue. * - * Roots which have a zero refcount should be skipped as - * they're already being torn down. - * Already invalid roots should be referenced again so that - * they aren't freed before kvm_tdp_mmu_zap_all_fast is - * done with them. + * Get a reference even if the root is already invalid, the asynchronous worker + * assumes it was gifted a reference to the root it processes. Because mmu_lock + * is held for write, it should be impossible to observe a root with zero refcount, + * i.e. the list of roots cannot be stale. * * This has essentially the same effect for the TDP MMU * as updating mmu_valid_gen does for the shadow MMU. 
@@ -882,9 +1046,13 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) struct kvm_mmu_page *root; lockdep_assert_held_write(&kvm->mmu_lock); - list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) - if (refcount_inc_not_zero(&root->tdp_mmu_root_count)) + list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { + if (!root->role.invalid && + !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) { root->role.invalid = true; + tdp_mmu_schedule_zap_root(kvm, root); + } + } } /* @@ -910,8 +1078,12 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, if (new_spte == iter->old_spte) ret = RET_PF_SPURIOUS; - else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) + else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) return RET_PF_RETRY; + else if (is_shadow_present_pte(iter->old_spte) && + !is_last_spte(iter->old_spte, iter->level)) + kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, + KVM_PAGES_PER_HPAGE(iter->level + 1)); /* * If the page fault was caused by a write but the page is write @@ -925,6 +1097,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */ if (unlikely(is_mmio_spte(new_spte))) { + vcpu->stat.pf_mmio_spte_created++; trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, new_spte); ret = RET_PF_EMULATE; @@ -933,17 +1106,49 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, rcu_dereference(iter->sptep)); } - /* - * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be - * consistent with legacy MMU behavior. - */ - if (ret != RET_PF_SPURIOUS) - vcpu->stat.pf_fixed++; - return ret; } /* + * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the + * provided page table. + * + * @kvm: kvm instance + * @iter: a tdp_iter instance currently on the SPTE that should be set + * @sp: The new TDP page table to install. + * @account_nx: True if this page table is being installed to split a + * non-executable huge page. + * @shared: This operation is running under the MMU lock in read mode. + * + * Returns: 0 if the new page table was installed. Non-0 if the page table + * could not be installed (e.g. the atomic compare-exchange failed). + */ +static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_mmu_page *sp, bool account_nx, + bool shared) +{ + u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled()); + int ret = 0; + + if (shared) { + ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); + if (ret) + return ret; + } else { + tdp_mmu_set_spte(kvm, iter, spte); + } + + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + list_add(&sp->link, &kvm->arch.tdp_mmu_pages); + if (account_nx) + account_huge_nx_page(kvm, sp); + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); + tdp_account_mmu_page(kvm, sp); + + return 0; +} + +/* * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing * page tables and SPTEs to translate the faulting guest physical address. 
*/ @@ -952,8 +1157,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) struct kvm_mmu *mmu = vcpu->arch.mmu; struct tdp_iter iter; struct kvm_mmu_page *sp; - u64 *child_pt; - u64 new_spte; int ret; kvm_mmu_hugepage_adjust(vcpu, fault); @@ -976,7 +1179,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) */ if (is_shadow_present_pte(iter.old_spte) && is_large_pte(iter.old_spte)) { - if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter)) + if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter)) break; /* @@ -984,10 +1187,13 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) * because the new value informs the !present * path below. */ - iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep); } if (!is_shadow_present_pte(iter.old_spte)) { + bool account_nx = fault->huge_page_disallowed && + fault->req_level >= iter.level; + /* * If SPTE has been frozen by another thread, just * give up and retry, avoiding unnecessary page table @@ -996,26 +1202,21 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) if (is_removed_spte(iter.old_spte)) break; - sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1); - child_pt = sp->spt; - - new_spte = make_nonleaf_spte(child_pt, - !shadow_accessed_mask); - - if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) { - tdp_mmu_link_page(vcpu->kvm, sp, - fault->huge_page_disallowed && - fault->req_level >= iter.level); + sp = tdp_mmu_alloc_sp(vcpu); + tdp_mmu_init_child_sp(sp, &iter); - trace_kvm_mmu_get_page(sp, true); - } else { + if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) { tdp_mmu_free_sp(sp); break; } } } - if (iter.level != fault->goal_level) { + /* + * Force the guest to retry the access if the upper level SPTEs aren't + * in place, or if the target leaf SPTE is frozen by another CPU. + */ + if (iter.level != fault->goal_level || is_removed_spte(iter.old_spte)) { rcu_read_unlock(); return RET_PF_RETRY; } @@ -1029,13 +1230,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush) { - struct kvm_mmu_page *root; - - for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false) - flush = zap_gfn_range(kvm, root, range->start, range->end, - range->may_block, flush, false); - - return flush; + return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start, + range->end, range->may_block, flush); } typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, @@ -1049,18 +1245,18 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, struct tdp_iter iter; bool ret = false; - rcu_read_lock(); - /* * Don't support rescheduling, none of the MMU notifiers that funnel * into this helper allow blocking; it'd be dead, wasteful code. */ for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) ret |= handler(kvm, &iter, range); - } - rcu_read_unlock(); + rcu_read_unlock(); + } return ret; } @@ -1152,13 +1348,12 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, */ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); - - /* FIXME: return 'flush' instead of flushing here. 
*/ - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, range->start, 1); - - return false; + /* + * No need to handle the remote TLB flush under RCU protection, the + * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a + * shadow page. See the WARN on pfn_changed in __handle_changed_spte(). + */ + return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); } /* @@ -1177,8 +1372,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); - for_each_tdp_pte_min_level(iter, root->spt, root->role.level, - min_level, start, end) { + for_each_tdp_pte_min_level(iter, root, min_level, start, end) { retry: if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; @@ -1190,14 +1384,9 @@ retry: new_spte = iter.old_spte & ~PT_WRITABLE_MASK; - if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) { - /* - * The iter must explicitly re-read the SPTE because - * the atomic cmpxchg failed. - */ - iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) goto retry; - } + spte_set = true; } @@ -1218,13 +1407,197 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, lockdep_assert_held_read(&kvm->mmu_lock); - for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, slot->base_gfn + slot->npages, min_level); return spte_set; } +static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp) +{ + struct kvm_mmu_page *sp; + + gfp |= __GFP_ZERO; + + sp = kmem_cache_alloc(mmu_page_header_cache, gfp); + if (!sp) + return NULL; + + sp->spt = (void *)__get_free_page(gfp); + if (!sp->spt) { + kmem_cache_free(mmu_page_header_cache, sp); + return NULL; + } + + return sp; +} + +static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, + struct tdp_iter *iter, + bool shared) +{ + struct kvm_mmu_page *sp; + + /* + * Since we are allocating while under the MMU lock we have to be + * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct + * reclaim and to avoid making any filesystem callbacks (which can end + * up invoking KVM MMU notifiers, resulting in a deadlock). + * + * If this allocation fails we drop the lock and retry with reclaim + * allowed. + */ + sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT); + if (sp) + return sp; + + rcu_read_unlock(); + + if (shared) + read_unlock(&kvm->mmu_lock); + else + write_unlock(&kvm->mmu_lock); + + iter->yielded = true; + sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT); + + if (shared) + read_lock(&kvm->mmu_lock); + else + write_lock(&kvm->mmu_lock); + + rcu_read_lock(); + + return sp; +} + +static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_mmu_page *sp, bool shared) +{ + const u64 huge_spte = iter->old_spte; + const int level = iter->level; + int ret, i; + + tdp_mmu_init_child_sp(sp, iter); + + /* + * No need for atomics when writing to sp->spt since the page table has + * not been linked in yet and thus is not reachable from any other CPU. + */ + for (i = 0; i < SPTE_ENT_PER_PAGE; i++) + sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i); + + /* + * Replace the huge spte with a pointer to the populated lower level + * page table. Since we are making this change without a TLB flush vCPUs + * will see a mix of the split mappings and the original huge mapping, + * depending on what's currently in their TLB. 
This is fine from a + * correctness standpoint since the translation will be the same either + * way. + */ + ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared); + if (ret) + goto out; + + /* + * tdp_mmu_link_sp_atomic() will handle subtracting the huge page we + * are overwriting from the page stats. But we have to manually update + * the page stats with the new present child pages. + */ + kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE); + +out: + trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret); + return ret; +} + +static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, + struct kvm_mmu_page *root, + gfn_t start, gfn_t end, + int target_level, bool shared) +{ + struct kvm_mmu_page *sp = NULL; + struct tdp_iter iter; + int ret = 0; + + rcu_read_lock(); + + /* + * Traverse the page table splitting all huge pages above the target + * level into one lower level. For example, if we encounter a 1GB page + * we split it into 512 2MB pages. + * + * Since the TDP iterator uses a pre-order traversal, we are guaranteed + * to visit an SPTE before ever visiting its children, which means we + * will correctly recursively split huge pages that are more than one + * level above the target level (e.g. splitting a 1GB to 512 2MB pages, + * and then splitting each of those to 512 4KB pages). + */ + for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) { +retry: + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) + continue; + + if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte)) + continue; + + if (!sp) { + sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared); + if (!sp) { + ret = -ENOMEM; + trace_kvm_mmu_split_huge_page(iter.gfn, + iter.old_spte, + iter.level, ret); + break; + } + + if (iter.yielded) + continue; + } + + if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) + goto retry; + + sp = NULL; + } + + rcu_read_unlock(); + + /* + * It's possible to exit the loop having never used the last sp if, for + * example, a vCPU doing HugePage NX splitting wins the race and + * installs its own sp in place of the last sp we tried to split. + */ + if (sp) + tdp_mmu_free_sp(sp); + + return ret; +} + + +/* + * Try to split all huge pages mapped by the TDP MMU down to the target level. + */ +void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, + const struct kvm_memory_slot *slot, + gfn_t start, gfn_t end, + int target_level, bool shared) +{ + struct kvm_mmu_page *root; + int r = 0; + + kvm_lockdep_assert_mmu_lock_held(kvm, shared); + + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) { + r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); + if (r) { + kvm_tdp_mmu_put_root(kvm, root, shared); + break; + } + } +} + /* * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. @@ -1246,6 +1619,9 @@ retry: if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; + if (!is_shadow_present_pte(iter.old_spte)) + continue; + if (spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -1258,14 +1634,9 @@ retry: continue; } - if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) { - /* - * The iter must explicitly re-read the SPTE because - * the atomic cmpxchg failed. 
- */ - iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) goto retry; - } + spte_set = true; } @@ -1288,7 +1659,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, lockdep_assert_held_read(&kvm->mmu_lock); - for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, slot->base_gfn + slot->npages); @@ -1358,10 +1729,6 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); } -/* - * Clear leaf entries which could be replaced by large mappings, for - * GFNs within the slot. - */ static void zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, const struct kvm_memory_slot *slot) @@ -1369,42 +1736,53 @@ static void zap_collapsible_spte_range(struct kvm *kvm, gfn_t start = slot->base_gfn; gfn_t end = start + slot->npages; struct tdp_iter iter; - kvm_pfn_t pfn; + int max_mapping_level; rcu_read_lock(); - tdp_root_for_each_pte(iter, root, start, end) { + for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) { retry: if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; - if (!is_shadow_present_pte(iter.old_spte) || - !is_last_spte(iter.old_spte, iter.level)) + if (iter.level > KVM_MAX_HUGEPAGE_LEVEL || + !is_shadow_present_pte(iter.old_spte)) + continue; + + /* + * Don't zap leaf SPTEs, if a leaf SPTE could be replaced with + * a large page size, then its parent would have been zapped + * instead of stepping down. + */ + if (is_last_spte(iter.old_spte, iter.level)) + continue; + + /* + * If iter.gfn resides outside of the slot, i.e. the page for + * the current level overlaps but is not contained by the slot, + * then the SPTE can't be made huge. More importantly, trying + * to query that info from slot->arch.lpage_info will cause an + * out-of-bounds access. + */ + if (iter.gfn < start || iter.gfn >= end) continue; - pfn = spte_to_pfn(iter.old_spte); - if (kvm_is_reserved_pfn(pfn) || - iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn, - pfn, PG_LEVEL_NUM)) + max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, + iter.gfn, PG_LEVEL_NUM); + if (max_mapping_level < iter.level) continue; /* Note, a successful atomic zap also does a remote TLB flush. */ - if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { - /* - * The iter must explicitly re-read the SPTE because - * the atomic cmpxchg failed. - */ - iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); + if (tdp_mmu_zap_spte_atomic(kvm, &iter)) goto retry; - } } rcu_read_unlock(); } /* - * Clear non-leaf entries (and free associated page tables) which could - * be replaced by large mappings, for GFNs within the slot. + * Zap non-leaf SPTEs (and free their associated page tables) which could + * be replaced by huge pages, for GFNs within the slot. 
*/ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot) @@ -1413,7 +1791,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, lockdep_assert_held_read(&kvm->mmu_lock); - for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) zap_collapsible_spte_range(kvm, root, slot); } @@ -1433,18 +1811,17 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, rcu_read_lock(); - for_each_tdp_pte_min_level(iter, root->spt, root->role.level, - min_level, gfn, gfn + 1) { + for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) continue; - if (!is_writable_pte(iter.old_spte)) - break; - new_spte = iter.old_spte & ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); + if (new_spte == iter.old_spte) + break; + tdp_mmu_set_spte(kvm, &iter, new_spte); spte_set = true; } @@ -1487,7 +1864,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, gfn_t gfn = addr >> PAGE_SHIFT; int leaf = -1; - *root_level = vcpu->arch.mmu->shadow_root_level; + *root_level = vcpu->arch.mmu->root_role.level; tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { leaf = iter.level; |
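For readers tracing the reworked tdp_mmu_set_spte_atomic() in the patch above, which now returns 0 or -EBUSY and refreshes iter->old_spte when the compare-exchange loses a race, here is a minimal user-space C sketch of that pattern. It is not kernel code: C11 atomics stand in for try_cmpxchg64(), and the names (spte_slot, set_spte_atomic) are illustrative only.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    /* One page-table entry slot that may be updated concurrently. */
    struct spte_slot {
        _Atomic uint64_t spte;
    };

    /*
     * Try to replace *old_spte with new_spte. Return 0 on success.
     * On failure return -EBUSY and refresh *old_spte with the value
     * currently in the slot, mirroring how try_cmpxchg64() updates the
     * expected value so the caller can retry without re-reading.
     */
    static int set_spte_atomic(struct spte_slot *slot, uint64_t *old_spte,
                               uint64_t new_spte)
    {
        if (!atomic_compare_exchange_strong(&slot->spte, old_spte, new_spte))
            return -EBUSY;
        return 0;
    }

    int main(void)
    {
        struct spte_slot slot = { .spte = 0x1000 };
        uint64_t old_spte = 0x1000;

        /* First update succeeds because old_spte matches the slot. */
        printf("first:  %d\n", set_spte_atomic(&slot, &old_spte, 0x2000));

        /* Stale expectation fails; old_spte is refreshed to 0x2000. */
        old_spte = 0x1000;
        printf("second: %d (refreshed old_spte=%#lx)\n",
               set_spte_atomic(&slot, &old_spte, 0x3000),
               (unsigned long)old_spte);
        return 0;
    }

Refreshing the caller's cached value on failure is what lets the "goto retry" paths in the patch retry immediately instead of explicitly re-reading the SPTE, as the removed READ_ONCE() calls used to do.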
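The root teardown flow introduced above (kvm_tdp_root_mark_invalid() plus tdp_mmu_schedule_zap_root()) pairs one atomic exchange on the role word with a workqueue that zaps invalidated roots asynchronously. Below is a minimal user-space sketch of that shape under simplified assumptions: an atomic flag stands in for role.invalid, a plain counter stands in for tdp_mmu_root_count, and a function call stands in for queue_work(); all names are hypothetical.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct root {
        atomic_bool invalid;   /* stands in for role.invalid */
        atomic_int  refcount;  /* stands in for tdp_mmu_root_count */
    };

    /* Returns the previous value, like the xchg() on role.word above. */
    static bool mark_invalid(struct root *r)
    {
        return atomic_exchange(&r->invalid, true);
    }

    /* Placeholder for queue_work(kvm->arch.tdp_mmu_zap_wq, ...). */
    static void schedule_zap(struct root *r)
    {
        printf("zap scheduled for root %p\n", (void *)r);
    }

    static void free_root(struct root *r)
    {
        printf("root %p freed\n", (void *)r);
    }

    /*
     * Mirrors the shape of kvm_tdp_mmu_put_root(): the first putter that
     * finds the root still valid marks it invalid, re-elevates the
     * refcount and defers the zap; only a put on an already-invalid root
     * actually frees it.
     */
    static void put_root(struct root *r)
    {
        if (atomic_fetch_sub(&r->refcount, 1) != 1)
            return;

        if (!mark_invalid(r)) {
            atomic_store(&r->refcount, 1);
            schedule_zap(r);
            return;
        }

        free_root(r);
    }

    int main(void)
    {
        struct root r = { .invalid = false, .refcount = 1 };

        put_root(&r);   /* marks invalid, hands the root to the "worker" */
        put_root(&r);   /* the worker's put: last reference, root freed  */
        return 0;
    }

The two-step drop is the point of the design: readers never take a reference to an invalid root, while writers that yield can still temporarily hold one, so the last put, whoever performs it, only has to free an already-zapped page.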