author     Sean Christopherson <seanjc@google.com>  2022-06-13 21:42:37 +0000
committer  Paolo Bonzini <pbonzini@redhat.com>      2022-06-15 08:07:22 -0400
commit     e5380f6d7586ea3ef3a55d8cf19ceffacea31392
tree       4fc03286865c8ccaffba5a60d485b95385248258 /arch/x86/kvm/svm/sev.c
parent     KVM: SEV: fix misplaced closing parenthesis
KVM: SVM: Hide SEV migration lockdep goo behind CONFIG_PROVE_LOCKING
Wrap the manipulation of @role and the manual mutex_{release,acquire}() invocations in CONFIG_PROVE_LOCKING=y to squash a clang-15 warning. When building with -Wunused-but-set-parameter and CONFIG_DEBUG_LOCK_ALLOC=n, clang-15 sees there's no usage of @role in mutex_lock_killable_nested() and yells. PROVE_LOCKING selects DEBUG_LOCK_ALLOC, and the only reason KVM manipulates @role is to make PROVE_LOCKING happy.

To avoid true ugliness, use "i" and "j" to detect the first pass in the loops; the "idx" field that's used by kvm_for_each_vcpu() is guaranteed to be '0' on the first pass as it's simply the first entry in the vCPUs XArray, which is fully KVM controlled. kvm_for_each_vcpu() passes '0' for xa_for_each_range()'s "start", and xa_for_each_range() will not enter the loop if there's no entry at '0'.

Fixes: 0c2c7c069285 ("KVM: SEV: Mark nested locking of vcpu->lock")
Reported-by: kernel test robot <lkp@intel.com>
Cc: Peter Gonda <pgonda@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220613214237.2538266-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
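As context, the two definitions the message leans on, paraphrased from include/linux/mutex.h and include/linux/kvm_host.h of kernels around this patch (consult the exact tree for the authoritative text): with CONFIG_DEBUG_LOCK_ALLOC=n the _nested() lock variant is a macro that discards its subclass argument, and kvm_for_each_vcpu() iterates the vCPU XArray starting at index 0.

  /* With CONFIG_DEBUG_LOCK_ALLOC=n the subclass argument is dropped
   * entirely, which is why clang-15 flags @role as set-but-unused:
   */
  #define mutex_lock_killable_nested(lock, subclass) \
  	mutex_lock_killable(lock)

  /* Iteration starts at XArray index 0, so "i" (and "j") are 0 on the
   * first pass, and the loop body is never entered if no vCPU exists
   * at index 0:
   */
  #define kvm_for_each_vcpu(idx, vcpup, kvm)		   \
  	xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
  			  atomic_read(&kvm->online_vcpus) - 1)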
Diffstat (limited to 'arch/x86/kvm/svm/sev.c')
-rw-r--r--  arch/x86/kvm/svm/sev.c | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 51fd985cf21d..309bcdb2f929 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1606,38 +1606,35 @@ static int sev_lock_vcpus_for_migration(struct kvm *kvm,
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i, j;
-	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (mutex_lock_killable_nested(&vcpu->mutex, role))
 			goto out_unlock;
 
-		if (first) {
+#ifdef CONFIG_PROVE_LOCKING
+		if (!i)
 			/*
 			 * Reset the role to one that avoids colliding with
 			 * the role used for the first vcpu mutex.
 			 */
 			role = SEV_NR_MIGRATION_ROLES;
-			first = false;
-		} else {
+		else
 			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-		}
+#endif
 	}
 
 	return 0;
 
 out_unlock:
-	first = true;
 	kvm_for_each_vcpu(j, vcpu, kvm) {
 		if (i == j)
 			break;
 
-		if (first)
-			first = false;
-		else
+#ifdef CONFIG_PROVE_LOCKING
+		if (j)
 			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-
+#endif
 		mutex_unlock(&vcpu->mutex);
 	}
 	return -EINTR;
 }
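For readers unfamiliar with the lockdep dance above, a minimal standalone sketch of the same pattern (hypothetical helper name lock_many_killable(); not the KVM code): when taking an unbounded number of mutexes that share one lock class, only the first acquisition keeps its caller-provided subclass, the second switches to a non-colliding subclass, and every lock after the first drops its lockdep bookkeeping with mutex_release() while remaining held, restoring it with mutex_acquire() just before unlocking.

  #include <linux/lockdep.h>
  #include <linux/mutex.h>

  /*
   * Hypothetical illustration only.  Locks an array of same-class
   * mutexes; "role" is a lockdep subclass supplied by the caller.
   */
  static int lock_many_killable(struct mutex *locks, unsigned long n,
  			      unsigned int role)
  {
  	unsigned long i;

  	for (i = 0; i < n; i++) {
  		if (mutex_lock_killable_nested(&locks[i], role))
  			goto unwind;
  #ifdef CONFIG_PROVE_LOCKING
  		if (!i)
  			role++;	/* next lock must not collide with the first */
  		else
  			/* Lock stays held; only lockdep tracking is dropped. */
  			mutex_release(&locks[i].dep_map, _THIS_IP_);
  #endif
  	}
  	return 0;

  unwind:
  	while (i--) {
  #ifdef CONFIG_PROVE_LOCKING
  		if (i)
  			/* Restore lockdep tracking before unlocking. */
  			mutex_acquire(&locks[i].dep_map, role, 0, _THIS_IP_);
  #endif
  		mutex_unlock(&locks[i]);
  	}
  	return -EINTR;
  }

Releasing the dep_map is what keeps lockdep's per-task held-lock table (MAX_LOCK_DEPTH entries) from overflowing even though hundreds of vCPU mutexes may be held at once; the mutexes themselves remain locked throughout.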