KVM: SEV: Mark nested locking of vcpu->lock
author Peter Gonda <pgonda@google.com>
Mon, 2 May 2022 16:58:07 +0000 (09:58 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 6 May 2022 17:08:04 +0000 (13:08 -0400)
svm_vm_migrate_from() uses sev_lock_vcpus_for_migration() to lock all
source and target vcpu->locks. Unfortunately there is an 8-subclass
limit, so a new subclass cannot be used for each vCPU. Instead,
maintain ownership of the first vcpu's mutex.dep_map using a
role-specific subclass: source vs. target. Release the other vcpus'
mutex.dep_maps.
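
The 8-subclass limit comes from lockdep's MAX_LOCKDEP_SUBCLASSES (8) in
<linux/lockdep.h>, which a VM's vCPU count can easily exceed. Below is a
minimal, self-contained sketch of the resulting pattern; the toy_* names
and TOY_NR_LOCKS are invented for illustration, while
mutex_lock_killable_nested(), mutex_release(), mutex_acquire() and
_THIS_IP_ are the lockdep-aware kernel interfaces the patch itself uses:

#include <linux/errno.h>
#include <linux/mutex.h>

enum toy_role {
        TOY_ROLE_SOURCE = 0,
        TOY_ROLE_TARGET,
        TOY_NR_ROLES,           /* spare subclass for re-acquires */
};

#define TOY_NR_LOCKS    16      /* more locks than lockdep subclasses */

static struct mutex toy_locks[TOY_NR_LOCKS];

static int toy_lock_all(enum toy_role role)
{
        bool first = true;
        int i, j;

        for (i = 0; i < TOY_NR_LOCKS; i++) {
                /* Every lock taken under one role shares one subclass. */
                if (mutex_lock_killable_nested(&toy_locks[i], role))
                        goto out_unlock;

                if (first) {
                        /*
                         * Keep lockdep ownership of the first lock only,
                         * and switch to the spare subclass so re-acquires
                         * cannot collide with the first lock's subclass.
                         */
                        role = TOY_NR_ROLES;
                        first = false;
                } else {
                        /* Still locked, but hidden from lockdep. */
                        mutex_release(&toy_locks[i].dep_map, _THIS_IP_);
                }
        }
        return 0;

out_unlock:
        first = true;
        for (j = 0; j < i; j++) {
                /*
                 * mutex_unlock() does its own mutex_release(), so a
                 * hidden lock must be re-acquired in lockdep's books
                 * before it can be unlocked.
                 */
                if (first)
                        first = false;
                else
                        mutex_acquire(&toy_locks[j].dep_map, role, 0,
                                      _THIS_IP_);

                mutex_unlock(&toy_locks[j]);
        }
        return -EINTR;
}

The unlock helper in the patch below performs the same bookkeeping-only
re-acquire, with SEV_NR_MIGRATION_ROLES as the spare subclass, before
unlocking each vCPU mutex after the first.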

Fixes: b56639318bb2b ("KVM: SEV: Add support for SEV intra host migration")
Reported-by: John Sperbeck <jsperbeck@google.com>
Suggested-by: David Rientjes <rientjes@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Peter Gonda <pgonda@google.com>
Message-Id: <20220502165807.529624-1-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/sev.c

index 0ad70c1..7c39287 100644
@@ -1594,24 +1594,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
        atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
+/* vCPU mutex subclasses.  */
+enum sev_migration_role {
+       SEV_MIGRATION_SOURCE = 0,
+       SEV_MIGRATION_TARGET,
+       SEV_NR_MIGRATION_ROLES,
+};
 
-static int sev_lock_vcpus_for_migration(struct kvm *kvm)
+static int sev_lock_vcpus_for_migration(struct kvm *kvm,
+                                       enum sev_migration_role role)
 {
        struct kvm_vcpu *vcpu;
        unsigned long i, j;
+       bool first = true;
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (mutex_lock_killable(&vcpu->mutex))
+               if (mutex_lock_killable_nested(&vcpu->mutex, role))
                        goto out_unlock;
+
+               if (first) {
+                       /*
+                        * Reset the role to one that avoids colliding with
+                        * the role used for the first vcpu mutex.
+                        */
+                       role = SEV_NR_MIGRATION_ROLES;
+                       first = false;
+               } else {
+                       mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
+               }
        }
 
        return 0;
 
 out_unlock:
+
+       first = true;
        kvm_for_each_vcpu(j, vcpu, kvm) {
                if (i == j)
                        break;
 
+               if (first)
+                       first = false;
+               else
+                       mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
+
+
                mutex_unlock(&vcpu->mutex);
        }
        return -EINTR;
@@ -1621,8 +1648,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
 {
        struct kvm_vcpu *vcpu;
        unsigned long i;
+       bool first = true;
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (first)
+                       first = false;
+               else
+                       mutex_acquire(&vcpu->mutex.dep_map,
+                                     SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
+
                mutex_unlock(&vcpu->mutex);
        }
 }
@@ -1748,10 +1782,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
                charged = true;
        }
 
-       ret = sev_lock_vcpus_for_migration(kvm);
+       ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
        if (ret)
                goto out_dst_cgroup;
-       ret = sev_lock_vcpus_for_migration(source_kvm);
+       ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
        if (ret)
                goto out_dst_vcpu;