* [PATCH v4] KVM: SEV: Mark nested locking of vcpu->lock
@ 2022-05-02 16:58 Peter Gonda
From: Peter Gonda @ 2022-05-02 16:58 UTC (permalink / raw)
  To: kvm
  Cc: Peter Gonda, John Sperbeck, David Rientjes, Sean Christopherson,
	Paolo Bonzini, Hillf Danton, linux-kernel

svm_vm_migrate_from() uses sev_lock_vcpus_for_migration() to lock all
source and target vcpu->locks. Unfortunately, lockdep has an 8-subclass
limit, so a new subclass cannot be used for each vCPU. Instead, maintain
ownership of the first vcpu's mutex.dep_map using a role-specific
subclass: source vs target. Release the other vcpus' mutex.dep_maps.

Fixes: b56639318bb2b ("KVM: SEV: Add support for SEV intra host migration")
Reported-by: John Sperbeck <jsperbeck@google.com>
Suggested-by: David Rientjes <rientjes@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Peter Gonda <pgonda@google.com>

---

V4
 * Due to the 8-subclass limit, keep the dep_map on only the first vcpu
   and release the others.

V3
 * Updated signature to enum to self-document argument.
 * Updated comment as Seanjc@ suggested.

Tested by running the sev_migrate_tests selftest with lockdep enabled
(CONFIG_PROVE_LOCKING=y). Before this patch, lockdep emits a warning from
sev_lock_vcpus_for_migration(); with it applied, no warnings are seen.
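
As an aside, below is a stand-alone sketch, not part of the patch, of the
same dep_map hand-off applied to a plain array of mutexes. The demo_* and
DEMO_ROLE_* names and the 'locks' array are invented for illustration;
mutex_lock_killable_nested(), mutex_release(), mutex_acquire() and the
MAX_LOCKDEP_SUBCLASSES limit of 8 are the kernel facilities this patch
relies on. The annotation macros compile to no-ops when
CONFIG_DEBUG_LOCK_ALLOC is not set.

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Two roles plus one scratch subclass stay well under MAX_LOCKDEP_SUBCLASSES (8). */
enum demo_role { DEMO_ROLE_A, DEMO_ROLE_B, DEMO_NR_ROLES };

static int demo_lock_all(struct mutex *locks, int n, enum demo_role role)
{
	int i, j;

	for (i = 0; i < n; i++) {
		/* Each role consumes one lockdep subclass, not one per lock. */
		if (mutex_lock_killable_nested(&locks[i], role))
			goto out_unlock;

		if (i == 0) {
			/*
			 * Keep lockdep ownership of the first lock only;
			 * later locks use a subclass that cannot collide
			 * with the role of the first one.
			 */
			role = DEMO_NR_ROLES;
		} else {
			/* Drop lockdep ownership; the lock itself stays held. */
			mutex_release(&locks[i].dep_map, _THIS_IP_);
		}
	}
	return 0;

out_unlock:
	for (j = 0; j < i; j++) {
		/* Hand lockdep ownership back before actually unlocking. */
		if (j)
			mutex_acquire(&locks[j].dep_map, role, 0, _THIS_IP_);
		mutex_unlock(&locks[j]);
	}
	return -EINTR;
}

static void demo_unlock_all(struct mutex *locks, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i)
			mutex_acquire(&locks[i].dep_map, DEMO_NR_ROLES, 0, _THIS_IP_);
		mutex_unlock(&locks[i]);
	}
}

With lockdep enabled, concurrent callers of demo_lock_all(a, n, DEMO_ROLE_A)
and demo_lock_all(b, n, DEMO_ROLE_B) are tracked as holding one lock per
role rather than one per element, which is what the patch achieves for the
source and destination VMs.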

---
 arch/x86/kvm/svm/sev.c | 46 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 75fa6dd268f0..0239def64eaa 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1591,24 +1591,55 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 	atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
+/*
+ * To suppress lockdep false positives, subclass the vCPU mutex locks by
+ * role (source vs target).  Only the first vCPU keeps lockdep ownership of
+ * its mutex; the other vCPUs' dep_maps are released right after locking.
+ */
+enum sev_migration_role {
+	SEV_MIGRATION_SOURCE = 0,
+	SEV_MIGRATION_TARGET,
+	SEV_NR_MIGRATION_ROLES,
+};
 
-static int sev_lock_vcpus_for_migration(struct kvm *kvm)
+static int sev_lock_vcpus_for_migration(struct kvm *kvm,
+					enum sev_migration_role role)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i, j;
+	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (mutex_lock_killable(&vcpu->mutex))
+		if (mutex_lock_killable_nested(&vcpu->mutex, role))
 			goto out_unlock;
+
+		if (first) {
+			/*
+			 * Reset the role to one that avoids colliding with
+			 * the role used for the first vcpu mutex.
+			 */
+			role = SEV_NR_MIGRATION_ROLES;
+			first = false;
+		} else {
+			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
+		}
 	}
 
 	return 0;
 
 out_unlock:
+
+	first = true;
 	kvm_for_each_vcpu(j, vcpu, kvm) {
 		if (i == j)
 			break;
 
+		if (first)
+			first = false;
+		else
+			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
+
+
 		mutex_unlock(&vcpu->mutex);
 	}
 	return -EINTR;
@@ -1618,8 +1649,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
+	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (first)
+			first = false;
+		else
+			mutex_acquire(&vcpu->mutex.dep_map,
+				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
+
 		mutex_unlock(&vcpu->mutex);
 	}
 }
@@ -1745,10 +1783,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 		charged = true;
 	}
 
-	ret = sev_lock_vcpus_for_migration(kvm);
+	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
 	if (ret)
 		goto out_dst_cgroup;
-	ret = sev_lock_vcpus_for_migration(source_kvm);
+	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
 	if (ret)
 		goto out_dst_vcpu;
 
-- 
2.36.0.464.gb9c8b46e94-goog



* Re: [PATCH v4] KVM: SEV: Mark nested locking of vcpu->lock
@ 2022-05-02 17:45 ` Paolo Bonzini
From: Paolo Bonzini @ 2022-05-02 17:45 UTC (permalink / raw)
  To: Peter Gonda, kvm
  Cc: John Sperbeck, David Rientjes, Sean Christopherson, Hillf Danton,
	linux-kernel

On 5/2/22 18:58, Peter Gonda wrote:
> svm_vm_migrate_from() uses sev_lock_vcpus_for_migration() to lock all
> source and target vcpu->locks. Unfortunately, lockdep has an 8-subclass
> limit, so a new subclass cannot be used for each vCPU. Instead, maintain
> ownership of the first vcpu's mutex.dep_map using a role-specific
> subclass: source vs target. Release the other vcpus' mutex.dep_maps.
> 
> Fixes: b56639318bb2b ("KVM: SEV: Add support for SEV intra host migration")
> Reported-by: John Sperbeck <jsperbeck@google.com>
> Suggested-by: David Rientjes <rientjes@google.com>
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Hillf Danton <hdanton@sina.com>
> Cc: kvm@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Peter Gonda <pgonda@google.com>

Looks good, thanks!

Paolo
