* [PATCH v2 1/2] kvm/vfio: ensure kvg instance stays around in kvm_vfio_group_add()
@ 2023-07-14 18:37 Dmitry Torokhov
  2023-07-14 18:37 ` [PATCH v2 2/2] kvm/vfio: avoid bouncing the mutex when adding and deleting groups Dmitry Torokhov
  0 siblings, 1 reply; 3+ messages in thread
From: Dmitry Torokhov @ 2023-07-14 18:37 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Alex Williamson, Greg KH, Sean Christopherson, Roxana Bradescu,
	kvm, linux-kernel

kvm_vfio_group_add() creates a kvg instance, links it to kv->group_list,
and then calls kvm_vfio_file_set_kvm() with kvg->file as an argument
after dropping kv->lock. If group addition races with group deletion,
the kvg instance may already have been freed by the time we get around
to calling kvm_vfio_file_set_kvm().

Previous iterations of the code did not reference kvg->file outside of
the critical section, but used a temporary variable instead. Still, they
had a similar problem: the file reference is owned by the kvg structure,
so kvm_vfio_group_del() could drop it before kvm_vfio_group_add() had a
chance to complete.

Fix this by moving the call to kvm_vfio_file_set_kvm() under the
protection of kv->lock. We already call it while holding the same lock
when a vfio group is being deleted, so it should be safe here as well.
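
For illustration, a simplified timeline of the race being closed (the
kvm_vfio_group_del() side is abbreviated; this is a sketch of the
interaction, not the exact code):

  CPU0: kvm_vfio_group_add()           CPU1: kvm_vfio_group_del()
    mutex_lock(&kv->lock);
    list_add_tail(&kvg->node, ...);
    mutex_unlock(&kv->lock);
                                         mutex_lock(&kv->lock);
                                         /* finds kvg, unlinks and frees it */
                                         mutex_unlock(&kv->lock);
    kvm_vfio_file_set_kvm(kvg->file, dev->kvm);  /* use-after-free */

With the call made while kv->lock is still held, the deletion path
cannot free kvg until kvm_vfio_group_add() is done with it.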

Fixes: 2fc1bec15883 ("kvm: set/clear kvm to/from vfio_group when group add/delete")
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
---

v2: updated commit description with the correct "Fixes" tag (per Alex),
    expanded commit description to mention issues with the earlier
    implementation of kvm_vfio_group_add().

 virt/kvm/vfio.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 9584eb57e0ed..cd46d7ef98d6 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -179,10 +179,10 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 	list_add_tail(&kvg->node, &kv->group_list);
 
 	kvm_arch_start_assignment(dev->kvm);
+	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
 
 	mutex_unlock(&kv->lock);
 
-	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
 	kvm_vfio_update_coherency(dev);
 
 	return 0;
-- 
2.41.0.255.g8b1d071c50-goog



* [PATCH v2 2/2] kvm/vfio: avoid bouncing the mutex when adding and deleting groups
  2023-07-14 18:37 [PATCH v2 1/2] kvm/vfio: ensure kvg instance stays around in kvm_vfio_group_add() Dmitry Torokhov
@ 2023-07-14 18:37 ` Dmitry Torokhov
  2023-07-14 19:31   ` Alex Williamson
  0 siblings, 1 reply; 3+ messages in thread
From: Dmitry Torokhov @ 2023-07-14 18:37 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Alex Williamson, Greg KH, Sean Christopherson, Roxana Bradescu,
	kvm, linux-kernel

Stop taking the kv->lock mutex in kvm_vfio_update_coherency() and
instead call it with the mutex held: the callers usually already take
(and release) it right before calling kvm_vfio_update_coherency().
This avoids bouncing the lock up and down.

The exception is kvm_vfio_release(), where we do not take the lock; it
runs when the very last reference to the kvm_device is dropped, so
there are no concurrency concerns.
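
The resulting calling convention, roughly (callers shown schematically,
not the full functions):

	mutex_lock(&kv->lock);
	...
	kvm_vfio_update_coherency(dev);	/* now called with kv->lock held */
	mutex_unlock(&kv->lock);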

Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
---

v2: new patch.

 virt/kvm/vfio.c | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index cd46d7ef98d6..9868e7ccb5fb 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -122,8 +122,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
 	bool noncoherent = false;
 	struct kvm_vfio_group *kvg;
 
-	mutex_lock(&kv->lock);
-
 	list_for_each_entry(kvg, &kv->group_list, node) {
 		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
 			noncoherent = true;
@@ -139,8 +137,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
 		else
 			kvm_arch_unregister_noncoherent_dma(dev->kvm);
 	}
-
-	mutex_unlock(&kv->lock);
 }
 
 static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
@@ -157,7 +153,7 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 	/* Ensure the FD is a vfio group FD.*/
 	if (!kvm_vfio_file_is_group(filp)) {
 		ret = -EINVAL;
-		goto err_fput;
+		goto out_fput;
 	}
 
 	mutex_lock(&kv->lock);
@@ -165,30 +161,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 	list_for_each_entry(kvg, &kv->group_list, node) {
 		if (kvg->file == filp) {
 			ret = -EEXIST;
-			goto err_unlock;
+			goto out_unlock;
 		}
 	}
 
 	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
 	if (!kvg) {
 		ret = -ENOMEM;
-		goto err_unlock;
+		goto out_unlock;
 	}
 
-	kvg->file = filp;
+	kvg->file = get_file(filp);
 	list_add_tail(&kvg->node, &kv->group_list);
 
 	kvm_arch_start_assignment(dev->kvm);
 	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
-
-	mutex_unlock(&kv->lock);
-
 	kvm_vfio_update_coherency(dev);
 
-	return 0;
-err_unlock:
+	ret = 0;
+out_unlock:
 	mutex_unlock(&kv->lock);
-err_fput:
+out_fput:
 	fput(filp);
 	return ret;
 }
@@ -224,12 +217,12 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
 		break;
 	}
 
+	kvm_vfio_update_coherency(dev);
+
 	mutex_unlock(&kv->lock);
 
 	fdput(f);
 
-	kvm_vfio_update_coherency(dev);
-
 	return ret;
 }
 
-- 
2.41.0.255.g8b1d071c50-goog



* Re: [PATCH v2 2/2] kvm/vfio: avoid bouncing the mutex when adding and deleting groups
  2023-07-14 18:37 ` [PATCH v2 2/2] kvm/vfio: avoid bouncing the mutex when adding and deleting groups Dmitry Torokhov
@ 2023-07-14 19:31   ` Alex Williamson
  0 siblings, 0 replies; 3+ messages in thread
From: Alex Williamson @ 2023-07-14 19:31 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: Paolo Bonzini, Greg KH, Sean Christopherson, Roxana Bradescu,
	kvm, linux-kernel

On Fri, 14 Jul 2023 11:37:57 -0700
Dmitry Torokhov <dmitry.torokhov@gmail.com> wrote:

> Stop taking the kv->lock mutex in kvm_vfio_update_coherency() and
> instead call it with the mutex held: the callers usually already take
> (and release) it right before calling kvm_vfio_update_coherency().
> This avoids bouncing the lock up and down.
> 
> The exception is kvm_vfio_release(), where we do not take the lock; it
> runs when the very last reference to the kvm_device is dropped, so
> there are no concurrency concerns.
> 
> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
> Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
> ---
> 
> v2: new patch.
> 
>  virt/kvm/vfio.c | 25 +++++++++----------------
>  1 file changed, 9 insertions(+), 16 deletions(-)
> 
> diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
> index cd46d7ef98d6..9868e7ccb5fb 100644
> --- a/virt/kvm/vfio.c
> +++ b/virt/kvm/vfio.c
> @@ -122,8 +122,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
>  	bool noncoherent = false;
>  	struct kvm_vfio_group *kvg;
>  
> -	mutex_lock(&kv->lock);
> -
>  	list_for_each_entry(kvg, &kv->group_list, node) {
>  		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
>  			noncoherent = true;
> @@ -139,8 +137,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
>  		else
>  			kvm_arch_unregister_noncoherent_dma(dev->kvm);
>  	}
> -
> -	mutex_unlock(&kv->lock);
>  }
>  
>  static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
> @@ -157,7 +153,7 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
>  	/* Ensure the FD is a vfio group FD.*/
>  	if (!kvm_vfio_file_is_group(filp)) {
>  		ret = -EINVAL;
> -		goto err_fput;
> +		goto out_fput;
>  	}
>  
>  	mutex_lock(&kv->lock);
> @@ -165,30 +161,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
>  	list_for_each_entry(kvg, &kv->group_list, node) {
>  		if (kvg->file == filp) {
>  			ret = -EEXIST;
> -			goto err_unlock;
> +			goto out_unlock;
>  		}
>  	}
>  
>  	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
>  	if (!kvg) {
>  		ret = -ENOMEM;
> -		goto err_unlock;
> +		goto out_unlock;
>  	}
>  
> -	kvg->file = filp;
> +	kvg->file = get_file(filp);
>  	list_add_tail(&kvg->node, &kv->group_list);
>  
>  	kvm_arch_start_assignment(dev->kvm);
>  	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
> -
> -	mutex_unlock(&kv->lock);
> -
>  	kvm_vfio_update_coherency(dev);
>  
> -	return 0;
> -err_unlock:
> +	ret = 0;

Nit, let's initialize ret = 0 when it's declared to avoid this.  Series
looks good to me otherwise.  Thanks,

Alex

> +out_unlock:
>  	mutex_unlock(&kv->lock);
> -err_fput:
> +out_fput:
>  	fput(filp);
>  	return ret;
>  }
> @@ -224,12 +217,12 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
>  		break;
>  	}
>  
> +	kvm_vfio_update_coherency(dev);
> +
>  	mutex_unlock(&kv->lock);
>  
>  	fdput(f);
>  
> -	kvm_vfio_update_coherency(dev);
> -
>  	return ret;
>  }
>  

