* [PATCH] vfio: Delete container_q
From: Jason Gunthorpe @ 2022-04-29 18:46 UTC (permalink / raw)
  To: Alex Williamson, Cornelia Huck, kvm, Lu Baolu, Kevin Tian

Now that the iommu core takes care of isolation, there is no race between
driver attach and container unset. Once iommu_group_release_dma_owner()
returns, the device can immediately be reused.

Remove this mechanism.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/vfio/vfio.c | 20 --------------------
 1 file changed, 20 deletions(-)

This was missed in Baolu's series and applies on top of "iommu: Remove iommu
group changes notifier".
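
For reference, the ownership hand-off this relies on looks roughly like the
sketch below. This is illustrative only: the _sketch helpers are made-up
names, the real logic lives in vfio_group_set_container() and
__vfio_group_unset_container(), and the owner cookie passed to
iommu_group_claim_dma_owner() differs in the real code.

#include <linux/iommu.h>

static int vfio_group_set_container_sketch(struct vfio_group *group,
					   struct vfio_container *container)
{
	int ret;

	/*
	 * Claiming DMA ownership fails if any device in the group is
	 * already bound to a non-vfio driver, and blocks such binds
	 * for as long as the ownership is held.
	 */
	ret = iommu_group_claim_dma_owner(group->iommu_group, group);
	if (ret)
		return ret;

	group->container = container;
	return 0;
}

static void vfio_group_unset_container_sketch(struct vfio_group *group)
{
	/*
	 * The iommu core serializes the release against driver probe,
	 * so once this returns, a racing bind of the device to another
	 * driver is already safe. That is what made the container_q
	 * stall in vfio_unregister_group_dev() redundant.
	 */
	iommu_group_release_dma_owner(group->iommu_group);
	group->container = NULL;
}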

diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 0c766384cee0f8..4a1847f50c9289 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -74,7 +74,6 @@ struct vfio_group {
 	struct list_head		vfio_next;
 	struct list_head		container_next;
 	atomic_t			opened;
-	wait_queue_head_t		container_q;
 	enum vfio_group_type		type;
 	unsigned int			dev_counter;
 	struct kvm			*kvm;
@@ -363,7 +362,6 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
 	refcount_set(&group->users, 1);
 	INIT_LIST_HEAD(&group->device_list);
 	mutex_init(&group->device_lock);
-	init_waitqueue_head(&group->container_q);
 	group->iommu_group = iommu_group;
 	/* put in vfio_group_release() */
 	iommu_group_ref_get(iommu_group);
@@ -723,23 +721,6 @@ void vfio_unregister_group_dev(struct vfio_device *device)
 	group->dev_counter--;
 	mutex_unlock(&group->device_lock);
 
-	/*
-	 * In order to support multiple devices per group, devices can be
-	 * plucked from the group while other devices in the group are still
-	 * in use.  The container persists with this group and those remaining
-	 * devices still attached.  If the user creates an isolation violation
-	 * by binding this device to another driver while the group is still in
-	 * use, that's their fault.  However, in the case of removing the last,
-	 * or potentially the only, device in the group there can be no other
-	 * in-use devices in the group.  The user has done their due diligence
-	 * and we should lay no claims to those devices.  In order to do that,
-	 * we need to make sure the group is detached from the container.
-	 * Without this stall, we're potentially racing with a user process
-	 * that may attempt to immediately bind this device to another driver.
-	 */
-	if (list_empty(&group->device_list))
-		wait_event(group->container_q, !group->container);
-
 	if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
 		iommu_group_remove_device(device->dev);
 
@@ -984,7 +965,6 @@ static void __vfio_group_unset_container(struct vfio_group *group)
 	iommu_group_release_dma_owner(group->iommu_group);
 
 	group->container = NULL;
-	wake_up(&group->container_q);
 	list_del(&group->container_next);
 
 	/* Detaching the last group deprivileges a container, remove iommu */

base-commit: 46788c84354d07f8b1e5df87e805500611fd04fb
-- 
2.36.0



* RE: [PATCH] vfio: Delete container_q
From: Tian, Kevin @ 2022-05-05  7:53 UTC (permalink / raw)
  To: Jason Gunthorpe, Alex Williamson, Cornelia Huck, kvm, Lu Baolu

> From: Jason Gunthorpe <jgg@nvidia.com>
> Sent: Saturday, April 30, 2022 2:46 AM
> 
> Now that the iommu core takes care of isolation, there is no race between
> driver attach and container unset. Once iommu_group_release_dma_owner()
> returns, the device can immediately be reused.
> 
> Remove this mechanism.
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>


* Re: [PATCH] vfio: Delete container_q
From: Alex Williamson @ 2022-05-13 17:23 UTC (permalink / raw)
  To: Jason Gunthorpe; +Cc: Cornelia Huck, kvm, Lu Baolu, Kevin Tian

On Fri, 29 Apr 2022 15:46:17 -0300
Jason Gunthorpe <jgg@nvidia.com> wrote:

> Now that the iommu core takes care of isolation, there is no race between
> driver attach and container unset. Once iommu_group_release_dma_owner()
> returns, the device can immediately be reused.
> 
> Remove this mechanism.
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Applied to vfio next branch for v5.19.  Thanks,

Alex


