iommu.lists.linux-foundation.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit()
@ 2020-04-23 12:53 Jean-Philippe Brucker
  2020-04-23 12:53 ` [PATCH v2 1/2] uacce: Remove mm_exit() op Jean-Philippe Brucker
                   ` (3 more replies)
  0 siblings, 4 replies; 10+ messages in thread
From: Jean-Philippe Brucker @ 2020-04-23 12:53 UTC (permalink / raw)
  To: iommu, linux-accelerators
  Cc: Jean-Philippe Brucker, arnd, gregkh, jgg, zhangfei.gao

The IOMMU SVA API currently requires device drivers to implement an
mm_exit() callback, which stops device jobs that do DMA. This function
is called in the release() MMU notifier, when an address space that is
shared with a device exits.

It has been noted several times during discussions about SVA that
cancelling DMA jobs can be slow and complex, and doing it in the
release() notifier might cause synchronization issues. Device drivers
must in any case call unbind() to remove their bond, after stopping DMA
from a more favorable context (release of a file descriptor).

Patch 1 removes the mm_exit() callback from the uacce module, and patch
2 removes it from the IOMMU API. Since v1 [1] I fixed the uacce unbind
reported by Zhangfei and added details in the commit message of patch 2.

Jean-Philippe Brucker (2):
  uacce: Remove mm_exit() op
  iommu: Remove iommu_sva_ops::mm_exit()

 include/linux/iommu.h      |  30 -------
 include/linux/uacce.h      |  34 ++------
 drivers/iommu/iommu.c      |  11 ---
 drivers/misc/uacce/uacce.c | 172 +++++++++----------------------------
 4 files changed, 51 insertions(+), 196 deletions(-)

-- 
2.26.0

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH v2 1/2] uacce: Remove mm_exit() op
  2020-04-23 12:53 [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
@ 2020-04-23 12:53 ` Jean-Philippe Brucker
  2020-04-24  1:05   ` Zhangfei Gao
  2020-04-23 12:53 ` [PATCH v2 2/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 10+ messages in thread
From: Jean-Philippe Brucker @ 2020-04-23 12:53 UTC (permalink / raw)
  To: iommu, linux-accelerators
  Cc: Jean-Philippe Brucker, arnd, gregkh, jgg, zhangfei.gao

The mm_exit() op will be removed from the SVA API. When a process dies
and its mm goes away, the IOMMU driver won't notify device drivers
anymore. Drivers should expect to handle a lot more aborted DMA. On the
upside, it does greatly simplify the queue management.

The uacce_mm struct, that tracks all queues bound to an mm, was only
used by the mm_exit() callback. Remove it.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
v1->v2: clear q->handle after unbind
---
 include/linux/uacce.h      |  34 ++------
 drivers/misc/uacce/uacce.c | 172 +++++++++----------------------------
 2 files changed, 51 insertions(+), 155 deletions(-)

diff --git a/include/linux/uacce.h b/include/linux/uacce.h
index 0e215e6d0534a..454c2f6672d79 100644
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -68,19 +68,21 @@ enum uacce_q_state {
  * @uacce: pointer to uacce
  * @priv: private pointer
  * @wait: wait queue head
- * @list: index into uacce_mm
- * @uacce_mm: the corresponding mm
+ * @list: index into uacce queues list
  * @qfrs: pointer of qfr regions
  * @state: queue state machine
+ * @pasid: pasid associated to the mm
+ * @handle: iommu_sva handle returned by iommu_sva_bind_device()
  */
 struct uacce_queue {
 	struct uacce_device *uacce;
 	void *priv;
 	wait_queue_head_t wait;
 	struct list_head list;
-	struct uacce_mm *uacce_mm;
 	struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
 	enum uacce_q_state state;
+	int pasid;
+	struct iommu_sva *handle;
 };
 
 /**
@@ -96,8 +98,8 @@ struct uacce_queue {
  * @cdev: cdev of the uacce
  * @dev: dev of the uacce
  * @priv: private pointer of the uacce
- * @mm_list: list head of uacce_mm->list
- * @mm_lock: lock for mm_list
+ * @queues: list of queues
+ * @queues_lock: lock for queues list
  * @inode: core vfs
  */
 struct uacce_device {
@@ -112,27 +114,9 @@ struct uacce_device {
 	struct cdev *cdev;
 	struct device dev;
 	void *priv;
-	struct list_head mm_list;
-	struct mutex mm_lock;
-	struct inode *inode;
-};
-
-/**
- * struct uacce_mm - keep track of queues bound to a process
- * @list: index into uacce_device
- * @queues: list of queues
- * @mm: the mm struct
- * @lock: protects the list of queues
- * @pasid: pasid of the uacce_mm
- * @handle: iommu_sva handle return from iommu_sva_bind_device
- */
-struct uacce_mm {
-	struct list_head list;
 	struct list_head queues;
-	struct mm_struct *mm;
-	struct mutex lock;
-	int pasid;
-	struct iommu_sva *handle;
+	struct mutex queues_lock;
+	struct inode *inode;
 };
 
 #if IS_ENABLED(CONFIG_UACCE)
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index d39307f060bd6..107028e77ca37 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
 }
 #endif
 
-static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
-			  void *data)
+static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
 {
-	struct uacce_mm *uacce_mm = data;
-	struct uacce_queue *q;
-
-	/*
-	 * No new queue can be added concurrently because no caller can have a
-	 * reference to this mm. But there may be concurrent calls to
-	 * uacce_mm_put(), so we need the lock.
-	 */
-	mutex_lock(&uacce_mm->lock);
-	list_for_each_entry(q, &uacce_mm->queues, list)
-		uacce_put_queue(q);
-	uacce_mm->mm = NULL;
-	mutex_unlock(&uacce_mm->lock);
+	int pasid;
+	struct iommu_sva *handle;
 
-	return 0;
-}
-
-static struct iommu_sva_ops uacce_sva_ops = {
-	.mm_exit = uacce_sva_exit,
-};
-
-static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
-				     struct uacce_queue *q,
-				     struct mm_struct *mm)
-{
-	struct uacce_mm *uacce_mm = NULL;
-	struct iommu_sva *handle = NULL;
-	int ret;
-
-	lockdep_assert_held(&uacce->mm_lock);
-
-	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-		if (uacce_mm->mm == mm) {
-			mutex_lock(&uacce_mm->lock);
-			list_add(&q->list, &uacce_mm->queues);
-			mutex_unlock(&uacce_mm->lock);
-			return uacce_mm;
-		}
-	}
-
-	uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
-	if (!uacce_mm)
-		return NULL;
+	if (!(uacce->flags & UACCE_DEV_SVA))
+		return 0;
 
-	if (uacce->flags & UACCE_DEV_SVA) {
-		/*
-		 * Safe to pass an incomplete uacce_mm, since mm_exit cannot
-		 * fire while we hold a reference to the mm.
-		 */
-		handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
-		if (IS_ERR(handle))
-			goto err_free;
+	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 
-		ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
-		if (ret)
-			goto err_unbind;
-
-		uacce_mm->pasid = iommu_sva_get_pasid(handle);
-		if (uacce_mm->pasid == IOMMU_PASID_INVALID)
-			goto err_unbind;
+	pasid = iommu_sva_get_pasid(handle);
+	if (pasid == IOMMU_PASID_INVALID) {
+		iommu_sva_unbind_device(handle);
+		return -ENODEV;
 	}
 
-	uacce_mm->mm = mm;
-	uacce_mm->handle = handle;
-	INIT_LIST_HEAD(&uacce_mm->queues);
-	mutex_init(&uacce_mm->lock);
-	list_add(&q->list, &uacce_mm->queues);
-	list_add(&uacce_mm->list, &uacce->mm_list);
-
-	return uacce_mm;
-
-err_unbind:
-	if (handle)
-		iommu_sva_unbind_device(handle);
-err_free:
-	kfree(uacce_mm);
-	return NULL;
+	q->handle = handle;
+	q->pasid = pasid;
+	return 0;
 }
 
-static void uacce_mm_put(struct uacce_queue *q)
+static void uacce_unbind_queue(struct uacce_queue *q)
 {
-	struct uacce_mm *uacce_mm = q->uacce_mm;
-
-	lockdep_assert_held(&q->uacce->mm_lock);
-
-	mutex_lock(&uacce_mm->lock);
-	list_del(&q->list);
-	mutex_unlock(&uacce_mm->lock);
-
-	if (list_empty(&uacce_mm->queues)) {
-		if (uacce_mm->handle)
-			iommu_sva_unbind_device(uacce_mm->handle);
-		list_del(&uacce_mm->list);
-		kfree(uacce_mm);
-	}
+	if (!q->handle)
+		return;
+	iommu_sva_unbind_device(q->handle);
+	q->handle = NULL;
 }
 
 static int uacce_fops_open(struct inode *inode, struct file *filep)
 {
-	struct uacce_mm *uacce_mm = NULL;
 	struct uacce_device *uacce;
 	struct uacce_queue *q;
 	int ret = 0;
@@ -205,21 +135,16 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	if (!q)
 		return -ENOMEM;
 
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm = uacce_mm_get(uacce, q, current->mm);
-	mutex_unlock(&uacce->mm_lock);
-	if (!uacce_mm) {
-		ret = -ENOMEM;
+	ret = uacce_bind_queue(uacce, q);
+	if (ret)
 		goto out_with_mem;
-	}
 
 	q->uacce = uacce;
-	q->uacce_mm = uacce_mm;
 
 	if (uacce->ops->get_queue) {
-		ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+		ret = uacce->ops->get_queue(uacce, q->pasid, q);
 		if (ret < 0)
-			goto out_with_mm;
+			goto out_with_bond;
 	}
 
 	init_waitqueue_head(&q->wait);
@@ -227,12 +152,14 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	uacce->inode = inode;
 	q->state = UACCE_Q_INIT;
 
+	mutex_lock(&uacce->queues_lock);
+	list_add(&q->list, &uacce->queues);
+	mutex_unlock(&uacce->queues_lock);
+
 	return 0;
 
-out_with_mm:
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm_put(q);
-	mutex_unlock(&uacce->mm_lock);
+out_with_bond:
+	uacce_unbind_queue(q);
 out_with_mem:
 	kfree(q);
 	return ret;
@@ -241,14 +168,12 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 static int uacce_fops_release(struct inode *inode, struct file *filep)
 {
 	struct uacce_queue *q = filep->private_data;
-	struct uacce_device *uacce = q->uacce;
 
+	mutex_lock(&q->uacce->queues_lock);
+	list_del(&q->list);
+	mutex_unlock(&q->uacce->queues_lock);
 	uacce_put_queue(q);
-
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm_put(q);
-	mutex_unlock(&uacce->mm_lock);
-
+	uacce_unbind_queue(q);
 	kfree(q);
 
 	return 0;
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
 	if (ret < 0)
 		goto err_with_uacce;
 
-	INIT_LIST_HEAD(&uacce->mm_list);
-	mutex_init(&uacce->mm_lock);
+	INIT_LIST_HEAD(&uacce->queues);
+	mutex_init(&uacce->queues_lock);
 	device_initialize(&uacce->dev);
 	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
 	uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
  */
 void uacce_remove(struct uacce_device *uacce)
 {
-	struct uacce_mm *uacce_mm;
-	struct uacce_queue *q;
+	struct uacce_queue *q, *next_q;
 
 	if (!uacce)
 		return;
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
 		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
 
 	/* ensure no open queue remains */
-	mutex_lock(&uacce->mm_lock);
-	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-		/*
-		 * We don't take the uacce_mm->lock here. Since we hold the
-		 * device's mm_lock, no queue can be added to or removed from
-		 * this uacce_mm. We may run concurrently with mm_exit, but
-		 * uacce_put_queue() is serialized and iommu_sva_unbind_device()
-		 * waits for the lock that mm_exit is holding.
-		 */
-		list_for_each_entry(q, &uacce_mm->queues, list)
-			uacce_put_queue(q);
-
-		if (uacce->flags & UACCE_DEV_SVA) {
-			iommu_sva_unbind_device(uacce_mm->handle);
-			uacce_mm->handle = NULL;
-		}
+	mutex_lock(&uacce->queues_lock);
+	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+		uacce_put_queue(q);
+		uacce_unbind_queue(q);
 	}
-	mutex_unlock(&uacce->mm_lock);
+	mutex_unlock(&uacce->queues_lock);
 
 	/* disable sva now since no opened queues */
 	if (uacce->flags & UACCE_DEV_SVA)
-- 
2.26.0

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v2 2/2] iommu: Remove iommu_sva_ops::mm_exit()
  2020-04-23 12:53 [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
  2020-04-23 12:53 ` [PATCH v2 1/2] uacce: Remove mm_exit() op Jean-Philippe Brucker
@ 2020-04-23 12:53 ` Jean-Philippe Brucker
  2020-05-27 10:10 ` [PATCH v2 0/2] " Jean-Philippe Brucker
  2020-05-29 12:53 ` Joerg Roedel
  3 siblings, 0 replies; 10+ messages in thread
From: Jean-Philippe Brucker @ 2020-04-23 12:53 UTC (permalink / raw)
  To: iommu, linux-accelerators
  Cc: Jean-Philippe Brucker, arnd, gregkh, jgg, zhangfei.gao

After binding a device to an mm, device drivers currently need to
register a mm_exit handler. This function is called when the mm exits,
to gracefully stop DMA targeting the address space and flush page faults
to the IOMMU.

This is deemed too complex for the MMU release() notifier, which may be
triggered by any mmput() invocation, from about 120 callsites [1]. The
upcoming SVA module has an example of such complexity: the I/O Page
Fault handler would need to call mmput_async() instead of mmput() after
handling an IOPF, to avoid triggering the release() notifier which would
in turn drain the IOPF queue and lock up.

Another concern is the DMA stop function taking too long, up to several
minutes [2]. For some mmput() callers this may disturb other users. For
example, if the OOM killer picks the mm bound to a device as the victim
and that mm's memory is locked, if the release() takes too long, it
might choose additional innocent victims to kill.

To simplify the MMU release notifier, don't forward the notification to
device drivers. Since they don't stop DMA on mm exit anymore, the PASID
lifetime is extended:

(1) The device driver calls bind(). A PASID is allocated.

  Here any DMA fault is handled by mm, and on error we don't print
  anything to dmesg. Userspace can easily trigger errors by issuing DMA
  on unmapped buffers.

(2) exit_mmap(), for example the process took a SIGKILL. This step
    doesn't happen during normal operations. Remove the pgd from the
    PASID table, since the page tables are about to be freed. Invalidate
    the IOTLBs.

  Here the device may still perform DMA on the address space. Incoming
  transactions are aborted but faults aren't printed out. ATS
  Translation Requests return Successful Translation Completions with
  R=W=0. PRI Page Requests return with Invalid Request.

(3) The device driver stops DMA, possibly following release of a fd, and
    calls unbind(). PASID table is cleared, IOTLB invalidated if
    necessary. The page fault queues are drained, and the PASID is
    freed.

  If DMA for that PASID is still running here, something went seriously
  wrong and errors should be reported.

For now remove iommu_sva_ops entirely. We might need to re-introduce
them at some point, for example to notify device drivers of unhandled
IOPF.

[1] https://lore.kernel.org/linux-iommu/20200306174239.GM31668@ziepe.ca/
[2] https://lore.kernel.org/linux-iommu/4d68da96-0ad5-b412-5987-2f7a6aa796c3@amd.com/

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
v1->v2: tried to clarify the lifetime of the PASID
---
 include/linux/iommu.h | 30 ------------------------------
 drivers/iommu/iommu.c | 11 -----------
 2 files changed, 41 deletions(-)

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7ef8b0bda6951..bd330d6343b78 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -53,8 +53,6 @@ struct iommu_fault_event;
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
 			struct device *, unsigned long, int, void *);
-typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
-				       void *);
 typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
 
 struct iommu_domain_geometry {
@@ -171,25 +169,6 @@ enum iommu_dev_features {
 
 #define IOMMU_PASID_INVALID	(-1U)
 
-/**
- * struct iommu_sva_ops - device driver callbacks for an SVA context
- *
- * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
- *           @mm_exit returns, the device must not issue any more transaction
- *           with the PASID given as argument.
- *
- *           The @mm_exit handler is allowed to sleep. Be careful about the
- *           locks taken in @mm_exit, because they might lead to deadlocks if
- *           they are also held when dropping references to the mm. Consider the
- *           following call chain:
- *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
- *           Using mmput_async() prevents this scenario.
- *
- */
-struct iommu_sva_ops {
-	iommu_mm_exit_handler_t mm_exit;
-};
-
 #ifdef CONFIG_IOMMU_API
 
 /**
@@ -605,7 +584,6 @@ struct iommu_fwspec {
  */
 struct iommu_sva {
 	struct device			*dev;
-	const struct iommu_sva_ops	*ops;
 };
 
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -653,8 +631,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
 					struct mm_struct *mm,
 					void *drvdata);
 void iommu_sva_unbind_device(struct iommu_sva *handle);
-int iommu_sva_set_ops(struct iommu_sva *handle,
-		      const struct iommu_sva_ops *ops);
 int iommu_sva_get_pasid(struct iommu_sva *handle);
 
 #else /* CONFIG_IOMMU_API */
@@ -1058,12 +1034,6 @@ static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
 {
 }
 
-static inline int iommu_sva_set_ops(struct iommu_sva *handle,
-				    const struct iommu_sva_ops *ops)
-{
-	return -EINVAL;
-}
-
 static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
 {
 	return IOMMU_PASID_INVALID;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2b471419e26c3..dfed12328c032 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2637,17 +2637,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
 }
 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
 
-int iommu_sva_set_ops(struct iommu_sva *handle,
-		      const struct iommu_sva_ops *sva_ops)
-{
-	if (handle->ops && handle->ops != sva_ops)
-		return -EEXIST;
-
-	handle->ops = sva_ops;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
-
 int iommu_sva_get_pasid(struct iommu_sva *handle)
 {
 	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
-- 
2.26.0

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH v2 1/2] uacce: Remove mm_exit() op
  2020-04-23 12:53 ` [PATCH v2 1/2] uacce: Remove mm_exit() op Jean-Philippe Brucker
@ 2020-04-24  1:05   ` Zhangfei Gao
  0 siblings, 0 replies; 10+ messages in thread
From: Zhangfei Gao @ 2020-04-24  1:05 UTC (permalink / raw)
  To: Jean-Philippe Brucker, iommu, linux-accelerators
  Cc: Herbert Xu, arnd, gregkh, jgg



On 2020/4/23 下午8:53, Jean-Philippe Brucker wrote:
> The mm_exit() op will be removed from the SVA API. When a process dies
> and its mm goes away, the IOMMU driver won't notify device drivers
> anymore. Drivers should expect to handle a lot more aborted DMA. On the
> upside, it does greatly simplify the queue management.
>
> The uacce_mm struct, that tracks all queues bound to an mm, was only
> used by the mm_exit() callback. Remove it.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Thanks Jean
Acked-by: Zhangfei Gao <zhangfei.gao@linaro.org>

> ---
> v1->v2: clear q->handle after unbind

Thanks
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit()
  2020-04-23 12:53 [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
  2020-04-23 12:53 ` [PATCH v2 1/2] uacce: Remove mm_exit() op Jean-Philippe Brucker
  2020-04-23 12:53 ` [PATCH v2 2/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
@ 2020-05-27 10:10 ` Jean-Philippe Brucker
  2020-05-27 12:42   ` Joerg Roedel
  2020-05-29 12:53 ` Joerg Roedel
  3 siblings, 1 reply; 10+ messages in thread
From: Jean-Philippe Brucker @ 2020-05-27 10:10 UTC (permalink / raw)
  To: iommu, linux-accelerators; +Cc: arnd, gregkh, jgg, zhangfei.gao

Hi Joerg,

I was wondering if you could take these two patches for v5.8. The API
change is a precursor for the SVA support in SMMUv3, and the VT-d
implementation of the SVA API (queued for 5.8) doesn't implement
iommu_sva_ops.

Thanks,
Jean

On Thu, Apr 23, 2020 at 02:53:27PM +0200, Jean-Philippe Brucker wrote:
> The IOMMU SVA API currently requires device drivers to implement an
> mm_exit() callback, which stops device jobs that do DMA. This function
> is called in the release() MMU notifier, when an address space that is
> shared with a device exits.
> 
> It has been noted several times during discussions about SVA that
> cancelling DMA jobs can be slow and complex, and doing it in the
> release() notifier might cause synchronization issues. Device drivers
> must in any case call unbind() to remove their bond, after stopping DMA
> from a more favorable context (release of a file descriptor).
> 
> Patch 1 removes the mm_exit() callback from the uacce module, and patch
> 2 removes it from the IOMMU API. Since v1 [1] I fixed the uacce unbind
> reported by Zhangfei and added details in the commit message of patch 2.

[1] https://lore.kernel.org/linux-iommu/20200408140427.212807-1-jean-philippe@linaro.org/

> Jean-Philippe Brucker (2):
>   uacce: Remove mm_exit() op
>   iommu: Remove iommu_sva_ops::mm_exit()
> 
>  include/linux/iommu.h      |  30 -------
>  include/linux/uacce.h      |  34 ++------
>  drivers/iommu/iommu.c      |  11 ---
>  drivers/misc/uacce/uacce.c | 172 +++++++++----------------------------
>  4 files changed, 51 insertions(+), 196 deletions(-)
> 
> -- 
> 2.26.0
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit()
  2020-05-27 10:10 ` [PATCH v2 0/2] " Jean-Philippe Brucker
@ 2020-05-27 12:42   ` Joerg Roedel
  2020-05-28  3:32     ` Lu Baolu
  0 siblings, 1 reply; 10+ messages in thread
From: Joerg Roedel @ 2020-05-27 12:42 UTC (permalink / raw)
  To: Jean-Philippe Brucker
  Cc: arnd, gregkh, jgg, iommu, zhangfei.gao, linux-accelerators

Hi Jean-Philippe,

On Wed, May 27, 2020 at 12:10:38PM +0200, Jean-Philippe Brucker wrote:
> I was wondering if you could take these two patches for v5.8. The API
> change is a precursor for the SVA support in SMMUv3, and the VT-d
> implementation of the SVA API (queued for 5.8) doesn't implement
> iommu_sva_ops.

I'd like some Acks on patch 2 (at least from the Intel people) before
going ahead with this.


Regards,

	Joerg

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit()
  2020-05-27 12:42   ` Joerg Roedel
@ 2020-05-28  3:32     ` Lu Baolu
  2020-05-28  3:34       ` Lu Baolu
  2020-05-28 15:33       ` Jacob Pan
  0 siblings, 2 replies; 10+ messages in thread
From: Lu Baolu @ 2020-05-28  3:32 UTC (permalink / raw)
  To: Joerg Roedel, Jean-Philippe Brucker, jacob.jun.pan
  Cc: arnd, gregkh, jgg, iommu, zhangfei.gao, linux-accelerators

Hi Jorge,

On 2020/5/27 20:42, Joerg Roedel wrote:
> Hi Jean-Philippe,
> 
> On Wed, May 27, 2020 at 12:10:38PM +0200, Jean-Philippe Brucker wrote:
>> I was wondering if you could take these two patches for v5.8. The API
>> change is a precursor for the SVA support in SMMUv3, and the VT-d
>> implementation of the SVA API (queued for 5.8) doesn't implement
>> iommu_sva_ops.
> 
> I'd like some Acks on patch 2 (at least from the Intel people) before
> going ahead with this.
> 

Patch 2 in this series looks good to me.

Acked-by: Lu Baolu <baolu.lu@linux.intel.com>

+Jacob, he participated in the discussions.

Best regards,
baolu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit()
  2020-05-28  3:32     ` Lu Baolu
@ 2020-05-28  3:34       ` Lu Baolu
  2020-05-28 15:33       ` Jacob Pan
  1 sibling, 0 replies; 10+ messages in thread
From: Lu Baolu @ 2020-05-28  3:34 UTC (permalink / raw)
  To: Joerg Roedel, Jean-Philippe Brucker, jacob.jun.pan
  Cc: arnd, gregkh, jgg, iommu, zhangfei.gao, linux-accelerators

Sorry for the typo.

On 2020/5/28 11:32, Lu Baolu wrote:
> Hi Jorge,
      ^^^^^
      Joerg


> 
> On 2020/5/27 20:42, Joerg Roedel wrote:
>> Hi Jean-Philippe,
>>
>> On Wed, May 27, 2020 at 12:10:38PM +0200, Jean-Philippe Brucker wrote:
>>> I was wondering if you could take these two patches for v5.8. The API
>>> change is a precursor for the SVA support in SMMUv3, and the VT-d
>>> implementation of the SVA API (queued for 5.8) doesn't implement
>>> iommu_sva_ops.
>>
>> I'd like some Acks on patch 2 (at least from the Intel people) before
>> going ahead with this.
>>
> 
> Patch 2 in this series looks good to me.
> 
> Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
> 
> +Jacob, he participated in the discussions.
> 
> Best regards,
> baolu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit()
  2020-05-28  3:32     ` Lu Baolu
  2020-05-28  3:34       ` Lu Baolu
@ 2020-05-28 15:33       ` Jacob Pan
  1 sibling, 0 replies; 10+ messages in thread
From: Jacob Pan @ 2020-05-28 15:33 UTC (permalink / raw)
  To: Lu Baolu
  Cc: Jean-Philippe Brucker, arnd, gregkh, jgg, iommu, zhangfei.gao,
	linux-accelerators

On Thu, 28 May 2020 11:32:50 +0800
Lu Baolu <baolu.lu@linux.intel.com> wrote:

> Hi Jorge,
> 
> On 2020/5/27 20:42, Joerg Roedel wrote:
> > Hi Jean-Philippe,
> > 
> > On Wed, May 27, 2020 at 12:10:38PM +0200, Jean-Philippe Brucker
> > wrote:  
> >> I was wondering if you could take these two patches for v5.8. The
> >> API change is a precursor for the SVA support in SMMUv3, and the
> >> VT-d implementation of the SVA API (queued for 5.8) doesn't
> >> implement iommu_sva_ops.  
> > 
> > I'd like some Acks on patch 2 (at least from the Intel people)
> > before going ahead with this.
> >   
> 
> Patch 2 in this series looks good to me.
> 
> Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
> 
> +Jacob, he participated in the discussions.
> 
Patch 2 looks good to me, VT-d has a similar flow and challenge to stop
DMA in mm_exit. We disable fault reporting during this window between
mm_exit and FD close trigged unbind.

Acked-by: Jacob Pan <jacob.jun.pan@linux.intel.com>

> Best regards,
> baolu

[Jacob Pan]
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit()
  2020-04-23 12:53 [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
                   ` (2 preceding siblings ...)
  2020-05-27 10:10 ` [PATCH v2 0/2] " Jean-Philippe Brucker
@ 2020-05-29 12:53 ` Joerg Roedel
  3 siblings, 0 replies; 10+ messages in thread
From: Joerg Roedel @ 2020-05-29 12:53 UTC (permalink / raw)
  To: Jean-Philippe Brucker
  Cc: arnd, gregkh, jgg, iommu, zhangfei.gao, linux-accelerators

On Thu, Apr 23, 2020 at 02:53:27PM +0200, Jean-Philippe Brucker wrote:
> The IOMMU SVA API currently requires device drivers to implement an
> mm_exit() callback, which stops device jobs that do DMA. This function
> is called in the release() MMU notifier, when an address space that is
> shared with a device exits.
> 
> It has been noted several times during discussions about SVA that
> cancelling DMA jobs can be slow and complex, and doing it in the
> release() notifier might cause synchronization issues. Device drivers
> must in any case call unbind() to remove their bond, after stopping DMA
> from a more favorable context (release of a file descriptor).
> 
> Patch 1 removes the mm_exit() callback from the uacce module, and patch
> 2 removes it from the IOMMU API. Since v1 [1] I fixed the uacce unbind
> reported by Zhangfei and added details in the commit message of patch 2.
> 
> Jean-Philippe Brucker (2):
>   uacce: Remove mm_exit() op
>   iommu: Remove iommu_sva_ops::mm_exit()

Applied, thanks.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2020-05-29 12:53 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-23 12:53 [PATCH v2 0/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
2020-04-23 12:53 ` [PATCH v2 1/2] uacce: Remove mm_exit() op Jean-Philippe Brucker
2020-04-24  1:05   ` Zhangfei Gao
2020-04-23 12:53 ` [PATCH v2 2/2] iommu: Remove iommu_sva_ops::mm_exit() Jean-Philippe Brucker
2020-05-27 10:10 ` [PATCH v2 0/2] " Jean-Philippe Brucker
2020-05-27 12:42   ` Joerg Roedel
2020-05-28  3:32     ` Lu Baolu
2020-05-28  3:34       ` Lu Baolu
2020-05-28 15:33       ` Jacob Pan
2020-05-29 12:53 ` Joerg Roedel

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).