All of lore.kernel.org
 help / color / mirror / Atom feed
* completely rework the dma_resv semantic
@ 2021-12-07 12:33 Christian König
  2021-12-07 12:33 ` [PATCH 01/24] dma-buf: add dma_resv_replace_fences Christian König
                   ` (24 more replies)
  0 siblings, 25 replies; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Hi Daniel,

just a gentle ping that you wanted to take a look at this.

Not much changed compared to the last version, only a minor bugfix in
the dma_resv_get_singleton error handling.

Regards,
Christian.



^ permalink raw reply	[flat|nested] 95+ messages in thread

* [PATCH 01/24] dma-buf: add dma_resv_replace_fences
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:05     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 02/24] dma-buf: finally make the dma_resv_list private Christian König
                   ` (23 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

This function allows replacing fences from the shared fence list when
we can guarantee that the operation represented by the original fence has
finished, or no longer accesses the resources protected by the dma_resv
object, by the time the new fence finishes.

Then use this function in the amdkfd code when BOs are unmapped from the
process.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c                    | 43 ++++++++++++++++
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
 include/linux/dma-resv.h                      |  2 +
 3 files changed, 52 insertions(+), 42 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 4deea75c0b9c..a688dbded3d3 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -284,6 +284,49 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);
 
+/**
+ * dma_resv_replace_fences - replace fences in the dma_resv obj
+ * @obj: the reservation object
+ * @context: the context of the fences to replace
+ * @replacement: the new fence to use instead
+ *
+ * Replace fences with a specified context with a new fence. Only valid if the
+ * operation represented by the original fences is completed or has no longer
+ * access to the resources protected by the dma_resv object when the new fence
+ * completes.
+ */
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+			     struct dma_fence *replacement)
+{
+	struct dma_resv_list *list;
+	struct dma_fence *old;
+	unsigned int i;
+
+	dma_resv_assert_held(obj);
+
+	write_seqcount_begin(&obj->seq);
+
+	old = dma_resv_excl_fence(obj);
+	if (old->context == context) {
+		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
+		dma_fence_put(old);
+	}
+
+	list = dma_resv_shared_list(obj);
+	for (i = 0; list && i < list->shared_count; ++i) {
+		old = rcu_dereference_protected(list->shared[i],
+						dma_resv_held(obj));
+		if (old->context != context)
+			continue;
+
+		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
+		dma_fence_put(old);
+	}
+
+	write_seqcount_end(&obj->seq);
+}
+EXPORT_SYMBOL(dma_resv_replace_fences);
+
 /**
  * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 71acd577803e..b558ef0f8c4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -236,53 +236,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct dma_resv *resv = bo->tbo.base.resv;
-	struct dma_resv_list *old, *new;
-	unsigned int i, j, k;
+	struct dma_fence *replacement;
 
 	if (!ef)
 		return -EINVAL;
 
-	old = dma_resv_shared_list(resv);
-	if (!old)
-		return 0;
-
-	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
-	if (!new)
-		return -ENOMEM;
-
-	/* Go through all the shared fences in the resevation object and sort
-	 * the interesting ones to the end of the list.
+	/* TODO: Instead of block before we should use the fence of the page
+	 * table update and TLB flush here directly.
 	 */
-	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
-		struct dma_fence *f;
-
-		f = rcu_dereference_protected(old->shared[i],
-					      dma_resv_held(resv));
-
-		if (f->context == ef->base.context)
-			RCU_INIT_POINTER(new->shared[--j], f);
-		else
-			RCU_INIT_POINTER(new->shared[k++], f);
-	}
-	new->shared_max = old->shared_max;
-	new->shared_count = k;
-
-	/* Install the new fence list, seqcount provides the barriers */
-	write_seqcount_begin(&resv->seq);
-	RCU_INIT_POINTER(resv->fence, new);
-	write_seqcount_end(&resv->seq);
-
-	/* Drop the references to the removed fences or move them to ef_list */
-	for (i = j; i < old->shared_count; ++i) {
-		struct dma_fence *f;
-
-		f = rcu_dereference_protected(new->shared[i],
-					      dma_resv_held(resv));
-		dma_fence_put(f);
-	}
-	kfree_rcu(old, rcu);
-
+	replacement = dma_fence_get_stub();
+	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
+				replacement);
+	dma_fence_put(replacement);
 	return 0;
 }
 
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index eebf04325b34..e0be34265eae 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -457,6 +457,8 @@ void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+			     struct dma_fence *fence);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
 int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
 			unsigned *pshared_count, struct dma_fence ***pshared);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 02/24] dma-buf: finally make the dma_resv_list private
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
  2021-12-07 12:33 ` [PATCH 01/24] dma-buf: add dma_resv_replace_fences Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:08     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 03/24] dma-buf: drop excl_fence parameter from dma_resv_get_fences Christian König
                   ` (22 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Drivers should never touch this directly.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 26 ++++++++++++++++++++++++++
 include/linux/dma-resv.h   | 26 +-------------------------
 2 files changed, 27 insertions(+), 25 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index a688dbded3d3..a12a3a39f280 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -56,6 +56,19 @@
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
+/**
+ * struct dma_resv_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: table of shared fences
+ * @shared_max: for growing shared fence table
+ * @shared: shared fence table
+ */
+struct dma_resv_list {
+	struct rcu_head rcu;
+	u32 shared_count, shared_max;
+	struct dma_fence __rcu *shared[];
+};
+
 /**
  * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
@@ -133,6 +146,19 @@ void dma_resv_fini(struct dma_resv *obj)
 }
 EXPORT_SYMBOL(dma_resv_fini);
 
+/**
+ * dma_resv_shared_list - get the reservation object's shared fence list
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. Caller must either hold the objects
+ * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
+ * or one of the variants of each
+ */
+static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
+{
+	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+}
+
 /**
  * dma_resv_reserve_shared - Reserve space to add shared fences to
  * a dma_resv.
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index e0be34265eae..3baf2a4a9a0d 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -47,18 +47,7 @@
 
 extern struct ww_class reservation_ww_class;
 
-/**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct dma_resv_list {
-	struct rcu_head rcu;
-	u32 shared_count, shared_max;
-	struct dma_fence __rcu *shared[];
-};
+struct dma_resv_list;
 
 /**
  * struct dma_resv - a reservation object manages fences for a buffer
@@ -440,19 +429,6 @@ dma_resv_excl_fence(struct dma_resv *obj)
 	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
 }
 
-/**
- * dma_resv_shared_list - get the reservation object's shared fence list
- * @obj: the reservation object
- *
- * Returns the shared fence list. Caller must either hold the objects
- * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
- * or one of the variants of each
- */
-static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
-{
-	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
-}
-
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 03/24] dma-buf: drop excl_fence parameter from dma_resv_get_fences
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
  2021-12-07 12:33 ` [PATCH 01/24] dma-buf: add dma_resv_replace_fences Christian König
  2021-12-07 12:33 ` [PATCH 02/24] dma-buf: finally make the dma_resv_list private Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:13     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2 Christian König
                   ` (21 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

The ability to return the exclusive fence separately is no longer used.

Instead add a write parameter to indicate the use case.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c                   | 48 ++++++++------------
 drivers/dma-buf/st-dma-resv.c                | 26 ++---------
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  |  6 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c      |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  3 +-
 include/linux/dma-resv.h                     |  4 +-
 6 files changed, 31 insertions(+), 58 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index a12a3a39f280..480c305554a1 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -611,57 +611,45 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
- * @fence_excl: the returned exclusive fence (or NULL)
- * @shared_count: the number of shared fences returned
- * @shared: the array of shared fence ptrs returned (array is krealloc'd to
- * the required size, and must be freed by caller)
- *
- * Retrieve all fences from the reservation object. If the pointer for the
- * exclusive fence is not specified the fence is put into the array of the
- * shared fences as well. Returns either zero or -ENOMEM.
+ * @write: true if we should return all fences
+ * @num_fences: the number of fences returned
+ * @fences: the array of fence ptrs returned (array is krealloc'd to the
+ * required size, and must be freed by caller)
+ *
+ * Retrieve all fences from the reservation object.
+ * Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
-			unsigned int *shared_count, struct dma_fence ***shared)
+int dma_resv_get_fences(struct dma_resv *obj, bool write,
+			unsigned int *num_fences, struct dma_fence ***fences)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	*shared_count = 0;
-	*shared = NULL;
-
-	if (fence_excl)
-		*fence_excl = NULL;
+	*num_fences = 0;
+	*fences = NULL;
 
-	dma_resv_iter_begin(&cursor, obj, true);
+	dma_resv_iter_begin(&cursor, obj, write);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
 			unsigned int count;
 
-			while (*shared_count)
-				dma_fence_put((*shared)[--(*shared_count)]);
+			while (*num_fences)
+				dma_fence_put((*fences)[--(*num_fences)]);
 
-			if (fence_excl)
-				dma_fence_put(*fence_excl);
-
-			count = cursor.shared_count;
-			count += fence_excl ? 0 : 1;
+			count = cursor.shared_count + 1;
 
 			/* Eventually re-allocate the array */
-			*shared = krealloc_array(*shared, count,
+			*fences = krealloc_array(*fences, count,
 						 sizeof(void *),
 						 GFP_KERNEL);
-			if (count && !*shared) {
+			if (count && !*fences) {
 				dma_resv_iter_end(&cursor);
 				return -ENOMEM;
 			}
 		}
 
-		dma_fence_get(fence);
-		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
-			*fence_excl = fence;
-		else
-			(*shared)[(*shared_count)++] = fence;
+		(*fences)[(*num_fences)++] = dma_fence_get(fence);
 	}
 	dma_resv_iter_end(&cursor);
 
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index bc32b3eedcb6..cbe999c6e7a6 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -275,7 +275,7 @@ static int test_shared_for_each_unlocked(void *arg)
 
 static int test_get_fences(void *arg, bool shared)
 {
-	struct dma_fence *f, *excl = NULL, **fences = NULL;
+	struct dma_fence *f, **fences = NULL;
 	struct dma_resv resv;
 	int r, i;
 
@@ -304,35 +304,19 @@ static int test_get_fences(void *arg, bool shared)
 	}
 	dma_resv_unlock(&resv);
 
-	r = dma_resv_get_fences(&resv, &excl, &i, &fences);
+	r = dma_resv_get_fences(&resv, shared, &i, &fences);
 	if (r) {
 		pr_err("get_fences failed\n");
 		goto err_free;
 	}
 
-	if (shared) {
-		if (excl != NULL) {
-			pr_err("get_fences returned unexpected excl fence\n");
-			goto err_free;
-		}
-		if (i != 1 || fences[0] != f) {
-			pr_err("get_fences returned unexpected shared fence\n");
-			goto err_free;
-		}
-	} else {
-		if (excl != f) {
-			pr_err("get_fences returned unexpected excl fence\n");
-			goto err_free;
-		}
-		if (i != 0) {
-			pr_err("get_fences returned unexpected shared fence\n");
-			goto err_free;
-		}
+	if (i != 1 || fences[0] != f) {
+		pr_err("get_fences returned unexpected fence\n");
+		goto err_free;
 	}
 
 	dma_fence_signal(f);
 err_free:
-	dma_fence_put(excl);
 	while (i--)
 		dma_fence_put(fences[i]);
 	kfree(fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 68108f151dad..d17e1c911689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -200,8 +200,10 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
-				&work->shared_count, &work->shared);
+	/* TODO: Unify this with other drivers */
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
+				&work->shared_count,
+				&work->shared);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to get fences for buffer\n");
 		goto unpin;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b7fb72bff2c1..be48487e2ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	unsigned count;
 	int r;
 
-	r = dma_resv_get_fences(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences(resv, true, &count, &fences);
 	if (r)
 		goto fallback;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b5e8ce86dbe7..64c90ff348f2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,8 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences(robj, NULL,
-						  &bo->nr_shared,
+			ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
 						  &bo->shared);
 			if (ret)
 				return ret;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 3baf2a4a9a0d..fa2002939b19 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -436,8 +436,8 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
 			     struct dma_fence *fence);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
-			unsigned *pshared_count, struct dma_fence ***pshared);
+int dma_resv_get_fences(struct dma_resv *obj, bool write,
+			unsigned int *num_fences, struct dma_fence ***fences);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
 long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
 			   unsigned long timeout);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (2 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 03/24] dma-buf: drop excl_fence parameter from dma_resv_get_fences Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:21     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 05/24] RDMA: use dma_resv_wait() instead of extracting the fence Christian König
                   ` (20 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Add a function to simplify getting a single fence for all the fences in
the dma_resv object.

v2: fix ref leak in error handling

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
 include/linux/dma-resv.h   |  2 ++
 2 files changed, 54 insertions(+)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 480c305554a1..694716a3d66d 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -34,6 +34,7 @@
  */
 
 #include <linux/dma-resv.h>
+#include <linux/dma-fence-array.h>
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/sched/mm.h>
@@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
 }
 EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 
+/**
+ * dma_resv_get_singleton - Get a single fence for all the fences
+ * @obj: the reservation object
+ * @write: true if we should return all fences
+ * @fence: the resulting fence
+ *
+ * Get a single fence representing all the fences inside the resv object.
+ * Returns either 0 for success or -ENOMEM.
+ *
+ * Warning: This can't be used like this when adding the fence back to the resv
+ * object since that can lead to stack corruption when finalizing the
+ * dma_fence_array.
+ */
+int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+			   struct dma_fence **fence)
+{
+	struct dma_fence_array *array;
+	struct dma_fence **fences;
+	unsigned count;
+	int r;
+
+	r = dma_resv_get_fences(obj, write, &count, &fences);
+        if (r)
+		return r;
+
+	if (count == 0) {
+		*fence = NULL;
+		return 0;
+	}
+
+	if (count == 1) {
+		*fence = fences[0];
+		kfree(fences);
+		return 0;
+	}
+
+	array = dma_fence_array_create(count, fences,
+				       dma_fence_context_alloc(1),
+				       1, false);
+	if (!array) {
+		while (count--)
+			dma_fence_put(fences[count]);
+		kfree(fences);
+		return -ENOMEM;
+	}
+
+	*fence = &array->base;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
+
 /**
  * dma_resv_wait_timeout - Wait on reservation's objects
  * shared and/or exclusive fences.
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index fa2002939b19..cdfbbda6f600 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
 int dma_resv_get_fences(struct dma_resv *obj, bool write,
 			unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+			   struct dma_fence **fence);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
 long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
 			   unsigned long timeout);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 05/24] RDMA: use dma_resv_wait() instead of extracting the fence
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (3 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2 Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:23     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 06/24] drm/etnaviv: stop using dma_resv_excl_fence Christian König
                   ` (19 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Use dma_resv_wait() instead of extracting the exclusive fence and
waiting on it manually.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/infiniband/core/umem_dmabuf.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index f0760741f281..d32cd7538835 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -16,7 +16,6 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 {
 	struct sg_table *sgt;
 	struct scatterlist *sg;
-	struct dma_fence *fence;
 	unsigned long start, end, cur = 0;
 	unsigned int nmap = 0;
 	int i;
@@ -68,11 +67,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 	 * may be not up-to-date. Wait for the exporter to finish
 	 * the migration.
 	 */
-	fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);
-	if (fence)
-		return dma_fence_wait(fence, false);
-
-	return 0;
+	return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false,
+				     false, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 06/24] drm/etnaviv: stop using dma_resv_excl_fence
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (4 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 05/24] RDMA: use dma_resv_wait() instead of extracting the fence Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:26     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 07/24] drm/nouveau: " Christian König
                   ` (18 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

We can get the excl fence together with the shared ones as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  1 -
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 14 +++++---------
 drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 10 ----------
 3 files changed, 5 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 98e60df882b6..f596d743baa3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -80,7 +80,6 @@ struct etnaviv_gem_submit_bo {
 	u64 va;
 	struct etnaviv_gem_object *obj;
 	struct etnaviv_vram_mapping *mapping;
-	struct dma_fence *excl;
 	unsigned int nr_shared;
 	struct dma_fence **shared;
 };
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 64c90ff348f2..4286dc93fdaa 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -188,15 +188,11 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
 			continue;
 
-		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
-						  &bo->shared);
-			if (ret)
-				return ret;
-		} else {
-			bo->excl = dma_fence_get(dma_resv_excl_fence(robj));
-		}
-
+		ret = dma_resv_get_fences(robj,
+					  !!(bo->flags & ETNA_SUBMIT_BO_WRITE),
+					  &bo->nr_shared, &bo->shared);
+		if (ret)
+			return ret;
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 180bb633d5c5..8c038a363d15 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -39,16 +39,6 @@ etnaviv_sched_dependency(struct drm_sched_job *sched_job,
 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
 		int j;
 
-		if (bo->excl) {
-			fence = bo->excl;
-			bo->excl = NULL;
-
-			if (!dma_fence_is_signaled(fence))
-				return fence;
-
-			dma_fence_put(fence);
-		}
-
 		for (j = 0; j < bo->nr_shared; j++) {
 			if (!bo->shared[j])
 				continue;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 07/24] drm/nouveau: stop using dma_resv_excl_fence
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (5 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 06/24] drm/etnaviv: stop using dma_resv_excl_fence Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:26     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 08/24] drm/vmwgfx: " Christian König
                   ` (17 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Instead use the new dma_resv_get_singleton function.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index fa73fe57f97b..74f8652d2bd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -959,7 +959,14 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
+	struct dma_fence *fence;
+	int ret;
+
+	/* TODO: This is actually a memory management dependency */
+	ret = dma_resv_get_singleton(bo->base.resv, false, &fence);
+	if (ret)
+		dma_resv_wait_timeout(bo->base.resv, false, false,
+				      MAX_SCHEDULE_TIMEOUT);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 08/24] drm/vmwgfx: stop using dma_resv_excl_fence
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (6 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 07/24] drm/nouveau: " Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:31     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 09/24] drm/radeon: " Christian König
                   ` (16 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Instead use the new dma_resv_get_singleton function.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8d1e869cc196..23c3fc2cbf10 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1168,8 +1168,10 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
 		vmw_bo_fence_single(bo, NULL);
 		if (bo->moving)
 			dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get
-			(dma_resv_excl_fence(bo->base.resv));
+
+		/* TODO: This is actually a memory management dependency */
+		return dma_resv_get_singleton(bo->base.resv, false,
+					      &bo->moving);
 	}
 
 	return 0;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 09/24] drm/radeon: stop using dma_resv_excl_fence
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (7 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 08/24] drm/vmwgfx: " Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:30     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 10/24] drm/amdgpu: remove excl as shared workarounds Christian König
                   ` (15 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Instead use the new dma_resv_get_singleton function.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon_display.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 573154268d43..a6f875118f01 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,12 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
+	r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence);
+	if (r) {
+		radeon_bo_unreserve(new_rbo);
+		DRM_ERROR("failed to get new rbo buffer fences\n");
+		goto cleanup;
+	}
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 10/24] drm/amdgpu: remove excl as shared workarounds
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (8 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 09/24] drm/radeon: " Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:34     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround Christian König
                   ` (14 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

This was added because of the now-dropped shared-on-excl dependency.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 5 +----
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 ------
 2 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0311d799a010..53e407ea4c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1275,14 +1275,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 		/*
 		 * Work around dma_resv shortcommings by wrapping up the
 		 * submission in a dma_fence_chain and add it as exclusive
-		 * fence, but first add the submission as shared fence to make
-		 * sure that shared fences never signal before the exclusive
-		 * one.
+		 * fence.
 		 */
 		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
 				     dma_fence_get(p->fence), 1);
 
-		dma_resv_add_shared_fence(resv, p->fence);
 		rcu_assign_pointer(resv->fence_excl, &chain->base);
 		e->chain = NULL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a1e63ba4c54a..85d31d85c384 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -226,12 +226,6 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	if (!amdgpu_vm_ready(vm))
 		goto out_unlock;
 
-	fence = dma_resv_excl_fence(bo->tbo.base.resv);
-	if (fence) {
-		amdgpu_bo_fence(bo, fence, true);
-		fence = NULL;
-	}
-
 	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 	if (r || !fence)
 		goto out_unlock;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (9 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 10/24] drm/amdgpu: remove excl as shared workarounds Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:37     ` Daniel Vetter
  2021-12-07 12:33 ` [PATCH 12/24] dma-buf: finally make dma_resv_excl_fence private Christian König
                   ` (13 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Get the write fence using dma_resv_for_each_fence instead of accessing
it manually.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 53e407ea4c89..7facd614e50a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1268,6 +1268,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 		struct dma_resv *resv = e->tv.bo->base.resv;
 		struct dma_fence_chain *chain = e->chain;
+		struct dma_resv_iter cursor;
+		struct dma_fence *fence;
 
 		if (!chain)
 			continue;
@@ -1277,9 +1279,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 		 * submission in a dma_fence_chain and add it as exclusive
 		 * fence.
 		 */
-		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
-				     dma_fence_get(p->fence), 1);
-
+		dma_resv_for_each_fence(&cursor, resv, false, fence) {
+			break;
+		}
+		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
 		rcu_assign_pointer(resv->fence_excl, &chain->base);
 		e->chain = NULL;
 	}
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 12/24] dma-buf: finally make dma_resv_excl_fence private
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (10 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround Christian König
@ 2021-12-07 12:33 ` Christian König
  2021-12-22 21:39     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object Christian König
                   ` (12 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:33 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Drivers should never touch this directly.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 17 +++++++++++++++++
 include/linux/dma-resv.h   | 17 -----------------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 694716a3d66d..9acceabc9399 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -147,6 +147,23 @@ void dma_resv_fini(struct dma_resv *obj)
 }
 EXPORT_SYMBOL(dma_resv_fini);
 
+/**
+ * dma_resv_excl_fence - return the object's exclusive fence
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any). Caller must either hold the objects
+ * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
+ * or one of the variants of each
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+dma_resv_excl_fence(struct dma_resv *obj)
+{
+	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
+}
+
 /**
  * dma_resv_shared_list - get the reservation object's shared fence list
  * @obj: the reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index cdfbbda6f600..40ac9d486f8f 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -412,23 +412,6 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 	ww_mutex_unlock(&obj->lock);
 }
 
-/**
- * dma_resv_excl_fence - return the object's exclusive fence
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Caller must either hold the objects
- * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
- * or one of the variants of each
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_excl_fence(struct dma_resv *obj)
-{
-	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
-}
-
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (11 preceding siblings ...)
  2021-12-07 12:33 ` [PATCH 12/24] dma-buf: finally make dma_resv_excl_fence private Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 21:43     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 14/24] dma-buf/drivers: make reserving a shared slot mandatory v2 Christian König
                   ` (11 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

So far we had the approach of using a directed acyclic
graph with the dma_resv obj.

This turned out to have many downsides; in particular, it means
that every single driver and user of this interface needs
to be aware of this restriction when adding fences. If the
rules for the DAG are not followed then we end up with
potentially hard-to-debug memory corruption, information
leaks or even elephant big security holes because we allow
userspace to access freed up memory.

Since we already took a step back from that by always
looking at all fences we now go a step further and stop
dropping the shared fences when a new exclusive one is
added.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 9acceabc9399..ecb2ff606bac 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -383,29 +383,16 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
 	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
-	struct dma_resv_list *old;
-	u32 i = 0;
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_shared_list(obj);
-	if (old)
-		i = old->shared_count;
-
 	dma_fence_get(fence);
 
 	write_seqcount_begin(&obj->seq);
 	/* write_seqcount_begin provides the necessary memory barrier */
 	RCU_INIT_POINTER(obj->fence_excl, fence);
-	if (old)
-		old->shared_count = 0;
 	write_seqcount_end(&obj->seq);
 
-	/* inplace update, no shared fences */
-	while (i--)
-		dma_fence_put(rcu_dereference_protected(old->shared[i],
-						dma_resv_held(obj)));
-
 	dma_fence_put(old_fence);
 }
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 14/24] dma-buf/drivers: make reserving a shared slot mandatory v2
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (12 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 21:50     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 15/24] drm: support more than one write fence in drm_gem_plane_helper_prepare_fb Christian König
                   ` (10 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Audit all the users of dma_resv_add_excl_fence() and make sure they
also reserve a shared slot when only trying to add an exclusive fence.

This is the next step towards handling the exclusive fence like a
shared one.

v2: fix missed case in amdgpu

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/st-dma-resv.c                 | 64 +++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  8 +++
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  8 +--
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c   |  3 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  8 +--
 .../drm/i915/gem/selftests/i915_gem_migrate.c |  5 +-
 drivers/gpu/drm/i915/i915_vma.c               |  6 ++
 .../drm/i915/selftests/intel_memory_region.c  |  7 ++
 drivers/gpu/drm/lima/lima_gem.c               | 10 ++-
 drivers/gpu/drm/msm/msm_gem_submit.c          | 18 +++---
 drivers/gpu/drm/nouveau/nouveau_fence.c       |  9 +--
 drivers/gpu/drm/panfrost/panfrost_job.c       |  4 ++
 drivers/gpu/drm/ttm/ttm_bo_util.c             | 12 +++-
 drivers/gpu/drm/ttm/ttm_execbuf_util.c        | 11 ++--
 drivers/gpu/drm/v3d/v3d_gem.c                 | 15 +++--
 drivers/gpu/drm/vgem/vgem_fence.c             | 12 ++--
 drivers/gpu/drm/virtio/virtgpu_gem.c          |  9 +++
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            | 16 +++--
 18 files changed, 133 insertions(+), 92 deletions(-)

diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index cbe999c6e7a6..f33bafc78693 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -75,17 +75,16 @@ static int test_signaling(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			goto err_unlock;
-		}
+	r = dma_resv_reserve_shared(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		goto err_unlock;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 
 	if (dma_resv_test_signaled(&resv, shared)) {
 		pr_err("Resv unexpectedly signaled\n");
@@ -134,17 +133,16 @@ static int test_for_each(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			goto err_unlock;
-		}
+	r = dma_resv_reserve_shared(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		goto err_unlock;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 
 	r = -ENOENT;
 	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
@@ -206,18 +204,17 @@ static int test_for_each_unlocked(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			dma_resv_unlock(&resv);
-			goto err_free;
-		}
+	r = dma_resv_reserve_shared(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		dma_resv_unlock(&resv);
+		goto err_free;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 	dma_resv_unlock(&resv);
 
 	r = -ENOENT;
@@ -290,18 +287,17 @@ static int test_get_fences(void *arg, bool shared)
 		goto err_resv;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			dma_resv_unlock(&resv);
-			goto err_resv;
-		}
+	r = dma_resv_reserve_shared(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		dma_resv_unlock(&resv);
+		goto err_resv;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 	dma_resv_unlock(&resv);
 
 	r = dma_resv_get_fences(&resv, shared, &i, &fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4fcfc2313b8c..24a6b88afcca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1367,6 +1367,14 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
 	struct dma_resv *resv = bo->tbo.base.resv;
+	int r;
+
+	r = dma_resv_reserve_shared(resv, 1);
+	if (r) {
+		/* As last resort on OOM we block for the fence */
+		dma_fence_wait(fence, false);
+		return;
+	}
 
 	if (shared)
 		dma_resv_add_shared_fence(resv, fence);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 4286dc93fdaa..d4a7073190ec 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -179,11 +179,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
 		struct dma_resv *robj = bo->obj->base.resv;
 
-		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
-			ret = dma_resv_reserve_shared(robj, 1);
-			if (ret)
-				return ret;
-		}
+		ret = dma_resv_reserve_shared(robj, 1);
+		if (ret)
+			return ret;
 
 		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
 			continue;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index f0435c6feb68..fc57ab914b60 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -100,7 +100,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	trace_i915_gem_object_clflush(obj);
 
 	clflush = NULL;
-	if (!(flags & I915_CLFLUSH_SYNC))
+	if (!(flags & I915_CLFLUSH_SYNC) &&
+	    dma_resv_reserve_shared(obj->base.resv, 1) == 0)
 		clflush = clflush_work_create(obj);
 	if (clflush) {
 		i915_sw_fence_await_reservation(&clflush->base.chain,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 4d7da07442f2..fc0e1625847c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -989,11 +989,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
 			}
 		}
 
-		if (!(ev->flags & EXEC_OBJECT_WRITE)) {
-			err = dma_resv_reserve_shared(vma->resv, 1);
-			if (err)
-				return err;
-		}
+		err = dma_resv_reserve_shared(vma->resv, 1);
+		if (err)
+			return err;
 
 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
 			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 28a700f08b49..2bf491fd5cdf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -179,7 +179,10 @@ static int igt_lmem_pages_migrate(void *arg)
 					  i915_gem_object_is_lmem(obj),
 					  0xdeadbeaf, &rq);
 		if (rq) {
-			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+			err = dma_resv_reserve_shared(obj->base.resv, 1);
+			if (!err)
+				dma_resv_add_excl_fence(obj->base.resv,
+							&rq->fence);
 			i915_request_put(rq);
 		}
 		if (err)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index bef795e265a6..5ec87de63963 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1255,6 +1255,12 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
 			intel_frontbuffer_put(front);
 		}
 
+		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+			err = dma_resv_reserve_shared(vma->resv, 1);
+			if (unlikely(err))
+				return err;
+		}
+
 		if (fence) {
 			dma_resv_add_excl_fence(vma->resv, fence);
 			obj->write_domain = I915_GEM_DOMAIN_RENDER;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 418caae84759..b85af1672a7e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -894,6 +894,13 @@ static int igt_lmem_write_cpu(void *arg)
 	}
 
 	i915_gem_object_lock(obj, NULL);
+
+	err = dma_resv_reserve_shared(obj->base.resv, 1);
+	if (err) {
+		i915_gem_object_unlock(obj);
+		goto out_put;
+	}
+
 	/* Put the pages into a known state -- from the gpu for added fun */
 	intel_engine_pm_get(engine);
 	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index f9a9198ef198..b4846007463f 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -255,13 +255,11 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
 static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
 			    bool write, bool explicit)
 {
-	int err = 0;
+	int err;
 
-	if (!write) {
-		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
-		if (err)
-			return err;
-	}
+	err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
+	if (err)
+		return err;
 
 	/* explicit sync use user passed dep fence */
 	if (explicit)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3cb029f10925..e874d09b74ef 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -320,16 +320,14 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 
-		if (!write) {
-			/* NOTE: _reserve_shared() must happen before
-			 * _add_shared_fence(), which makes this a slightly
-			 * strange place to call it.  OTOH this is a
-			 * convenient can-fail point to hook it in.
-			 */
-			ret = dma_resv_reserve_shared(obj->resv, 1);
-			if (ret)
-				return ret;
-		}
+		/* NOTE: _reserve_shared() must happen before
+		 * _add_shared_fence(), which makes this a slightly
+		 * strange place to call it.  OTOH this is a
+		 * convenient can-fail point to hook it in.
+		 */
+		ret = dma_resv_reserve_shared(obj->resv, 1);
+		if (ret)
+			return ret;
 
 		/* exclusive fences must be ordered */
 		if (no_implicit && !write)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 26f9299df881..cd6715bd6d6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -349,12 +349,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 	struct nouveau_fence *f;
 	int ret;
 
-	if (!exclusive) {
-		ret = dma_resv_reserve_shared(resv, 1);
-
-		if (ret)
-			return ret;
-	}
+	ret = dma_resv_reserve_shared(resv, 1);
+	if (ret)
+		return ret;
 
 	dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
 		struct nouveau_channel *prev = NULL;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 908d79520853..89c3fe389476 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -247,6 +247,10 @@ static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i, ret;
 
 	for (i = 0; i < bo_count; i++) {
+		ret = dma_resv_reserve_shared(bos[i]->resv, 1);
+		if (ret)
+			return ret;
+
 		/* panfrost always uses write mode in its current uapi */
 		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
 							      true);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 72a94301bc95..ea9eabcc0a0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -221,9 +221,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
 	fbo->base = *bo;
 
-	ttm_bo_get(bo);
-	fbo->bo = bo;
-
 	/**
 	 * Fix up members that we shouldn't copy directly:
 	 * TODO: Explicit member copy would probably be better here.
@@ -246,6 +243,15 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
+	ret = dma_resv_reserve_shared(&fbo->base.base._resv, 1);
+	if (ret) {
+		kfree(fbo);
+		return ret;
+	}
+
+	ttm_bo_get(bo);
+	fbo->bo = bo;
+
 	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
 
 	*new_obj = &fbo->base;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 071c48d672c6..5da922639d54 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -90,6 +90,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
+		unsigned int num_fences;
 
 		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
 		if (ret == -EALREADY && dups) {
@@ -100,12 +101,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			continue;
 		}
 
+		num_fences = min(entry->num_shared, 1u);
 		if (!ret) {
-			if (!entry->num_shared)
-				continue;
-
 			ret = dma_resv_reserve_shared(bo->base.resv,
-								entry->num_shared);
+						      num_fences);
 			if (!ret)
 				continue;
 		}
@@ -120,9 +119,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
 		}
 
-		if (!ret && entry->num_shared)
+		if (!ret)
 			ret = dma_resv_reserve_shared(bo->base.resv,
-								entry->num_shared);
+						      num_fences);
 
 		if (unlikely(ret != 0)) {
 			if (ticket) {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index c7ed2e1cbab6..1bea90e40ce1 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -259,16 +259,21 @@ v3d_lock_bo_reservations(struct v3d_job *job,
 		return ret;
 
 	for (i = 0; i < job->bo_count; i++) {
+		ret = dma_resv_reserve_shared(job->bo[i]->resv, 1);
+		if (ret)
+			goto fail;
+
 		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 							      job->bo[i], true);
-		if (ret) {
-			drm_gem_unlock_reservations(job->bo, job->bo_count,
-						    acquire_ctx);
-			return ret;
-		}
+		if (ret)
+			goto fail;
 	}
 
 	return 0;
+
+fail:
+	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index bd6f75285fd9..a4cb296d4fcd 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -157,12 +157,14 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	}
 
 	/* Expose the fence via the dma-buf */
-	ret = 0;
 	dma_resv_lock(resv, NULL);
-	if (arg->flags & VGEM_FENCE_WRITE)
-		dma_resv_add_excl_fence(resv, fence);
-	else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
-		dma_resv_add_shared_fence(resv, fence);
+	ret = dma_resv_reserve_shared(resv, 1);
+	if (!ret) {
+		if (arg->flags & VGEM_FENCE_WRITE)
+			dma_resv_add_excl_fence(resv, fence);
+		else
+			dma_resv_add_shared_fence(resv, fence);
+	}
 	dma_resv_unlock(resv);
 
 	/* Record the fence in our idr for later signaling */
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 2de61b63ef91..aec105cdd64c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -214,6 +214,7 @@ void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
 
 int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
 {
+	unsigned int i;
 	int ret;
 
 	if (objs->nents == 1) {
@@ -222,6 +223,14 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
 		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
 						&objs->ticket);
 	}
+	if (ret)
+		return ret;
+
+	for (i = 0; i < objs->nents; ++i) {
+		ret = dma_resv_reserve_shared(objs->objs[i]->resv, 1);
+		if (ret)
+			return ret;
+	}
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index c97a3d5e90ce..6d0abc2b0beb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1053,16 +1053,22 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 			 struct vmw_fence_obj *fence)
 {
 	struct ttm_device *bdev = bo->bdev;
-
 	struct vmw_private *dev_priv =
 		container_of(bdev, struct vmw_private, bdev);
+	int ret;
 
-	if (fence == NULL) {
+	if (fence == NULL)
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	else
+		dma_fence_get(&fence->base);
+
+	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	if (!ret)
 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
-		dma_fence_put(&fence->base);
-	} else
-		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+	else
+		/* Last resort fallback when we are OOM */
+		dma_fence_wait(&fence->base, false);
+	dma_fence_put(&fence->base);
 }
 
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 15/24] drm: support more than one write fence in drm_gem_plane_helper_prepare_fb
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (13 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 14/24] dma-buf/drivers: make reserving a shared slot mandatory v2 Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 21:51     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 16/24] drm/nouveau: support more than one write fence in fenv50_wndw_prepare_fb Christian König
                   ` (9 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Use dma_resv_get_singleton() here to eventually get more than one write
fence as a single fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_gem_atomic_helper.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index c3189afe10cb..9338ddb7edff 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -143,25 +143,21 @@
  */
 int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 {
-	struct dma_resv_iter cursor;
 	struct drm_gem_object *obj;
 	struct dma_fence *fence;
+	int ret;
 
 	if (!state->fb)
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	dma_resv_iter_begin(&cursor, obj->resv, false);
-	dma_resv_for_each_fence_unlocked(&cursor, fence) {
-		/* TODO: Currently there should be only one write fence, so this
-		 * here works fine. But drm_atomic_set_fence_for_plane() should
-		 * be changed to be able to handle more fences in general for
-		 * multiple BOs per fb anyway. */
-		dma_fence_get(fence);
-		break;
-	}
-	dma_resv_iter_end(&cursor);
+	ret = dma_resv_get_singleton(obj->resv, false, &fence);
+	if (ret)
+		return ret;
 
+	/* TODO: drm_atomic_set_fence_for_plane() should be changed to be able
+	 * to handle more fences in general for multiple BOs per fb.
+	 */
 	drm_atomic_set_fence_for_plane(state, fence);
 	return 0;
 }
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 16/24] drm/nouveau: support more than one write fence in fenv50_wndw_prepare_fb
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (14 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 15/24] drm: support more than one write fence in drm_gem_plane_helper_prepare_fb Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 21:52     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 17/24] drm/amdgpu: use dma_resv_get_singleton in amdgpu_pasid_free_cb Christian König
                   ` (8 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Use dma_resv_get_singleton() here to eventually get more than one write
fence as a single fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/nouveau/dispnv50/wndw.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 133c8736426a..b55a8a723581 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -536,8 +536,6 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 	struct nouveau_bo *nvbo;
 	struct nv50_head_atom *asyh;
 	struct nv50_wndw_ctxdma *ctxdma;
-	struct dma_resv_iter cursor;
-	struct dma_fence *fence;
 	int ret;
 
 	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
@@ -560,13 +558,11 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 			asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	dma_resv_iter_begin(&cursor, nvbo->bo.base.resv, false);
-	dma_resv_for_each_fence_unlocked(&cursor, fence) {
-		/* TODO: We only use the first writer here */
-		asyw->state.fence = dma_fence_get(fence);
-		break;
-	}
-	dma_resv_iter_end(&cursor);
+	ret = dma_resv_get_singleton(nvbo->bo.base.resv, false,
+				     &asyw->state.fence);
+	if (ret)
+		return ret;
+
 	asyw->image.offset[0] = nvbo->offset;
 
 	if (wndw->func->prepare) {
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 17/24] drm/amdgpu: use dma_resv_get_singleton in amdgpu_pasid_free_cb
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (15 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 16/24] drm/nouveau: support more than one write fence in fenv50_wndw_prepare_fb Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 21:53     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 18/24] dma-buf: add enum dma_resv_usage v3 Christian König
                   ` (7 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Makes the code a bit simpler.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 23 +++--------------------
 1 file changed, 3 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index be48487e2ca7..888d97143177 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -107,36 +107,19 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
 void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 			       u32 pasid)
 {
-	struct dma_fence *fence, **fences;
 	struct amdgpu_pasid_cb *cb;
-	unsigned count;
+	struct dma_fence *fence;
 	int r;
 
-	r = dma_resv_get_fences(resv, true, &count, &fences);
+	r = dma_resv_get_singleton(resv, true, &fence);
 	if (r)
 		goto fallback;
 
-	if (count == 0) {
+	if (!fence) {
 		amdgpu_pasid_free(pasid);
 		return;
 	}
 
-	if (count == 1) {
-		fence = fences[0];
-		kfree(fences);
-	} else {
-		uint64_t context = dma_fence_context_alloc(1);
-		struct dma_fence_array *array;
-
-		array = dma_fence_array_create(count, fences, context,
-					       1, false);
-		if (!array) {
-			kfree(fences);
-			goto fallback;
-		}
-		fence = &array->base;
-	}
-
 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
 	if (!cb) {
 		/* Last resort when we are OOM */
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 18/24] dma-buf: add enum dma_resv_usage v3
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (16 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 17/24] drm/amdgpu: use dma_resv_get_singleton in amdgpu_pasid_free_cb Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 22:00     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 19/24] dma-buf: specify usage while adding fences to dma_resv obj v2 Christian König
                   ` (6 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

This change adds the dma_resv_usage enum and allows us to specify why a
dma_resv object is queried for its containing fences.

Additional to that a dma_resv_usage_rw() helper function is added to aid
retrieving the fences for a read or write userspace submission.

This is then deployed to the different query functions of the dma_resv
object and all of their users. When the write parameter was previously
true we now use DMA_RESV_USAGE_READ and DMA_RESV_USAGE_WRITE otherwise.

v2: add KERNEL/OTHER in separate patch
v3: some kerneldoc suggestions by Daniel

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c                     |  3 +-
 drivers/dma-buf/dma-resv.c                    | 33 +++++----
 drivers/dma-buf/st-dma-resv.c                 | 48 ++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c       |  5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c        |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  7 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  3 +-
 drivers/gpu/drm/drm_gem.c                     |  6 +-
 drivers/gpu/drm/drm_gem_atomic_helper.c       |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  6 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  7 +-
 .../gpu/drm/i915/display/intel_atomic_plane.c |  3 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  4 +-
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c      |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_wait.c      |  6 +-
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  |  3 +-
 drivers/gpu/drm/i915/i915_request.c           |  3 +-
 drivers/gpu/drm/i915/i915_sw_fence.c          |  2 +-
 drivers/gpu/drm/msm/msm_gem.c                 |  3 +-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c       |  3 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c          |  8 +--
 drivers/gpu/drm/nouveau/nouveau_fence.c       |  3 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c         |  3 +-
 drivers/gpu/drm/panfrost/panfrost_drv.c       |  3 +-
 drivers/gpu/drm/qxl/qxl_debugfs.c             |  3 +-
 drivers/gpu/drm/radeon/radeon_display.c       |  3 +-
 drivers/gpu/drm/radeon/radeon_gem.c           |  9 ++-
 drivers/gpu/drm/radeon/radeon_mn.c            |  4 +-
 drivers/gpu/drm/radeon/radeon_sync.c          |  2 +-
 drivers/gpu/drm/radeon/radeon_uvd.c           |  4 +-
 drivers/gpu/drm/scheduler/sched_main.c        |  3 +-
 drivers/gpu/drm/ttm/ttm_bo.c                  | 18 ++---
 drivers/gpu/drm/vgem/vgem_fence.c             |  4 +-
 drivers/gpu/drm/virtio/virtgpu_ioctl.c        |  5 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |  4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      |  4 +-
 drivers/infiniband/core/umem_dmabuf.c         |  3 +-
 include/linux/dma-resv.h                      | 69 +++++++++++++++----
 46 files changed, 205 insertions(+), 125 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 602b12d7470d..528983d3ba64 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1124,7 +1124,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
+	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+				    true, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index ecb2ff606bac..33a17db89fb4 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -408,7 +408,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
 	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
 	cursor->index = -1;
 	cursor->shared_count = 0;
-	if (cursor->all_fences) {
+	if (cursor->usage >= DMA_RESV_USAGE_READ) {
 		cursor->fences = dma_resv_shared_list(cursor->obj);
 		if (cursor->fences)
 			cursor->shared_count = cursor->fences->shared_count;
@@ -515,7 +515,7 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
 	dma_resv_assert_held(cursor->obj);
 
 	cursor->index = 0;
-	if (cursor->all_fences)
+	if (cursor->usage >= DMA_RESV_USAGE_READ)
 		cursor->fences = dma_resv_shared_list(cursor->obj);
 	else
 		cursor->fences = NULL;
@@ -570,7 +570,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	list = NULL;
 	excl = NULL;
 
-	dma_resv_iter_begin(&cursor, src, true);
+	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
 	dma_resv_for_each_fence_unlocked(&cursor, f) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
@@ -616,7 +616,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  * @num_fences: the number of fences returned
  * @fences: the array of fence ptrs returned (array is krealloc'd to the
  * required size, and must be freed by caller)
@@ -624,7 +624,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * Retrieve all fences from the reservation object.
  * Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 			unsigned int *num_fences, struct dma_fence ***fences)
 {
 	struct dma_resv_iter cursor;
@@ -633,7 +633,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
 	*num_fences = 0;
 	*fences = NULL;
 
-	dma_resv_iter_begin(&cursor, obj, write);
+	dma_resv_iter_begin(&cursor, obj, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
@@ -665,7 +665,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 /**
  * dma_resv_get_singleton - Get a single fence for all the fences
  * @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  * @fence: the resulting fence
  *
  * Get a single fence representing all the fences inside the resv object.
@@ -675,7 +675,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
  * object since that can lead to stack corruption when finalizing the
  * dma_fence_array.
  */
-int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
 			   struct dma_fence **fence)
 {
 	struct dma_fence_array *array;
@@ -683,7 +683,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
 	unsigned count;
 	int r;
 
-	r = dma_resv_get_fences(obj, write, &count, &fences);
+	r = dma_resv_get_fences(obj, usage, &count, &fences);
         if (r)
 		return r;
 
@@ -717,7 +717,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
  * dma_resv_wait_timeout - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
- * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  * @intr: if true, do interruptible wait
  * @timeout: timeout value in jiffies or zero to return immediately
  *
@@ -727,14 +727,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than zer on success.
  */
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
-			   unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+			   bool intr, unsigned long timeout)
 {
 	long ret = timeout ? timeout : 1;
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, obj, wait_all);
+	dma_resv_iter_begin(&cursor, obj, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		ret = dma_fence_wait_timeout(fence, intr, ret);
@@ -754,8 +754,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
  * dma_resv_test_signaled - Test if a reservation object's fences have been
  * signaled.
  * @obj: the reservation object
- * @test_all: if true, test all fences, otherwise only test the exclusive
- * fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  *
  * Callers are not required to hold specific locks, but maybe hold
  * dma_resv_lock() already.
@@ -764,12 +763,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
  *
  * True if all fences signaled, else false.
  */
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, obj, test_all);
+	dma_resv_iter_begin(&cursor, obj, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		dma_resv_iter_end(&cursor);
 		return false;
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index f33bafc78693..a52c5fbea87a 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -58,7 +58,7 @@ static int sanitycheck(void *arg)
 	return r;
 }
 
-static int test_signaling(void *arg, bool shared)
+static int test_signaling(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_resv resv;
 	struct dma_fence *f;
@@ -81,18 +81,18 @@ static int test_signaling(void *arg, bool shared)
 		goto err_unlock;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 
-	if (dma_resv_test_signaled(&resv, shared)) {
+	if (dma_resv_test_signaled(&resv, usage)) {
 		pr_err("Resv unexpectedly signaled\n");
 		r = -EINVAL;
 		goto err_unlock;
 	}
 	dma_fence_signal(f);
-	if (!dma_resv_test_signaled(&resv, shared)) {
+	if (!dma_resv_test_signaled(&resv, usage)) {
 		pr_err("Resv not reporting signaled\n");
 		r = -EINVAL;
 		goto err_unlock;
@@ -107,15 +107,15 @@ static int test_signaling(void *arg, bool shared)
 
 static int test_excl_signaling(void *arg)
 {
-	return test_signaling(arg, false);
+	return test_signaling(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_signaling(void *arg)
 {
-	return test_signaling(arg, true);
+	return test_signaling(arg, DMA_RESV_USAGE_READ);
 }
 
-static int test_for_each(void *arg, bool shared)
+static int test_for_each(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
@@ -139,13 +139,13 @@ static int test_for_each(void *arg, bool shared)
 		goto err_unlock;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 
 	r = -ENOENT;
-	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
+	dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
 		if (!r) {
 			pr_err("More than one fence found\n");
 			r = -EINVAL;
@@ -156,7 +156,8 @@ static int test_for_each(void *arg, bool shared)
 			r = -EINVAL;
 			goto err_unlock;
 		}
-		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+		if (dma_resv_iter_is_exclusive(&cursor) !=
+		    (usage >= DMA_RESV_USAGE_READ)) {
 			pr_err("Unexpected fence usage\n");
 			r = -EINVAL;
 			goto err_unlock;
@@ -178,15 +179,15 @@ static int test_for_each(void *arg, bool shared)
 
 static int test_excl_for_each(void *arg)
 {
-	return test_for_each(arg, false);
+	return test_for_each(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_for_each(void *arg)
 {
-	return test_for_each(arg, true);
+	return test_for_each(arg, DMA_RESV_USAGE_READ);
 }
 
-static int test_for_each_unlocked(void *arg, bool shared)
+static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
@@ -211,14 +212,14 @@ static int test_for_each_unlocked(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 	dma_resv_unlock(&resv);
 
 	r = -ENOENT;
-	dma_resv_iter_begin(&cursor, &resv, shared);
+	dma_resv_iter_begin(&cursor, &resv, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (!r) {
 			pr_err("More than one fence found\n");
@@ -234,7 +235,8 @@ static int test_for_each_unlocked(void *arg, bool shared)
 			r = -EINVAL;
 			goto err_iter_end;
 		}
-		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+		if (dma_resv_iter_is_exclusive(&cursor) !=
+		    (usage >= DMA_RESV_USAGE_READ)) {
 			pr_err("Unexpected fence usage\n");
 			r = -EINVAL;
 			goto err_iter_end;
@@ -262,15 +264,15 @@ static int test_for_each_unlocked(void *arg, bool shared)
 
 static int test_excl_for_each_unlocked(void *arg)
 {
-	return test_for_each_unlocked(arg, false);
+	return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_for_each_unlocked(void *arg)
 {
-	return test_for_each_unlocked(arg, true);
+	return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
 }
 
-static int test_get_fences(void *arg, bool shared)
+static int test_get_fences(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_fence *f, **fences = NULL;
 	struct dma_resv resv;
@@ -294,13 +296,13 @@ static int test_get_fences(void *arg, bool shared)
 		goto err_resv;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 	dma_resv_unlock(&resv);
 
-	r = dma_resv_get_fences(&resv, shared, &i, &fences);
+	r = dma_resv_get_fences(&resv, usage, &i, &fences);
 	if (r) {
 		pr_err("get_fences failed\n");
 		goto err_free;
@@ -324,12 +326,12 @@ static int test_get_fences(void *arg, bool shared)
 
 static int test_excl_get_fences(void *arg)
 {
-	return test_get_fences(arg, false);
+	return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_get_fences(void *arg)
 {
-	return test_get_fences(arg, true);
+	return test_get_fences(arg, DMA_RESV_USAGE_READ);
 }
 
 int dma_resv(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7facd614e50a..af0a61ce2ec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1279,7 +1279,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 		 * submission in a dma_fence_chain and add it as exclusive
 		 * fence.
 		 */
-		dma_resv_for_each_fence(&cursor, resv, false, fence) {
+		dma_resv_for_each_fence(&cursor, resv,
+					DMA_RESV_USAGE_WRITE,
+					fence) {
 			break;
 		}
 		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d17e1c911689..c2b1208a8c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	/* TODO: Unify this with other drivers */
-	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
 				&work->shared_count,
 				&work->shared);
 	if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 85d31d85c384..e97b3a522b36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+				    true, timeout);
 
 	/* ret == 0 means not signaled,
 	 * ret > 0 means signaled
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 888d97143177..490d2a7a3e2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	struct dma_fence *fence;
 	int r;
 
-	r = dma_resv_get_singleton(resv, true, &fence);
+	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence);
 	if (r)
 		goto fallback;
 
@@ -139,7 +139,8 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
+	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+			      false, MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 4b153daf283d..86f5248676b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
 
 	mmu_interval_set_seq(mni, cur_seq);
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
+				  false, MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 24a6b88afcca..7715390aff7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -764,8 +764,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+				  false, MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index f7d8487799b2..183623806056 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -259,7 +259,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	if (resv == NULL)
 		return -EINVAL;
 
-	dma_resv_for_each_fence(&cursor, resv, true, f) {
+	/* TODO: Use DMA_RESV_USAGE_READ here */
+	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
 		dma_fence_chain_for_each(f, f) {
 			struct dma_fence_chain *chain = to_dma_fence_chain(f);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c15687ce67c4..fd339762f534 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1360,7 +1360,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
+	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
+				DMA_RESV_USAGE_READ, f) {
 		if (amdkfd_fence_check_mm(f, current->mm))
 			return false;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6f8de11a17f1..33deb0df62fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1162,7 +1162,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+		r = dma_resv_wait_timeout(bo->tbo.base.resv,
+					  DMA_RESV_USAGE_WRITE, false,
 					  msecs_to_jiffies(10));
 		if (r == 0)
 			r = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a96ae4c0e040..e82997dbc0a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2105,7 +2105,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_for_each_fence(&cursor, resv, true, fence) {
+	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
 		/* Add a callback for each fence in the reservation object */
 		amdgpu_vm_prt_get(adev);
 		amdgpu_vm_add_prt_cb(adev, fence);
@@ -2707,7 +2707,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;
 
 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ))
 		return false;
 
 	/* Try to block ongoing updates */
@@ -2887,7 +2887,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
+	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
+					DMA_RESV_USAGE_READ,
 					true, timeout);
 	if (timeout <= 0)
 		return timeout;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4130082c5873..932edfab9cb0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9048,7 +9048,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+		r = dma_resv_wait_timeout(abo->tbo.base.resv,
+					  DMA_RESV_USAGE_WRITE, false,
 					  msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
 			DRM_ERROR("Waiting for fences timed out!");
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4dcdec6487bb..d96355f98e75 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -770,7 +770,8 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}
 
-	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
+	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
+				    true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
 	else if (ret > 0)
@@ -1344,7 +1345,8 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 	struct dma_fence *fence;
 	int ret = 0;
 
-	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+	dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
+				fence) {
 		ret = drm_gem_fence_array_add(fence_array, fence);
 		if (ret)
 			break;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 9338ddb7edff..a6d89aed0bda 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -151,7 +151,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	ret = dma_resv_get_singleton(obj->resv, false, &fence);
+	ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index d5314aa28ff7..507172e2780b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -380,12 +380,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!dma_resv_test_signaled(obj->resv, write))
+		if (!dma_resv_test_signaled(obj->resv,
+					    dma_resv_usage_rw(write)))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
+		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+					    true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index d4a7073190ec..3b8d49319d8c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -178,6 +178,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
 		struct dma_resv *robj = bo->obj->base.resv;
+		enum dma_resv_usage usage;
 
 		ret = dma_resv_reserve_shared(robj, 1);
 		if (ret)
@@ -186,9 +187,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
 			continue;
 
-		ret = dma_resv_get_fences(robj,
-					  !!(bo->flags & ETNA_SUBMIT_BO_WRITE),
-					  &bo->nr_shared, &bo->shared);
+		usage = dma_resv_usage_rw(bo->flags & ETNA_SUBMIT_BO_WRITE);
+		ret = dma_resv_get_fences(robj, usage, &bo->nr_shared,
+					  &bo->shared);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index cdc68fb51ba6..e10f2536837b 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -749,7 +749,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 		if (ret < 0)
 			goto unpin_fb;
 
-		dma_resv_iter_begin(&cursor, obj->base.resv, false);
+		dma_resv_iter_begin(&cursor, obj->base.resv,
+				    DMA_RESV_USAGE_WRITE);
 		dma_resv_for_each_fence_unlocked(&cursor, fence) {
 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 						   fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 470fdfd61a0f..14a1c0ad8c3c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -138,12 +138,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
 	args->busy = 0;
-	dma_resv_iter_begin(&cursor, obj->base.resv, true);
+	dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (dma_resv_iter_is_restarted(&cursor))
 			args->busy = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 444f8268b9c5..a200d3e66573 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
 
 #ifdef CONFIG_LOCKDEP
-	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) &&
 		    i915_gem_object_evictable(obj));
 #endif
 	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 3173c9f9a040..0ccb91385f84 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,7 +85,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;
 
 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout(obj->base.resv, true, false,
+	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false,
 				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index cd149aa99364..57abeba399cb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -40,7 +40,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 	struct dma_fence *fence;
 	long ret = timeout ?: 1;
 
-	dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+	dma_resv_iter_begin(&cursor, resv,
+			    dma_resv_usage_rw(flags & I915_WAIT_ALL));
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		ret = i915_gem_object_wait_fence(fence, flags, timeout);
 		if (ret <= 0)
@@ -117,7 +118,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
+	dma_resv_iter_begin(&cursor, obj->base.resv,
+			    dma_resv_usage_rw(flags & I915_WAIT_ALL));
 	dma_resv_for_each_fence_unlocked(&cursor, fence)
 		i915_gem_fence_wait_priority(fence, attr);
 	dma_resv_iter_end(&cursor);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 4a6bb64c3a35..95985bcc47f6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -219,7 +219,8 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
 		goto out_detach;
 	}
 
-	timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ);
+	timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
+					true, 5 * HZ);
 	if (!timeout) {
 		pr_err("dmabuf wait for exclusive fence timed out.\n");
 		timeout = -ETIME;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 42cd17357771..f11c070c3262 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1541,7 +1541,8 @@ i915_request_await_object(struct i915_request *to,
 	struct dma_fence *fence;
 	int ret = 0;
 
-	dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
+	dma_resv_for_each_fence(&cursor, obj->base.resv,
+				dma_resv_usage_rw(write), fence) {
 		ret = i915_request_await_dma_fence(to, fence);
 		if (ret)
 			break;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7ea0dbf81530..303d792a8912 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -579,7 +579,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 	debug_fence_assert(fence);
 	might_sleep_if(gfpflags_allow_blocking(gfp));
 
-	dma_resv_iter_begin(&cursor, resv, write);
+	dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write));
 	dma_resv_for_each_fence_unlocked(&cursor, f) {
 		pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
 							gfp);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2916480d9115..19e09a88dcca 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -848,7 +848,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
+	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+				    true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 	else if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b55a8a723581..53baf9aae4b1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -558,7 +558,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 			asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	ret = dma_resv_get_singleton(nvbo->bo.base.resv, false,
+	ret = dma_resv_get_singleton(nvbo->bo.base.resv,
+				     DMA_RESV_USAGE_WRITE,
 				     &asyw->state.fence);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 74f8652d2bd3..c6bb4dbcd735 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -962,11 +962,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 	struct dma_fence *fence;
 	int ret;
 
-	/* TODO: This is actually a memory management dependency */
-	ret = dma_resv_get_singleton(bo->base.resv, false, &fence);
+	ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
+				     &fence);
 	if (ret)
-		dma_resv_wait_timeout(bo->base.resv, false, false,
-				      MAX_SCHEDULE_TIMEOUT);
+		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
+				      false, MAX_SCHEDULE_TIMEOUT);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index cd6715bd6d6b..26725e23c075 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -353,7 +353,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 	if (ret)
 		return ret;
 
-	dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
+	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(exclusive),
+				fence) {
 		struct nouveau_channel *prev = NULL;
 		bool must_wait = true;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9416bee92141..fab542a758ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -962,7 +962,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
+				     dma_resv_usage_rw(write), true,
 				     no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 96bb5a465627..0deb2d21422f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -316,7 +316,8 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;
 
-	ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
+	ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
+				    true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
 
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 6a36b0fd845c..33e5889d6608 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,8 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 		struct dma_fence *fence;
 		int rel = 0;
 
-		dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true);
+		dma_resv_iter_begin(&cursor, bo->tbo.base.resv,
+				    DMA_RESV_USAGE_READ);
 		dma_resv_for_each_fence_unlocked(&cursor, fence) {
 			if (dma_resv_iter_is_restarted(&cursor))
 				rel = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a6f875118f01..9872d0b3e31a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,8 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence);
+	r = dma_resv_get_singleton(new_rbo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+				   &work->fence);
 	if (r) {
 		radeon_bo_unreserve(new_rbo);
 		DRM_ERROR("failed to get new rbo buffer fences\n");
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a36a4f2c76b0..71bf9299e45c 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,9 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout(robj->tbo.base.resv,
+					  DMA_RESV_USAGE_READ,
+					  true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
 
@@ -523,7 +525,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
 	if (r == 0)
 		r = -EBUSY;
 	else
@@ -552,7 +554,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+				    true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 9fa88549c89e..68ebeb1bdfff 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
+				  false, MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index b991ba1bcd51..49bbb2266c0f 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	struct dma_fence *f;
 	int r = 0;
 
-	dma_resv_for_each_fence(&cursor, resv, shared, f) {
+	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(shared), f) {
 		fence = to_radeon_fence(f);
 		if (fence && fence->rdev == rdev)
 			radeon_sync_fence(sync, fence);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 377f9cdb5b53..4000ad2f39ba 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -478,8 +478,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+				  false, MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0) {
 		DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
 		return r ? r : -ETIME;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 94fe51b3caa2..a53506d21635 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -703,7 +703,8 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
 	struct dma_fence *fence;
 	int ret;
 
-	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+	dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
+				fence) {
 		ret = drm_sched_job_add_dependency(job, fence);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc124457ba2f..9d9a0039359c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -272,7 +272,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, resv, true);
+	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (!fence->ops->signaled)
 			dma_fence_enable_sw_signaling(fence);
@@ -301,7 +301,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (dma_resv_test_signaled(resv, true))
+	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -313,7 +313,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);
 
-		lret = dma_resv_wait_timeout(resv, true, interruptible,
+		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+					     interruptible,
 					     30 * HZ);
 
 		if (lret < 0)
@@ -416,7 +417,8 @@ static void ttm_bo_release(struct kref *kref)
 			/* Last resort, if we fail to allocate memory for the
 			 * fences block for the BO to become idle
 			 */
-			dma_resv_wait_timeout(bo->base.resv, true, false,
+			dma_resv_wait_timeout(bo->base.resv,
+					      DMA_RESV_USAGE_READ, false,
 					      30 * HZ);
 		}
 
@@ -427,7 +429,7 @@ static void ttm_bo_release(struct kref *kref)
 		ttm_mem_io_free(bdev, bo->resource);
 	}
 
-	if (!dma_resv_test_signaled(bo->base.resv, true) ||
+	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -1072,14 +1074,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (dma_resv_test_signaled(bo->base.resv, true))
+		if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
-					timeout);
+	timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
+					interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
 
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index a4cb296d4fcd..74ebadf4e592 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -130,6 +130,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	struct vgem_file *vfile = file->driver_priv;
 	struct dma_resv *resv;
 	struct drm_gem_object *obj;
+	enum dma_resv_usage usage;
 	struct dma_fence *fence;
 	int ret;
 
@@ -151,7 +152,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
+	usage = dma_resv_usage_rw(arg->flags & VGEM_FENCE_WRITE);
+	if (!dma_resv_test_signaled(resv, usage)) {
 		ret = -EBUSY;
 		goto err_fence;
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0007e423d885..35bcb015e714 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -518,9 +518,10 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
-		ret = dma_resv_test_signaled(obj->resv, true);
+		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
 	} else {
-		ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
+		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
+					    true, timeout);
 	}
 	if (ret == 0)
 		ret = -EBUSY;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 6d0abc2b0beb..5d5a438b1e23 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -739,8 +739,8 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 	if (flags & drm_vmw_synccpu_allow_cs) {
 		long lret;
 
-		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
-					     nonblock ? 0 :
+		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
+					     true, nonblock ? 0 :
 					     MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 23c3fc2cbf10..29452e150424 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1169,8 +1169,8 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
 		if (bo->moving)
 			dma_fence_put(bo->moving);
 
-		/* TODO: This is actually a memory management dependency */
-		return dma_resv_get_singleton(bo->base.resv, false,
+		return dma_resv_get_singleton(bo->base.resv,
+					      DMA_RESV_USAGE_WRITE,
 					      &bo->moving);
 	}
 
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index d32cd7538835..f9901d273b8e 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -67,7 +67,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 	 * may be not up-to-date. Wait for the exporter to finish
 	 * the migration.
 	 */
-	return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false,
+	return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+				     DMA_RESV_USAGE_WRITE,
 				     false, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 40ac9d486f8f..d96d8ca9af56 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -49,6 +49,49 @@ extern struct ww_class reservation_ww_class;
 
 struct dma_resv_list;
 
+/**
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ */
+enum dma_resv_usage {
+	/**
+	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+	 *
+	 * This should only be used for userspace command submissions which add
+	 * an implicit write dependency.
+	 */
+	DMA_RESV_USAGE_WRITE,
+
+	/**
+	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+	 *
+	 * This should only be used for userspace command submissions which add
+	 * an implicit read dependency.
+	 */
+	DMA_RESV_USAGE_READ,
+};
+
+/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+	/* This looks confusing at first sight, but is indeed correct.
+	 *
+	 * The rationale is that new write operations need to wait for the
+	 * existing read and write operations to finish.
+	 * But a new read operation only needs to wait for the existing write
+	 * operations to finish.
+	 */
+	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
 /**
  * struct dma_resv - a reservation object manages fences for a buffer
  *
@@ -147,8 +190,8 @@ struct dma_resv_iter {
 	/** @obj: The dma_resv object we iterate over */
 	struct dma_resv *obj;
 
-	/** @all_fences: If all fences should be returned */
-	bool all_fences;
+	/** @usage: Controls which fences are returned */
+	enum dma_resv_usage usage;
 
 	/** @fence: the currently handled fence */
 	struct dma_fence *fence;
@@ -178,14 +221,14 @@ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
  * dma_resv_iter_begin - initialize a dma_resv_iter object
  * @cursor: The dma_resv_iter object to initialize
  * @obj: The dma_resv object which we want to iterate over
- * @all_fences: If all fences should be returned or just the exclusive one
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  */
 static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
 				       struct dma_resv *obj,
-				       bool all_fences)
+				       enum dma_resv_usage usage)
 {
 	cursor->obj = obj;
-	cursor->all_fences = all_fences;
+	cursor->usage = usage;
 	cursor->fence = NULL;
 }
 
@@ -242,7 +285,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
  * dma_resv_for_each_fence - fence iterator
  * @cursor: a struct dma_resv_iter pointer
  * @obj: a dma_resv object pointer
- * @all_fences: true if all fences should be returned
+ * @usage: controls which fences to return
  * @fence: the current fence
  *
  * Iterate over the fences in a struct dma_resv object while holding the
@@ -251,8 +294,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
  * valid as long as the lock is held and so no extra reference to the fence is
  * taken.
  */
-#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
-	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, usage),	\
 	     fence = dma_resv_iter_first(cursor); fence;	\
 	     fence = dma_resv_iter_next(cursor))
 
@@ -419,14 +462,14 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
 			     struct dma_fence *fence);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 			unsigned int *num_fences, struct dma_fence ***fences);
-int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
 			   struct dma_fence **fence);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
-			   unsigned long timeout);
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+			   bool intr, unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
 void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
 
 #endif /* _LINUX_RESERVATION_H */
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 19/24] dma-buf: specify usage while adding fences to dma_resv obj v2
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (17 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 18/24] dma-buf: add enum dma_resv_usage v3 Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-07 12:34 ` [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL Christian König
                   ` (5 subsequent siblings)
  24 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Instead of distinguishing between shared and exclusive fences, specify
the fence usage while adding fences.

Rework all drivers to use this interface instead and deprecate the old one.

v2: some kerneldoc comments suggested by Daniel

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c                    | 374 ++++++++----------
 drivers/dma-buf/st-dma-resv.c                 | 109 ++---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |   8 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |   4 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c          |   2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  12 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  13 +-
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c   |   5 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |   4 +-
 .../drm/i915/gem/selftests/i915_gem_migrate.c |   2 +-
 drivers/gpu/drm/i915/i915_vma.c               |  10 +-
 .../drm/i915/selftests/intel_memory_region.c  |   2 +-
 drivers/gpu/drm/lima/lima_gem.c               |   4 +-
 drivers/gpu/drm/msm/msm_gem_submit.c          |   4 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c          |   9 +-
 drivers/gpu/drm/nouveau/nouveau_fence.c       |   2 +-
 drivers/gpu/drm/panfrost/panfrost_job.c       |   2 +-
 drivers/gpu/drm/qxl/qxl_release.c             |   5 +-
 drivers/gpu/drm/radeon/radeon_object.c        |   6 +-
 drivers/gpu/drm/radeon/radeon_vm.c            |   2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                  |   6 +-
 drivers/gpu/drm/ttm/ttm_bo_util.c             |   7 +-
 drivers/gpu/drm/ttm/ttm_execbuf_util.c        |  10 +-
 drivers/gpu/drm/v3d/v3d_gem.c                 |   6 +-
 drivers/gpu/drm/vc4/vc4_gem.c                 |   4 +-
 drivers/gpu/drm/vgem/vgem_fence.c             |  11 +-
 drivers/gpu/drm/virtio/virtgpu_gem.c          |   5 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |   5 +-
 include/linux/dma-resv.h                      |  88 ++---
 31 files changed, 307 insertions(+), 426 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 33a17db89fb4..a2a0b5b6c107 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -44,12 +44,12 @@
 /**
  * DOC: Reservation Object Overview
  *
- * The reservation object provides a mechanism to manage shared and
- * exclusive fences associated with a buffer.  A reservation object
- * can have attached one exclusive fence (normally associated with
- * write operations) or N shared fences (read operations).  The RCU
- * mechanism is used to protect read access to fences from locked
- * write-side updates.
+ * The reservation object provides a mechanism to manage a container of
+ * dma_fence objects associated with a resource. A reservation object
+ * can have any number of fences attached to it. Each fence carries a usage
+ * parameter determining how the operation represented by the fence is using
+ * the resource. The RCU mechanism is used to protect read access to fences
+ * from locked write-side updates.
  *
  * See struct dma_resv for more details.
  */
@@ -57,36 +57,80 @@
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
+/* Mask for the lower fence pointer bits */
+#define DMA_RESV_LIST_MASK	0x3
+
 /**
- * struct dma_resv_list - a list of shared fences
+ * struct dma_resv_list - an array of fences
  * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
+ * @num_fences: number of fences in the table
+ * @max_fences: for growing fence table
+ * @table: fence table
  */
 struct dma_resv_list {
 	struct rcu_head rcu;
-	u32 shared_count, shared_max;
-	struct dma_fence __rcu *shared[];
+	u32 num_fences, max_fences;
+	struct dma_fence __rcu *table[];
 };
 
+/**
+ * dma_resv_list_entry - extract fence and usage from a list entry
+ * @list: the list to extract an entry from
+ * @index: which entry we want
+ * @check: lockdep check that the access is allowed
+ * @fence: the resulting fence
+ * @usage: the resulting usage
+ *
+ * Extract the fence and usage flags from an RCU protected entry in the list.
+ */
+static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
+				bool check, struct dma_fence **fence,
+				enum dma_resv_usage *usage)
+{
+	long tmp;
+
+	tmp = (long)rcu_dereference_check(list->table[index], check);
+	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
+	if (usage)
+		*usage = tmp & DMA_RESV_LIST_MASK;
+}
+
+/**
+ * dma_resv_list_set - set fence and usage at a specific index
+ * @list: the list to modify
+ * @index: where to make the change
+ * @fence: the fence to set
+ * @usage: the usage to set
+ *
+ * Set the fence and usage flags at the specific index in the list.
+ */
+static void dma_resv_list_set(struct dma_resv_list *list,
+			      unsigned int index,
+			      struct dma_fence *fence,
+			      enum dma_resv_usage usage)
+{
+	long tmp = ((long)fence) | usage;
+
+	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
+}
+
 /**
  * dma_resv_list_alloc - allocate fence list
- * @shared_max: number of fences we need space for
+ * @max_fences: number of fences we need space for
  *
  * Allocate a new dma_resv_list and make sure to correctly initialize
- * shared_max.
+ * max_fences.
  */
-static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
 {
 	struct dma_resv_list *list;
 
-	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
+	list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
 	if (!list)
 		return NULL;
 
-	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
-		sizeof(*list->shared);
+	list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
+		sizeof(*list->table);
 
 	return list;
 }
@@ -104,9 +148,12 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 	if (!list)
 		return;
 
-	for (i = 0; i < list->shared_count; ++i)
-		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+	for (i = 0; i < list->num_fences; ++i) {
+		struct dma_fence *fence;
 
+		dma_resv_list_entry(list, i, true, &fence, NULL);
+		dma_fence_put(fence);
+	}
 	kfree_rcu(list, rcu);
 }
 
@@ -119,8 +166,7 @@ void dma_resv_init(struct dma_resv *obj)
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
 	seqcount_ww_mutex_init(&obj->seq, &obj->lock);
 
-	RCU_INIT_POINTER(obj->fence, NULL);
-	RCU_INIT_POINTER(obj->fence_excl, NULL);
+	RCU_INIT_POINTER(obj->fences, NULL);
 }
 EXPORT_SYMBOL(dma_resv_init);
 
@@ -130,81 +176,54 @@ EXPORT_SYMBOL(dma_resv_init);
  */
 void dma_resv_fini(struct dma_resv *obj)
 {
-	struct dma_resv_list *fobj;
-	struct dma_fence *excl;
-
 	/*
 	 * This object should be dead and all references must have
 	 * been released to it, so no need to be protected with rcu.
 	 */
-	excl = rcu_dereference_protected(obj->fence_excl, 1);
-	if (excl)
-		dma_fence_put(excl);
-
-	fobj = rcu_dereference_protected(obj->fence, 1);
-	dma_resv_list_free(fobj);
+	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
 	ww_mutex_destroy(&obj->lock);
 }
 EXPORT_SYMBOL(dma_resv_fini);
 
 /**
- * dma_resv_excl_fence - return the object's exclusive fence
+ * dma_resv_fences_list - return the dma_resv object's fence list
  * @obj: the reservation object
  *
- * Returns the exclusive fence (if any). Caller must either hold the objects
- * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
- * or one of the variants of each
- *
- * RETURNS
- * The exclusive fence or NULL
+ * Returns the fence list. Caller must either hold the objects through
+ * dma_resv_lock() or the RCU read side lock through rcu_read_lock().
  */
-static inline struct dma_fence *
-dma_resv_excl_fence(struct dma_resv *obj)
+static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
 {
-	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
+	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
 }
 
 /**
- * dma_resv_shared_list - get the reservation object's shared fence list
- * @obj: the reservation object
- *
- * Returns the shared fence list. Caller must either hold the objects
- * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
- * or one of the variants of each
- */
-static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
-{
-	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
-}
-
-/**
- * dma_resv_reserve_shared - Reserve space to add shared fences to
- * a dma_resv.
+ * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
  * @obj: reservation object
  * @num_fences: number of fences we want to add
  *
- * Should be called before dma_resv_add_shared_fence().  Must
- * be called with @obj locked through dma_resv_lock().
+ * Should be called before dma_resv_add_fence().  Must be called with @obj
+ * locked through dma_resv_lock().
  *
  * Note that the preallocated slots need to be re-reserved if @obj is unlocked
- * at any time before calling dma_resv_add_shared_fence(). This is validated
- * when CONFIG_DEBUG_MUTEXES is enabled.
+ * at any time before calling dma_resv_add_fence(). This is validated when
+ * CONFIG_DEBUG_MUTEXES is enabled.
  *
  * RETURNS
  * Zero for success, or -errno
  */
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
 {
 	struct dma_resv_list *old, *new;
 	unsigned int i, j, k, max;
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_shared_list(obj);
-	if (old && old->shared_max) {
-		if ((old->shared_count + num_fences) <= old->shared_max)
+	old = dma_resv_fences_list(obj);
+	if (old && old->max_fences) {
+		if ((old->num_fences + num_fences) <= old->max_fences)
 			return 0;
-		max = max(old->shared_count + num_fences, old->shared_max * 2);
+		max = max(old->num_fences + num_fences, old->max_fences * 2);
 	} else {
 		max = max(4ul, roundup_pow_of_two(num_fences));
 	}
@@ -219,27 +238,27 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 	 * references from the old struct are carried over to
 	 * the new.
 	 */
-	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
+		enum dma_resv_usage usage;
 		struct dma_fence *fence;
 
-		fence = rcu_dereference_protected(old->shared[i],
-						  dma_resv_held(obj));
+		dma_resv_list_entry(old, i, dma_resv_held(obj), &fence, &usage);
 		if (dma_fence_is_signaled(fence))
-			RCU_INIT_POINTER(new->shared[--k], fence);
+			RCU_INIT_POINTER(new->table[--k], fence);
 		else
-			RCU_INIT_POINTER(new->shared[j++], fence);
+			dma_resv_list_set(new, j++, fence, usage);
 	}
-	new->shared_count = j;
+	new->num_fences = j;
 
 	/*
 	 * We are not changing the effective set of fences here so can
 	 * merely update the pointer to the new array; both existing
 	 * readers and new readers will see exactly the same set of
-	 * active (unsignaled) shared fences. Individual fences and the
+	 * active (unsignaled) fences. Individual fences and the
 	 * old array are protected by RCU and so will not vanish under
 	 * the gaze of the rcu_read_lock() readers.
 	 */
-	rcu_assign_pointer(obj->fence, new);
+	rcu_assign_pointer(obj->fences, new);
 
 	if (!old)
 		return 0;
@@ -248,7 +267,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 	for (i = k; i < max; ++i) {
 		struct dma_fence *fence;
 
-		fence = rcu_dereference_protected(new->shared[i],
+		fence = rcu_dereference_protected(new->table[i],
 						  dma_resv_held(obj));
 		dma_fence_put(fence);
 	}
@@ -256,41 +275,43 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 
 	return 0;
 }
-EXPORT_SYMBOL(dma_resv_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_fences);
 
 #ifdef CONFIG_DEBUG_MUTEXES
 /**
- * dma_resv_reset_shared_max - reset shared fences for debugging
+ * dma_resv_reset_max_fences - reset fences for debugging
  * @obj: the dma_resv object to reset
  *
- * Reset the number of pre-reserved shared slots to test that drivers do
- * correct slot allocation using dma_resv_reserve_shared(). See also
- * &dma_resv_list.shared_max.
+ * Reset the number of pre-reserved fence slots to test that drivers do
+ * correct slot allocation using dma_resv_reserve_fences(). See also
+ * &dma_resv_list.max_fences.
  */
-void dma_resv_reset_shared_max(struct dma_resv *obj)
+void dma_resv_reset_max_fences(struct dma_resv *obj)
 {
-	struct dma_resv_list *fences = dma_resv_shared_list(obj);
+	struct dma_resv_list *fences = dma_resv_fences_list(obj);
 
 	dma_resv_assert_held(obj);
 
-	/* Test shared fence slot reservation */
+	/* Test fence slot reservation */
 	if (fences)
-		fences->shared_max = fences->shared_count;
+		fences->max_fences = fences->num_fences;
 }
-EXPORT_SYMBOL(dma_resv_reset_shared_max);
+EXPORT_SYMBOL(dma_resv_reset_max_fences);
 #endif
 
 /**
- * dma_resv_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_fence - Add a fence to the dma_resv obj
  * @obj: the reservation object
- * @fence: the shared fence to add
+ * @fence: the fence to add
+ * @usage: how the fence is used, see enum dma_resv_usage
  *
- * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
- * dma_resv_reserve_shared() has been called.
+ * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
+ * dma_resv_reserve_fences() has been called.
  *
  * See also &dma_resv.fence for a discussion of the semantics.
  */
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+			enum dma_resv_usage usage)
 {
 	struct dma_resv_list *fobj;
 	struct dma_fence *old;
@@ -300,39 +321,41 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_resv_assert_held(obj);
 
-	fobj = dma_resv_shared_list(obj);
-	count = fobj->shared_count;
+	fobj = dma_resv_fences_list(obj);
+	count = fobj->num_fences;
 
 	write_seqcount_begin(&obj->seq);
 
 	for (i = 0; i < count; ++i) {
+		enum dma_resv_usage old_usage;
 
-		old = rcu_dereference_protected(fobj->shared[i],
-						dma_resv_held(obj));
-		if (old->context == fence->context ||
+		dma_resv_list_entry(fobj, i, dma_resv_held(obj),
+				    &old, &old_usage);
+		if ((old->context == fence->context && old_usage >= usage) ||
 		    dma_fence_is_signaled(old))
 			goto replace;
 	}
 
-	BUG_ON(fobj->shared_count >= fobj->shared_max);
+	BUG_ON(fobj->num_fences >= fobj->max_fences);
 	old = NULL;
 	count++;
 
 replace:
-	RCU_INIT_POINTER(fobj->shared[i], fence);
-	/* pointer update must be visible before we extend the shared_count */
-	smp_store_mb(fobj->shared_count, count);
+	dma_resv_list_set(fobj, i, fence, usage);
+	/* pointer update must be visible before we extend the num_fences */
+	smp_store_mb(fobj->num_fences, count);
 
 	write_seqcount_end(&obj->seq);
 	dma_fence_put(old);
 }
-EXPORT_SYMBOL(dma_resv_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_fence);
 
 /**
  * dma_resv_replace_fences - replace fences in the dma_resv obj
  * @obj: the reservation object
  * @context: the context of the fences to replace
  * @replacement: the new fence to use instead
+ * @usage: how the new fence is used, see enum dma_resv_usage
  *
  * Replace fences with a specified context with a new fence. Only valid if the
  * operation represented by the original fences is completed or has no longer
@@ -340,63 +363,30 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
  * completes.
  */
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
-			     struct dma_fence *replacement)
+			     struct dma_fence *replacement,
+			     enum dma_resv_usage usage)
 {
 	struct dma_resv_list *list;
-	struct dma_fence *old;
 	unsigned int i;
 
 	dma_resv_assert_held(obj);
 
+	list = dma_resv_fences_list(obj);
 	write_seqcount_begin(&obj->seq);
+	for (i = 0; list && i < list->num_fences; ++i) {
+		struct dma_fence *old;
 
-	old = dma_resv_excl_fence(obj);
-	if (old->context == context) {
-		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
-		dma_fence_put(old);
-	}
-
-	list = dma_resv_shared_list(obj);
-	for (i = 0; list && i < list->shared_count; ++i) {
-		old = rcu_dereference_protected(list->shared[i],
-						dma_resv_held(obj));
+		dma_resv_list_entry(list, i, dma_resv_held(obj), &old, NULL);
 		if (old->context != context)
 			continue;
 
-		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
+		dma_resv_list_set(list, i, replacement, usage);
 		dma_fence_put(old);
 	}
-
 	write_seqcount_end(&obj->seq);
 }
 EXPORT_SYMBOL(dma_resv_replace_fences);
 
-/**
- * dma_resv_add_excl_fence - Add an exclusive fence.
- * @obj: the reservation object
- * @fence: the exclusive fence to add
- *
- * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
- * Note that this function replaces all fences attached to @obj, see also
- * &dma_resv.fence_excl for a discussion of the semantics.
- */
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
-{
-	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
-
-	dma_resv_assert_held(obj);
-
-	dma_fence_get(fence);
-
-	write_seqcount_begin(&obj->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
-	RCU_INIT_POINTER(obj->fence_excl, fence);
-	write_seqcount_end(&obj->seq);
-
-	dma_fence_put(old_fence);
-}
-EXPORT_SYMBOL(dma_resv_add_excl_fence);
-
 /**
  * dma_resv_iter_restart_unlocked - restart the unlocked iterator
  * @cursor: The dma_resv_iter object to restart
@@ -406,15 +396,11 @@ EXPORT_SYMBOL(dma_resv_add_excl_fence);
 static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
 {
 	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
-	cursor->index = -1;
-	cursor->shared_count = 0;
-	if (cursor->usage >= DMA_RESV_USAGE_READ) {
-		cursor->fences = dma_resv_shared_list(cursor->obj);
-		if (cursor->fences)
-			cursor->shared_count = cursor->fences->shared_count;
-	} else {
-		cursor->fences = NULL;
-	}
+	cursor->index = 0;
+	cursor->num_fences = 0;
+	cursor->fences = dma_resv_fences_list(cursor->obj);
+	if (cursor->fences)
+		cursor->num_fences = cursor->fences->num_fences;
 	cursor->is_restarted = true;
 }
 
@@ -428,31 +414,29 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
  */
 static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
 {
-	struct dma_resv *obj = cursor->obj;
+	if (!cursor->fences)
+		return;
 
 	do {
 		/* Drop the reference from the previous round */
 		dma_fence_put(cursor->fence);
 
-		if (cursor->index == -1) {
-			cursor->fence = dma_resv_excl_fence(obj);
-			cursor->index++;
-			if (!cursor->fence)
-				continue;
-
-		} else if (!cursor->fences ||
-			   cursor->index >= cursor->shared_count) {
+		if (cursor->index >= cursor->num_fences) {
 			cursor->fence = NULL;
 			break;
 
-		} else {
-			struct dma_resv_list *fences = cursor->fences;
-			unsigned int idx = cursor->index++;
-
-			cursor->fence = rcu_dereference(fences->shared[idx]);
 		}
+
+		dma_resv_list_entry(cursor->fences, cursor->index++,
+				    dma_resv_held(cursor->obj),
+				    &cursor->fence,
+				    &cursor->fence_usage);
 		cursor->fence = dma_fence_get_rcu(cursor->fence);
-		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
+		if (!cursor->fence)
+			break;
+
+		if (!dma_fence_is_signaled(cursor->fence) &&
+		    cursor->usage >= cursor->fence_usage)
 			break;
 	} while (true);
 }
@@ -515,15 +499,9 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
 	dma_resv_assert_held(cursor->obj);
 
 	cursor->index = 0;
-	if (cursor->usage >= DMA_RESV_USAGE_READ)
-		cursor->fences = dma_resv_shared_list(cursor->obj);
-	else
-		cursor->fences = NULL;
-
-	fence = dma_resv_excl_fence(cursor->obj);
-	if (!fence)
-		fence = dma_resv_iter_next(cursor);
+	cursor->fences = dma_resv_fences_list(cursor->obj);
 
+	fence = dma_resv_iter_next(cursor);
 	cursor->is_restarted = true;
 	return fence;
 }
@@ -538,17 +516,18 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_first);
  */
 struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
 {
-	unsigned int idx;
+	struct dma_fence *fence;
 
 	dma_resv_assert_held(cursor->obj);
 
 	cursor->is_restarted = false;
-	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
+	if (!cursor->fences || cursor->index >= cursor->fences->num_fences)
 		return NULL;
 
-	idx = cursor->index++;
-	return rcu_dereference_protected(cursor->fences->shared[idx],
-					 dma_resv_held(cursor->obj));
+	dma_resv_list_entry(cursor->fences, cursor->index++,
+			    dma_resv_held(cursor->obj),
+			    &fence, &cursor->fence_usage);
+	return fence;
 }
 EXPORT_SYMBOL_GPL(dma_resv_iter_next);
 
@@ -563,57 +542,43 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
 	struct dma_resv_iter cursor;
 	struct dma_resv_list *list;
-	struct dma_fence *f, *excl;
+	struct dma_fence *f;
 
 	dma_resv_assert_held(dst);
 
 	list = NULL;
-	excl = NULL;
 
 	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
 	dma_resv_for_each_fence_unlocked(&cursor, f) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
 			dma_resv_list_free(list);
-			dma_fence_put(excl);
-
-			if (cursor.shared_count) {
-				list = dma_resv_list_alloc(cursor.shared_count);
-				if (!list) {
-					dma_resv_iter_end(&cursor);
-					return -ENOMEM;
-				}
-
-				list->shared_count = 0;
 
-			} else {
-				list = NULL;
+			list = dma_resv_list_alloc(cursor.num_fences);
+			if (!list) {
+				dma_resv_iter_end(&cursor);
+				return -ENOMEM;
 			}
-			excl = NULL;
+			list->num_fences = 0;
 		}
 
 		dma_fence_get(f);
-		if (dma_resv_iter_is_exclusive(&cursor))
-			excl = f;
-		else
-			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+		dma_resv_list_set(list, list->num_fences++, f,
+				  dma_resv_iter_usage(&cursor));
 	}
 	dma_resv_iter_end(&cursor);
 
 	write_seqcount_begin(&dst->seq);
-	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
-	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
+	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
 	write_seqcount_end(&dst->seq);
 
 	dma_resv_list_free(list);
-	dma_fence_put(excl);
-
 	return 0;
 }
 EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * dma_resv_get_fences - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's fences
  * fences without update side lock held
  * @obj: the reservation object
  * @usage: controls which fences to include, see enum dma_resv_usage.
@@ -642,7 +607,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 			while (*num_fences)
 				dma_fence_put((*fences)[--(*num_fences)]);
 
-			count = cursor.shared_count + 1;
+			count = cursor.num_fences + 1;
 
 			/* Eventually re-allocate the array */
 			*fences = krealloc_array(*fences, count,
@@ -714,8 +679,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
 EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
 
 /**
- * dma_resv_wait_timeout - Wait on reservation's objects
- * shared and/or exclusive fences.
+ * dma_resv_wait_timeout - Wait on reservation's objects fences
  * @obj: the reservation object
  * @usage: controls which fences to include, see enum dma_resv_usage.
  * @intr: if true, do interruptible wait
@@ -788,13 +752,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
  */
 void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
 {
+	static const char *usage[] = { "kernel", "write", "read", "other" };
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
 	dma_resv_for_each_fence(&cursor, obj, true, fence) {
 		seq_printf(seq, "\t%s fence:",
-			   dma_resv_iter_is_exclusive(&cursor) ?
-				"Exclusive" : "Shared");
+			   usage[dma_resv_iter_usage(&cursor)]);
 		dma_fence_describe(fence, seq);
 	}
 }
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index a52c5fbea87a..d0f7c2bfd4f0 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -58,8 +58,9 @@ static int sanitycheck(void *arg)
 	return r;
 }
 
-static int test_signaling(void *arg, enum dma_resv_usage usage)
+static int test_signaling(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_resv resv;
 	struct dma_fence *f;
 	int r;
@@ -75,17 +76,13 @@ static int test_signaling(void *arg, enum dma_resv_usage usage)
 		goto err_free;
 	}
 
-	r = dma_resv_reserve_shared(&resv, 1);
+	r = dma_resv_reserve_fences(&resv, 1);
 	if (r) {
 		pr_err("Resv shared slot allocation failed\n");
 		goto err_unlock;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
-
+	dma_resv_add_fence(&resv, f, usage);
 	if (dma_resv_test_signaled(&resv, usage)) {
 		pr_err("Resv unexpectedly signaled\n");
 		r = -EINVAL;
@@ -105,18 +102,9 @@ static int test_signaling(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_signaling(void *arg)
-{
-	return test_signaling(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_signaling(void *arg)
-{
-	return test_signaling(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each(void *arg, enum dma_resv_usage usage)
+static int test_for_each(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
 	struct dma_resv resv;
@@ -133,16 +121,13 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
 		goto err_free;
 	}
 
-	r = dma_resv_reserve_shared(&resv, 1);
+	r = dma_resv_reserve_fences(&resv, 1);
 	if (r) {
 		pr_err("Resv shared slot allocation failed\n");
 		goto err_unlock;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
+	dma_resv_add_fence(&resv, f, usage);
 
 	r = -ENOENT;
 	dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
@@ -156,8 +141,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
 			r = -EINVAL;
 			goto err_unlock;
 		}
-		if (dma_resv_iter_is_exclusive(&cursor) !=
-		    (usage >= DMA_RESV_USAGE_READ)) {
+		if (dma_resv_iter_usage(&cursor) != usage) {
 			pr_err("Unexpected fence usage\n");
 			r = -EINVAL;
 			goto err_unlock;
@@ -177,18 +161,9 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_for_each(void *arg)
-{
-	return test_for_each(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each(void *arg)
-{
-	return test_for_each(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
+static int test_for_each_unlocked(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
 	struct dma_resv resv;
@@ -205,17 +180,14 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 		goto err_free;
 	}
 
-	r = dma_resv_reserve_shared(&resv, 1);
+	r = dma_resv_reserve_fences(&resv, 1);
 	if (r) {
 		pr_err("Resv shared slot allocation failed\n");
 		dma_resv_unlock(&resv);
 		goto err_free;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
+	dma_resv_add_fence(&resv, f, usage);
 	dma_resv_unlock(&resv);
 
 	r = -ENOENT;
@@ -235,8 +207,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 			r = -EINVAL;
 			goto err_iter_end;
 		}
-		if (dma_resv_iter_is_exclusive(&cursor) !=
-		    (usage >= DMA_RESV_USAGE_READ)) {
+		if (dma_resv_iter_usage(&cursor) != usage) {
 			pr_err("Unexpected fence usage\n");
 			r = -EINVAL;
 			goto err_iter_end;
@@ -262,18 +233,9 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_for_each_unlocked(void *arg)
-{
-	return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each_unlocked(void *arg)
-{
-	return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_get_fences(void *arg, enum dma_resv_usage usage)
+static int test_get_fences(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_fence *f, **fences = NULL;
 	struct dma_resv resv;
 	int r, i;
@@ -289,17 +251,14 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage)
 		goto err_resv;
 	}
 
-	r = dma_resv_reserve_shared(&resv, 1);
+	r = dma_resv_reserve_fences(&resv, 1);
 	if (r) {
 		pr_err("Resv shared slot allocation failed\n");
 		dma_resv_unlock(&resv);
 		goto err_resv;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
+	dma_resv_add_fence(&resv, f, usage);
 	dma_resv_unlock(&resv);
 
 	r = dma_resv_get_fences(&resv, usage, &i, &fences);
@@ -324,30 +283,24 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_get_fences(void *arg)
-{
-	return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_get_fences(void *arg)
-{
-	return test_get_fences(arg, DMA_RESV_USAGE_READ);
-}
-
 int dma_resv(void)
 {
 	static const struct subtest tests[] = {
 		SUBTEST(sanitycheck),
-		SUBTEST(test_excl_signaling),
-		SUBTEST(test_shared_signaling),
-		SUBTEST(test_excl_for_each),
-		SUBTEST(test_shared_for_each),
-		SUBTEST(test_excl_for_each_unlocked),
-		SUBTEST(test_shared_for_each_unlocked),
-		SUBTEST(test_excl_get_fences),
-		SUBTEST(test_shared_get_fences),
+		SUBTEST(test_signaling),
+		SUBTEST(test_for_each),
+		SUBTEST(test_for_each_unlocked),
+		SUBTEST(test_get_fences),
 	};
+	enum dma_resv_usage usage;
+	int r;
 
 	spin_lock_init(&fence_lock);
-	return subtests(tests, NULL);
+	for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
+	     ++usage) {
+		r = subtests(tests, (void *)(unsigned long)usage);
+		if (r)
+			return r;
+	}
+	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b558ef0f8c4a..4a469831afe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -246,7 +246,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	 */
 	replacement = dma_fence_get_stub();
 	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
-				replacement);
+				replacement, DMA_RESV_USAGE_READ);
 	dma_fence_put(replacement);
 	return 0;
 }
@@ -1207,7 +1207,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(vm->root.bo,
@@ -2454,7 +2454,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 	 * Add process eviction fence to bo so they can
 	 * evict each other.
 	 */
-	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index af0a61ce2ec7..92091e800022 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -54,8 +54,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &bo->tbo;
-	/* One for TTM and one for the CS job */
-	p->uf_entry.tv.num_shared = 2;
+	/* One for TTM and two for the CS job */
+	p->uf_entry.tv.num_shared = 3;
 
 	drm_gem_object_put(gobj);
 
@@ -1285,7 +1285,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			break;
 		}
 		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
-		rcu_assign_pointer(resv->fence_excl, &chain->base);
+		dma_resv_add_fence(resv, &chain->base, DMA_RESV_USAGE_WRITE);
 		e->chain = NULL;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7715390aff7a..eaa19154551c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1369,17 +1369,15 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 	struct dma_resv *resv = bo->tbo.base.resv;
 	int r;
 
-	r = dma_resv_reserve_shared(resv, 1);
+	r = dma_resv_reserve_fences(resv, 1);
 	if (r) {
 		/* As last resort on OOM we block for the fence */
 		dma_fence_wait(fence, false);
 		return;
 	}
 
-	if (shared)
-		dma_resv_add_shared_fence(resv, fence);
-	else
-		dma_resv_add_excl_fence(resv, fence);
+	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+			   DMA_RESV_USAGE_WRITE);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e82997dbc0a4..9eac1e783bbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2969,7 +2969,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	if (r)
 		goto error_free_root;
 
-	r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
+	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
 
@@ -3412,7 +3412,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 		value = 0;
 	}
 
-	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
 	if (r) {
 		pr_debug("failed %d to reserve fence slot\n", r);
 		goto error_unlock;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 16137c4247bb..a30aa9bf41a4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -524,7 +524,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 		goto reserve_bo_failed;
 	}
 
-	r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
 	if (r) {
 		pr_debug("failed %d to reserve bo\n", r);
 		amdgpu_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3b8d49319d8c..2d77e469ef3c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -180,7 +180,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 		struct dma_resv *robj = bo->obj->base.resv;
 		enum dma_resv_usage usage;
 
-		ret = dma_resv_reserve_shared(robj, 1);
+		ret = dma_resv_reserve_fences(robj, 1);
 		if (ret)
 			return ret;
 
@@ -203,14 +203,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
+		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
 
-		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
-			dma_resv_add_excl_fence(obj->resv,
-							  submit->out_fence);
-		else
-			dma_resv_add_shared_fence(obj->resv,
-							    submit->out_fence);
-
+		dma_resv_add_fence(obj->resv, submit->out_fence, write ?
+				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
 		submit_unlock_object(submit, i);
 	}
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 14a1c0ad8c3c..e7ae94ee1b44 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -148,12 +148,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		if (dma_resv_iter_is_restarted(&cursor))
 			args->busy = 0;
 
-		if (dma_resv_iter_is_exclusive(&cursor))
-			/* Translate the exclusive fence to the READ *and* WRITE engine */
-			args->busy |= busy_check_writer(fence);
-		else
-			/* Translate shared fences to READ set of engines */
-			args->busy |= busy_check_reader(fence);
+		/* Translate read fences to READ set of engines */
+		args->busy |= busy_check_reader(fence);
+	}
+	dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_WRITE);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		/* Translate the write fences to the READ *and* WRITE engine */
+		args->busy |= busy_check_writer(fence);
 	}
 	dma_resv_iter_end(&cursor);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index fc57ab914b60..e70fb65bb54f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -101,14 +101,15 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 
 	clflush = NULL;
 	if (!(flags & I915_CLFLUSH_SYNC) &&
-	    dma_resv_reserve_shared(obj->base.resv, 1) == 0)
+	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
 		clflush = clflush_work_create(obj);
 	if (clflush) {
 		i915_sw_fence_await_reservation(&clflush->base.chain,
 						obj->base.resv, NULL, true,
 						i915_fence_timeout(to_i915(obj->base.dev)),
 						I915_FENCE_GFP);
-		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
+				   DMA_RESV_USAGE_WRITE);
 		dma_fence_work_commit(&clflush->base);
 	} else if (obj->mm.pages) {
 		__do_clflush(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index fc0e1625847c..3eb3716a35f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -989,7 +989,7 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
 			}
 		}
 
-		err = dma_resv_reserve_shared(vma->resv, 1);
+		err = dma_resv_reserve_fences(vma->resv, 1);
 		if (err)
 			return err;
 
@@ -2162,7 +2162,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 		goto err_trampoline;
 	}
 
-	err = dma_resv_reserve_shared(shadow->resv, 1);
+	err = dma_resv_reserve_fences(shadow->resv, 1);
 	if (err)
 		goto err_trampoline;
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 2bf491fd5cdf..2b4cde8250b6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -179,7 +179,7 @@ static int igt_lmem_pages_migrate(void *arg)
 					  i915_gem_object_is_lmem(obj),
 					  0xdeadbeaf, &rq);
 		if (rq) {
-			err = dma_resv_reserve_shared(obj->base.resv, 1);
+			err = dma_resv_reserve_fences(obj->base.resv, 1);
 			if (!err)
 				dma_resv_add_excl_fence(obj->base.resv,
 							&rq->fence);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 5ec87de63963..23d92b233b3b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1256,25 +1256,27 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
 		}
 
 		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_shared(vma->resv, 1);
+			err = dma_resv_reserve_fences(vma->resv, 1);
 			if (unlikely(err))
 				return err;
 		}
 
 		if (fence) {
-			dma_resv_add_excl_fence(vma->resv, fence);
+			dma_resv_add_fence(vma->resv, fence,
+					   DMA_RESV_USAGE_WRITE);
 			obj->write_domain = I915_GEM_DOMAIN_RENDER;
 			obj->read_domains = 0;
 		}
 	} else {
 		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_shared(vma->resv, 1);
+			err = dma_resv_reserve_fences(vma->resv, 1);
 			if (unlikely(err))
 				return err;
 		}
 
 		if (fence) {
-			dma_resv_add_shared_fence(vma->resv, fence);
+			dma_resv_add_fence(vma->resv, fence,
+					   DMA_RESV_USAGE_READ);
 			obj->write_domain = 0;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index b85af1672a7e..3e0edee1b238 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -895,7 +895,7 @@ static int igt_lmem_write_cpu(void *arg)
 
 	i915_gem_object_lock(obj, NULL);
 
-	err = dma_resv_reserve_shared(obj->base.resv, 1);
+	err = dma_resv_reserve_fences(obj->base.resv, 1);
 	if (err) {
 		i915_gem_object_unlock(obj);
 		goto out_put;
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index b4846007463f..cc076bd87323 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -257,7 +257,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
 {
 	int err;
 
-	err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
+	err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
 	if (err)
 		return err;
 
@@ -365,7 +365,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
 		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
 			dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
 		else
-			dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
+			dma_resv_add_fence(lima_bo_resv(bos[i]), fence, DMA_RESV_USAGE_READ);
 	}
 
 	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index e874d09b74ef..dcaaa7e2a342 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -325,7 +325,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 		 * strange place to call it.  OTOH this is a
 		 * convenient can-fail point to hook it in.
 		 */
-		ret = dma_resv_reserve_shared(obj->resv, 1);
+		ret = dma_resv_reserve_fences(obj->resv, 1);
 		if (ret)
 			return ret;
 
@@ -397,7 +397,7 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			dma_resv_add_excl_fence(obj->resv, submit->user_fence);
 		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-			dma_resv_add_shared_fence(obj->resv, submit->user_fence);
+			dma_resv_add_fence(obj->resv, submit->user_fence);
 	}
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c6bb4dbcd735..05076e530e7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1308,10 +1308,11 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl
 {
 	struct dma_resv *resv = nvbo->bo.base.resv;
 
-	if (exclusive)
-		dma_resv_add_excl_fence(resv, &fence->base);
-	else if (fence)
-		dma_resv_add_shared_fence(resv, &fence->base);
+	if (!fence)
+		return;
+
+	dma_resv_add_fence(resv, &fence->base, exclusive ?
+			   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 26725e23c075..02a7b86b74a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -349,7 +349,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 	struct nouveau_fence *f;
 	int ret;
 
-	ret = dma_resv_reserve_shared(resv, 1);
+	ret = dma_resv_reserve_fences(resv, 1);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 89c3fe389476..cca50cbb7f0b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -247,7 +247,7 @@ static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i, ret;
 
 	for (i = 0; i < bo_count; i++) {
-		ret = dma_resv_reserve_shared(bos[i]->resv, 1);
+		ret = dma_resv_reserve_fences(bos[i]->resv, 1);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 469979cd0341..368d26da0d6a 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -200,7 +200,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
 			return ret;
 	}
 
-	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
 	if (ret)
 		return ret;
 
@@ -429,7 +429,8 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
 
-		dma_resv_add_shared_fence(bo->base.resv, &release->base);
+		dma_resv_add_fence(bo->base.resv, &release->base,
+				   DMA_RESV_USAGE_READ);
 		ttm_bo_move_to_lru_tail_unlocked(bo);
 		dma_resv_unlock(bo->base.resv);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 56ede9d63b12..915194345e20 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -809,8 +809,6 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 {
 	struct dma_resv *resv = bo->tbo.base.resv;
 
-	if (shared)
-		dma_resv_add_shared_fence(resv, &fence->base);
-	else
-		dma_resv_add_excl_fence(resv, &fence->base);
+	dma_resv_add_fence(resv, &fence->base, shared ?
+			   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index bb53016f3138..987cabbf1318 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
 		int r;
 
 		radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
-		r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
+		r = dma_resv_reserve_fences(pt->tbo.base.resv, 1);
 		if (r)
 			return r;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9d9a0039359c..f52b451e26dc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -762,9 +762,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	dma_resv_add_shared_fence(bo->base.resv, fence);
+	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
 
-	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	ret = dma_resv_reserve_fences(bo->base.resv, 1);
 	if (unlikely(ret)) {
 		dma_fence_put(fence);
 		return ret;
@@ -823,7 +823,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	int i, ret;
 
-	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	ret = dma_resv_reserve_fences(bo->base.resv, 1);
 	if (unlikely(ret))
 		return ret;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ea9eabcc0a0c..e56e16a7f886 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -243,7 +243,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
-	ret = dma_resv_reserve_shared(&fbo->base.base._resv, 1);
+	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
 	if (ret) {
 		kfree(fbo);
 		return ret;
@@ -503,7 +503,8 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+	dma_resv_add_fence(&ghost_obj->base._resv, fence,
+			   DMA_RESV_USAGE_WRITE);
 
 	/**
 	 * If we're not moving to fixed memory, the TTM object
@@ -558,7 +559,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	int ret = 0;
 
-	dma_resv_add_excl_fence(bo->base.resv, fence);
+	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
 	if (!evict)
 		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
 	else if (!from->use_tt && pipeline)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 5da922639d54..0eb995d25df1 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -103,7 +103,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 		num_fences = min(entry->num_shared, 1u);
 		if (!ret) {
-			ret = dma_resv_reserve_shared(bo->base.resv,
+			ret = dma_resv_reserve_fences(bo->base.resv,
 						      num_fences);
 			if (!ret)
 				continue;
@@ -120,7 +120,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		}
 
 		if (!ret)
-			ret = dma_resv_reserve_shared(bo->base.resv,
+			ret = dma_resv_reserve_fences(bo->base.resv,
 						      num_fences);
 
 		if (unlikely(ret != 0)) {
@@ -154,10 +154,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		if (entry->num_shared)
-			dma_resv_add_shared_fence(bo->base.resv, fence);
-		else
-			dma_resv_add_excl_fence(bo->base.resv, fence);
+		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
 		ttm_bo_move_to_lru_tail_unlocked(bo);
 		dma_resv_unlock(bo->base.resv);
 	}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 1bea90e40ce1..e3a09e33822f 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -259,7 +259,7 @@ v3d_lock_bo_reservations(struct v3d_job *job,
 		return ret;
 
 	for (i = 0; i < job->bo_count; i++) {
-		ret = dma_resv_reserve_shared(job->bo[i]->resv, 1);
+		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
 		if (ret)
 			goto fail;
 
@@ -550,8 +550,8 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
 
 	for (i = 0; i < job->bo_count; i++) {
 		/* XXX: Use shared fences for read-only objects. */
-		dma_resv_add_excl_fence(job->bo[i]->resv,
-					job->done_fence);
+		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+				   DMA_RESV_USAGE_WRITE);
 	}
 
 	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 445d3bab89e0..e925e65fcd9d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -543,7 +543,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 		bo = to_vc4_bo(&exec->bo[i]->base);
 		bo->seqno = seqno;
 
-		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
+		dma_resv_add_fence(bo->base.base.resv, exec->fence);
 	}
 
 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -641,7 +641,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
 	for (i = 0; i < exec->bo_count; i++) {
 		bo = &exec->bo[i]->base;
 
-		ret = dma_resv_reserve_shared(bo->resv, 1);
+		ret = dma_resv_reserve_fences(bo->resv, 1);
 		if (ret) {
 			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
 			return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 74ebadf4e592..c2a879734d40 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -160,13 +160,10 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Expose the fence via the dma-buf */
 	dma_resv_lock(resv, NULL);
-	ret = dma_resv_reserve_shared(resv, 1);
-	if (!ret) {
-		if (arg->flags & VGEM_FENCE_WRITE)
-			dma_resv_add_excl_fence(resv, fence);
-		else
-			dma_resv_add_shared_fence(resv, fence);
-	}
+	ret = dma_resv_reserve_fences(resv, 1);
+	if (!ret)
+		dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ?
+				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
 	dma_resv_unlock(resv);
 
 	/* Record the fence in our idr for later signaling */
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index aec105cdd64c..a24ab63063e5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -227,7 +227,7 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
 		return ret;
 
 	for (i = 0; i < objs->nents; ++i) {
-		ret = dma_resv_reserve_shared(objs->objs[i]->resv, 1);
+		ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
 		if (ret)
 			return ret;
 	}
@@ -250,7 +250,8 @@ void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
 	int i;
 
 	for (i = 0; i < objs->nents; i++)
-		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+		dma_resv_add_fence(objs->objs[i]->resv, fence,
+				   DMA_RESV_USAGE_WRITE);
 }
 
 void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 5d5a438b1e23..a77921a44a72 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1062,9 +1062,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 	else
 		dma_fence_get(&fence->base);
 
-	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	ret = dma_resv_reserve_fences(bo->base.resv, 1);
 	if (!ret)
-		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+		dma_resv_add_fence(bo->base.resv, &fence->base,
+				   DMA_RESV_USAGE_WRITE);
 	else
 		/* Last resort fallback when we are OOM */
 		dma_fence_wait(&fence->base, false);
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index d96d8ca9af56..4f3a6abf43c4 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -95,8 +95,8 @@ static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
 /**
  * struct dma_resv - a reservation object manages fences for a buffer
  *
- * There are multiple uses for this, with sometimes slightly different rules in
- * how the fence slots are used.
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
  *
  * One use is to synchronize cross-driver access to a struct dma_buf, either for
  * dynamic buffer management or just to handle implicit synchronization between
@@ -126,59 +126,22 @@ struct dma_resv {
 	 * @seq:
 	 *
 	 * Sequence count for managing RCU read-side synchronization, allows
-	 * read-only access to @fence_excl and @fence while ensuring we take a
-	 * consistent snapshot.
+	 * read-only access to @fences while ensuring we take a consistent
+	 * snapshot.
 	 */
 	seqcount_ww_mutex_t seq;
 
 	/**
-	 * @fence_excl:
+	 * @fences:
 	 *
-	 * The exclusive fence, if there is one currently.
+	 * Array of fences which were added to the dma_resv object
 	 *
-	 * There are two ways to update this fence:
-	 *
-	 * - First by calling dma_resv_add_excl_fence(), which replaces all
-	 *   fences attached to the reservation object. To guarantee that no
-	 *   fences are lost, this new fence must signal only after all previous
-	 *   fences, both shared and exclusive, have signalled. In some cases it
-	 *   is convenient to achieve that by attaching a struct dma_fence_array
-	 *   with all the new and old fences.
-	 *
-	 * - Alternatively the fence can be set directly, which leaves the
-	 *   shared fences unchanged. To guarantee that no fences are lost, this
-	 *   new fence must signal only after the previous exclusive fence has
-	 *   signalled. Since the shared fences are staying intact, it is not
-	 *   necessary to maintain any ordering against those. If semantically
-	 *   only a new access is added without actually treating the previous
-	 *   one as a dependency the exclusive fences can be strung together
-	 *   using struct dma_fence_chain.
-	 *
-	 * Note that actual semantics of what an exclusive or shared fence mean
-	 * is defined by the user, for reservation objects shared across drivers
-	 * see &dma_buf.resv.
-	 */
-	struct dma_fence __rcu *fence_excl;
-
-	/**
-	 * @fence:
-	 *
-	 * List of current shared fences.
-	 *
-	 * There are no ordering constraints of shared fences against the
-	 * exclusive fence slot. If a waiter needs to wait for all access, it
-	 * has to wait for both sets of fences to signal.
-	 *
-	 * A new fence is added by calling dma_resv_add_shared_fence(). Since
-	 * this often needs to be done past the point of no return in command
+	 * A new fence is added by calling dma_resv_add_fence(). Since this
+	 * often needs to be done past the point of no return in command
 	 * submission it cannot fail, and therefore sufficient slots need to be
-	 * reserved by calling dma_resv_reserve_shared().
-	 *
-	 * Note that actual semantics of what an exclusive or shared fence mean
-	 * is defined by the user, for reservation objects shared across drivers
-	 * see &dma_buf.resv.
+	 * reserved by calling dma_resv_reserve_fences().
 	 */
-	struct dma_resv_list __rcu *fence;
+	struct dma_resv_list __rcu *fences;
 };
 
 /**
@@ -196,6 +159,9 @@ struct dma_resv_iter {
 	/** @fence: the currently handled fence */
 	struct dma_fence *fence;
 
+	/** @fence_usage: the usage of the current fence */
+	enum dma_resv_usage fence_usage;
+
 	/** @seq: sequence number to check for modifications */
 	unsigned int seq;
 
@@ -205,8 +171,8 @@ struct dma_resv_iter {
 	/** @fences: the shared fences; private, *MUST* not dereference  */
 	struct dma_resv_list *fences;
 
-	/** @shared_count: number of shared fences */
-	unsigned int shared_count;
+	/** @num_fences: number of fences */
+	unsigned int num_fences;
 
 	/** @is_restarted: true if this is the first returned fence */
 	bool is_restarted;
@@ -245,14 +211,15 @@ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
 }
 
 /**
- * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * dma_resv_iter_usage - Return the usage of the current fence
  * @cursor: the cursor of the current position
  *
- * Returns true if the currently returned fence is the exclusive one.
+ * Returns the usage of the currently processed fence.
  */
-static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
 {
-	return cursor->index == 0;
+	return cursor->fence_usage;
 }
 
 /**
@@ -303,9 +270,9 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
 #ifdef CONFIG_DEBUG_MUTEXES
-void dma_resv_reset_shared_max(struct dma_resv *obj);
+void dma_resv_reset_max_fences(struct dma_resv *obj);
 #else
-static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
 #endif
 
 /**
@@ -451,17 +418,18 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
  */
 static inline void dma_resv_unlock(struct dma_resv *obj)
 {
-	dma_resv_reset_shared_max(obj);
+	dma_resv_reset_max_fences(obj);
 	ww_mutex_unlock(&obj->lock);
 }
 
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+			enum dma_resv_usage usage);
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
-			     struct dma_fence *fence);
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
+			     struct dma_fence *fence,
+			     enum dma_resv_usage usage);
 int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 			unsigned int *num_fences, struct dma_fence ***fences);
 int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (18 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 19/24] dma-buf: specify usage while adding fences to dma_resv obj v2 Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 22:05     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 21/24] dma-buf: add DMA_RESV_USAGE_BOOKKEEP Christian König
                   ` (4 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Add a usage for kernel submissions. Waiting for those fences
is mandatory for dynamic DMA-bufs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/st-dma-resv.c                |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c      |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  6 ++++--
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c  |  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c         |  4 ++--
 drivers/gpu/drm/radeon/radeon_uvd.c          |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                 |  2 +-
 drivers/gpu/drm/ttm/ttm_bo_util.c            |  4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c           |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c     |  2 +-
 drivers/infiniband/core/umem_dmabuf.c        |  2 +-
 include/linux/dma-resv.h                     | 22 ++++++++++++++++++++
 13 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index d0f7c2bfd4f0..062b57d63fa6 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -296,7 +296,7 @@ int dma_resv(void)
 	int r;
 
 	spin_lock_init(&fence_lock);
-	for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
+	for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ;
 	     ++usage) {
 		r = subtests(tests, (void *)(unsigned long)usage);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index eaa19154551c..a40ede9bccd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -764,7 +764,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
 				  false, MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 33deb0df62fd..9e102080dad9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1163,7 +1163,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 	if (direct) {
 		r = dma_resv_wait_timeout(bo->tbo.base.resv,
-					  DMA_RESV_USAGE_WRITE, false,
+					  DMA_RESV_USAGE_KERNEL, false,
 					  msecs_to_jiffies(10));
 		if (r == 0)
 			r = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 2d77e469ef3c..a2f627af3ce2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -185,9 +185,11 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			return ret;
 
 		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
-			continue;
+			usage = DMA_RESV_USAGE_KERNEL;
+		else
+			usage = dma_resv_usage_rw(bo->flags &
+						  ETNA_SUBMIT_BO_WRITE);
 
-		usage = dma_resv_usage_rw(bo->flags & ETNA_SUBMIT_BO_WRITE);
 		ret = dma_resv_get_fences(robj, usage, &bo->nr_shared,
 					  &bo->shared);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index e70fb65bb54f..b9281ca96ece 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -109,7 +109,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 						i915_fence_timeout(to_i915(obj->base.dev)),
 						I915_FENCE_GFP);
 		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
-				   DMA_RESV_USAGE_WRITE);
+				   DMA_RESV_USAGE_KERNEL);
 		dma_fence_work_commit(&clflush->base);
 	} else if (obj->mm.pages) {
 		__do_clflush(obj);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 05076e530e7d..13deb6c70ba6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -962,10 +962,10 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 	struct dma_fence *fence;
 	int ret;
 
-	ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
+	ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_KERNEL,
 				     &fence);
 	if (ret)
-		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
+		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL,
 				      false, MAX_SCHEDULE_TIMEOUT);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 4000ad2f39ba..488e78889dd6 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -478,7 +478,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
 				  false, MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0) {
 		DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f52b451e26dc..ad83f42fc9ee 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -762,7 +762,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
+	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
 
 	ret = dma_resv_reserve_fences(bo->base.resv, 1);
 	if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e56e16a7f886..b9cfb62c4b6e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -504,7 +504,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 		return ret;
 
 	dma_resv_add_fence(&ghost_obj->base._resv, fence,
-			   DMA_RESV_USAGE_WRITE);
+			   DMA_RESV_USAGE_KERNEL);
 
 	/**
 	 * If we're not moving to fixed memory, the TTM object
@@ -559,7 +559,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	int ret = 0;
 
-	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
+	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
 	if (!evict)
 		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
 	else if (!from->use_tt && pipeline)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a77921a44a72..f871ddb62606 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1065,7 +1065,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 	ret = dma_resv_reserve_fences(bo->base.resv, 1);
 	if (!ret)
 		dma_resv_add_fence(bo->base.resv, &fence->base,
-				   DMA_RESV_USAGE_WRITE);
+				   DMA_RESV_USAGE_KERNEL);
 	else
 		/* Last resort fallback when we are OOM */
 		dma_fence_wait(&fence->base, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 29452e150424..9e3dcbb573e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1170,7 +1170,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
 			dma_fence_put(bo->moving);
 
 		return dma_resv_get_singleton(bo->base.resv,
-					      DMA_RESV_USAGE_WRITE,
+					      DMA_RESV_USAGE_KERNEL,
 					      &bo->moving);
 	}
 
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index f9901d273b8e..fce80a4a5147 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -68,7 +68,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 	 * the migration.
 	 */
 	return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
-				     DMA_RESV_USAGE_WRITE,
+				     DMA_RESV_USAGE_KERNEL,
 				     false, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 4f3a6abf43c4..29d799991496 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -54,8 +54,30 @@ struct dma_resv_list;
  *
  * This enum describes the different use cases for a dma_resv object and
  * controls which fences are returned when queried.
+ *
+ * An important fact is that there is the order KERNEL<WRITE<READ and
+ * when the dma_resv object is asked for fences for one use case the fences
+ * for the lower use case are returned as well.
+ *
+ * For example when asking for WRITE fences then the KERNEL fences are returned
+ * as well. Similarly, when asked for READ fences, both WRITE and KERNEL
+ * fences are returned as well.
  */
 enum dma_resv_usage {
+	/**
+	 * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+	 *
+	 * This should only be used for things like copying or clearing memory
+	 * with a DMA hardware engine for the purpose of kernel memory
+	 * management.
+	 *
+	 * Drivers *always* need to wait for those fences before accessing the
+	 * resource protected by the dma_resv object. The only exception for
+	 * that is when the resource is known to be locked down in place by
+	 * pinning it previously.
+	 */
+	DMA_RESV_USAGE_KERNEL,
+
 	/**
 	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
 	 *
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 21/24] dma-buf: add DMA_RESV_USAGE_BOOKKEEP
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (19 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 22:10     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 22/24] dma-buf: wait for map to complete for static attachments Christian König
                   ` (3 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Add a usage for submissions independent of implicit sync but still
interesting for memory management.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c                     |  2 +-
 drivers/dma-buf/st-dma-resv.c                  |  2 +-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c        |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c         |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c        |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c         |  6 +++---
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c       |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c    |  2 +-
 drivers/gpu/drm/qxl/qxl_debugfs.c              |  2 +-
 drivers/gpu/drm/radeon/radeon_gem.c            |  2 +-
 drivers/gpu/drm/radeon/radeon_mn.c             |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                   | 14 +++++++-------
 include/linux/dma-resv.h                       | 18 +++++++++++++++++-
 15 files changed, 40 insertions(+), 24 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index a2a0b5b6c107..a058a3e805ab 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -548,7 +548,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 	list = NULL;
 
-	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
+	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
 	dma_resv_for_each_fence_unlocked(&cursor, f) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index 062b57d63fa6..8ace9e84c845 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -296,7 +296,7 @@ int dma_resv(void)
 	int r;
 
 	spin_lock_init(&fence_lock);
-	for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ;
+	for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP;
 	     ++usage) {
 		r = subtests(tests, (void *)(unsigned long)usage);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 4a469831afe3..bbfd7a1e42e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -246,7 +246,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	 */
 	replacement = dma_fence_get_stub();
 	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
-				replacement, DMA_RESV_USAGE_READ);
+				replacement, DMA_RESV_USAGE_BOOKKEEP);
 	dma_fence_put(replacement);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 490d2a7a3e2b..ddf46802b1ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	struct dma_fence *fence;
 	int r;
 
-	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence);
+	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
 	if (r)
 		goto fallback;
 
@@ -139,7 +139,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
 			      false, MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 86f5248676b0..b86c0b8252a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
 
 	mmu_interval_set_seq(mni, cur_seq);
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
 				  false, MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 183623806056..1447f009a957 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -260,7 +260,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -EINVAL;
 
 	/* TODO: Use DMA_RESV_USAGE_READ here */
-	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
+	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
 		dma_fence_chain_for_each(f, f) {
 			struct dma_fence_chain *chain = to_dma_fence_chain(f);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fd339762f534..3740d6e788ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1361,7 +1361,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * be resident to run successfully
 	 */
 	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
-				DMA_RESV_USAGE_READ, f) {
+				DMA_RESV_USAGE_BOOKKEEP, f) {
 		if (amdkfd_fence_check_mm(f, current->mm))
 			return false;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9eac1e783bbb..6a3ccd344a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2105,7 +2105,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
+	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
 		/* Add a callback for each fence in the reservation object */
 		amdgpu_vm_prt_get(adev);
 		amdgpu_vm_add_prt_cb(adev, fence);
@@ -2707,7 +2707,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;
 
 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
 		return false;
 
 	/* Try to block ongoing updates */
@@ -2888,7 +2888,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
-					DMA_RESV_USAGE_READ,
+					DMA_RESV_USAGE_BOOKKEEP,
 					true, timeout);
 	if (timeout <= 0)
 		return timeout;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index a200d3e66573..4115a222a853 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
 
 #ifdef CONFIG_LOCKDEP
-	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) &&
+	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
 		    i915_gem_object_evictable(obj));
 #endif
 	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 0ccb91385f84..67b1fa845f22 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,7 +85,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;
 
 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false,
+	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
 				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 33e5889d6608..2d9ed3b94574 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -62,7 +62,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 		int rel = 0;
 
 		dma_resv_iter_begin(&cursor, bo->tbo.base.resv,
-				    DMA_RESV_USAGE_READ);
+				    DMA_RESV_USAGE_BOOKKEEP);
 		dma_resv_for_each_fence_unlocked(&cursor, fence) {
 			if (dma_resv_iter_is_restarted(&cursor))
 				rel = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 71bf9299e45c..9587ab88bedd 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -162,7 +162,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
 		r = dma_resv_wait_timeout(robj->tbo.base.resv,
-					  DMA_RESV_USAGE_READ,
+					  DMA_RESV_USAGE_BOOKKEEP,
 					  true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 68ebeb1bdfff..29fe8423bd90 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
 				  false, MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ad83f42fc9ee..d3527d3f7b18 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -272,7 +272,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
+	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (!fence->ops->signaled)
 			dma_fence_enable_sw_signaling(fence);
@@ -301,7 +301,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ))
+	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -313,7 +313,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);
 
-		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
 					     interruptible,
 					     30 * HZ);
 
@@ -418,7 +418,7 @@ static void ttm_bo_release(struct kref *kref)
 			 * fences block for the BO to become idle
 			 */
 			dma_resv_wait_timeout(bo->base.resv,
-					      DMA_RESV_USAGE_READ, false,
+					      DMA_RESV_USAGE_BOOKKEEP, false,
 					      30 * HZ);
 		}
 
@@ -429,7 +429,7 @@ static void ttm_bo_release(struct kref *kref)
 		ttm_mem_io_free(bdev, bo->resource);
 	}
 
-	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) ||
+	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -1074,13 +1074,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ))
+		if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
+	timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
 					interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 29d799991496..07ae5b00c1fa 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -55,7 +55,7 @@ struct dma_resv_list;
  * This enum describes the different use cases for a dma_resv object and
  * controls which fences are returned when queried.
  *
- * An important fact is that there is the order KERNEL<WRITE<READ and
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
  * when the dma_resv object is asked for fences for one use case the fences
  * for the lower use case are returned as well.
  *
@@ -93,6 +93,22 @@ enum dma_resv_usage {
 	 * an implicit read dependency.
 	 */
 	DMA_RESV_USAGE_READ,
+
+	/**
+	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+	 *
+	 * This should be used by submissions which don't want to participate in
+	 * implicit synchronization.
+	 *
+	 * The most common case are submissions with explicit synchronization,
+	 * but also things like preemption fences as well as page table updates
+	 * might use this.
+	 *
+	 * The kernel memory management *always* need to wait for those fences
+	 * before moving or freeing the resource protected by the dma_resv
+	 * object.
+	 */
+	DMA_RESV_USAGE_BOOKKEEP
 };
 
 /**
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 22/24] dma-buf: wait for map to complete for static attachments
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (20 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 21/24] dma-buf: add DMA_RESV_USAGE_BOOKKEEP Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-22 22:16     ` Daniel Vetter
  2021-12-07 12:34 ` [PATCH 23/24] amdgpu: remove DMA-buf fence workaround Christian König
                   ` (2 subsequent siblings)
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

We have previously done this in the individual drivers, but it is
more defensive to move it into the common code.

Dynamic attachments are expected to wait for map operations to complete on their own.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c                   | 18 +++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 14 +-------------
 drivers/gpu/drm/nouveau/nouveau_prime.c     | 17 +----------------
 drivers/gpu/drm/radeon/radeon_prime.c       | 16 +++-------------
 4 files changed, 20 insertions(+), 45 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 528983d3ba64..d3dd602c4753 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -660,12 +660,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
 				       enum dma_data_direction direction)
 {
 	struct sg_table *sg_table;
+	signed long ret;
 
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	if (IS_ERR_OR_NULL(sg_table))
+		return sg_table;
+
+	if (!dma_buf_attachment_is_dynamic(attach)) {
+		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+					    DMA_RESV_USAGE_KERNEL, true,
+					    MAX_SCHEDULE_TIMEOUT);
+		if (ret < 0) {
+			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+							   direction);
+			return ERR_PTR(ret);
+		}
+	}
 
-	if (!IS_ERR_OR_NULL(sg_table))
-		mangle_sg_table(sg_table);
-
+	mangle_sg_table(sg_table);
 	return sg_table;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 4896c876ffec..33127bd56c64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
 {
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	int r;
 
 	/* pin buffer into GTT */
-	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
-	if (r)
-		return r;
-
-	if (bo->tbo.moving) {
-		r = dma_fence_wait(bo->tbo.moving, true);
-		if (r) {
-			amdgpu_bo_unpin(bo);
-			return r;
-		}
-	}
-	return 0;
+	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 }
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 60019d0532fc..347488685f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
 	if (ret)
 		return -EINVAL;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
-	if (ret)
-		goto error;
-
-	if (nvbo->bo.moving)
-		ret = dma_fence_wait(nvbo->bo.moving, true);
-
-	ttm_bo_unreserve(&nvbo->bo);
-	if (ret)
-		goto error;
-
-	return ret;
-
-error:
-	nouveau_bo_unpin(nvbo);
-	return ret;
+	return 0;
 }
 
 void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 4a90807351e7..42a87948e28c 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
 
 	/* pin buffer into GTT */
 	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-	if (unlikely(ret))
-		goto error;
-
-	if (bo->tbo.moving) {
-		ret = dma_fence_wait(bo->tbo.moving, false);
-		if (unlikely(ret)) {
-			radeon_bo_unpin(bo);
-			goto error;
-		}
-	}
-
-	bo->prime_shared_count++;
-error:
+	if (likely(ret == 0))
+		bo->prime_shared_count++;
+
 	radeon_bo_unreserve(bo);
 	return ret;
 }
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 23/24] amdgpu: remove DMA-buf fence workaround
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (21 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 22/24] dma-buf: wait for map to complete for static attachments Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-07 12:34 ` [PATCH 24/24] drm/ttm: remove bo->moving Christian König
  2021-12-17 14:39 ` completely rework the dma_resv semantic Christian König
  24 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Not needed any more now that this is handled inside the framework.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h |  1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c      | 52 +++------------------
 2 files changed, 6 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 044b41f0bfd9..529d52a204cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -34,7 +34,6 @@ struct amdgpu_fpriv;
 struct amdgpu_bo_list_entry {
 	struct ttm_validate_buffer	tv;
 	struct amdgpu_bo_va		*bo_va;
-	struct dma_fence_chain		*chain;
 	uint32_t			priority;
 	struct page			**user_pages;
 	bool				user_invalidated;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 92091e800022..413606d10080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -576,14 +576,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
 		e->bo_va = amdgpu_vm_bo_find(vm, bo);
-
-		if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
-			e->chain = dma_fence_chain_alloc();
-			if (!e->chain) {
-				r = -ENOMEM;
-				goto error_validate;
-			}
-		}
 	}
 
 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -634,13 +626,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	}
 
 error_validate:
-	if (r) {
-		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-			dma_fence_chain_free(e->chain);
-			e->chain = NULL;
-		}
+	if (r)
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-	}
 out:
 	return r;
 }
@@ -680,17 +667,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 {
 	unsigned i;
 
-	if (error && backoff) {
-		struct amdgpu_bo_list_entry *e;
-
-		amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
-			dma_fence_chain_free(e->chain);
-			e->chain = NULL;
-		}
-
+	if (error && backoff)
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
-	}
 
 	for (i = 0; i < parser->num_post_deps; i++) {
 		drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -1265,29 +1244,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 
-	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-		struct dma_resv *resv = e->tv.bo->base.resv;
-		struct dma_fence_chain *chain = e->chain;
-		struct dma_resv_iter cursor;
-		struct dma_fence *fence;
-
-		if (!chain)
-			continue;
-
-		/*
-		 * Work around dma_resv shortcommings by wrapping up the
-		 * submission in a dma_fence_chain and add it as exclusive
-		 * fence.
-		 */
-		dma_resv_for_each_fence(&cursor, resv,
-					DMA_RESV_USAGE_WRITE,
-					fence) {
-			break;
-		}
-		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
-		dma_resv_add_fence(resv, &chain->base, DMA_RESV_USAGE_WRITE);
-		e->chain = NULL;
-	}
+	/* For now manually add the resulting fence as writer as well */
+	amdgpu_bo_list_for_each_entry(e, p->bo_list)
+		dma_resv_add_fence(e->tv.bo->base.resv, p->fence,
+				   DMA_RESV_USAGE_WRITE);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 	mutex_unlock(&p->adev->notifier_lock);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* [PATCH 24/24] drm/ttm: remove bo->moving
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (22 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 23/24] amdgpu: remove DMA-buf fence workaround Christian König
@ 2021-12-07 12:34 ` Christian König
  2021-12-17 14:39 ` completely rework the dma_resv semantic Christian König
  24 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2021-12-07 12:34 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

This is now handled by the DMA-buf framework in the dma_resv obj.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 13 ++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  7 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c    | 11 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c   | 11 ++++--
 drivers/gpu/drm/ttm/ttm_bo.c                  | 10 ++----
 drivers/gpu/drm/ttm/ttm_bo_util.c             |  7 ----
 drivers/gpu/drm/ttm/ttm_bo_vm.c               | 34 +++++++------------
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      |  6 ----
 include/drm/ttm/ttm_bo_api.h                  |  2 --
 9 files changed, 40 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index bbfd7a1e42e8..7bd39e5d36dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2330,6 +2330,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 		struct amdgpu_bo *bo = mem->bo;
 		uint32_t domain = mem->domain;
 		struct kfd_mem_attachment *attachment;
+		struct dma_resv_iter cursor;
+		struct dma_fence *fence;
 
 		total_size += amdgpu_bo_size(bo);
 
@@ -2344,10 +2346,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 				goto validate_map_fail;
 			}
 		}
-		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
-		if (ret) {
-			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
-			goto validate_map_fail;
+		dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+					DMA_RESV_USAGE_KERNEL, fence) {
+			ret = amdgpu_sync_fence(&sync_obj, fence);
+			if (ret) {
+				pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+				goto validate_map_fail;
+			}
 		}
 		list_for_each_entry(attachment, &mem->attachments, list) {
 			if (!attachment->is_mapped)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a40ede9bccd0..3881a503a7bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -608,9 +608,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		if (unlikely(r))
 			goto fail_unreserve;
 
-		amdgpu_bo_fence(bo, fence, false);
-		dma_fence_put(bo->tbo.moving);
-		bo->tbo.moving = dma_fence_get(fence);
+		dma_resv_add_fence(bo->tbo.base.resv, fence,
+				   DMA_RESV_USAGE_KERNEL);
 		dma_fence_put(fence);
 	}
 	if (!bp->resv)
@@ -1290,7 +1289,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 
 	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
 	if (!WARN_ON(r)) {
-		amdgpu_bo_fence(abo, fence, false);
+		dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
 		dma_fence_put(fence);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index e3fbf0f10add..31913ae86de6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 {
 	unsigned int i;
 	uint64_t value;
-	int r;
+	long r;
 
-	if (vmbo->bo.tbo.moving) {
-		r = dma_fence_wait(vmbo->bo.tbo.moving, true);
-		if (r)
-			return r;
-	}
+	r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+				  true, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
+		return r;
 
 	pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index dbb551762805..bdb44cee19d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	struct amdgpu_bo *bo = &vmbo->bo;
 	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
 		: AMDGPU_IB_POOL_DELAYED;
+	struct dma_resv_iter cursor;
 	unsigned int i, ndw, nptes;
+	struct dma_fence *fence;
 	uint64_t *pte;
 	int r;
 
 	/* Wait for PD/PT moves to be completed */
-	r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);
-	if (r)
-		return r;
+	dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+				DMA_RESV_USAGE_KERNEL, fence) {
+		r = amdgpu_sync_fence(&p->job->sync, fence);
+		if (r)
+			return r;
+	}
 
 	do {
 		ndw = p->num_dw_left;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d3527d3f7b18..7b9e0f46f121 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -468,7 +468,6 @@ static void ttm_bo_release(struct kref *kref)
 	dma_resv_unlock(bo->base.resv);
 
 	atomic_dec(&ttm_glob.bo_count);
-	dma_fence_put(bo->moving);
 	bo->destroy(bo);
 }
 
@@ -737,9 +736,8 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 }
 
 /*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as kernel dependency and reserve a new
+ * fence slot.
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_resource_manager *man,
@@ -769,9 +767,6 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		dma_fence_put(fence);
 		return ret;
 	}
-
-	dma_fence_put(bo->moving);
-	bo->moving = fence;
 	return 0;
 }
 
@@ -978,7 +973,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->page_alignment = page_alignment;
-	bo->moving = NULL;
 	bo->pin_count = 0;
 	bo->sg = sg;
 	if (resv) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index b9cfb62c4b6e..95de2691ee7c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -229,7 +229,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	atomic_inc(&ttm_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
-	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
 	kref_init(&fbo->base.kref);
@@ -496,9 +495,6 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 	 * operation has completed.
 	 */
 
-	dma_fence_put(bo->moving);
-	bo->moving = dma_fence_get(fence);
-
 	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 	if (ret)
 		return ret;
@@ -543,9 +539,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 	spin_unlock(&from->move_lock);
 
 	ttm_resource_free(bo, &bo->resource);
-
-	dma_fence_put(bo->moving);
-	bo->moving = dma_fence_get(fence);
 }
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08ba083a80d2..5b324f245265 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -46,17 +46,13 @@
 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 				struct vm_fault *vmf)
 {
-	vm_fault_t ret = 0;
-	int err = 0;
-
-	if (likely(!bo->moving))
-		goto out_unlock;
+	long err = 0;
 
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	if (dma_fence_is_signaled(bo->moving))
-		goto out_clear;
+	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+		return 0;
 
 	/*
 	 * If possible, avoid waiting for GPU with mmap_lock
@@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	 * is the first attempt.
 	 */
 	if (fault_flag_allow_retry_first(vmf->flags)) {
-		ret = VM_FAULT_RETRY;
 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
-			goto out_unlock;
+			return VM_FAULT_RETRY;
 
 		ttm_bo_get(bo);
 		mmap_read_unlock(vmf->vma->vm_mm);
-		(void) dma_fence_wait(bo->moving, true);
+		(void)dma_resv_wait_timeout(bo->base.resv,
+					    DMA_RESV_USAGE_KERNEL, true,
+					    MAX_SCHEDULE_TIMEOUT);
 		dma_resv_unlock(bo->base.resv);
 		ttm_bo_put(bo);
-		goto out_unlock;
+		return VM_FAULT_RETRY;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	err = dma_fence_wait(bo->moving, true);
-	if (unlikely(err != 0)) {
-		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+				    MAX_SCHEDULE_TIMEOUT);
+	if (unlikely(err < 0)) {
+		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
-		goto out_unlock;
 	}
 
-out_clear:
-	dma_fence_put(bo->moving);
-	bo->moving = NULL;
-
-out_unlock:
-	return ret;
+	return 0;
 }
 
 static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9e3dcbb573e7..40cc2c13e963 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1166,12 +1166,6 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
 		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
 						      PAGE_SIZE);
 		vmw_bo_fence_single(bo, NULL);
-		if (bo->moving)
-			dma_fence_put(bo->moving);
-
-		return dma_resv_get_singleton(bo->base.resv,
-					      DMA_RESV_USAGE_KERNEL,
-					      &bo->moving);
 	}
 
 	return 0;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c17b2df9178b..4c7134550262 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -97,7 +97,6 @@ struct ttm_tt;
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -150,7 +149,6 @@ struct ttm_buffer_object {
 	 * Members protected by a bo reservation.
 	 */
 
-	struct dma_fence *moving;
 	unsigned priority;
 	unsigned pin_count;
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 95+ messages in thread

* Re: completely rework the dma_resv semantic
  2021-12-07 12:33 completely rework the dma_resv semantic Christian König
                   ` (23 preceding siblings ...)
  2021-12-07 12:34 ` [PATCH 24/24] drm/ttm: remove bo->moving Christian König
@ 2021-12-17 14:39 ` Christian König
  2021-12-22 22:17     ` Daniel Vetter
  24 siblings, 1 reply; 95+ messages in thread
From: Christian König @ 2021-12-17 14:39 UTC (permalink / raw)
  To: daniel, dri-devel, linux-media, linaro-mm-sig

Hi Daniel,

looks like this is going nowhere and you don't seem to have time to review.

What can we do?

Thanks,
Christian.

Am 07.12.21 um 13:33 schrieb Christian König:
> Hi Daniel,
>
> just a gentle ping that you wanted to take a look at this.
>
> Not much changed compared to the last version, only a minor bugfix in
> the dma_resv_get_singleton error handling.
>
> Regards,
> Christian.
>
>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 01/24] dma-buf: add dma_resv_replace_fences
  2021-12-07 12:33 ` [PATCH 01/24] dma-buf: add dma_resv_replace_fences Christian König
@ 2021-12-22 21:05     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:05 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:48PM +0100, Christian König wrote:
> This function allows to replace fences from the shared fence list when
> we can guarantee that the operation represented by the original fence has
> finished or no accesses to the resources protected by the dma_resv
> object any more when the new fence finishes.
> 
> Then use this function in the amdkfd code when BOs are unmapped from the
> process.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c                    | 43 ++++++++++++++++
>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
>  include/linux/dma-resv.h                      |  2 +
>  3 files changed, 52 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 4deea75c0b9c..a688dbded3d3 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -284,6 +284,49 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>  }
>  EXPORT_SYMBOL(dma_resv_add_shared_fence);
>  
> +/**
> + * dma_resv_replace_fences - replace fences in the dma_resv obj
> + * @obj: the reservation object
> + * @context: the context of the fences to replace
> + * @replacement: the new fence to use instead
> + *
> + * Replace fences with a specified context with a new fence. Only valid if the
> + * operation represented by the original fences is completed or has no longer
> + * access to the resources protected by the dma_resv object when the new fence
> + * completes.
> + */
> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> +			     struct dma_fence *replacement)
> +{
> +	struct dma_resv_list *list;
> +	struct dma_fence *old;
> +	unsigned int i;
> +
> +	dma_resv_assert_held(obj);
> +
> +	write_seqcount_begin(&obj->seq);
> +
> +	old = dma_resv_excl_fence(obj);
> +	if (old->context == context) {
> +		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
> +		dma_fence_put(old);
> +	}
> +
> +	list = dma_resv_shared_list(obj);
> +	for (i = 0; list && i < list->shared_count; ++i) {
> +		old = rcu_dereference_protected(list->shared[i],
> +						dma_resv_held(obj));
> +		if (old->context != context)
> +			continue;
> +
> +		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
> +		dma_fence_put(old);

Since the fences are all guaranteed to be from the same context, maybe we
should have a WARN_ON(__dma_fence_is_later()); here just to be safe?

With that added:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +	}
> +
> +	write_seqcount_end(&obj->seq);
> +}
> +EXPORT_SYMBOL(dma_resv_replace_fences);
> +
>  /**
>   * dma_resv_add_excl_fence - Add an exclusive fence.
>   * @obj: the reservation object
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index 71acd577803e..b558ef0f8c4a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -236,53 +236,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
>  static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>  					struct amdgpu_amdkfd_fence *ef)
>  {
> -	struct dma_resv *resv = bo->tbo.base.resv;
> -	struct dma_resv_list *old, *new;
> -	unsigned int i, j, k;
> +	struct dma_fence *replacement;
>  
>  	if (!ef)
>  		return -EINVAL;
>  
> -	old = dma_resv_shared_list(resv);
> -	if (!old)
> -		return 0;
> -
> -	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
> -	if (!new)
> -		return -ENOMEM;
> -
> -	/* Go through all the shared fences in the resevation object and sort
> -	 * the interesting ones to the end of the list.
> +	/* TODO: Instead of block before we should use the fence of the page
> +	 * table update and TLB flush here directly.
>  	 */
> -	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
> -		struct dma_fence *f;
> -
> -		f = rcu_dereference_protected(old->shared[i],
> -					      dma_resv_held(resv));
> -
> -		if (f->context == ef->base.context)
> -			RCU_INIT_POINTER(new->shared[--j], f);
> -		else
> -			RCU_INIT_POINTER(new->shared[k++], f);
> -	}
> -	new->shared_max = old->shared_max;
> -	new->shared_count = k;
> -
> -	/* Install the new fence list, seqcount provides the barriers */
> -	write_seqcount_begin(&resv->seq);
> -	RCU_INIT_POINTER(resv->fence, new);
> -	write_seqcount_end(&resv->seq);
> -
> -	/* Drop the references to the removed fences or move them to ef_list */
> -	for (i = j; i < old->shared_count; ++i) {
> -		struct dma_fence *f;
> -
> -		f = rcu_dereference_protected(new->shared[i],
> -					      dma_resv_held(resv));
> -		dma_fence_put(f);
> -	}
> -	kfree_rcu(old, rcu);
> -
> +	replacement = dma_fence_get_stub();
> +	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
> +				replacement);
> +	dma_fence_put(replacement);
>  	return 0;
>  }
>  
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index eebf04325b34..e0be34265eae 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -457,6 +457,8 @@ void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>  void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> +			     struct dma_fence *fence);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>  int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
>  			unsigned *pshared_count, struct dma_fence ***pshared);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 01/24] dma-buf: add dma_resv_replace_fences
@ 2021-12-22 21:05     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:05 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:48PM +0100, Christian König wrote:
> This function allows to replace fences from the shared fence list when
> we can guarantee that the operation represented by the original fence has
> finished or no accesses to the resources protected by the dma_resv
> object any more when the new fence finishes.
> 
> Then use this function in the amdkfd code when BOs are unmapped from the
> process.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c                    | 43 ++++++++++++++++
>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
>  include/linux/dma-resv.h                      |  2 +
>  3 files changed, 52 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 4deea75c0b9c..a688dbded3d3 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -284,6 +284,49 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>  }
>  EXPORT_SYMBOL(dma_resv_add_shared_fence);
>  
> +/**
> + * dma_resv_replace_fences - replace fences in the dma_resv obj
> + * @obj: the reservation object
> + * @context: the context of the fences to replace
> + * @replacement: the new fence to use instead
> + *
> + * Replace fences with a specified context with a new fence. Only valid if the
> + * operation represented by the original fences is completed or has no longer
> + * access to the resources protected by the dma_resv object when the new fence
> + * completes.
> + */
> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> +			     struct dma_fence *replacement)
> +{
> +	struct dma_resv_list *list;
> +	struct dma_fence *old;
> +	unsigned int i;
> +
> +	dma_resv_assert_held(obj);
> +
> +	write_seqcount_begin(&obj->seq);
> +
> +	old = dma_resv_excl_fence(obj);
> +	if (old->context == context) {
> +		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
> +		dma_fence_put(old);
> +	}
> +
> +	list = dma_resv_shared_list(obj);
> +	for (i = 0; list && i < list->shared_count; ++i) {
> +		old = rcu_dereference_protected(list->shared[i],
> +						dma_resv_held(obj));
> +		if (old->context != context)
> +			continue;
> +
> +		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
> +		dma_fence_put(old);

Since the fences are all guaranteed to be from the same context, maybe we
should have a WARN_ON(__dma_fence_is_later()); here just to be safe?

With that added:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +	}
> +
> +	write_seqcount_end(&obj->seq);
> +}
> +EXPORT_SYMBOL(dma_resv_replace_fences);
> +
>  /**
>   * dma_resv_add_excl_fence - Add an exclusive fence.
>   * @obj: the reservation object
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index 71acd577803e..b558ef0f8c4a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -236,53 +236,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
>  static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>  					struct amdgpu_amdkfd_fence *ef)
>  {
> -	struct dma_resv *resv = bo->tbo.base.resv;
> -	struct dma_resv_list *old, *new;
> -	unsigned int i, j, k;
> +	struct dma_fence *replacement;
>  
>  	if (!ef)
>  		return -EINVAL;
>  
> -	old = dma_resv_shared_list(resv);
> -	if (!old)
> -		return 0;
> -
> -	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
> -	if (!new)
> -		return -ENOMEM;
> -
> -	/* Go through all the shared fences in the resevation object and sort
> -	 * the interesting ones to the end of the list.
> +	/* TODO: Instead of block before we should use the fence of the page
> +	 * table update and TLB flush here directly.
>  	 */
> -	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
> -		struct dma_fence *f;
> -
> -		f = rcu_dereference_protected(old->shared[i],
> -					      dma_resv_held(resv));
> -
> -		if (f->context == ef->base.context)
> -			RCU_INIT_POINTER(new->shared[--j], f);
> -		else
> -			RCU_INIT_POINTER(new->shared[k++], f);
> -	}
> -	new->shared_max = old->shared_max;
> -	new->shared_count = k;
> -
> -	/* Install the new fence list, seqcount provides the barriers */
> -	write_seqcount_begin(&resv->seq);
> -	RCU_INIT_POINTER(resv->fence, new);
> -	write_seqcount_end(&resv->seq);
> -
> -	/* Drop the references to the removed fences or move them to ef_list */
> -	for (i = j; i < old->shared_count; ++i) {
> -		struct dma_fence *f;
> -
> -		f = rcu_dereference_protected(new->shared[i],
> -					      dma_resv_held(resv));
> -		dma_fence_put(f);
> -	}
> -	kfree_rcu(old, rcu);
> -
> +	replacement = dma_fence_get_stub();
> +	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
> +				replacement);
> +	dma_fence_put(replacement);
>  	return 0;
>  }
>  
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index eebf04325b34..e0be34265eae 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -457,6 +457,8 @@ void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>  void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> +			     struct dma_fence *fence);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>  int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
>  			unsigned *pshared_count, struct dma_fence ***pshared);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 02/24] dma-buf: finally make the dma_resv_list private
  2021-12-07 12:33 ` [PATCH 02/24] dma-buf: finally make the dma_resv_list private Christian König
@ 2021-12-22 21:08     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:08 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:49PM +0100, Christian König wrote:
> Drivers should never touch this directly.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 26 ++++++++++++++++++++++++++
>  include/linux/dma-resv.h   | 26 +-------------------------
>  2 files changed, 27 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index a688dbded3d3..a12a3a39f280 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -56,6 +56,19 @@
>  DEFINE_WD_CLASS(reservation_ww_class);
>  EXPORT_SYMBOL(reservation_ww_class);
>  
> +/**
> + * struct dma_resv_list - a list of shared fences
> + * @rcu: for internal use
> + * @shared_count: table of shared fences
> + * @shared_max: for growing shared fence table
> + * @shared: shared fence table
> + */

Imo drop the kerneldoc here and just make these comments before the right
member if you feel like keeping them. Imo it's obvious enough what's going
on that the comments aren't necessary, and we don't kerneldoc document
internals generally at all - only interfaces relevant to drivers and
things outside of a subsystem.

> +struct dma_resv_list {
> +	struct rcu_head rcu;
> +	u32 shared_count, shared_max;
> +	struct dma_fence __rcu *shared[];
> +};
> +
>  /**
>   * dma_resv_list_alloc - allocate fence list
>   * @shared_max: number of fences we need space for
> @@ -133,6 +146,19 @@ void dma_resv_fini(struct dma_resv *obj)
>  }
>  EXPORT_SYMBOL(dma_resv_fini);
>  
> +/**
> + * dma_resv_shared_list - get the reservation object's shared fence list
> + * @obj: the reservation object
> + *
> + * Returns the shared fence list. Caller must either hold the objects
> + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> + * or one of the variants of each
> + */

Same here. With that:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
> +{
> +	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
> +}
> +
>  /**
>   * dma_resv_reserve_shared - Reserve space to add shared fences to
>   * a dma_resv.
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index e0be34265eae..3baf2a4a9a0d 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -47,18 +47,7 @@
>  
>  extern struct ww_class reservation_ww_class;
>  
> -/**
> - * struct dma_resv_list - a list of shared fences
> - * @rcu: for internal use
> - * @shared_count: table of shared fences
> - * @shared_max: for growing shared fence table
> - * @shared: shared fence table
> - */
> -struct dma_resv_list {
> -	struct rcu_head rcu;
> -	u32 shared_count, shared_max;
> -	struct dma_fence __rcu *shared[];
> -};
> +struct dma_resv_list;
>  
>  /**
>   * struct dma_resv - a reservation object manages fences for a buffer
> @@ -440,19 +429,6 @@ dma_resv_excl_fence(struct dma_resv *obj)
>  	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
>  }
>  
> -/**
> - * dma_resv_shared_list - get the reservation object's shared fence list
> - * @obj: the reservation object
> - *
> - * Returns the shared fence list. Caller must either hold the objects
> - * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> - * or one of the variants of each
> - */
> -static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
> -{
> -	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
> -}
> -
>  void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 02/24] dma-buf: finally make the dma_resv_list private
@ 2021-12-22 21:08     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:08 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:49PM +0100, Christian König wrote:
> Drivers should never touch this directly.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 26 ++++++++++++++++++++++++++
>  include/linux/dma-resv.h   | 26 +-------------------------
>  2 files changed, 27 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index a688dbded3d3..a12a3a39f280 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -56,6 +56,19 @@
>  DEFINE_WD_CLASS(reservation_ww_class);
>  EXPORT_SYMBOL(reservation_ww_class);
>  
> +/**
> + * struct dma_resv_list - a list of shared fences
> + * @rcu: for internal use
> + * @shared_count: table of shared fences
> + * @shared_max: for growing shared fence table
> + * @shared: shared fence table
> + */

Imo drop the kerneldoc here and just make these comments before the right
member if you feel like keeping them. Imo it's obvious enough what's going
on that the comments aren't necessary, and we don't kerneldoc document
internals generally at all - only interfaces relevant to drivers and
things outside of a subsystem.

> +struct dma_resv_list {
> +	struct rcu_head rcu;
> +	u32 shared_count, shared_max;
> +	struct dma_fence __rcu *shared[];
> +};
> +
>  /**
>   * dma_resv_list_alloc - allocate fence list
>   * @shared_max: number of fences we need space for
> @@ -133,6 +146,19 @@ void dma_resv_fini(struct dma_resv *obj)
>  }
>  EXPORT_SYMBOL(dma_resv_fini);
>  
> +/**
> + * dma_resv_shared_list - get the reservation object's shared fence list
> + * @obj: the reservation object
> + *
> + * Returns the shared fence list. Caller must either hold the objects
> + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> + * or one of the variants of each
> + */

Same here. With that:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
> +{
> +	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
> +}
> +
>  /**
>   * dma_resv_reserve_shared - Reserve space to add shared fences to
>   * a dma_resv.
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index e0be34265eae..3baf2a4a9a0d 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -47,18 +47,7 @@
>  
>  extern struct ww_class reservation_ww_class;
>  
> -/**
> - * struct dma_resv_list - a list of shared fences
> - * @rcu: for internal use
> - * @shared_count: table of shared fences
> - * @shared_max: for growing shared fence table
> - * @shared: shared fence table
> - */
> -struct dma_resv_list {
> -	struct rcu_head rcu;
> -	u32 shared_count, shared_max;
> -	struct dma_fence __rcu *shared[];
> -};
> +struct dma_resv_list;
>  
>  /**
>   * struct dma_resv - a reservation object manages fences for a buffer
> @@ -440,19 +429,6 @@ dma_resv_excl_fence(struct dma_resv *obj)
>  	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
>  }
>  
> -/**
> - * dma_resv_shared_list - get the reservation object's shared fence list
> - * @obj: the reservation object
> - *
> - * Returns the shared fence list. Caller must either hold the objects
> - * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> - * or one of the variants of each
> - */
> -static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
> -{
> -	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
> -}
> -
>  void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 03/24] dma-buf: drop excl_fence parameter from dma_resv_get_fences
  2021-12-07 12:33 ` [PATCH 03/24] dma-buf: drop excl_fence parameter from dma_resv_get_fences Christian König
@ 2021-12-22 21:13     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:13 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:50PM +0100, Christian König wrote:
> Returning the exclusive fence separately is no longer used.
> 
> Instead add a write parameter to indicate the use case.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c                   | 48 ++++++++------------
>  drivers/dma-buf/st-dma-resv.c                | 26 ++---------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  |  6 ++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c      |  2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  3 +-
>  include/linux/dma-resv.h                     |  4 +-
>  6 files changed, 31 insertions(+), 58 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index a12a3a39f280..480c305554a1 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -611,57 +611,45 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>   * dma_resv_get_fences - Get an object's shared and exclusive
>   * fences without update side lock held
>   * @obj: the reservation object
> - * @fence_excl: the returned exclusive fence (or NULL)
> - * @shared_count: the number of shared fences returned
> - * @shared: the array of shared fence ptrs returned (array is krealloc'd to
> - * the required size, and must be freed by caller)
> - *
> - * Retrieve all fences from the reservation object. If the pointer for the
> - * exclusive fence is not specified the fence is put into the array of the
> - * shared fences as well. Returns either zero or -ENOMEM.
> + * @write: true if we should return all fences

I'm assuming that this will be properly documented later on in the series
...

> + * @num_fences: the number of fences returned
> + * @fences: the array of fence ptrs returned (array is krealloc'd to the
> + * required size, and must be freed by caller)
> + *
> + * Retrieve all fences from the reservation object.
> + * Returns either zero or -ENOMEM.
>   */
> -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
> -			unsigned int *shared_count, struct dma_fence ***shared)
> +int dma_resv_get_fences(struct dma_resv *obj, bool write,
> +			unsigned int *num_fences, struct dma_fence ***fences)
>  {
>  	struct dma_resv_iter cursor;
>  	struct dma_fence *fence;
>  
> -	*shared_count = 0;
> -	*shared = NULL;
> -
> -	if (fence_excl)
> -		*fence_excl = NULL;
> +	*num_fences = 0;
> +	*fences = NULL;
>  
> -	dma_resv_iter_begin(&cursor, obj, true);
> +	dma_resv_iter_begin(&cursor, obj, write);
>  	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>  
>  		if (dma_resv_iter_is_restarted(&cursor)) {
>  			unsigned int count;
>  
> -			while (*shared_count)
> -				dma_fence_put((*shared)[--(*shared_count)]);
> +			while (*num_fences)
> +				dma_fence_put((*fences)[--(*num_fences)]);
>  
> -			if (fence_excl)
> -				dma_fence_put(*fence_excl);
> -
> -			count = cursor.shared_count;
> -			count += fence_excl ? 0 : 1;
> +			count = cursor.shared_count + 1;
>  
>  			/* Eventually re-allocate the array */
> -			*shared = krealloc_array(*shared, count,
> +			*fences = krealloc_array(*fences, count,
>  						 sizeof(void *),
>  						 GFP_KERNEL);
> -			if (count && !*shared) {
> +			if (count && !*fences) {
>  				dma_resv_iter_end(&cursor);
>  				return -ENOMEM;
>  			}
>  		}
>  
> -		dma_fence_get(fence);
> -		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
> -			*fence_excl = fence;
> -		else
> -			(*shared)[(*shared_count)++] = fence;
> +		(*fences)[(*num_fences)++] = dma_fence_get(fence);
>  	}
>  	dma_resv_iter_end(&cursor);
>  
> diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
> index bc32b3eedcb6..cbe999c6e7a6 100644
> --- a/drivers/dma-buf/st-dma-resv.c
> +++ b/drivers/dma-buf/st-dma-resv.c
> @@ -275,7 +275,7 @@ static int test_shared_for_each_unlocked(void *arg)
>  
>  static int test_get_fences(void *arg, bool shared)
>  {
> -	struct dma_fence *f, *excl = NULL, **fences = NULL;
> +	struct dma_fence *f, **fences = NULL;
>  	struct dma_resv resv;
>  	int r, i;
>  
> @@ -304,35 +304,19 @@ static int test_get_fences(void *arg, bool shared)
>  	}
>  	dma_resv_unlock(&resv);
>  
> -	r = dma_resv_get_fences(&resv, &excl, &i, &fences);
> +	r = dma_resv_get_fences(&resv, shared, &i, &fences);
>  	if (r) {
>  		pr_err("get_fences failed\n");
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		if (excl != NULL) {
> -			pr_err("get_fences returned unexpected excl fence\n");
> -			goto err_free;
> -		}
> -		if (i != 1 || fences[0] != f) {
> -			pr_err("get_fences returned unexpected shared fence\n");
> -			goto err_free;
> -		}
> -	} else {
> -		if (excl != f) {
> -			pr_err("get_fences returned unexpected excl fence\n");
> -			goto err_free;
> -		}
> -		if (i != 0) {
> -			pr_err("get_fences returned unexpected shared fence\n");
> -			goto err_free;
> -		}
> +	if (i != 1 || fences[0] != f) {
> +		pr_err("get_fences returned unexpected fence\n");
> +		goto err_free;
>  	}
>  
>  	dma_fence_signal(f);
>  err_free:
> -	dma_fence_put(excl);
>  	while (i--)
>  		dma_fence_put(fences[i]);
>  	kfree(fences);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> index 68108f151dad..d17e1c911689 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> @@ -200,8 +200,10 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
>  		goto unpin;
>  	}
>  
> -	r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
> -				&work->shared_count, &work->shared);
> +	/* TODO: Unify this with other drivers */
> +	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
> +				&work->shared_count,
> +				&work->shared);
>  	if (unlikely(r != 0)) {
>  		DRM_ERROR("failed to get fences for buffer\n");
>  		goto unpin;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index b7fb72bff2c1..be48487e2ca7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>  	unsigned count;
>  	int r;
>  
> -	r = dma_resv_get_fences(resv, NULL, &count, &fences);
> +	r = dma_resv_get_fences(resv, true, &count, &fences);
>  	if (r)
>  		goto fallback;
>  
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index b5e8ce86dbe7..64c90ff348f2 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -189,8 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  			continue;
>  
>  		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
> -			ret = dma_resv_get_fences(robj, NULL,
> -						  &bo->nr_shared,
> +			ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
>  						  &bo->shared);
>  			if (ret)
>  				return ret;
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 3baf2a4a9a0d..fa2002939b19 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -436,8 +436,8 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
>  void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>  			     struct dma_fence *fence);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> -			unsigned *pshared_count, struct dma_fence ***pshared);
> +int dma_resv_get_fences(struct dma_resv *obj, bool write,
> +			unsigned int *num_fences, struct dma_fence ***fences);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>  long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>  			   unsigned long timeout);
> -- 
> 2.25.1
> 


Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 03/24] dma-buf: drop excl_fence parameter from dma_resv_get_fences
@ 2021-12-22 21:13     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:13 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:50PM +0100, Christian König wrote:
> Returning the exclusive fence separately is no longer used.
> 
> Instead add a write parameter to indicate the use case.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c                   | 48 ++++++++------------
>  drivers/dma-buf/st-dma-resv.c                | 26 ++---------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  |  6 ++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c      |  2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  3 +-
>  include/linux/dma-resv.h                     |  4 +-
>  6 files changed, 31 insertions(+), 58 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index a12a3a39f280..480c305554a1 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -611,57 +611,45 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>   * dma_resv_get_fences - Get an object's shared and exclusive
>   * fences without update side lock held
>   * @obj: the reservation object
> - * @fence_excl: the returned exclusive fence (or NULL)
> - * @shared_count: the number of shared fences returned
> - * @shared: the array of shared fence ptrs returned (array is krealloc'd to
> - * the required size, and must be freed by caller)
> - *
> - * Retrieve all fences from the reservation object. If the pointer for the
> - * exclusive fence is not specified the fence is put into the array of the
> - * shared fences as well. Returns either zero or -ENOMEM.
> + * @write: true if we should return all fences

I'm assuming that this will be properly documented later on in the series
...

> + * @num_fences: the number of fences returned
> + * @fences: the array of fence ptrs returned (array is krealloc'd to the
> + * required size, and must be freed by caller)
> + *
> + * Retrieve all fences from the reservation object.
> + * Returns either zero or -ENOMEM.
>   */
> -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
> -			unsigned int *shared_count, struct dma_fence ***shared)
> +int dma_resv_get_fences(struct dma_resv *obj, bool write,
> +			unsigned int *num_fences, struct dma_fence ***fences)
>  {
>  	struct dma_resv_iter cursor;
>  	struct dma_fence *fence;
>  
> -	*shared_count = 0;
> -	*shared = NULL;
> -
> -	if (fence_excl)
> -		*fence_excl = NULL;
> +	*num_fences = 0;
> +	*fences = NULL;
>  
> -	dma_resv_iter_begin(&cursor, obj, true);
> +	dma_resv_iter_begin(&cursor, obj, write);
>  	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>  
>  		if (dma_resv_iter_is_restarted(&cursor)) {
>  			unsigned int count;
>  
> -			while (*shared_count)
> -				dma_fence_put((*shared)[--(*shared_count)]);
> +			while (*num_fences)
> +				dma_fence_put((*fences)[--(*num_fences)]);
>  
> -			if (fence_excl)
> -				dma_fence_put(*fence_excl);
> -
> -			count = cursor.shared_count;
> -			count += fence_excl ? 0 : 1;
> +			count = cursor.shared_count + 1;
>  
>  			/* Eventually re-allocate the array */
> -			*shared = krealloc_array(*shared, count,
> +			*fences = krealloc_array(*fences, count,
>  						 sizeof(void *),
>  						 GFP_KERNEL);
> -			if (count && !*shared) {
> +			if (count && !*fences) {
>  				dma_resv_iter_end(&cursor);
>  				return -ENOMEM;
>  			}
>  		}
>  
> -		dma_fence_get(fence);
> -		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
> -			*fence_excl = fence;
> -		else
> -			(*shared)[(*shared_count)++] = fence;
> +		(*fences)[(*num_fences)++] = dma_fence_get(fence);
>  	}
>  	dma_resv_iter_end(&cursor);
>  
> diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
> index bc32b3eedcb6..cbe999c6e7a6 100644
> --- a/drivers/dma-buf/st-dma-resv.c
> +++ b/drivers/dma-buf/st-dma-resv.c
> @@ -275,7 +275,7 @@ static int test_shared_for_each_unlocked(void *arg)
>  
>  static int test_get_fences(void *arg, bool shared)
>  {
> -	struct dma_fence *f, *excl = NULL, **fences = NULL;
> +	struct dma_fence *f, **fences = NULL;
>  	struct dma_resv resv;
>  	int r, i;
>  
> @@ -304,35 +304,19 @@ static int test_get_fences(void *arg, bool shared)
>  	}
>  	dma_resv_unlock(&resv);
>  
> -	r = dma_resv_get_fences(&resv, &excl, &i, &fences);
> +	r = dma_resv_get_fences(&resv, shared, &i, &fences);
>  	if (r) {
>  		pr_err("get_fences failed\n");
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		if (excl != NULL) {
> -			pr_err("get_fences returned unexpected excl fence\n");
> -			goto err_free;
> -		}
> -		if (i != 1 || fences[0] != f) {
> -			pr_err("get_fences returned unexpected shared fence\n");
> -			goto err_free;
> -		}
> -	} else {
> -		if (excl != f) {
> -			pr_err("get_fences returned unexpected excl fence\n");
> -			goto err_free;
> -		}
> -		if (i != 0) {
> -			pr_err("get_fences returned unexpected shared fence\n");
> -			goto err_free;
> -		}
> +	if (i != 1 || fences[0] != f) {
> +		pr_err("get_fences returned unexpected fence\n");
> +		goto err_free;
>  	}
>  
>  	dma_fence_signal(f);
>  err_free:
> -	dma_fence_put(excl);
>  	while (i--)
>  		dma_fence_put(fences[i]);
>  	kfree(fences);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> index 68108f151dad..d17e1c911689 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
> @@ -200,8 +200,10 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
>  		goto unpin;
>  	}
>  
> -	r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
> -				&work->shared_count, &work->shared);
> +	/* TODO: Unify this with other drivers */
> +	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
> +				&work->shared_count,
> +				&work->shared);
>  	if (unlikely(r != 0)) {
>  		DRM_ERROR("failed to get fences for buffer\n");
>  		goto unpin;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index b7fb72bff2c1..be48487e2ca7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>  	unsigned count;
>  	int r;
>  
> -	r = dma_resv_get_fences(resv, NULL, &count, &fences);
> +	r = dma_resv_get_fences(resv, true, &count, &fences);
>  	if (r)
>  		goto fallback;
>  
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index b5e8ce86dbe7..64c90ff348f2 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -189,8 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  			continue;
>  
>  		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
> -			ret = dma_resv_get_fences(robj, NULL,
> -						  &bo->nr_shared,
> +			ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
>  						  &bo->shared);
>  			if (ret)
>  				return ret;
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 3baf2a4a9a0d..fa2002939b19 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -436,8 +436,8 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
>  void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>  			     struct dma_fence *fence);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> -			unsigned *pshared_count, struct dma_fence ***pshared);
> +int dma_resv_get_fences(struct dma_resv *obj, bool write,
> +			unsigned int *num_fences, struct dma_fence ***fences);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>  long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>  			   unsigned long timeout);
> -- 
> 2.25.1
> 


Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
  2021-12-07 12:33 ` [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2 Christian König
@ 2021-12-22 21:21     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:21 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
> Add a function to simplify getting a single fence for all the fences in
> the dma_resv object.
> 
> v2: fix ref leak in error handling
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
>  include/linux/dma-resv.h   |  2 ++
>  2 files changed, 54 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 480c305554a1..694716a3d66d 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -34,6 +34,7 @@
>   */
>  
>  #include <linux/dma-resv.h>
> +#include <linux/dma-fence-array.h>
>  #include <linux/export.h>
>  #include <linux/mm.h>
>  #include <linux/sched/mm.h>
> @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
>  }
>  EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>  
> +/**
> + * dma_resv_get_singleton - Get a single fence for all the fences
> + * @obj: the reservation object
> + * @write: true if we should return all fences
> + * @fence: the resulting fence
> + *
> + * Get a single fence representing all the fences inside the resv object.
> + * Returns either 0 for success or -ENOMEM.
> + *
> + * Warning: This can't be used like this when adding the fence back to the resv
> + * object since that can lead to stack corruption when finalizing the
> + * dma_fence_array.

Uh I don't get this one? I thought the only problem with nested fences is
the signalling recursion, which we work around with the irq_work?

Also if there's really an issue with dma_fence_array fences, then that
warning should be on the dma_resv kerneldoc, not somewhere hidden like
this. And finally I really don't see what can go wrong, sure we'll end up
with the same fence once in the dma_resv_list and then once more in the
fence array. But they're all refcounted, so really shouldn't matter.

The code itself looks correct, but me not understanding what even goes
wrong here freaks me out a bit.

I guess something to figure out next year, I kinda hoped I could squeeze a
review in before I disappear :-/
-Daniel

> + */
> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> +			   struct dma_fence **fence)
> +{
> +	struct dma_fence_array *array;
> +	struct dma_fence **fences;
> +	unsigned count;
> +	int r;
> +
> +	r = dma_resv_get_fences(obj, write, &count, &fences);
> +        if (r)
> +		return r;
> +
> +	if (count == 0) {
> +		*fence = NULL;
> +		return 0;
> +	}
> +
> +	if (count == 1) {
> +		*fence = fences[0];
> +		kfree(fences);
> +		return 0;
> +	}
> +
> +	array = dma_fence_array_create(count, fences,
> +				       dma_fence_context_alloc(1),
> +				       1, false);
> +	if (!array) {
> +		while (count--)
> +			dma_fence_put(fences[count]);
> +		kfree(fences);
> +		return -ENOMEM;
> +	}
> +
> +	*fence = &array->base;
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
> +
>  /**
>   * dma_resv_wait_timeout - Wait on reservation's objects
>   * shared and/or exclusive fences.
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index fa2002939b19..cdfbbda6f600 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>  int dma_resv_get_fences(struct dma_resv *obj, bool write,
>  			unsigned int *num_fences, struct dma_fence ***fences);
> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> +			   struct dma_fence **fence);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>  long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>  			   unsigned long timeout);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
@ 2021-12-22 21:21     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:21 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
> Add a function to simplify getting a single fence for all the fences in
> the dma_resv object.
> 
> v2: fix ref leak in error handling
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
>  include/linux/dma-resv.h   |  2 ++
>  2 files changed, 54 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 480c305554a1..694716a3d66d 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -34,6 +34,7 @@
>   */
>  
>  #include <linux/dma-resv.h>
> +#include <linux/dma-fence-array.h>
>  #include <linux/export.h>
>  #include <linux/mm.h>
>  #include <linux/sched/mm.h>
> @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
>  }
>  EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>  
> +/**
> + * dma_resv_get_singleton - Get a single fence for all the fences
> + * @obj: the reservation object
> + * @write: true if we should return all fences
> + * @fence: the resulting fence
> + *
> + * Get a single fence representing all the fences inside the resv object.
> + * Returns either 0 for success or -ENOMEM.
> + *
> + * Warning: This can't be used like this when adding the fence back to the resv
> + * object since that can lead to stack corruption when finalizing the
> + * dma_fence_array.

Uh I don't get this one? I thought the only problem with nested fences is
the signalling recursion, which we work around with the irq_work?

Also if there's really an issue with dma_fence_array fences, then that
warning should be on the dma_resv kerneldoc, not somewhere hidden like
this. And finally I really don't see what can go wrong, sure we'll end up
with the same fence once in the dma_resv_list and then once more in the
fence array. But they're all refcounted, so really shouldn't matter.

The code itself looks correct, but me not understanding what even goes
wrong here freaks me out a bit.

I guess something to figure out next year, I kinda hoped I could squeeze a
review in before I disappear :-/
-Daniel

> + */
> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> +			   struct dma_fence **fence)
> +{
> +	struct dma_fence_array *array;
> +	struct dma_fence **fences;
> +	unsigned count;
> +	int r;
> +
> +	r = dma_resv_get_fences(obj, write, &count, &fences);
> +        if (r)
> +		return r;
> +
> +	if (count == 0) {
> +		*fence = NULL;
> +		return 0;
> +	}
> +
> +	if (count == 1) {
> +		*fence = fences[0];
> +		kfree(fences);
> +		return 0;
> +	}
> +
> +	array = dma_fence_array_create(count, fences,
> +				       dma_fence_context_alloc(1),
> +				       1, false);
> +	if (!array) {
> +		while (count--)
> +			dma_fence_put(fences[count]);
> +		kfree(fences);
> +		return -ENOMEM;
> +	}
> +
> +	*fence = &array->base;
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
> +
>  /**
>   * dma_resv_wait_timeout - Wait on reservation's objects
>   * shared and/or exclusive fences.
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index fa2002939b19..cdfbbda6f600 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>  int dma_resv_get_fences(struct dma_resv *obj, bool write,
>  			unsigned int *num_fences, struct dma_fence ***fences);
> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> +			   struct dma_fence **fence);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>  long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>  			   unsigned long timeout);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 05/24] RDMA: use dma_resv_wait() instead of extracting the fence
  2021-12-07 12:33 ` [PATCH 05/24] RDMA: use dma_resv_wait() instead of extracting the fence Christian König
@ 2021-12-22 21:23     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:23 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:52PM +0100, Christian König wrote:
> Use dma_resv_wait() instead of extracting the exclusive fence and
> waiting on it manually.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

No rdma lists nor maintainers on cc, so no chance to get the ack you need
to merge this through drm-misc-next.


> ---
>  drivers/infiniband/core/umem_dmabuf.c | 8 ++------
>  1 file changed, 2 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
> index f0760741f281..d32cd7538835 100644
> --- a/drivers/infiniband/core/umem_dmabuf.c
> +++ b/drivers/infiniband/core/umem_dmabuf.c
> @@ -16,7 +16,6 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
>  {
>  	struct sg_table *sgt;
>  	struct scatterlist *sg;
> -	struct dma_fence *fence;
>  	unsigned long start, end, cur = 0;
>  	unsigned int nmap = 0;
>  	int i;
> @@ -68,11 +67,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
>  	 * may be not up-to-date. Wait for the exporter to finish
>  	 * the migration.
>  	 */
> -	fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);
> -	if (fence)
> -		return dma_fence_wait(fence, false);
> -
> -	return 0;
> +	return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false,
> +				     false, MAX_SCHEDULE_TIMEOUT);

I think a wrapper for dma_resv_wait() without a timeout, which we currently
lack, would be neat. Either way:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  }
>  EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 05/24] RDMA: use dma_resv_wait() instead of extracting the fence
@ 2021-12-22 21:23     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:23 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:52PM +0100, Christian König wrote:
> Use dma_resv_wait() instead of extracting the exclusive fence and
> waiting on it manually.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

No rdma lists nor maintainers on cc, so no chance to get the ack you need
to merge this through drm-misc-next.


> ---
>  drivers/infiniband/core/umem_dmabuf.c | 8 ++------
>  1 file changed, 2 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
> index f0760741f281..d32cd7538835 100644
> --- a/drivers/infiniband/core/umem_dmabuf.c
> +++ b/drivers/infiniband/core/umem_dmabuf.c
> @@ -16,7 +16,6 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
>  {
>  	struct sg_table *sgt;
>  	struct scatterlist *sg;
> -	struct dma_fence *fence;
>  	unsigned long start, end, cur = 0;
>  	unsigned int nmap = 0;
>  	int i;
> @@ -68,11 +67,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
>  	 * may be not up-to-date. Wait for the exporter to finish
>  	 * the migration.
>  	 */
> -	fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);
> -	if (fence)
> -		return dma_fence_wait(fence, false);
> -
> -	return 0;
> +	return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false,
> +				     false, MAX_SCHEDULE_TIMEOUT);

I think a wrapper for dma_resv_wait() without a timeout, which we currently
lack, would be neat. Either way:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  }
>  EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 06/24] drm/etnaviv: stop using dma_resv_excl_fence
  2021-12-07 12:33 ` [PATCH 06/24] drm/etnaviv: stop using dma_resv_excl_fence Christian König
@ 2021-12-22 21:26     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:26 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:53PM +0100, Christian König wrote:
> We can get the excl fence together with the shared ones as well.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Pls cc driver maintainers.

dim add-missing-cc

is your friend if you're lazy; you can even combine that with git rebase -x.
Same for all the other driver patches — some acks/testing would be good to
avoid fallout (we had a bit too much of that with all these, I think).

> ---
>  drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  1 -
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 14 +++++---------
>  drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 10 ----------
>  3 files changed, 5 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> index 98e60df882b6..f596d743baa3 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> @@ -80,7 +80,6 @@ struct etnaviv_gem_submit_bo {
>  	u64 va;
>  	struct etnaviv_gem_object *obj;
>  	struct etnaviv_vram_mapping *mapping;
> -	struct dma_fence *excl;
>  	unsigned int nr_shared;
>  	struct dma_fence **shared;
>  };
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index 64c90ff348f2..4286dc93fdaa 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -188,15 +188,11 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
>  			continue;
>  
> -		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
> -			ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
> -						  &bo->shared);
> -			if (ret)
> -				return ret;
> -		} else {
> -			bo->excl = dma_fence_get(dma_resv_excl_fence(robj));
> -		}
> -
> +		ret = dma_resv_get_fences(robj,
> +					  !!(bo->flags & ETNA_SUBMIT_BO_WRITE),

Afaik the cast to bool !! here is overkill, compiler will do that for you
or something like that. With that dropped:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>


> +					  &bo->nr_shared, &bo->shared);
> +		if (ret)
> +			return ret;
>  	}
>  
>  	return ret;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index 180bb633d5c5..8c038a363d15 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -39,16 +39,6 @@ etnaviv_sched_dependency(struct drm_sched_job *sched_job,
>  		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
>  		int j;
>  
> -		if (bo->excl) {
> -			fence = bo->excl;
> -			bo->excl = NULL;
> -
> -			if (!dma_fence_is_signaled(fence))
> -				return fence;
> -
> -			dma_fence_put(fence);
> -		}
> -
>  		for (j = 0; j < bo->nr_shared; j++) {
>  			if (!bo->shared[j])
>  				continue;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 06/24] drm/etnaviv: stop using dma_resv_excl_fence
@ 2021-12-22 21:26     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:26 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:53PM +0100, Christian König wrote:
> We can get the excl fence together with the shared ones as well.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Pls cc driver maintainers.

dim add-missing-cc

is your friend if you're lazy; you can even combine that with git rebase -x.
Same for all the other driver patches — some acks/testing would be good to
avoid fallout (we had a bit too much of that with all these, I think).

> ---
>  drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  1 -
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 14 +++++---------
>  drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 10 ----------
>  3 files changed, 5 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> index 98e60df882b6..f596d743baa3 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> @@ -80,7 +80,6 @@ struct etnaviv_gem_submit_bo {
>  	u64 va;
>  	struct etnaviv_gem_object *obj;
>  	struct etnaviv_vram_mapping *mapping;
> -	struct dma_fence *excl;
>  	unsigned int nr_shared;
>  	struct dma_fence **shared;
>  };
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index 64c90ff348f2..4286dc93fdaa 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -188,15 +188,11 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
>  			continue;
>  
> -		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
> -			ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
> -						  &bo->shared);
> -			if (ret)
> -				return ret;
> -		} else {
> -			bo->excl = dma_fence_get(dma_resv_excl_fence(robj));
> -		}
> -
> +		ret = dma_resv_get_fences(robj,
> +					  !!(bo->flags & ETNA_SUBMIT_BO_WRITE),

Afaik the cast to bool !! here is overkill, compiler will do that for you
or something like that. With that dropped:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>


> +					  &bo->nr_shared, &bo->shared);
> +		if (ret)
> +			return ret;
>  	}
>  
>  	return ret;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index 180bb633d5c5..8c038a363d15 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -39,16 +39,6 @@ etnaviv_sched_dependency(struct drm_sched_job *sched_job,
>  		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
>  		int j;
>  
> -		if (bo->excl) {
> -			fence = bo->excl;
> -			bo->excl = NULL;
> -
> -			if (!dma_fence_is_signaled(fence))
> -				return fence;
> -
> -			dma_fence_put(fence);
> -		}
> -
>  		for (j = 0; j < bo->nr_shared; j++) {
>  			if (!bo->shared[j])
>  				continue;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 07/24] drm/nouveau: stop using dma_resv_excl_fence
  2021-12-07 12:33 ` [PATCH 07/24] drm/nouveau: " Christian König
@ 2021-12-22 21:26     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:26 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:54PM +0100, Christian König wrote:
> Instead use the new dma_resv_get_singleton function.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/nouveau/nouveau_bo.c | 9 ++++++++-
>  1 file changed, 8 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> index fa73fe57f97b..74f8652d2bd3 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> @@ -959,7 +959,14 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
>  {
>  	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
>  	struct drm_device *dev = drm->dev;
> -	struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
> +	struct dma_fence *fence;
> +	int ret;
> +
> +	/* TODO: This is actually a memory management dependency */
> +	ret = dma_resv_get_singleton(bo->base.resv, false, &fence);
> +	if (ret)
> +		dma_resv_wait_timeout(bo->base.resv, false, false,
> +				      MAX_SCHEDULE_TIMEOUT);

Needs ack from nouveau folks.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  
>  	nv10_bo_put_tile_region(dev, *old_tile, fence);
>  	*old_tile = new_tile;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 07/24] drm/nouveau: stop using dma_resv_excl_fence
@ 2021-12-22 21:26     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:26 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:54PM +0100, Christian König wrote:
> Instead use the new dma_resv_get_singleton function.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/nouveau/nouveau_bo.c | 9 ++++++++-
>  1 file changed, 8 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> index fa73fe57f97b..74f8652d2bd3 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> @@ -959,7 +959,14 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
>  {
>  	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
>  	struct drm_device *dev = drm->dev;
> -	struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
> +	struct dma_fence *fence;
> +	int ret;
> +
> +	/* TODO: This is actually a memory management dependency */
> +	ret = dma_resv_get_singleton(bo->base.resv, false, &fence);
> +	if (ret)
> +		dma_resv_wait_timeout(bo->base.resv, false, false,
> +				      MAX_SCHEDULE_TIMEOUT);

Needs ack from nouveau folks.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  
>  	nv10_bo_put_tile_region(dev, *old_tile, fence);
>  	*old_tile = new_tile;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 09/24] drm/radeon: stop using dma_resv_excl_fence
  2021-12-07 12:33 ` [PATCH 09/24] drm/radeon: " Christian König
@ 2021-12-22 21:30     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:30 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:56PM +0100, Christian König wrote:
> Instead use the new dma_resv_get_singleton function.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/radeon/radeon_display.c | 7 ++++++-
>  1 file changed, 6 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
> index 573154268d43..a6f875118f01 100644
> --- a/drivers/gpu/drm/radeon/radeon_display.c
> +++ b/drivers/gpu/drm/radeon/radeon_display.c
> @@ -533,7 +533,12 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
>  		DRM_ERROR("failed to pin new rbo buffer before flip\n");
>  		goto cleanup;
>  	}
> -	work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
> +	r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence);
> +	if (r) {
> +		radeon_bo_unreserve(new_rbo);
> +		DRM_ERROR("failed to get new rbo buffer fences\n");
> +		goto cleanup;
> +	}

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
>  	radeon_bo_unreserve(new_rbo);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 09/24] drm/radeon: stop using dma_resv_excl_fence
@ 2021-12-22 21:30     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:30 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:56PM +0100, Christian König wrote:
> Instead use the new dma_resv_get_singleton function.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/radeon/radeon_display.c | 7 ++++++-
>  1 file changed, 6 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
> index 573154268d43..a6f875118f01 100644
> --- a/drivers/gpu/drm/radeon/radeon_display.c
> +++ b/drivers/gpu/drm/radeon/radeon_display.c
> @@ -533,7 +533,12 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
>  		DRM_ERROR("failed to pin new rbo buffer before flip\n");
>  		goto cleanup;
>  	}
> -	work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
> +	r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence);
> +	if (r) {
> +		radeon_bo_unreserve(new_rbo);
> +		DRM_ERROR("failed to get new rbo buffer fences\n");
> +		goto cleanup;
> +	}

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
>  	radeon_bo_unreserve(new_rbo);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 08/24] drm/vmwgfx: stop using dma_resv_excl_fence
  2021-12-07 12:33 ` [PATCH 08/24] drm/vmwgfx: " Christian König
@ 2021-12-22 21:31     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:31 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:55PM +0100, Christian König wrote:
> Instead use the new dma_resv_get_singleton function.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6 ++++--
>  1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index 8d1e869cc196..23c3fc2cbf10 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -1168,8 +1168,10 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
>  		vmw_bo_fence_single(bo, NULL);
>  		if (bo->moving)
>  			dma_fence_put(bo->moving);
> -		bo->moving = dma_fence_get
> -			(dma_resv_excl_fence(bo->base.resv));
> +
> +		/* TODO: This is actually a memory management dependency */
> +		return dma_resv_get_singleton(bo->base.resv, false,
> +					      &bo->moving);

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  	}
>  
>  	return 0;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 08/24] drm/vmwgfx: stop using dma_resv_excl_fence
@ 2021-12-22 21:31     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:31 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:55PM +0100, Christian König wrote:
> Instead use the new dma_resv_get_singleton function.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6 ++++--
>  1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index 8d1e869cc196..23c3fc2cbf10 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -1168,8 +1168,10 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
>  		vmw_bo_fence_single(bo, NULL);
>  		if (bo->moving)
>  			dma_fence_put(bo->moving);
> -		bo->moving = dma_fence_get
> -			(dma_resv_excl_fence(bo->base.resv));
> +
> +		/* TODO: This is actually a memory management dependency */
> +		return dma_resv_get_singleton(bo->base.resv, false,
> +					      &bo->moving);

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  	}
>  
>  	return 0;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 10/24] drm/amdgpu: remove excl as shared workarounds
  2021-12-07 12:33 ` [PATCH 10/24] drm/amdgpu: remove excl as shared workarounds Christian König
@ 2021-12-22 21:34     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:34 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:57PM +0100, Christian König wrote:
> This was added because of the now dropped shared on excl dependency.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

I didn't do a full re-audit of whether you got them all; I think, at the
latest with the semantic change to allow more kinds of fence types with
dma-resv, we should catch them all.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 5 +----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 ------
>  2 files changed, 1 insertion(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 0311d799a010..53e407ea4c89 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1275,14 +1275,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>  		/*
>  		 * Work around dma_resv shortcommings by wrapping up the
>  		 * submission in a dma_fence_chain and add it as exclusive
> -		 * fence, but first add the submission as shared fence to make
> -		 * sure that shared fences never signal before the exclusive
> -		 * one.
> +		 * fence.
>  		 */
>  		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
>  				     dma_fence_get(p->fence), 1);
>  
> -		dma_resv_add_shared_fence(resv, p->fence);
>  		rcu_assign_pointer(resv->fence_excl, &chain->base);
>  		e->chain = NULL;
>  	}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index a1e63ba4c54a..85d31d85c384 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -226,12 +226,6 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
>  	if (!amdgpu_vm_ready(vm))
>  		goto out_unlock;
>  
> -	fence = dma_resv_excl_fence(bo->tbo.base.resv);
> -	if (fence) {
> -		amdgpu_bo_fence(bo, fence, true);
> -		fence = NULL;
> -	}
> -
>  	r = amdgpu_vm_clear_freed(adev, vm, &fence);
>  	if (r || !fence)
>  		goto out_unlock;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 10/24] drm/amdgpu: remove excl as shared workarounds
@ 2021-12-22 21:34     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:34 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:57PM +0100, Christian König wrote:
> This was added because of the now dropped shared on excl dependency.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

I didn't do a full re-audit of whether you got them all; I think, at the
latest with the semantic change to allow more kinds of fence types with
dma-resv, we should catch them all.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 5 +----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 ------
>  2 files changed, 1 insertion(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 0311d799a010..53e407ea4c89 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1275,14 +1275,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>  		/*
>  		 * Work around dma_resv shortcommings by wrapping up the
>  		 * submission in a dma_fence_chain and add it as exclusive
> -		 * fence, but first add the submission as shared fence to make
> -		 * sure that shared fences never signal before the exclusive
> -		 * one.
> +		 * fence.
>  		 */
>  		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
>  				     dma_fence_get(p->fence), 1);
>  
> -		dma_resv_add_shared_fence(resv, p->fence);
>  		rcu_assign_pointer(resv->fence_excl, &chain->base);
>  		e->chain = NULL;
>  	}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index a1e63ba4c54a..85d31d85c384 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -226,12 +226,6 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
>  	if (!amdgpu_vm_ready(vm))
>  		goto out_unlock;
>  
> -	fence = dma_resv_excl_fence(bo->tbo.base.resv);
> -	if (fence) {
> -		amdgpu_bo_fence(bo, fence, true);
> -		fence = NULL;
> -	}
> -
>  	r = amdgpu_vm_clear_freed(adev, vm, &fence);
>  	if (r || !fence)
>  		goto out_unlock;
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround
  2021-12-07 12:33 ` [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround Christian König
@ 2021-12-22 21:37     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:37 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:58PM +0100, Christian König wrote:
> Get the write fence using dma_resv_for_each_fence instead of accessing
> it manually.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 ++++++---
>  1 file changed, 6 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 53e407ea4c89..7facd614e50a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1268,6 +1268,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>  	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
>  		struct dma_resv *resv = e->tv.bo->base.resv;
>  		struct dma_fence_chain *chain = e->chain;
> +		struct dma_resv_iter cursor;
> +		struct dma_fence *fence;
>  
>  		if (!chain)
>  			continue;
> @@ -1277,9 +1279,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>  		 * submission in a dma_fence_chain and add it as exclusive
>  		 * fence.
>  		 */
> -		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
> -				     dma_fence_get(p->fence), 1);
> -
> +		dma_resv_for_each_fence(&cursor, resv, false, fence) {
> +			break;
> +		}
> +		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);

Uh, this needs a TODO. I'm assuming you'll fix this up later on when
there's more than one write fence, but in case of a bisect or whatever
this is a bit too clever. You just replace one "dig around in dma-resv
implementation details" with one that's not even a documented interface
:-)

With an adequately loud comment added interim:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  		rcu_assign_pointer(resv->fence_excl, &chain->base);
>  		e->chain = NULL;
>  	}
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround
@ 2021-12-22 21:37     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:37 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:58PM +0100, Christian König wrote:
> Get the write fence using dma_resv_for_each_fence instead of accessing
> it manually.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 ++++++---
>  1 file changed, 6 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 53e407ea4c89..7facd614e50a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1268,6 +1268,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>  	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
>  		struct dma_resv *resv = e->tv.bo->base.resv;
>  		struct dma_fence_chain *chain = e->chain;
> +		struct dma_resv_iter cursor;
> +		struct dma_fence *fence;
>  
>  		if (!chain)
>  			continue;
> @@ -1277,9 +1279,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>  		 * submission in a dma_fence_chain and add it as exclusive
>  		 * fence.
>  		 */
> -		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
> -				     dma_fence_get(p->fence), 1);
> -
> +		dma_resv_for_each_fence(&cursor, resv, false, fence) {
> +			break;
> +		}
> +		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);

Uh, this needs a TODO. I'm assuming you'll fix this up later on when
there's more than one write fence, but in case of a bisect or whatever
this is a bit too clever. You just replace one "dig around in dma-resv
implementation details" with one that's not even a documented interface
:-)

With an adequately loud comment added interim:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  		rcu_assign_pointer(resv->fence_excl, &chain->base);
>  		e->chain = NULL;
>  	}
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 12/24] dma-buf: finally make dma_resv_excl_fence private
  2021-12-07 12:33 ` [PATCH 12/24] dma-buf: finally make dma_resv_excl_fence private Christian König
@ 2021-12-22 21:39     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:39 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:33:59PM +0100, Christian König wrote:
> Drivers should never touch this directly.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 17 +++++++++++++++++
>  include/linux/dma-resv.h   | 17 -----------------
>  2 files changed, 17 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 694716a3d66d..9acceabc9399 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -147,6 +147,23 @@ void dma_resv_fini(struct dma_resv *obj)
>  }
>  EXPORT_SYMBOL(dma_resv_fini);
>  
> +/**
> + * dma_resv_excl_fence - return the object's exclusive fence
> + * @obj: the reservation object
> + *
> + * Returns the exclusive fence (if any). Caller must either hold the objects
> + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> + * or one of the variants of each
> + *
> + * RETURNS
> + * The exclusive fence or NULL
> + */

Same thing with us not documenting internals, pls drop the comment
outright it doesn't really explain anything. With that:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +static inline struct dma_fence *
> +dma_resv_excl_fence(struct dma_resv *obj)
> +{
> +	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
> +}
> +
>  /**
>   * dma_resv_shared_list - get the reservation object's shared fence list
>   * @obj: the reservation object
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index cdfbbda6f600..40ac9d486f8f 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -412,23 +412,6 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
>  	ww_mutex_unlock(&obj->lock);
>  }
>  
> -/**
> - * dma_resv_excl_fence - return the object's exclusive fence
> - * @obj: the reservation object
> - *
> - * Returns the exclusive fence (if any). Caller must either hold the objects
> - * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> - * or one of the variants of each
> - *
> - * RETURNS
> - * The exclusive fence or NULL
> - */
> -static inline struct dma_fence *
> -dma_resv_excl_fence(struct dma_resv *obj)
> -{
> -	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
> -}
> -
>  void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 12/24] dma-buf: finally make dma_resv_excl_fence private
@ 2021-12-22 21:39     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:39 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:33:59PM +0100, Christian König wrote:
> Drivers should never touch this directly.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 17 +++++++++++++++++
>  include/linux/dma-resv.h   | 17 -----------------
>  2 files changed, 17 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 694716a3d66d..9acceabc9399 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -147,6 +147,23 @@ void dma_resv_fini(struct dma_resv *obj)
>  }
>  EXPORT_SYMBOL(dma_resv_fini);
>  
> +/**
> + * dma_resv_excl_fence - return the object's exclusive fence
> + * @obj: the reservation object
> + *
> + * Returns the exclusive fence (if any). Caller must either hold the objects
> + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> + * or one of the variants of each
> + *
> + * RETURNS
> + * The exclusive fence or NULL
> + */

Same thing with us not documenting internals, pls drop the comment
outright it doesn't really explain anything. With that:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +static inline struct dma_fence *
> +dma_resv_excl_fence(struct dma_resv *obj)
> +{
> +	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
> +}
> +
>  /**
>   * dma_resv_shared_list - get the reservation object's shared fence list
>   * @obj: the reservation object
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index cdfbbda6f600..40ac9d486f8f 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -412,23 +412,6 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
>  	ww_mutex_unlock(&obj->lock);
>  }
>  
> -/**
> - * dma_resv_excl_fence - return the object's exclusive fence
> - * @obj: the reservation object
> - *
> - * Returns the exclusive fence (if any). Caller must either hold the objects
> - * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
> - * or one of the variants of each
> - *
> - * RETURNS
> - * The exclusive fence or NULL
> - */
> -static inline struct dma_fence *
> -dma_resv_excl_fence(struct dma_resv *obj)
> -{
> -	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
> -}
> -
>  void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object
  2021-12-07 12:34 ` [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object Christian König
@ 2021-12-22 21:43     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:43 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:00PM +0100, Christian König wrote:
> So far we had the approach of using a directed acyclic
> graph with the dma_resv obj.
> 
> This turned out to have many downsides, especially it means
> that every single driver and user of this interface needs
> to be aware of this restriction when adding fences. If the
> rules for the DAG are not followed then we end up with
> potential hard to debug memory corruption, information
> leaks or even elephant big security holes because we allow
> userspace to access freed up memory.
> 
> Since we already took a step back from that by always
> looking at all fences we now go a step further and stop
> dropping the shared fences when a new exclusive one is
> added.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 13 -------------
>  1 file changed, 13 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 9acceabc9399..ecb2ff606bac 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c

No doc update at all!

I checked, we're not that shitty with docs. Minimally, the DOC: section
header and also the struct dma_resv kerneldoc need updating. There may
also be more references and stuff I've missed on a quick look; please
check for them (e.g. the dma_buf.resv kerneldoc is rather important to
keep correct too).

Code itself does what it says in the commit message, but we really need
the most accurate docs we can get for this stuff, or the confusion will
persist :-/

Cheers, Daniel

> @@ -383,29 +383,16 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>  {
>  	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
> -	struct dma_resv_list *old;
> -	u32 i = 0;
>  
>  	dma_resv_assert_held(obj);
>  
> -	old = dma_resv_shared_list(obj);
> -	if (old)
> -		i = old->shared_count;
> -
>  	dma_fence_get(fence);
>  
>  	write_seqcount_begin(&obj->seq);
>  	/* write_seqcount_begin provides the necessary memory barrier */
>  	RCU_INIT_POINTER(obj->fence_excl, fence);
> -	if (old)
> -		old->shared_count = 0;
>  	write_seqcount_end(&obj->seq);
>  
> -	/* inplace update, no shared fences */
> -	while (i--)
> -		dma_fence_put(rcu_dereference_protected(old->shared[i],
> -						dma_resv_held(obj)));
> -
>  	dma_fence_put(old_fence);
>  }
>  EXPORT_SYMBOL(dma_resv_add_excl_fence);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object
@ 2021-12-22 21:43     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:43 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:00PM +0100, Christian König wrote:
> So far we had the approach of using a directed acyclic
> graph with the dma_resv obj.
> 
> This turned out to have many downsides, especially it means
> that every single driver and user of this interface needs
> to be aware of this restriction when adding fences. If the
> rules for the DAG are not followed then we end up with
> potential hard to debug memory corruption, information
> leaks or even elephant big security holes because we allow
> userspace to access freed up memory.
> 
> Since we already took a step back from that by always
> looking at all fences we now go a step further and stop
> dropping the shared fences when a new exclusive one is
> added.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 13 -------------
>  1 file changed, 13 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 9acceabc9399..ecb2ff606bac 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c

No doc update at all!

I checked, we're not that shitty with docs. Minimally, the DOC: section
header and also the struct dma_resv kerneldoc need updating. There may
also be more references and stuff I've missed on a quick look; please
check for them (e.g. the dma_buf.resv kerneldoc is rather important to
keep correct too).

Code itself does what it says in the commit message, but we really need
the most accurate docs we can get for this stuff, or the confusion will
persist :-/

Cheers, Daniel

> @@ -383,29 +383,16 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>  {
>  	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
> -	struct dma_resv_list *old;
> -	u32 i = 0;
>  
>  	dma_resv_assert_held(obj);
>  
> -	old = dma_resv_shared_list(obj);
> -	if (old)
> -		i = old->shared_count;
> -
>  	dma_fence_get(fence);
>  
>  	write_seqcount_begin(&obj->seq);
>  	/* write_seqcount_begin provides the necessary memory barrier */
>  	RCU_INIT_POINTER(obj->fence_excl, fence);
> -	if (old)
> -		old->shared_count = 0;
>  	write_seqcount_end(&obj->seq);
>  
> -	/* inplace update, no shared fences */
> -	while (i--)
> -		dma_fence_put(rcu_dereference_protected(old->shared[i],
> -						dma_resv_held(obj)));
> -
>  	dma_fence_put(old_fence);
>  }
>  EXPORT_SYMBOL(dma_resv_add_excl_fence);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 14/24] dma-buf/drivers: make reserving a shared slot mandatory v2
  2021-12-07 12:34 ` [PATCH 14/24] dma-buf/drivers: make reserving a shared slot mandatory v2 Christian König
@ 2021-12-22 21:50     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:50 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:01PM +0100, Christian König wrote:
> Audit all the users of dma_resv_add_excl_fence() and make sure they
> reserve a shared slot also when only trying to add an exclusive fence.
> 
> This is the next step towards handling the exclusive fence like a
> shared one.
> 
> v2: fix missed case in amdgpu
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Needs all the driver cc and also at least some acks/testing.

> ---
>  drivers/dma-buf/st-dma-resv.c                 | 64 +++++++++----------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  8 +++
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  8 +--
>  drivers/gpu/drm/i915/gem/i915_gem_clflush.c   |  3 +-
>  .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  8 +--
>  .../drm/i915/gem/selftests/i915_gem_migrate.c |  5 +-
>  drivers/gpu/drm/i915/i915_vma.c               |  6 ++
>  .../drm/i915/selftests/intel_memory_region.c  |  7 ++
>  drivers/gpu/drm/lima/lima_gem.c               | 10 ++-
>  drivers/gpu/drm/msm/msm_gem_submit.c          | 18 +++---
>  drivers/gpu/drm/nouveau/nouveau_fence.c       |  9 +--
>  drivers/gpu/drm/panfrost/panfrost_job.c       |  4 ++
>  drivers/gpu/drm/ttm/ttm_bo_util.c             | 12 +++-
>  drivers/gpu/drm/ttm/ttm_execbuf_util.c        | 11 ++--

vc4 seems missing?

Also I think I found one bug below in the conversions.
-Daniel


>  drivers/gpu/drm/v3d/v3d_gem.c                 | 15 +++--
>  drivers/gpu/drm/vgem/vgem_fence.c             | 12 ++--
>  drivers/gpu/drm/virtio/virtgpu_gem.c          |  9 +++
>  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            | 16 +++--
>  18 files changed, 133 insertions(+), 92 deletions(-)
> 
> diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
> index cbe999c6e7a6..f33bafc78693 100644
> --- a/drivers/dma-buf/st-dma-resv.c
> +++ b/drivers/dma-buf/st-dma-resv.c
> @@ -75,17 +75,16 @@ static int test_signaling(void *arg, bool shared)
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			goto err_unlock;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		goto err_unlock;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  
>  	if (dma_resv_test_signaled(&resv, shared)) {
>  		pr_err("Resv unexpectedly signaled\n");
> @@ -134,17 +133,16 @@ static int test_for_each(void *arg, bool shared)
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			goto err_unlock;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		goto err_unlock;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  
>  	r = -ENOENT;
>  	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
> @@ -206,18 +204,17 @@ static int test_for_each_unlocked(void *arg, bool shared)
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			dma_resv_unlock(&resv);
> -			goto err_free;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		dma_resv_unlock(&resv);
> +		goto err_free;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  	dma_resv_unlock(&resv);
>  
>  	r = -ENOENT;
> @@ -290,18 +287,17 @@ static int test_get_fences(void *arg, bool shared)
>  		goto err_resv;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			dma_resv_unlock(&resv);
> -			goto err_resv;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		dma_resv_unlock(&resv);
> +		goto err_resv;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  	dma_resv_unlock(&resv);
>  
>  	r = dma_resv_get_fences(&resv, shared, &i, &fences);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 4fcfc2313b8c..24a6b88afcca 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -1367,6 +1367,14 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
>  		     bool shared)
>  {
>  	struct dma_resv *resv = bo->tbo.base.resv;
> +	int r;
> +
> +	r = dma_resv_reserve_shared(resv, 1);
> +	if (r) {
> +		/* As last resort on OOM we block for the fence */
> +		dma_fence_wait(fence, false);
> +		return;
> +	}
>  
>  	if (shared)
>  		dma_resv_add_shared_fence(resv, fence);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index 4286dc93fdaa..d4a7073190ec 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -179,11 +179,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
>  		struct dma_resv *robj = bo->obj->base.resv;
>  
> -		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
> -			ret = dma_resv_reserve_shared(robj, 1);
> -			if (ret)
> -				return ret;
> -		}
> +		ret = dma_resv_reserve_shared(robj, 1);
> +		if (ret)
> +			return ret;
>  
>  		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
>  			continue;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> index f0435c6feb68..fc57ab914b60 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> @@ -100,7 +100,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
>  	trace_i915_gem_object_clflush(obj);
>  
>  	clflush = NULL;
> -	if (!(flags & I915_CLFLUSH_SYNC))
> +	if (!(flags & I915_CLFLUSH_SYNC) &&
> +	    dma_resv_reserve_shared(obj->base.resv, 1) == 0)
>  		clflush = clflush_work_create(obj);
>  	if (clflush) {
>  		i915_sw_fence_await_reservation(&clflush->base.chain,
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 4d7da07442f2..fc0e1625847c 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -989,11 +989,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
>  			}
>  		}
>  
> -		if (!(ev->flags & EXEC_OBJECT_WRITE)) {
> -			err = dma_resv_reserve_shared(vma->resv, 1);
> -			if (err)
> -				return err;
> -		}
> +		err = dma_resv_reserve_shared(vma->resv, 1);
> +		if (err)
> +			return err;
>  
>  		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
>  			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
> index 28a700f08b49..2bf491fd5cdf 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
> @@ -179,7 +179,10 @@ static int igt_lmem_pages_migrate(void *arg)
>  					  i915_gem_object_is_lmem(obj),
>  					  0xdeadbeaf, &rq);
>  		if (rq) {
> -			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
> +			err = dma_resv_reserve_shared(obj->base.resv, 1);
> +			if (!err)
> +				dma_resv_add_excl_fence(obj->base.resv,
> +							&rq->fence);
>  			i915_request_put(rq);
>  		}
>  		if (err)
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index bef795e265a6..5ec87de63963 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -1255,6 +1255,12 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
>  			intel_frontbuffer_put(front);
>  		}
>  
> +		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> +			err = dma_resv_reserve_shared(vma->resv, 1);
> +			if (unlikely(err))
> +				return err;
> +		}
> +
>  		if (fence) {
>  			dma_resv_add_excl_fence(vma->resv, fence);
>  			obj->write_domain = I915_GEM_DOMAIN_RENDER;
> diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> index 418caae84759..b85af1672a7e 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> @@ -894,6 +894,13 @@ static int igt_lmem_write_cpu(void *arg)
>  	}
>  
>  	i915_gem_object_lock(obj, NULL);
> +
> +	err = dma_resv_reserve_shared(obj->base.resv, 1);
> +	if (err) {
> +		i915_gem_object_unlock(obj);
> +		goto out_put;
> +	}
> +
>  	/* Put the pages into a known state -- from the gpu for added fun */
>  	intel_engine_pm_get(engine);
>  	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
> diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
> index f9a9198ef198..b4846007463f 100644
> --- a/drivers/gpu/drm/lima/lima_gem.c
> +++ b/drivers/gpu/drm/lima/lima_gem.c
> @@ -255,13 +255,11 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
>  static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
>  			    bool write, bool explicit)
>  {
> -	int err = 0;
> +	int err;
>  
> -	if (!write) {
> -		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
> -		if (err)
> -			return err;
> -	}
> +	err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
> +	if (err)
> +		return err;
>  
>  	/* explicit sync use user passed dep fence */
>  	if (explicit)
> diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
> index 3cb029f10925..e874d09b74ef 100644
> --- a/drivers/gpu/drm/msm/msm_gem_submit.c
> +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
> @@ -320,16 +320,14 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
>  		struct drm_gem_object *obj = &submit->bos[i].obj->base;
>  		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
>  
> -		if (!write) {
> -			/* NOTE: _reserve_shared() must happen before
> -			 * _add_shared_fence(), which makes this a slightly
> -			 * strange place to call it.  OTOH this is a
> -			 * convenient can-fail point to hook it in.
> -			 */
> -			ret = dma_resv_reserve_shared(obj->resv, 1);
> -			if (ret)
> -				return ret;
> -		}
> +		/* NOTE: _reserve_shared() must happen before
> +		 * _add_shared_fence(), which makes this a slightly
> +		 * strange place to call it.  OTOH this is a
> +		 * convenient can-fail point to hook it in.
> +		 */
> +		ret = dma_resv_reserve_shared(obj->resv, 1);
> +		if (ret)
> +			return ret;
>  
>  		/* exclusive fences must be ordered */
>  		if (no_implicit && !write)
> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> index 26f9299df881..cd6715bd6d6b 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> @@ -349,12 +349,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
>  	struct nouveau_fence *f;
>  	int ret;
>  
> -	if (!exclusive) {
> -		ret = dma_resv_reserve_shared(resv, 1);
> -
> -		if (ret)
> -			return ret;
> -	}
> +	ret = dma_resv_reserve_shared(resv, 1);
> +	if (ret)
> +		return ret;
>  
>  	dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
>  		struct nouveau_channel *prev = NULL;
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 908d79520853..89c3fe389476 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -247,6 +247,10 @@ static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
>  	int i, ret;
>  
>  	for (i = 0; i < bo_count; i++) {
> +		ret = dma_resv_reserve_shared(bos[i]->resv, 1);
> +		if (ret)
> +			return ret;
> +
>  		/* panfrost always uses write mode in its current uapi */
>  		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
>  							      true);
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 72a94301bc95..ea9eabcc0a0c 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -221,9 +221,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>  
>  	fbo->base = *bo;
>  
> -	ttm_bo_get(bo);
> -	fbo->bo = bo;
> -
>  	/**
>  	 * Fix up members that we shouldn't copy directly:
>  	 * TODO: Explicit member copy would probably be better here.
> @@ -246,6 +243,15 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>  	ret = dma_resv_trylock(&fbo->base.base._resv);
>  	WARN_ON(!ret);
>  
> +	ret = dma_resv_reserve_shared(&fbo->base.base._resv, 1);
> +	if (ret) {
> +		kfree(fbo);
> +		return ret;
> +	}
> +
> +	ttm_bo_get(bo);
> +	fbo->bo = bo;
> +
>  	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
>  
>  	*new_obj = &fbo->base;
> diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> index 071c48d672c6..5da922639d54 100644
> --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> @@ -90,6 +90,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>  
>  	list_for_each_entry(entry, list, head) {
>  		struct ttm_buffer_object *bo = entry->bo;
> +		unsigned int num_fences;
>  
>  		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
>  		if (ret == -EALREADY && dups) {
> @@ -100,12 +101,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>  			continue;
>  		}
>  
> +		num_fences = min(entry->num_shared, 1u);
>  		if (!ret) {
> -			if (!entry->num_shared)
> -				continue;
> -
>  			ret = dma_resv_reserve_shared(bo->base.resv,
> -								entry->num_shared);
> +						      num_fences);

Needs to be at least one, otherwise you call this with 0 when we want to
install a write fence and go boom?

>  			if (!ret)
>  				continue;
>  		}
> @@ -120,9 +119,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>  			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
>  		}
>  
> -		if (!ret && entry->num_shared)
> +		if (!ret)
>  			ret = dma_resv_reserve_shared(bo->base.resv,
> -								entry->num_shared);
> +						      num_fences);
>  
>  		if (unlikely(ret != 0)) {
>  			if (ticket) {
> diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
> index c7ed2e1cbab6..1bea90e40ce1 100644
> --- a/drivers/gpu/drm/v3d/v3d_gem.c
> +++ b/drivers/gpu/drm/v3d/v3d_gem.c
> @@ -259,16 +259,21 @@ v3d_lock_bo_reservations(struct v3d_job *job,
>  		return ret;
>  
>  	for (i = 0; i < job->bo_count; i++) {
> +		ret = dma_resv_reserve_shared(job->bo[i]->resv, 1);
> +		if (ret)
> +			goto fail;
> +
>  		ret = drm_sched_job_add_implicit_dependencies(&job->base,
>  							      job->bo[i], true);
> -		if (ret) {
> -			drm_gem_unlock_reservations(job->bo, job->bo_count,
> -						    acquire_ctx);
> -			return ret;
> -		}
> +		if (ret)
> +			goto fail;
>  	}
>  
>  	return 0;
> +
> +fail:
> +	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
> +	return ret;
>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> index bd6f75285fd9..a4cb296d4fcd 100644
> --- a/drivers/gpu/drm/vgem/vgem_fence.c
> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> @@ -157,12 +157,14 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
>  	}
>  
>  	/* Expose the fence via the dma-buf */
> -	ret = 0;
>  	dma_resv_lock(resv, NULL);
> -	if (arg->flags & VGEM_FENCE_WRITE)
> -		dma_resv_add_excl_fence(resv, fence);
> -	else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
> -		dma_resv_add_shared_fence(resv, fence);
> +	ret = dma_resv_reserve_shared(resv, 1);
> +	if (!ret) {
> +		if (arg->flags & VGEM_FENCE_WRITE)
> +			dma_resv_add_excl_fence(resv, fence);
> +		else
> +			dma_resv_add_shared_fence(resv, fence);
> +	}
>  	dma_resv_unlock(resv);
>  
>  	/* Record the fence in our idr for later signaling */
> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
> index 2de61b63ef91..aec105cdd64c 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_gem.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
> @@ -214,6 +214,7 @@ void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
>  
>  int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
>  {
> +	unsigned int i;
>  	int ret;
>  
>  	if (objs->nents == 1) {
> @@ -222,6 +223,14 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
>  		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
>  						&objs->ticket);
>  	}
> +	if (ret)
> +		return ret;
> +
> +	for (i = 0; i < objs->nents; ++i) {
> +		ret = dma_resv_reserve_shared(objs->objs[i]->resv, 1);
> +		if (ret)
> +			return ret;
> +	}
>  	return ret;
>  }
>  
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index c97a3d5e90ce..6d0abc2b0beb 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -1053,16 +1053,22 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
>  			 struct vmw_fence_obj *fence)
>  {
>  	struct ttm_device *bdev = bo->bdev;
> -
>  	struct vmw_private *dev_priv =
>  		container_of(bdev, struct vmw_private, bdev);
> +	int ret;
>  
> -	if (fence == NULL) {
> +	if (fence == NULL)
>  		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
> +	else
> +		dma_fence_get(&fence->base);
> +
> +	ret = dma_resv_reserve_shared(bo->base.resv, 1);
> +	if (!ret)
>  		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
> -		dma_fence_put(&fence->base);
> -	} else
> -		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
> +	else
> +		/* Last resort fallback when we are OOM */
> +		dma_fence_wait(&fence->base, false);
> +	dma_fence_put(&fence->base);
>  }
>  
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 14/24] dma-buf/drivers: make reserving a shared slot mandatory v2
@ 2021-12-22 21:50     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:50 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:01PM +0100, Christian König wrote:
> Audit all the users of dma_resv_add_excl_fence() and make sure they
> reserve a shared slot also when only trying to add an exclusive fence.
> 
> This is the next step towards handling the exclusive fence like a
> shared one.
> 
> v2: fix missed case in amdgpu
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Needs all the driver cc and also at least some acks/testing.

> ---
>  drivers/dma-buf/st-dma-resv.c                 | 64 +++++++++----------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  8 +++
>  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  8 +--
>  drivers/gpu/drm/i915/gem/i915_gem_clflush.c   |  3 +-
>  .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  8 +--
>  .../drm/i915/gem/selftests/i915_gem_migrate.c |  5 +-
>  drivers/gpu/drm/i915/i915_vma.c               |  6 ++
>  .../drm/i915/selftests/intel_memory_region.c  |  7 ++
>  drivers/gpu/drm/lima/lima_gem.c               | 10 ++-
>  drivers/gpu/drm/msm/msm_gem_submit.c          | 18 +++---
>  drivers/gpu/drm/nouveau/nouveau_fence.c       |  9 +--
>  drivers/gpu/drm/panfrost/panfrost_job.c       |  4 ++
>  drivers/gpu/drm/ttm/ttm_bo_util.c             | 12 +++-
>  drivers/gpu/drm/ttm/ttm_execbuf_util.c        | 11 ++--

vc4 seems missing?

Also I think I found one bug below in the conversions.
-Daniel


>  drivers/gpu/drm/v3d/v3d_gem.c                 | 15 +++--
>  drivers/gpu/drm/vgem/vgem_fence.c             | 12 ++--
>  drivers/gpu/drm/virtio/virtgpu_gem.c          |  9 +++
>  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            | 16 +++--
>  18 files changed, 133 insertions(+), 92 deletions(-)
> 
> diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
> index cbe999c6e7a6..f33bafc78693 100644
> --- a/drivers/dma-buf/st-dma-resv.c
> +++ b/drivers/dma-buf/st-dma-resv.c
> @@ -75,17 +75,16 @@ static int test_signaling(void *arg, bool shared)
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			goto err_unlock;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		goto err_unlock;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  
>  	if (dma_resv_test_signaled(&resv, shared)) {
>  		pr_err("Resv unexpectedly signaled\n");
> @@ -134,17 +133,16 @@ static int test_for_each(void *arg, bool shared)
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			goto err_unlock;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		goto err_unlock;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  
>  	r = -ENOENT;
>  	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
> @@ -206,18 +204,17 @@ static int test_for_each_unlocked(void *arg, bool shared)
>  		goto err_free;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			dma_resv_unlock(&resv);
> -			goto err_free;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		dma_resv_unlock(&resv);
> +		goto err_free;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  	dma_resv_unlock(&resv);
>  
>  	r = -ENOENT;
> @@ -290,18 +287,17 @@ static int test_get_fences(void *arg, bool shared)
>  		goto err_resv;
>  	}
>  
> -	if (shared) {
> -		r = dma_resv_reserve_shared(&resv, 1);
> -		if (r) {
> -			pr_err("Resv shared slot allocation failed\n");
> -			dma_resv_unlock(&resv);
> -			goto err_resv;
> -		}
> +	r = dma_resv_reserve_shared(&resv, 1);
> +	if (r) {
> +		pr_err("Resv shared slot allocation failed\n");
> +		dma_resv_unlock(&resv);
> +		goto err_resv;
> +	}
>  
> +	if (shared)
>  		dma_resv_add_shared_fence(&resv, f);
> -	} else {
> +	else
>  		dma_resv_add_excl_fence(&resv, f);
> -	}
>  	dma_resv_unlock(&resv);
>  
>  	r = dma_resv_get_fences(&resv, shared, &i, &fences);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 4fcfc2313b8c..24a6b88afcca 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -1367,6 +1367,14 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
>  		     bool shared)
>  {
>  	struct dma_resv *resv = bo->tbo.base.resv;
> +	int r;
> +
> +	r = dma_resv_reserve_shared(resv, 1);
> +	if (r) {
> +		/* As last resort on OOM we block for the fence */
> +		dma_fence_wait(fence, false);
> +		return;
> +	}
>  
>  	if (shared)
>  		dma_resv_add_shared_fence(resv, fence);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> index 4286dc93fdaa..d4a7073190ec 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
> @@ -179,11 +179,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
>  		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
>  		struct dma_resv *robj = bo->obj->base.resv;
>  
> -		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
> -			ret = dma_resv_reserve_shared(robj, 1);
> -			if (ret)
> -				return ret;
> -		}
> +		ret = dma_resv_reserve_shared(robj, 1);
> +		if (ret)
> +			return ret;
>  
>  		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
>  			continue;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> index f0435c6feb68..fc57ab914b60 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> @@ -100,7 +100,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
>  	trace_i915_gem_object_clflush(obj);
>  
>  	clflush = NULL;
> -	if (!(flags & I915_CLFLUSH_SYNC))
> +	if (!(flags & I915_CLFLUSH_SYNC) &&
> +	    dma_resv_reserve_shared(obj->base.resv, 1) == 0)
>  		clflush = clflush_work_create(obj);
>  	if (clflush) {
>  		i915_sw_fence_await_reservation(&clflush->base.chain,
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 4d7da07442f2..fc0e1625847c 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -989,11 +989,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
>  			}
>  		}
>  
> -		if (!(ev->flags & EXEC_OBJECT_WRITE)) {
> -			err = dma_resv_reserve_shared(vma->resv, 1);
> -			if (err)
> -				return err;
> -		}
> +		err = dma_resv_reserve_shared(vma->resv, 1);
> +		if (err)
> +			return err;
>  
>  		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
>  			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
> index 28a700f08b49..2bf491fd5cdf 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
> @@ -179,7 +179,10 @@ static int igt_lmem_pages_migrate(void *arg)
>  					  i915_gem_object_is_lmem(obj),
>  					  0xdeadbeaf, &rq);
>  		if (rq) {
> -			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
> +			err = dma_resv_reserve_shared(obj->base.resv, 1);
> +			if (!err)
> +				dma_resv_add_excl_fence(obj->base.resv,
> +							&rq->fence);
>  			i915_request_put(rq);
>  		}
>  		if (err)
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index bef795e265a6..5ec87de63963 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -1255,6 +1255,12 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
>  			intel_frontbuffer_put(front);
>  		}
>  
> +		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> +			err = dma_resv_reserve_shared(vma->resv, 1);
> +			if (unlikely(err))
> +				return err;
> +		}
> +
>  		if (fence) {
>  			dma_resv_add_excl_fence(vma->resv, fence);
>  			obj->write_domain = I915_GEM_DOMAIN_RENDER;
> diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> index 418caae84759..b85af1672a7e 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> @@ -894,6 +894,13 @@ static int igt_lmem_write_cpu(void *arg)
>  	}
>  
>  	i915_gem_object_lock(obj, NULL);
> +
> +	err = dma_resv_reserve_shared(obj->base.resv, 1);
> +	if (err) {
> +		i915_gem_object_unlock(obj);
> +		goto out_put;
> +	}
> +
>  	/* Put the pages into a known state -- from the gpu for added fun */
>  	intel_engine_pm_get(engine);
>  	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
> diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
> index f9a9198ef198..b4846007463f 100644
> --- a/drivers/gpu/drm/lima/lima_gem.c
> +++ b/drivers/gpu/drm/lima/lima_gem.c
> @@ -255,13 +255,11 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
>  static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
>  			    bool write, bool explicit)
>  {
> -	int err = 0;
> +	int err;
>  
> -	if (!write) {
> -		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
> -		if (err)
> -			return err;
> -	}
> +	err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
> +	if (err)
> +		return err;
>  
>  	/* explicit sync use user passed dep fence */
>  	if (explicit)
> diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
> index 3cb029f10925..e874d09b74ef 100644
> --- a/drivers/gpu/drm/msm/msm_gem_submit.c
> +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
> @@ -320,16 +320,14 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
>  		struct drm_gem_object *obj = &submit->bos[i].obj->base;
>  		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
>  
> -		if (!write) {
> -			/* NOTE: _reserve_shared() must happen before
> -			 * _add_shared_fence(), which makes this a slightly
> -			 * strange place to call it.  OTOH this is a
> -			 * convenient can-fail point to hook it in.
> -			 */
> -			ret = dma_resv_reserve_shared(obj->resv, 1);
> -			if (ret)
> -				return ret;
> -		}
> +		/* NOTE: _reserve_shared() must happen before
> +		 * _add_shared_fence(), which makes this a slightly
> +		 * strange place to call it.  OTOH this is a
> +		 * convenient can-fail point to hook it in.
> +		 */
> +		ret = dma_resv_reserve_shared(obj->resv, 1);
> +		if (ret)
> +			return ret;
>  
>  		/* exclusive fences must be ordered */
>  		if (no_implicit && !write)
> diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
> index 26f9299df881..cd6715bd6d6b 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
> @@ -349,12 +349,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
>  	struct nouveau_fence *f;
>  	int ret;
>  
> -	if (!exclusive) {
> -		ret = dma_resv_reserve_shared(resv, 1);
> -
> -		if (ret)
> -			return ret;
> -	}
> +	ret = dma_resv_reserve_shared(resv, 1);
> +	if (ret)
> +		return ret;
>  
>  	dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
>  		struct nouveau_channel *prev = NULL;
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 908d79520853..89c3fe389476 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -247,6 +247,10 @@ static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
>  	int i, ret;
>  
>  	for (i = 0; i < bo_count; i++) {
> +		ret = dma_resv_reserve_shared(bos[i]->resv, 1);
> +		if (ret)
> +			return ret;
> +
>  		/* panfrost always uses write mode in its current uapi */
>  		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
>  							      true);
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 72a94301bc95..ea9eabcc0a0c 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -221,9 +221,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>  
>  	fbo->base = *bo;
>  
> -	ttm_bo_get(bo);
> -	fbo->bo = bo;
> -
>  	/**
>  	 * Fix up members that we shouldn't copy directly:
>  	 * TODO: Explicit member copy would probably be better here.
> @@ -246,6 +243,15 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>  	ret = dma_resv_trylock(&fbo->base.base._resv);
>  	WARN_ON(!ret);
>  
> +	ret = dma_resv_reserve_shared(&fbo->base.base._resv, 1);
> +	if (ret) {
> +		kfree(fbo);
> +		return ret;
> +	}
> +
> +	ttm_bo_get(bo);
> +	fbo->bo = bo;
> +
>  	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
>  
>  	*new_obj = &fbo->base;
> diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> index 071c48d672c6..5da922639d54 100644
> --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> @@ -90,6 +90,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>  
>  	list_for_each_entry(entry, list, head) {
>  		struct ttm_buffer_object *bo = entry->bo;
> +		unsigned int num_fences;
>  
>  		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
>  		if (ret == -EALREADY && dups) {
> @@ -100,12 +101,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>  			continue;
>  		}
>  
> +		num_fences = min(entry->num_shared, 1u);
>  		if (!ret) {
> -			if (!entry->num_shared)
> -				continue;
> -
>  			ret = dma_resv_reserve_shared(bo->base.resv,
> -								entry->num_shared);
> +						      num_fences);

Needs to be at least one, otherwise you call this with 0 when we want to
install a write fence and go boom?

>  			if (!ret)
>  				continue;
>  		}
> @@ -120,9 +119,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>  			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
>  		}
>  
> -		if (!ret && entry->num_shared)
> +		if (!ret)
>  			ret = dma_resv_reserve_shared(bo->base.resv,
> -								entry->num_shared);
> +						      num_fences);
>  
>  		if (unlikely(ret != 0)) {
>  			if (ticket) {
> diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
> index c7ed2e1cbab6..1bea90e40ce1 100644
> --- a/drivers/gpu/drm/v3d/v3d_gem.c
> +++ b/drivers/gpu/drm/v3d/v3d_gem.c
> @@ -259,16 +259,21 @@ v3d_lock_bo_reservations(struct v3d_job *job,
>  		return ret;
>  
>  	for (i = 0; i < job->bo_count; i++) {
> +		ret = dma_resv_reserve_shared(job->bo[i]->resv, 1);
> +		if (ret)
> +			goto fail;
> +
>  		ret = drm_sched_job_add_implicit_dependencies(&job->base,
>  							      job->bo[i], true);
> -		if (ret) {
> -			drm_gem_unlock_reservations(job->bo, job->bo_count,
> -						    acquire_ctx);
> -			return ret;
> -		}
> +		if (ret)
> +			goto fail;
>  	}
>  
>  	return 0;
> +
> +fail:
> +	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
> +	return ret;
>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> index bd6f75285fd9..a4cb296d4fcd 100644
> --- a/drivers/gpu/drm/vgem/vgem_fence.c
> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> @@ -157,12 +157,14 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
>  	}
>  
>  	/* Expose the fence via the dma-buf */
> -	ret = 0;
>  	dma_resv_lock(resv, NULL);
> -	if (arg->flags & VGEM_FENCE_WRITE)
> -		dma_resv_add_excl_fence(resv, fence);
> -	else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
> -		dma_resv_add_shared_fence(resv, fence);
> +	ret = dma_resv_reserve_shared(resv, 1);
> +	if (!ret) {
> +		if (arg->flags & VGEM_FENCE_WRITE)
> +			dma_resv_add_excl_fence(resv, fence);
> +		else
> +			dma_resv_add_shared_fence(resv, fence);
> +	}
>  	dma_resv_unlock(resv);
>  
>  	/* Record the fence in our idr for later signaling */
> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
> index 2de61b63ef91..aec105cdd64c 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_gem.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
> @@ -214,6 +214,7 @@ void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
>  
>  int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
>  {
> +	unsigned int i;
>  	int ret;
>  
>  	if (objs->nents == 1) {
> @@ -222,6 +223,14 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
>  		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
>  						&objs->ticket);
>  	}
> +	if (ret)
> +		return ret;
> +
> +	for (i = 0; i < objs->nents; ++i) {
> +		ret = dma_resv_reserve_shared(objs->objs[i]->resv, 1);
> +		if (ret)
> +			return ret;
> +	}
>  	return ret;
>  }
>  
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index c97a3d5e90ce..6d0abc2b0beb 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -1053,16 +1053,22 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
>  			 struct vmw_fence_obj *fence)
>  {
>  	struct ttm_device *bdev = bo->bdev;
> -
>  	struct vmw_private *dev_priv =
>  		container_of(bdev, struct vmw_private, bdev);
> +	int ret;
>  
> -	if (fence == NULL) {
> +	if (fence == NULL)
>  		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
> +	else
> +		dma_fence_get(&fence->base);
> +
> +	ret = dma_resv_reserve_shared(bo->base.resv, 1);
> +	if (!ret)
>  		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
> -		dma_fence_put(&fence->base);
> -	} else
> -		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
> +	else
> +		/* Last resort fallback when we are OOM */
> +		dma_fence_wait(&fence->base, false);
> +	dma_fence_put(&fence->base);
>  }
>  
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 15/24] drm: support more than one write fence in drm_gem_plane_helper_prepare_fb
  2021-12-07 12:34 ` [PATCH 15/24] drm: support more than one write fence in drm_gem_plane_helper_prepare_fb Christian König
@ 2021-12-22 21:51     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:51 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:02PM +0100, Christian König wrote:
> Use dma_resv_get_singleton() here to eventually get more than one write
> fence as single fence.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Patch title should be drm/atomic-helper: prefix, not just drm:

With that nit:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/drm_gem_atomic_helper.c | 18 +++++++-----------
>  1 file changed, 7 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
> index c3189afe10cb..9338ddb7edff 100644
> --- a/drivers/gpu/drm/drm_gem_atomic_helper.c
> +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
> @@ -143,25 +143,21 @@
>   */
>  int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  {
> -	struct dma_resv_iter cursor;
>  	struct drm_gem_object *obj;
>  	struct dma_fence *fence;
> +	int ret;
>  
>  	if (!state->fb)
>  		return 0;
>  
>  	obj = drm_gem_fb_get_obj(state->fb, 0);
> -	dma_resv_iter_begin(&cursor, obj->resv, false);
> -	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> -		/* TODO: Currently there should be only one write fence, so this
> -		 * here works fine. But drm_atomic_set_fence_for_plane() should
> -		 * be changed to be able to handle more fences in general for
> -		 * multiple BOs per fb anyway. */
> -		dma_fence_get(fence);
> -		break;
> -	}
> -	dma_resv_iter_end(&cursor);
> +	ret = dma_resv_get_singleton(obj->resv, false, &fence);
> +	if (ret)
> +		return ret;
>  
> +	/* TODO: drm_atomic_set_fence_for_plane() should be changed to be able
> +	 * to handle more fences in general for multiple BOs per fb.
> +	 */
>  	drm_atomic_set_fence_for_plane(state, fence);
>  	return 0;
>  }
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 15/24] drm: support more than one write fence in drm_gem_plane_helper_prepare_fb
@ 2021-12-22 21:51     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:51 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:02PM +0100, Christian König wrote:
> Use dma_resv_get_singleton() here to eventually get more than one write
> fence as single fence.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Patch title should be drm/atomic-helper: prefix, not just drm:

With that nit:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/drm_gem_atomic_helper.c | 18 +++++++-----------
>  1 file changed, 7 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
> index c3189afe10cb..9338ddb7edff 100644
> --- a/drivers/gpu/drm/drm_gem_atomic_helper.c
> +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
> @@ -143,25 +143,21 @@
>   */
>  int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  {
> -	struct dma_resv_iter cursor;
>  	struct drm_gem_object *obj;
>  	struct dma_fence *fence;
> +	int ret;
>  
>  	if (!state->fb)
>  		return 0;
>  
>  	obj = drm_gem_fb_get_obj(state->fb, 0);
> -	dma_resv_iter_begin(&cursor, obj->resv, false);
> -	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> -		/* TODO: Currently there should be only one write fence, so this
> -		 * here works fine. But drm_atomic_set_fence_for_plane() should
> -		 * be changed to be able to handle more fences in general for
> -		 * multiple BOs per fb anyway. */
> -		dma_fence_get(fence);
> -		break;
> -	}
> -	dma_resv_iter_end(&cursor);
> +	ret = dma_resv_get_singleton(obj->resv, false, &fence);
> +	if (ret)
> +		return ret;
>  
> +	/* TODO: drm_atomic_set_fence_for_plane() should be changed to be able
> +	 * to handle more fences in general for multiple BOs per fb.
> +	 */
>  	drm_atomic_set_fence_for_plane(state, fence);
>  	return 0;
>  }
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 16/24] drm/nouveau: support more than one write fence in fenv50_wndw_prepare_fb
  2021-12-07 12:34 ` [PATCH 16/24] drm/nouveau: support more than one write fence in fenv50_wndw_prepare_fb Christian König
@ 2021-12-22 21:52     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:52 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:03PM +0100, Christian König wrote:
> Use dma_resv_get_singleton() here to eventually get more than one write
> fence as single fence.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 14 +++++---------
>  1 file changed, 5 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> index 133c8736426a..b55a8a723581 100644
> --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> @@ -536,8 +536,6 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  	struct nouveau_bo *nvbo;
>  	struct nv50_head_atom *asyh;
>  	struct nv50_wndw_ctxdma *ctxdma;
> -	struct dma_resv_iter cursor;
> -	struct dma_fence *fence;
>  	int ret;
>  
>  	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
> @@ -560,13 +558,11 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  			asyw->image.handle[0] = ctxdma->object.handle;
>  	}
>  
> -	dma_resv_iter_begin(&cursor, nvbo->bo.base.resv, false);
> -	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> -		/* TODO: We only use the first writer here */
> -		asyw->state.fence = dma_fence_get(fence);
> -		break;
> -	}
> -	dma_resv_iter_end(&cursor);
> +	ret = dma_resv_get_singleton(nvbo->bo.base.resv, false,
> +				     &asyw->state.fence);

Needs nouveau-ack, but otherwise lgtm.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +	if (ret)
> +		return ret;
> +
>  	asyw->image.offset[0] = nvbo->offset;
>  
>  	if (wndw->func->prepare) {
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 16/24] drm/nouveau: support more than one write fence in fenv50_wndw_prepare_fb
@ 2021-12-22 21:52     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:52 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:03PM +0100, Christian König wrote:
> Use dma_resv_get_singleton() here to eventually get more than one write
> fence as single fence.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 14 +++++---------
>  1 file changed, 5 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> index 133c8736426a..b55a8a723581 100644
> --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
> @@ -536,8 +536,6 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  	struct nouveau_bo *nvbo;
>  	struct nv50_head_atom *asyh;
>  	struct nv50_wndw_ctxdma *ctxdma;
> -	struct dma_resv_iter cursor;
> -	struct dma_fence *fence;
>  	int ret;
>  
>  	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
> @@ -560,13 +558,11 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  			asyw->image.handle[0] = ctxdma->object.handle;
>  	}
>  
> -	dma_resv_iter_begin(&cursor, nvbo->bo.base.resv, false);
> -	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> -		/* TODO: We only use the first writer here */
> -		asyw->state.fence = dma_fence_get(fence);
> -		break;
> -	}
> -	dma_resv_iter_end(&cursor);
> +	ret = dma_resv_get_singleton(nvbo->bo.base.resv, false,
> +				     &asyw->state.fence);

Needs nouveau-ack, but otherwise lgtm.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +	if (ret)
> +		return ret;
> +
>  	asyw->image.offset[0] = nvbo->offset;
>  
>  	if (wndw->func->prepare) {
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 17/24] drm/amdgpu: use dma_resv_get_singleton in amdgpu_pasid_free_cb
  2021-12-07 12:34 ` [PATCH 17/24] drm/amdgpu: use dma_resv_get_singleton in amdgpu_pasid_free_cb Christian König
@ 2021-12-22 21:53     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:53 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:04PM +0100, Christian König wrote:
> Makes the code a bit more simpler.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 23 +++--------------------
>  1 file changed, 3 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index be48487e2ca7..888d97143177 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -107,36 +107,19 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
>  void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>  			       u32 pasid)
>  {
> -	struct dma_fence *fence, **fences;
>  	struct amdgpu_pasid_cb *cb;
> -	unsigned count;
> +	struct dma_fence *fence;
>  	int r;
>  
> -	r = dma_resv_get_fences(resv, true, &count, &fences);
> +	r = dma_resv_get_singleton(resv, true, &fence);
>  	if (r)
>  		goto fallback;
>  
> -	if (count == 0) {
> +	if (!fence) {
>  		amdgpu_pasid_free(pasid);
>  		return;
>  	}
>  
> -	if (count == 1) {
> -		fence = fences[0];
> -		kfree(fences);
> -	} else {
> -		uint64_t context = dma_fence_context_alloc(1);
> -		struct dma_fence_array *array;
> -
> -		array = dma_fence_array_create(count, fences, context,
> -					       1, false);
> -		if (!array) {
> -			kfree(fences);
> -			goto fallback;
> -		}
> -		fence = &array->base;
> -	}
> -
>  	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
>  	if (!cb) {
>  		/* Last resort when we are OOM */
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 17/24] drm/amdgpu: use dma_resv_get_singleton in amdgpu_pasid_free_cb
@ 2021-12-22 21:53     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 21:53 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:04PM +0100, Christian König wrote:
> Makes the code a bit more simpler.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 23 +++--------------------
>  1 file changed, 3 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index be48487e2ca7..888d97143177 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -107,36 +107,19 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
>  void amdgpu_pasid_free_delayed(struct dma_resv *resv,
>  			       u32 pasid)
>  {
> -	struct dma_fence *fence, **fences;
>  	struct amdgpu_pasid_cb *cb;
> -	unsigned count;
> +	struct dma_fence *fence;
>  	int r;
>  
> -	r = dma_resv_get_fences(resv, true, &count, &fences);
> +	r = dma_resv_get_singleton(resv, true, &fence);
>  	if (r)
>  		goto fallback;
>  
> -	if (count == 0) {
> +	if (!fence) {
>  		amdgpu_pasid_free(pasid);
>  		return;
>  	}
>  
> -	if (count == 1) {
> -		fence = fences[0];
> -		kfree(fences);
> -	} else {
> -		uint64_t context = dma_fence_context_alloc(1);
> -		struct dma_fence_array *array;
> -
> -		array = dma_fence_array_create(count, fences, context,
> -					       1, false);
> -		if (!array) {
> -			kfree(fences);
> -			goto fallback;
> -		}
> -		fence = &array->base;
> -	}
> -
>  	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
>  	if (!cb) {
>  		/* Last resort when we are OOM */
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 18/24] dma-buf: add enum dma_resv_usage v3
  2021-12-07 12:34 ` [PATCH 18/24] dma-buf: add enum dma_resv_usage v3 Christian König
@ 2021-12-22 22:00     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:00 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:05PM +0100, Christian König wrote:
> This change adds the dma_resv_usage enum and allows us to specify why a
> dma_resv object is queried for its containing fences.
> 
> Additional to that a dma_resv_usage_rw() helper function is added to aid
> retrieving the fences for a read or write userspace submission.
> 
> This is then deployed to the different query functions of the dma_resv
> object and all of their users. When the write paratermer was previously
> true we now use DMA_RESV_USAGE_WRITE and DMA_RESV_USAGE_READ otherwise.
> 
> v2: add KERNEL/OTHER in separate patch
> v3: some kerneldoc suggestions by Daniel
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Just commenting on the kerneldoc here.

> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 40ac9d486f8f..d96d8ca9af56 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -49,6 +49,49 @@ extern struct ww_class reservation_ww_class;
>  
>  struct dma_resv_list;
>  
> +/**
> + * enum dma_resv_usage - how the fences from a dma_resv obj are used
> + *
> + * This enum describes the different use cases for a dma_resv object and
> + * controls which fences are returned when queried.

We need to link here to both dma_buf.resv and from there to here.

Also we had a fair amount of text in the old dma_resv fields which should
probably be included here.

> + */
> +enum dma_resv_usage {
> +	/**
> +	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
> +	 *
> +	 * This should only be used for userspace command submissions which add
> +	 * an implicit write dependency.
> +	 */
> +	DMA_RESV_USAGE_WRITE,
> +
> +	/**
> +	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
> +	 *
> +	 * This should only be used for userspace command submissions which add
> +	 * an implicit read dependency.

I think the above would benefit from at least a link each to &dma_buf.resv
for further discussion.

Plus the READ flag needs a huge warning that in general it does _not_
guarantee that there are no writes possible, nor that the writes can
be assumed to be mistakes and dropped (on buffer moves e.g.).

Drivers can only make further assumptions for driver-internal dma_resv
objects (e.g. on vm/pagetables) or when the fences are all fences of the
same driver (e.g. the special sync rules amd has that takes the fence
owner into account).

We have this documented in the dma_buf.resv rules, but since it came up
again in a discussion with Thomas H. somewhere, it's better to hammer this
in a few more times. Specifically, generally ignoring READ fences for
buffer moves (well, the copy job; memory freeing still has to wait for all
of them) is a correctness bug.

Maybe include a big warning that really the difference between READ and
WRITE should only matter for implicit sync, and _not_ for anything else
the kernel does.

I'm assuming the actual replacement is all mechanical, so I skipped that
one for now, that's for next year :-)
-Daniel

> +	 */
> +	DMA_RESV_USAGE_READ,
> +};
> +
> +/**
> + * dma_resv_usage_rw - helper for implicit sync
> + * @write: true if we create a new implicit sync write
> + *
> + * This returns the implicit synchronization usage for write or read accesses,
> + * see enum dma_resv_usage.
> + */
> +static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
> +{
> +	/* This looks confusing at first sight, but is indeed correct.
> +	 *
> +	 * The rational is that new write operations needs to wait for the
> +	 * existing read and write operations to finish.
> +	 * But a new read operation only needs to wait for the existing write
> +	 * operations to finish.
> +	 */
> +	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
> +}
> +
>  /**
>   * struct dma_resv - a reservation object manages fences for a buffer
>   *
> @@ -147,8 +190,8 @@ struct dma_resv_iter {
>  	/** @obj: The dma_resv object we iterate over */
>  	struct dma_resv *obj;
>  
> -	/** @all_fences: If all fences should be returned */
> -	bool all_fences;
> +	/** @usage: Controls which fences are returned */
> +	enum dma_resv_usage usage;
>  
>  	/** @fence: the currently handled fence */
>  	struct dma_fence *fence;
> @@ -178,14 +221,14 @@ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
>   * dma_resv_iter_begin - initialize a dma_resv_iter object
>   * @cursor: The dma_resv_iter object to initialize
>   * @obj: The dma_resv object which we want to iterate over
> - * @all_fences: If all fences should be returned or just the exclusive one
> + * @usage: controls which fences to include, see enum dma_resv_usage.
>   */
>  static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
>  				       struct dma_resv *obj,
> -				       bool all_fences)
> +				       enum dma_resv_usage usage)
>  {
>  	cursor->obj = obj;
> -	cursor->all_fences = all_fences;
> +	cursor->usage = usage;
>  	cursor->fence = NULL;
>  }
>  
> @@ -242,7 +285,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
>   * dma_resv_for_each_fence - fence iterator
>   * @cursor: a struct dma_resv_iter pointer
>   * @obj: a dma_resv object pointer
> - * @all_fences: true if all fences should be returned
> + * @usage: controls which fences to return
>   * @fence: the current fence
>   *
>   * Iterate over the fences in a struct dma_resv object while holding the
> @@ -251,8 +294,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
>   * valid as long as the lock is held and so no extra reference to the fence is
>   * taken.
>   */
> -#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
> -	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
> +#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
> +	for (dma_resv_iter_begin(cursor, obj, usage),	\
>  	     fence = dma_resv_iter_first(cursor); fence;	\
>  	     fence = dma_resv_iter_next(cursor))
>  
> @@ -419,14 +462,14 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
>  void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>  			     struct dma_fence *fence);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> -int dma_resv_get_fences(struct dma_resv *obj, bool write,
> +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
>  			unsigned int *num_fences, struct dma_fence ***fences);
> -int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
>  			   struct dma_fence **fence);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
> -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> -			   unsigned long timeout);
> -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
> +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
> +			   bool intr, unsigned long timeout);
> +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
>  void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
>  
>  #endif /* _LINUX_RESERVATION_H */
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 18/24] dma-buf: add enum dma_resv_usage v3
@ 2021-12-22 22:00     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:00 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:05PM +0100, Christian König wrote:
> This change adds the dma_resv_usage enum and allows us to specify why a
> dma_resv object is queried for its containing fences.
> 
> Additional to that a dma_resv_usage_rw() helper function is added to aid
> retrieving the fences for a read or write userspace submission.
> 
> This is then deployed to the different query functions of the dma_resv
> object and all of their users. When the write paratermer was previously
> true we now use DMA_RESV_USAGE_WRITE and DMA_RESV_USAGE_READ otherwise.
> 
> v2: add KERNEL/OTHER in separate patch
> v3: some kerneldoc suggestions by Daniel
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Just commenting on the kerneldoc here.

> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 40ac9d486f8f..d96d8ca9af56 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -49,6 +49,49 @@ extern struct ww_class reservation_ww_class;
>  
>  struct dma_resv_list;
>  
> +/**
> + * enum dma_resv_usage - how the fences from a dma_resv obj are used
> + *
> + * This enum describes the different use cases for a dma_resv object and
> + * controls which fences are returned when queried.

We need to link here to both dma_buf.resv and from there to here.

Also we had a fair amount of text in the old dma_resv fields which should
probably be included here.

> + */
> +enum dma_resv_usage {
> +	/**
> +	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
> +	 *
> +	 * This should only be used for userspace command submissions which add
> +	 * an implicit write dependency.
> +	 */
> +	DMA_RESV_USAGE_WRITE,
> +
> +	/**
> +	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
> +	 *
> +	 * This should only be used for userspace command submissions which add
> +	 * an implicit read dependency.

I think the above would benefit from at least a link each to &dma_buf.resv
for further discussion.

Plus the READ flag needs a huge warning that in general it does _not_
guarantee that there are no writes possible, nor that the writes can
be assumed to be mistakes and dropped (on buffer moves e.g.).

Drivers can only make further assumptions for driver-internal dma_resv
objects (e.g. on vm/pagetables) or when the fences are all fences of the
same driver (e.g. the special sync rules amd has that takes the fence
owner into account).

We have this documented in the dma_buf.resv rules, but since it came up
again in a discussion with Thomas H. somewhere, it's better to hammer this
in a few more times. Specifically, generally ignoring READ fences for
buffer moves (well, the copy job; memory freeing still has to wait for all
of them) is a correctness bug.

Maybe include a big warning that really the difference between READ and
WRITE should only matter for implicit sync, and _not_ for anything else
the kernel does.

I'm assuming the actual replacement is all mechanical, so I skipped that
one for now, that's for next year :-)
-Daniel

> +	 */
> +	DMA_RESV_USAGE_READ,
> +};
> +
> +/**
> + * dma_resv_usage_rw - helper for implicit sync
> + * @write: true if we create a new implicit sync write
> + *
> + * This returns the implicit synchronization usage for write or read accesses,
> + * see enum dma_resv_usage.
> + */
> +static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
> +{
> +	/* This looks confusing at first sight, but is indeed correct.
> +	 *
> +	 * The rational is that new write operations needs to wait for the
> +	 * existing read and write operations to finish.
> +	 * But a new read operation only needs to wait for the existing write
> +	 * operations to finish.
> +	 */
> +	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
> +}
> +
>  /**
>   * struct dma_resv - a reservation object manages fences for a buffer
>   *
> @@ -147,8 +190,8 @@ struct dma_resv_iter {
>  	/** @obj: The dma_resv object we iterate over */
>  	struct dma_resv *obj;
>  
> -	/** @all_fences: If all fences should be returned */
> -	bool all_fences;
> +	/** @usage: Controls which fences are returned */
> +	enum dma_resv_usage usage;
>  
>  	/** @fence: the currently handled fence */
>  	struct dma_fence *fence;
> @@ -178,14 +221,14 @@ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
>   * dma_resv_iter_begin - initialize a dma_resv_iter object
>   * @cursor: The dma_resv_iter object to initialize
>   * @obj: The dma_resv object which we want to iterate over
> - * @all_fences: If all fences should be returned or just the exclusive one
> + * @usage: controls which fences to include, see enum dma_resv_usage.
>   */
>  static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
>  				       struct dma_resv *obj,
> -				       bool all_fences)
> +				       enum dma_resv_usage usage)
>  {
>  	cursor->obj = obj;
> -	cursor->all_fences = all_fences;
> +	cursor->usage = usage;
>  	cursor->fence = NULL;
>  }
>  
> @@ -242,7 +285,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
>   * dma_resv_for_each_fence - fence iterator
>   * @cursor: a struct dma_resv_iter pointer
>   * @obj: a dma_resv object pointer
> - * @all_fences: true if all fences should be returned
> + * @usage: controls which fences to return
>   * @fence: the current fence
>   *
>   * Iterate over the fences in a struct dma_resv object while holding the
> @@ -251,8 +294,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
>   * valid as long as the lock is held and so no extra reference to the fence is
>   * taken.
>   */
> -#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
> -	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
> +#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
> +	for (dma_resv_iter_begin(cursor, obj, usage),	\
>  	     fence = dma_resv_iter_first(cursor); fence;	\
>  	     fence = dma_resv_iter_next(cursor))
>  
> @@ -419,14 +462,14 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
>  void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>  			     struct dma_fence *fence);
>  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> -int dma_resv_get_fences(struct dma_resv *obj, bool write,
> +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
>  			unsigned int *num_fences, struct dma_fence ***fences);
> -int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
>  			   struct dma_fence **fence);
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
> -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> -			   unsigned long timeout);
> -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
> +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
> +			   bool intr, unsigned long timeout);
> +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
>  void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
>  
>  #endif /* _LINUX_RESERVATION_H */
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL
  2021-12-07 12:34 ` [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL Christian König
@ 2021-12-22 22:05     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:05 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:07PM +0100, Christian König wrote:
> Add a usage for kernel submissions. Waiting for those
> is mandatory for dynamic DMA-bufs.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Again just skipping to the doc bikeshedding, maybe with more cc others
help with some code review too.

>  EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 4f3a6abf43c4..29d799991496 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -54,8 +54,30 @@ struct dma_resv_list;
>   *
>   * This enum describes the different use cases for a dma_resv object and
>   * controls which fences are returned when queried.
> + *
> + * An important fact is that there is the order KERNEL<WRITE<READ and
> + * when the dma_resv object is asked for fences for one use case the fences
> + * for the lower use case are returned as well.
> + *
> + * For example when asking for WRITE fences then the KERNEL fences are returned
> + * as well. Similarly, when asked for READ fences, both WRITE and KERNEL
> + * fences are returned as well.
>   */
>  enum dma_resv_usage {
> +	/**
> +	 * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
> +	 *
> +	 * This should only be used for things like copying or clearing memory
> +	 * with a DMA hardware engine for the purpose of kernel memory
> +	 * management.
> +	 *
> +         * Drivers *always* need to wait for those fences before accessing the

s/need to/must/ to stay with usual RFC wording. It's a hard requirement or
there's a security bug somewhere.

> +	 * resource protected by the dma_resv object. The only exception for
> +	 * that is when the resource is known to be locked down in place by
> +	 * pinning it previously.

Is this true? This sounds more confusing than helpful, because afaik in
general our pin interfaces do not block for any kernel fences. dma_buf_pin
doesn't do that for sure. And I don't think ttm does that either.

I think the only safe thing here is to state that it's safe if a) the
resource is pinned down and b) the callers has previously waited for the
kernel fences.

I also think we should put that wait for kernel fences into dma_buf_pin(),
but that's maybe a later patch.
-Daniel



> +	 */
> +	DMA_RESV_USAGE_KERNEL,
> +
>  	/**
>  	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
>  	 *
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL
@ 2021-12-22 22:05     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:05 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:07PM +0100, Christian König wrote:
> Add a usage for kernel submissions. Waiting for those
> is mandatory for dynamic DMA-bufs.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Again just skipping to the doc bikeshedding, maybe with more cc others
help with some code review too.

>  EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 4f3a6abf43c4..29d799991496 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -54,8 +54,30 @@ struct dma_resv_list;
>   *
>   * This enum describes the different use cases for a dma_resv object and
>   * controls which fences are returned when queried.
> + *
> + * An important fact is that there is the order KERNEL<WRITE<READ and
> + * when the dma_resv object is asked for fences for one use case the fences
> + * for the lower use case are returned as well.
> + *
> + * For example when asking for WRITE fences then the KERNEL fences are returned
> + * as well. Similarly, when asked for READ fences, both WRITE and KERNEL
> + * fences are returned as well.
>   */
>  enum dma_resv_usage {
> +	/**
> +	 * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
> +	 *
> +	 * This should only be used for things like copying or clearing memory
> +	 * with a DMA hardware engine for the purpose of kernel memory
> +	 * management.
> +	 *
> +         * Drivers *always* need to wait for those fences before accessing the

s/need to/must/ to stay with usual RFC wording. It's a hard requirement or
there's a security bug somewhere.

> +	 * resource protected by the dma_resv object. The only exception for
> +	 * that is when the resource is known to be locked down in place by
> +	 * pinning it previously.

Is this true? This sounds more confusing than helpful, because afaik in
general our pin interfaces do not block for any kernel fences. dma_buf_pin
doesn't do that for sure. And I don't think ttm does that either.

I think the only safe thing here is to state that it's safe if a) the
resource is pinned down and b) the callers has previously waited for the
kernel fences.

I also think we should put that wait for kernel fences into dma_buf_pin(),
but that's maybe a later patch.
-Daniel



> +	 */
> +	DMA_RESV_USAGE_KERNEL,
> +
>  	/**
>  	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
>  	 *
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 21/24] dma-buf: add DMA_RESV_USAGE_BOOKKEEP
  2021-12-07 12:34 ` [PATCH 21/24] dma-buf: add DMA_RESV_USAGE_BOOKKEEP Christian König
@ 2021-12-22 22:10     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:10 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:08PM +0100, Christian König wrote:
> Add a usage for submissions independent of implicit sync but still
> interesting for memory management.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Focusing on the kerneldoc first to get semantics agreed.

> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 29d799991496..07ae5b00c1fa 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -55,7 +55,7 @@ struct dma_resv_list;
>   * This enum describes the different use cases for a dma_resv object and
>   * controls which fences are returned when queried.
>   *
> - * An important fact is that there is the order KERNEL<WRITE<READ and
> + * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
>   * when the dma_resv object is asked for fences for one use case the fences
>   * for the lower use case are returned as well.
>   *
> @@ -93,6 +93,22 @@ enum dma_resv_usage {
>  	 * an implicit read dependency.
>  	 */
>  	DMA_RESV_USAGE_READ,
> +
> +	/**
> +	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
> +	 *
> +	 * This should be used by submissions which don't want to participate in
> +	 * implicit synchronization.

Uh we might still have a disagreement, because that isn't really what
drivers which added opt-in implicit sync have done thus far. Minimally we
need a note that some drivers also use _READ for this.

> +	 *
> +	 * The most common case are submissions with explicit synchronization,
> +	 * but also things like preemption fences as well as page table updates
> +	 * might use this.
> +	 *
> +	 * The kernel memory management *always* needs to wait for those fences
> +	 * before moving or freeing the resource protected by the dma_resv
> +	 * object.

Yeah this is the comment I wanted to see for READ, and which now is in
bookkeeping (where it's correct in the end). I think we still should have
something in the READ comment (and here) explaining that there could very
well be writes hiding behind this, and that the kernel cannot assume
anything about what's going on in general (maybe some drivers enforce
read/write through command parsers).

Also all the text in dma_buf.resv needs to be updated to use the right
constants instead of words.
-Daniel


> +	 */
> +	DMA_RESV_USAGE_BOOKKEEP
>  };
>  
>  /**
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 21/24] dma-buf: add DMA_RESV_USAGE_BOOKKEEP
@ 2021-12-22 22:10     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:10 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:08PM +0100, Christian König wrote:
> Add a usage for submissions independent of implicit sync but still
> interesting for memory management.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Focusing on the kerneldoc first to get semantics agreed.

> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 29d799991496..07ae5b00c1fa 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -55,7 +55,7 @@ struct dma_resv_list;
>   * This enum describes the different use cases for a dma_resv object and
>   * controls which fences are returned when queried.
>   *
> - * An important fact is that there is the order KERNEL<WRITE<READ and
> + * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
>   * when the dma_resv object is asked for fences for one use case the fences
>   * for the lower use case are returned as well.
>   *
> @@ -93,6 +93,22 @@ enum dma_resv_usage {
>  	 * an implicit read dependency.
>  	 */
>  	DMA_RESV_USAGE_READ,
> +
> +	/**
> +	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
> +	 *
> +	 * This should be used by submissions which don't want to participate in
> +	 * implicit synchronization.

Uh we might still have a disagreement, because that isn't really what
drivers which added opt-in implicit sync have done thus far. Minimally we
need a note that some drivers also use _READ for this.

> +	 *
> +	 * The most common case are submissions with explicit synchronization,
> +	 * but also things like preemption fences as well as page table updates
> +	 * might use this.
> +	 *
> +	 * The kernel memory management *always* needs to wait for those fences
> +	 * before moving or freeing the resource protected by the dma_resv
> +	 * object.

Yeah this is the comment I wanted to see for READ, and which now is in
bookkeeping (where it's correct in the end). I think we still should have
something in the READ comment (and here) explaining that there could very
well be writes hiding behind this, and that the kernel cannot assume
anything about what's going on in general (maybe some drivers enforce
read/write through command parsers).

Also all the text in dma_buf.resv needs to be updated to use the right
constants instead of words.
-Daniel


> +	 */
> +	DMA_RESV_USAGE_BOOKKEEP
>  };
>  
>  /**
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 22/24] dma-buf: wait for map to complete for static attachments
  2021-12-07 12:34 ` [PATCH 22/24] dma-buf: wait for map to complete for static attachments Christian König
@ 2021-12-22 22:16     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:16 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Tue, Dec 07, 2021 at 01:34:09PM +0100, Christian König wrote:
> We have previously done that in the individual drivers but it is
> more defensive to move that into the common code.
> 
> Dynamic attachments should wait for map operations to complete by themselves.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

i915 should probably stop reinventing so much stuff here and align more
...

I do wonder whether we want the same for dma_buf_pin(), or at least
document that for dynamic attachments, you still need to sync even if it's
pinned. Especially since your kerneldoc for the usage flags suggests that
waiting isn't needed, but after this patch waiting _is_ needed even for
dynamic importers.

So there is a gap here I think, and I deleted my r-b tag that I already
typed again. Or do I miss something?

Minimally needs accurate docs, but I'm leaning towards an unconditional
dma_resv_wait() in dma_buf_pin() for safety's sake.


> ---
>  drivers/dma-buf/dma-buf.c                   | 18 +++++++++++++++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 14 +-------------
>  drivers/gpu/drm/nouveau/nouveau_prime.c     | 17 +----------------
>  drivers/gpu/drm/radeon/radeon_prime.c       | 16 +++-------------
>  4 files changed, 20 insertions(+), 45 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 528983d3ba64..d3dd602c4753 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -660,12 +660,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
>  				       enum dma_data_direction direction)
>  {
>  	struct sg_table *sg_table;
> +	signed long ret;
>  
>  	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> +	if (IS_ERR_OR_NULL(sg_table))
> +		return sg_table;
> +
> +	if (!dma_buf_attachment_is_dynamic(attach)) {
> +		ret = dma_resv_wait_timeout(attach->dmabuf->resv,

Another place where this dma_resv_wait() wrapper would be good. I think we
should have it :-)

Cheers, Daniel

> +					    DMA_RESV_USAGE_KERNEL, true,
> +					    MAX_SCHEDULE_TIMEOUT);
> +		if (ret < 0) {
> +			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
> +							   direction);
> +			return ERR_PTR(ret);
> +		}
> +	}
>  
> -	if (!IS_ERR_OR_NULL(sg_table))
> -		mangle_sg_table(sg_table);
> -
> +	mangle_sg_table(sg_table);
>  	return sg_table;
>  }
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 4896c876ffec..33127bd56c64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
>  {
>  	struct drm_gem_object *obj = attach->dmabuf->priv;
>  	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
> -	int r;
>  
>  	/* pin buffer into GTT */
> -	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
> -	if (r)
> -		return r;
> -
> -	if (bo->tbo.moving) {
> -		r = dma_fence_wait(bo->tbo.moving, true);
> -		if (r) {
> -			amdgpu_bo_unpin(bo);
> -			return r;
> -		}
> -	}
> -	return 0;
> +	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
> index 60019d0532fc..347488685f74 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_prime.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
> @@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
>  	if (ret)
>  		return -EINVAL;
>  
> -	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
> -	if (ret)
> -		goto error;
> -
> -	if (nvbo->bo.moving)
> -		ret = dma_fence_wait(nvbo->bo.moving, true);
> -
> -	ttm_bo_unreserve(&nvbo->bo);
> -	if (ret)
> -		goto error;
> -
> -	return ret;
> -
> -error:
> -	nouveau_bo_unpin(nvbo);
> -	return ret;
> +	return 0;
>  }
>  
>  void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
> diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
> index 4a90807351e7..42a87948e28c 100644
> --- a/drivers/gpu/drm/radeon/radeon_prime.c
> +++ b/drivers/gpu/drm/radeon/radeon_prime.c
> @@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
>  
>  	/* pin buffer into GTT */
>  	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
> -	if (unlikely(ret))
> -		goto error;
> -
> -	if (bo->tbo.moving) {
> -		ret = dma_fence_wait(bo->tbo.moving, false);
> -		if (unlikely(ret)) {
> -			radeon_bo_unpin(bo);
> -			goto error;
> -		}
> -	}
> -
> -	bo->prime_shared_count++;
> -error:
> +	if (likely(ret == 0))
> +		bo->prime_shared_count++;
> +
>  	radeon_bo_unreserve(bo);
>  	return ret;
>  }
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 22/24] dma-buf: wait for map to complete for static attachments
@ 2021-12-22 22:16     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:16 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Dec 07, 2021 at 01:34:09PM +0100, Christian König wrote:
> We have previously done that in the individual drivers but it is
> more defensive to move that into the common code.
> 
> Dynamic attachments should wait for map operations to complete by themselves.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

i915 should probably stop reinventing so much stuff here and align more
...

I do wonder whether we want the same for dma_buf_pin(), or at least
document that for dynamic attachments, you still need to sync even if it's
pinned. Especially since your kerneldoc for the usage flags suggests that
waiting isn't needed, but after this patch waiting _is_ needed even for
dynamic importers.

So there is a gap here I think, and I deleted my r-b tag that I already
typed again. Or do I miss something?

Minimally needs accurate docs, but I'm leaning towards an unconditional
dma_resv_wait() in dma_buf_pin() for safety's sake.


> ---
>  drivers/dma-buf/dma-buf.c                   | 18 +++++++++++++++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 14 +-------------
>  drivers/gpu/drm/nouveau/nouveau_prime.c     | 17 +----------------
>  drivers/gpu/drm/radeon/radeon_prime.c       | 16 +++-------------
>  4 files changed, 20 insertions(+), 45 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 528983d3ba64..d3dd602c4753 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -660,12 +660,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
>  				       enum dma_data_direction direction)
>  {
>  	struct sg_table *sg_table;
> +	signed long ret;
>  
>  	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> +	if (IS_ERR_OR_NULL(sg_table))
> +		return sg_table;
> +
> +	if (!dma_buf_attachment_is_dynamic(attach)) {
> +		ret = dma_resv_wait_timeout(attach->dmabuf->resv,

Another place where this dma_resv_wait() wrapper would be good. I think we
should have it :-)

Cheers, Daniel

> +					    DMA_RESV_USAGE_KERNEL, true,
> +					    MAX_SCHEDULE_TIMEOUT);
> +		if (ret < 0) {
> +			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
> +							   direction);
> +			return ERR_PTR(ret);
> +		}
> +	}
>  
> -	if (!IS_ERR_OR_NULL(sg_table))
> -		mangle_sg_table(sg_table);
> -
> +	mangle_sg_table(sg_table);
>  	return sg_table;
>  }
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 4896c876ffec..33127bd56c64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
>  {
>  	struct drm_gem_object *obj = attach->dmabuf->priv;
>  	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
> -	int r;
>  
>  	/* pin buffer into GTT */
> -	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
> -	if (r)
> -		return r;
> -
> -	if (bo->tbo.moving) {
> -		r = dma_fence_wait(bo->tbo.moving, true);
> -		if (r) {
> -			amdgpu_bo_unpin(bo);
> -			return r;
> -		}
> -	}
> -	return 0;
> +	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
> index 60019d0532fc..347488685f74 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_prime.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
> @@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
>  	if (ret)
>  		return -EINVAL;
>  
> -	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
> -	if (ret)
> -		goto error;
> -
> -	if (nvbo->bo.moving)
> -		ret = dma_fence_wait(nvbo->bo.moving, true);
> -
> -	ttm_bo_unreserve(&nvbo->bo);
> -	if (ret)
> -		goto error;
> -
> -	return ret;
> -
> -error:
> -	nouveau_bo_unpin(nvbo);
> -	return ret;
> +	return 0;
>  }
>  
>  void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
> diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
> index 4a90807351e7..42a87948e28c 100644
> --- a/drivers/gpu/drm/radeon/radeon_prime.c
> +++ b/drivers/gpu/drm/radeon/radeon_prime.c
> @@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
>  
>  	/* pin buffer into GTT */
>  	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
> -	if (unlikely(ret))
> -		goto error;
> -
> -	if (bo->tbo.moving) {
> -		ret = dma_fence_wait(bo->tbo.moving, false);
> -		if (unlikely(ret)) {
> -			radeon_bo_unpin(bo);
> -			goto error;
> -		}
> -	}
> -
> -	bo->prime_shared_count++;
> -error:
> +	if (likely(ret == 0))
> +		bo->prime_shared_count++;
> +
>  	radeon_bo_unreserve(bo);
>  	return ret;
>  }
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: completely rework the dma_resv semantic
  2021-12-17 14:39 ` completely rework the dma_resv semantic Christian König
@ 2021-12-22 22:17     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:17 UTC (permalink / raw)
  To: Christian König; +Cc: daniel, dri-devel, linux-media, linaro-mm-sig

On Fri, Dec 17, 2021 at 03:39:52PM +0100, Christian König wrote:
> Hi Daniel,
> 
> looks like this is going nowhere and you don't seem to have time to review.
> 
> What can we do?

cc more people, you didn't cc any of the driver folks :-)

Also I did find some review before I disappeared, back on 10th Jan.

Cheers, Daniel

> 
> Thanks,
> Christian.
> 
> Am 07.12.21 um 13:33 schrieb Christian König:
> > Hi Daniel,
> > 
> > just a gentle ping that you wanted to take a look at this.
> > 
> > Not much changed compared to the last version, only a minor bugfix in
> > the dma_resv_get_singleton error handling.
> > 
> > Regards,
> > Christian.
> > 
> > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: completely rework the dma_resv semantic
@ 2021-12-22 22:17     ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2021-12-22 22:17 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Fri, Dec 17, 2021 at 03:39:52PM +0100, Christian König wrote:
> Hi Daniel,
> 
> looks like this is going nowhere and you don't seem to have time to review.
> 
> What can we do?

cc more people, you didn't cc any of the driver folks :-)

Also I did find some review before I disappeared, back on 10th Jan.

Cheers, Daniel

> 
> Thanks,
> Christian.
> 
> Am 07.12.21 um 13:33 schrieb Christian König:
> > Hi Daniel,
> > 
> > just a gentle ping that you wanted to take a look at this.
> > 
> > Not much changed compared to the last version, only a minor bugfix in
> > the dma_resv_get_singleton error handling.
> > 
> > Regards,
> > Christian.
> > 
> > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: completely rework the dma_resv semantic
  2021-12-22 22:17     ` Daniel Vetter
@ 2021-12-23  9:11       ` Christian König
  -1 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2021-12-23  9:11 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, linux-media, linaro-mm-sig

Am 22.12.21 um 23:17 schrieb Daniel Vetter:
> On Fri, Dec 17, 2021 at 03:39:52PM +0100, Christian König wrote:
>> Hi Daniel,
>>
>> looks like this is going nowhere and you don't seem to have time to review.
>>
>> What can we do?
> cc more people, you didn't cc any of the driver folks :-)

Well, I've CCed more people and lists on the first round of the patches. 
Just wanted to get some more comments from you first before widening the 
audience.

> Also I did find some review before I disappeared, back on 10th Jan.

Good, then I have at least something to do for the first week of January.

Happy holidays,
Christian.

>
> Cheers, Daniel
>
>> Thanks,
>> Christian.
>>
>> Am 07.12.21 um 13:33 schrieb Christian König:
>>> Hi Daniel,
>>>
>>> just a gentle ping that you wanted to take a look at this.
>>>
>>> Not much changed compared to the last version, only a minor bugfix in
>>> the dma_resv_get_singleton error handling.
>>>
>>> Regards,
>>> Christian.
>>>
>>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: completely rework the dma_resv semantic
@ 2021-12-23  9:11       ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2021-12-23  9:11 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media

Am 22.12.21 um 23:17 schrieb Daniel Vetter:
> On Fri, Dec 17, 2021 at 03:39:52PM +0100, Christian König wrote:
>> Hi Daniel,
>>
>> looks like this is going nowhere and you don't seem to have time to review.
>>
>> What can we do?
> cc more people, you didn't cc any of the driver folks :-)

Well, I've CCed more people and lists on the first round of the patches. 
Just wanted to get some more comments from you first before widening the 
audience.

> Also I did find some review before I disappeared, back on 10th Jan.

Good, then I have at least something to do for the first week of January.

Happy holidays,
Christian.

>
> Cheers, Daniel
>
>> Thanks,
>> Christian.
>>
>> Am 07.12.21 um 13:33 schrieb Christian König:
>>> Hi Daniel,
>>>
>>> just a gentle ping that you wanted to take a look at this.
>>>
>>> Not much changed compared to the last version, only a minor bugfix in
>>> the dma_resv_get_singleton error handling.
>>>
>>> Regards,
>>> Christian.
>>>
>>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 01/24] dma-buf: add dma_resv_replace_fences
  2021-12-22 21:05     ` Daniel Vetter
@ 2022-01-03 10:48       ` Christian König
  -1 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-03 10:48 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, linux-media, linaro-mm-sig

Am 22.12.21 um 22:05 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:33:48PM +0100, Christian König wrote:
>> This function allows replacing fences from the shared fence list when
>> we can guarantee that the operation represented by the original fence has
>> finished, or that there are no more accesses to the resources protected
>> by the dma_resv object when the new fence finishes.
>>
>> Then use this function in the amdkfd code when BOs are unmapped from the
>> process.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c                    | 43 ++++++++++++++++
>>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
>>   include/linux/dma-resv.h                      |  2 +
>>   3 files changed, 52 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 4deea75c0b9c..a688dbded3d3 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -284,6 +284,49 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>>   }
>>   EXPORT_SYMBOL(dma_resv_add_shared_fence);
>>   
>> +/**
>> + * dma_resv_replace_fences - replace fences in the dma_resv obj
>> + * @obj: the reservation object
>> + * @context: the context of the fences to replace
>> + * @replacement: the new fence to use instead
>> + *
>> + * Replace fences with a specified context with a new fence. Only valid if the
>> + * operation represented by the original fences is completed or no longer
>> + * has access to the resources protected by the dma_resv object when the new fence
>> + * completes.
>> + */
>> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>> +			     struct dma_fence *replacement)
>> +{
>> +	struct dma_resv_list *list;
>> +	struct dma_fence *old;
>> +	unsigned int i;
>> +
>> +	dma_resv_assert_held(obj);
>> +
>> +	write_seqcount_begin(&obj->seq);
>> +
>> +	old = dma_resv_excl_fence(obj);
>> +	if (old->context == context) {
>> +		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
>> +		dma_fence_put(old);
>> +	}
>> +
>> +	list = dma_resv_shared_list(obj);
>> +	for (i = 0; list && i < list->shared_count; ++i) {
>> +		old = rcu_dereference_protected(list->shared[i],
>> +						dma_resv_held(obj));
>> +		if (old->context != context)
>> +			continue;
>> +
>> +		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
>> +		dma_fence_put(old);
> Since the fences are all guaranteed to be from the same context, maybe we
> should have a WARN_ON(__dma_fence_is_later()); here just to be safe?

First of all happy new year!

Then to answer your question, no :)

This here is the case where we replace an preemption fence with a VM 
page table update fence. So both fences are not from the same context.

But since you ask that means that I somehow need to improve the 
documentation.

Regards,
Christian.

>
> With that added:
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
>> +	}
>> +
>> +	write_seqcount_end(&obj->seq);
>> +}
>> +EXPORT_SYMBOL(dma_resv_replace_fences);
>> +
>>   /**
>>    * dma_resv_add_excl_fence - Add an exclusive fence.
>>    * @obj: the reservation object
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> index 71acd577803e..b558ef0f8c4a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> @@ -236,53 +236,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
>>   static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>>   					struct amdgpu_amdkfd_fence *ef)
>>   {
>> -	struct dma_resv *resv = bo->tbo.base.resv;
>> -	struct dma_resv_list *old, *new;
>> -	unsigned int i, j, k;
>> +	struct dma_fence *replacement;
>>   
>>   	if (!ef)
>>   		return -EINVAL;
>>   
>> -	old = dma_resv_shared_list(resv);
>> -	if (!old)
>> -		return 0;
>> -
>> -	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
>> -	if (!new)
>> -		return -ENOMEM;
>> -
>> -	/* Go through all the shared fences in the resevation object and sort
>> -	 * the interesting ones to the end of the list.
>> +	/* TODO: Instead of block before we should use the fence of the page
>> +	 * table update and TLB flush here directly.
>>   	 */
>> -	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
>> -		struct dma_fence *f;
>> -
>> -		f = rcu_dereference_protected(old->shared[i],
>> -					      dma_resv_held(resv));
>> -
>> -		if (f->context == ef->base.context)
>> -			RCU_INIT_POINTER(new->shared[--j], f);
>> -		else
>> -			RCU_INIT_POINTER(new->shared[k++], f);
>> -	}
>> -	new->shared_max = old->shared_max;
>> -	new->shared_count = k;
>> -
>> -	/* Install the new fence list, seqcount provides the barriers */
>> -	write_seqcount_begin(&resv->seq);
>> -	RCU_INIT_POINTER(resv->fence, new);
>> -	write_seqcount_end(&resv->seq);
>> -
>> -	/* Drop the references to the removed fences or move them to ef_list */
>> -	for (i = j; i < old->shared_count; ++i) {
>> -		struct dma_fence *f;
>> -
>> -		f = rcu_dereference_protected(new->shared[i],
>> -					      dma_resv_held(resv));
>> -		dma_fence_put(f);
>> -	}
>> -	kfree_rcu(old, rcu);
>> -
>> +	replacement = dma_fence_get_stub();
>> +	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
>> +				replacement);
>> +	dma_fence_put(replacement);
>>   	return 0;
>>   }
>>   
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index eebf04325b34..e0be34265eae 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -457,6 +457,8 @@ void dma_resv_init(struct dma_resv *obj);
>>   void dma_resv_fini(struct dma_resv *obj);
>>   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>>   void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
>> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>> +			     struct dma_fence *fence);
>>   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>>   int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
>>   			unsigned *pshared_count, struct dma_fence ***pshared);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 01/24] dma-buf: add dma_resv_replace_fences
@ 2022-01-03 10:48       ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-03 10:48 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media

Am 22.12.21 um 22:05 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:33:48PM +0100, Christian König wrote:
>> This function allows to replace fences from the shared fence list when
>> we can gurantee that the operation represented by the original fence has
>> finished or no accesses to the resources protected by the dma_resv
>> object any more when the new fence finishes.
>>
>> Then use this function in the amdkfd code when BOs are unmapped from the
>> process.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c                    | 43 ++++++++++++++++
>>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
>>   include/linux/dma-resv.h                      |  2 +
>>   3 files changed, 52 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 4deea75c0b9c..a688dbded3d3 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -284,6 +284,49 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>>   }
>>   EXPORT_SYMBOL(dma_resv_add_shared_fence);
>>   
>> +/**
>> + * dma_resv_replace_fences - replace fences in the dma_resv obj
>> + * @obj: the reservation object
>> + * @context: the context of the fences to replace
>> + * @replacement: the new fence to use instead
>> + *
>> + * Replace fences with a specified context with a new fence. Only valid if the
>> + * operation represented by the original fences is completed or has no longer
>> + * access to the resources protected by the dma_resv object when the new fence
>> + * completes.
>> + */
>> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>> +			     struct dma_fence *replacement)
>> +{
>> +	struct dma_resv_list *list;
>> +	struct dma_fence *old;
>> +	unsigned int i;
>> +
>> +	dma_resv_assert_held(obj);
>> +
>> +	write_seqcount_begin(&obj->seq);
>> +
>> +	old = dma_resv_excl_fence(obj);
>> +	if (old->context == context) {
>> +		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
>> +		dma_fence_put(old);
>> +	}
>> +
>> +	list = dma_resv_shared_list(obj);
>> +	for (i = 0; list && i < list->shared_count; ++i) {
>> +		old = rcu_dereference_protected(list->shared[i],
>> +						dma_resv_held(obj));
>> +		if (old->context != context)
>> +			continue;
>> +
>> +		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
>> +		dma_fence_put(old);
> Since the fences are all guaranteed to be from the same context, maybe we
> should have a WARN_ON(__dma_fence_is_later()); here just to be safe?

First of all happy new year!

Then to answer your question, no :)

This is the case where we replace a preemption fence with a VM 
page table update fence. So both fences are not from the same context.

But since you ask that means that I somehow need to improve the 
documentation.

Regards,
Christian.

>
> With that added:
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
>> +	}
>> +
>> +	write_seqcount_end(&obj->seq);
>> +}
>> +EXPORT_SYMBOL(dma_resv_replace_fences);
>> +
>>   /**
>>    * dma_resv_add_excl_fence - Add an exclusive fence.
>>    * @obj: the reservation object
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> index 71acd577803e..b558ef0f8c4a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> @@ -236,53 +236,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
>>   static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>>   					struct amdgpu_amdkfd_fence *ef)
>>   {
>> -	struct dma_resv *resv = bo->tbo.base.resv;
>> -	struct dma_resv_list *old, *new;
>> -	unsigned int i, j, k;
>> +	struct dma_fence *replacement;
>>   
>>   	if (!ef)
>>   		return -EINVAL;
>>   
>> -	old = dma_resv_shared_list(resv);
>> -	if (!old)
>> -		return 0;
>> -
>> -	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
>> -	if (!new)
>> -		return -ENOMEM;
>> -
>> -	/* Go through all the shared fences in the resevation object and sort
>> -	 * the interesting ones to the end of the list.
>> +	/* TODO: Instead of block before we should use the fence of the page
>> +	 * table update and TLB flush here directly.
>>   	 */
>> -	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
>> -		struct dma_fence *f;
>> -
>> -		f = rcu_dereference_protected(old->shared[i],
>> -					      dma_resv_held(resv));
>> -
>> -		if (f->context == ef->base.context)
>> -			RCU_INIT_POINTER(new->shared[--j], f);
>> -		else
>> -			RCU_INIT_POINTER(new->shared[k++], f);
>> -	}
>> -	new->shared_max = old->shared_max;
>> -	new->shared_count = k;
>> -
>> -	/* Install the new fence list, seqcount provides the barriers */
>> -	write_seqcount_begin(&resv->seq);
>> -	RCU_INIT_POINTER(resv->fence, new);
>> -	write_seqcount_end(&resv->seq);
>> -
>> -	/* Drop the references to the removed fences or move them to ef_list */
>> -	for (i = j; i < old->shared_count; ++i) {
>> -		struct dma_fence *f;
>> -
>> -		f = rcu_dereference_protected(new->shared[i],
>> -					      dma_resv_held(resv));
>> -		dma_fence_put(f);
>> -	}
>> -	kfree_rcu(old, rcu);
>> -
>> +	replacement = dma_fence_get_stub();
>> +	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
>> +				replacement);
>> +	dma_fence_put(replacement);
>>   	return 0;
>>   }
>>   
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index eebf04325b34..e0be34265eae 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -457,6 +457,8 @@ void dma_resv_init(struct dma_resv *obj);
>>   void dma_resv_fini(struct dma_resv *obj);
>>   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>>   void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
>> +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>> +			     struct dma_fence *fence);
>>   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>>   int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
>>   			unsigned *pshared_count, struct dma_fence ***pshared);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
  2021-12-22 21:21     ` Daniel Vetter
@ 2022-01-03 11:13       ` Christian König
  -1 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-03 11:13 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media

Am 22.12.21 um 22:21 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
>> Add a function to simplify getting a single fence for all the fences in
>> the dma_resv object.
>>
>> v2: fix ref leak in error handling
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
>>   include/linux/dma-resv.h   |  2 ++
>>   2 files changed, 54 insertions(+)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 480c305554a1..694716a3d66d 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -34,6 +34,7 @@
>>    */
>>   
>>   #include <linux/dma-resv.h>
>> +#include <linux/dma-fence-array.h>
>>   #include <linux/export.h>
>>   #include <linux/mm.h>
>>   #include <linux/sched/mm.h>
>> @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>   }
>>   EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>>   
>> +/**
>> + * dma_resv_get_singleton - Get a single fence for all the fences
>> + * @obj: the reservation object
>> + * @write: true if we should return all fences
>> + * @fence: the resulting fence
>> + *
>> + * Get a single fence representing all the fences inside the resv object.
>> + * Returns either 0 for success or -ENOMEM.
>> + *
>> + * Warning: This can't be used like this when adding the fence back to the resv
>> + * object since that can lead to stack corruption when finalizing the
>> + * dma_fence_array.
> Uh I don't get this one? I thought the only problem with nested fences is
> the signalling recursion, which we work around with the irq_work?

Nope, the main problem is finalizing the dma_fence_array.

E.g. imagine that you build up a chain of dma_fence_array objects like this:
a<-b<-c<-d<-e<-f.....

With each one referencing the previous dma_fence_array and then you call 
dma_fence_put() on the last one. That in turn will cause calling 
dma_fence_put() on the previous one, which in turn will cause 
dma_fence_put() on the one before the previous one, etc....

In other words you recurse because each dma_fence_array instance drops 
the last reference of its predecessor.

What we could do is to delegate dropping the reference to the containing 
fences in a dma_fence_array as well, but that would require some changes 
to the irq_work_run_list() function to be halfway efficient.

> Also if there's really an issue with dma_fence_array fences, then that
> warning should be on the dma_resv kerneldoc, not somewhere hidden like
> this. And finally I really don't see what can go wrong, sure we'll end up
> with the same fence once in the dma_resv_list and then once more in the
> fence array. But they're all refcounted, so really shouldn't matter.
>
> The code itself looks correct, but me not understanding what even goes
> wrong here freaks me out a bit.

Yeah, IIRC we already discussed that with Jason at length as well.

Essentially what you can't do is to put a dma_fence_array into another 
dma_fence_array without causing issues.

So I think we should maybe just add a WARN_ON() into 
dma_fence_array_init() to make sure that this never happens.

Regards,
Christian.

>
> I guess something to figure out next year, I kinda hoped I could squeeze a
> review in before I disappear :-/
> -Daniel
>
>> + */
>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>> +			   struct dma_fence **fence)
>> +{
>> +	struct dma_fence_array *array;
>> +	struct dma_fence **fences;
>> +	unsigned count;
>> +	int r;
>> +
>> +	r = dma_resv_get_fences(obj, write, &count, &fences);
>> +        if (r)
>> +		return r;
>> +
>> +	if (count == 0) {
>> +		*fence = NULL;
>> +		return 0;
>> +	}
>> +
>> +	if (count == 1) {
>> +		*fence = fences[0];
>> +		kfree(fences);
>> +		return 0;
>> +	}
>> +
>> +	array = dma_fence_array_create(count, fences,
>> +				       dma_fence_context_alloc(1),
>> +				       1, false);
>> +	if (!array) {
>> +		while (count--)
>> +			dma_fence_put(fences[count]);
>> +		kfree(fences);
>> +		return -ENOMEM;
>> +	}
>> +
>> +	*fence = &array->base;
>> +	return 0;
>> +}
>> +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
>> +
>>   /**
>>    * dma_resv_wait_timeout - Wait on reservation's objects
>>    * shared and/or exclusive fences.
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index fa2002939b19..cdfbbda6f600 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>>   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>>   int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>   			unsigned int *num_fences, struct dma_fence ***fences);
>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>> +			   struct dma_fence **fence);
>>   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>>   long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>>   			   unsigned long timeout);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
@ 2022-01-03 11:13       ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-03 11:13 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, linux-media, linaro-mm-sig

Am 22.12.21 um 22:21 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
>> Add a function to simplify getting a single fence for all the fences in
>> the dma_resv object.
>>
>> v2: fix ref leak in error handling
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
>>   include/linux/dma-resv.h   |  2 ++
>>   2 files changed, 54 insertions(+)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 480c305554a1..694716a3d66d 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -34,6 +34,7 @@
>>    */
>>   
>>   #include <linux/dma-resv.h>
>> +#include <linux/dma-fence-array.h>
>>   #include <linux/export.h>
>>   #include <linux/mm.h>
>>   #include <linux/sched/mm.h>
>> @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>   }
>>   EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>>   
>> +/**
>> + * dma_resv_get_singleton - Get a single fence for all the fences
>> + * @obj: the reservation object
>> + * @write: true if we should return all fences
>> + * @fence: the resulting fence
>> + *
>> + * Get a single fence representing all the fences inside the resv object.
>> + * Returns either 0 for success or -ENOMEM.
>> + *
>> + * Warning: This can't be used like this when adding the fence back to the resv
>> + * object since that can lead to stack corruption when finalizing the
>> + * dma_fence_array.
> Uh I don't get this one? I thought the only problem with nested fences is
> the signalling recursion, which we work around with the irq_work?

Nope, the main problem is finalizing the dma_fence_array.

E.g. imagine that you build up a chain of dma_fence_array objects like this:
a<-b<-c<-d<-e<-f.....

With each one referencing the previous dma_fence_array and then you call 
dma_fence_put() on the last one. That in turn will cause calling 
dma_fence_put() on the previous one, which in turn will cause 
dma_fence_put() on the one before the previous one, etc....

In other words you recurse because each dma_fence_array instance drops 
the last reference of its predecessor.

What we could do is to delegate dropping the reference to the containing 
fences in a dma_fence_array as well, but that would require some changes 
to the irq_work_run_list() function to be halfway efficient.

> Also if there's really an issue with dma_fence_array fences, then that
> warning should be on the dma_resv kerneldoc, not somewhere hidden like
> this. And finally I really don't see what can go wrong, sure we'll end up
> with the same fence once in the dma_resv_list and then once more in the
> fence array. But they're all refcounted, so really shouldn't matter.
>
> The code itself looks correct, but me not understanding what even goes
> wrong here freaks me out a bit.

Yeah, IIRC we already discussed that with Jason at length as well.

Essentially what you can't do is to put a dma_fence_array into another 
dma_fence_array without causing issues.

So I think we should maybe just add a WARN_ON() into 
dma_fence_array_init() to make sure that this never happens.

Regards,
Christian.

>
> I guess something to figure out next year, I kinda hoped I could squeeze a
> review in before I disappear :-/
> -Daniel
>
>> + */
>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>> +			   struct dma_fence **fence)
>> +{
>> +	struct dma_fence_array *array;
>> +	struct dma_fence **fences;
>> +	unsigned count;
>> +	int r;
>> +
>> +	r = dma_resv_get_fences(obj, write, &count, &fences);
>> +        if (r)
>> +		return r;
>> +
>> +	if (count == 0) {
>> +		*fence = NULL;
>> +		return 0;
>> +	}
>> +
>> +	if (count == 1) {
>> +		*fence = fences[0];
>> +		kfree(fences);
>> +		return 0;
>> +	}
>> +
>> +	array = dma_fence_array_create(count, fences,
>> +				       dma_fence_context_alloc(1),
>> +				       1, false);
>> +	if (!array) {
>> +		while (count--)
>> +			dma_fence_put(fences[count]);
>> +		kfree(fences);
>> +		return -ENOMEM;
>> +	}
>> +
>> +	*fence = &array->base;
>> +	return 0;
>> +}
>> +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
>> +
>>   /**
>>    * dma_resv_wait_timeout - Wait on reservation's objects
>>    * shared and/or exclusive fences.
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index fa2002939b19..cdfbbda6f600 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>>   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>>   int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>   			unsigned int *num_fences, struct dma_fence ***fences);
>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>> +			   struct dma_fence **fence);
>>   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>>   long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>>   			   unsigned long timeout);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround
  2021-12-22 21:37     ` Daniel Vetter
@ 2022-01-03 12:24       ` Christian König
  -1 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-03 12:24 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, linux-media, linaro-mm-sig



Am 22.12.21 um 22:37 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:33:58PM +0100, Christian König wrote:
>> Get the write fence using dma_resv_for_each_fence instead of accessing
>> it manually.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 ++++++---
>>   1 file changed, 6 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> index 53e407ea4c89..7facd614e50a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> @@ -1268,6 +1268,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>>   	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
>>   		struct dma_resv *resv = e->tv.bo->base.resv;
>>   		struct dma_fence_chain *chain = e->chain;
>> +		struct dma_resv_iter cursor;
>> +		struct dma_fence *fence;
>>   
>>   		if (!chain)
>>   			continue;
>> @@ -1277,9 +1279,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>>   		 * submission in a dma_fence_chain and add it as exclusive
>>   		 * fence.
>>   		 */
>> -		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
>> -				     dma_fence_get(p->fence), 1);
>> -
>> +		dma_resv_for_each_fence(&cursor, resv, false, fence) {
>> +			break;
>> +		}
>> +		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
> Uh this needs a TODO. I'm assuming you'll fix this up later on when
> there's more than write fence, but in case of bisect or whatever this is a
> bit too clever. Like you just replace one "dig around in dma-resv
> implementation details" with one that's not even a documented interface
> :-)

Ah, yes. There is a rather big TODO just above this, but I should 
probably make that even stronger.

>
> With an adequately loud comment added interim:
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Thanks,
Christian.

>
>>   		rcu_assign_pointer(resv->fence_excl, &chain->base);
>>   		e->chain = NULL;
>>   	}
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround
@ 2022-01-03 12:24       ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-03 12:24 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media



Am 22.12.21 um 22:37 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:33:58PM +0100, Christian König wrote:
>> Get the write fence using dma_resv_for_each_fence instead of accessing
>> it manually.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 ++++++---
>>   1 file changed, 6 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> index 53e407ea4c89..7facd614e50a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> @@ -1268,6 +1268,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>>   	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
>>   		struct dma_resv *resv = e->tv.bo->base.resv;
>>   		struct dma_fence_chain *chain = e->chain;
>> +		struct dma_resv_iter cursor;
>> +		struct dma_fence *fence;
>>   
>>   		if (!chain)
>>   			continue;
>> @@ -1277,9 +1279,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>>   		 * submission in a dma_fence_chain and add it as exclusive
>>   		 * fence.
>>   		 */
>> -		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
>> -				     dma_fence_get(p->fence), 1);
>> -
>> +		dma_resv_for_each_fence(&cursor, resv, false, fence) {
>> +			break;
>> +		}
>> +		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
> Uh this needs a TODO. I'm assuming you'll fix this up later on when
> there's more than write fence, but in case of bisect or whatever this is a
> bit too clever. Like you just replace one "dig around in dma-resv
> implementation details" with one that's not even a documented interface
> :-)

Ah, yes. There is a rather big TODO just above this, but I should 
probably make that even stronger.

>
> With an adequately loud comment added interim:
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Thanks,
Christian.

>
>>   		rcu_assign_pointer(resv->fence_excl, &chain->base);
>>   		e->chain = NULL;
>>   	}
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object
  2021-12-22 21:43     ` Daniel Vetter
@ 2022-01-04 15:08       ` Christian König
  -1 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-04 15:08 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media

Am 22.12.21 um 22:43 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:34:00PM +0100, Christian König wrote:
>> So far we had the approach of using a directed acyclic
>> graph with the dma_resv obj.
>>
>> This turned out to have many downsides, especially it means
>> that every single driver and user of this interface needs
>> to be aware of this restriction when adding fences. If the
>> rules for the DAG are not followed then we end up with
>> potential hard to debug memory corruption, information
>> leaks or even elephant big security holes because we allow
>> userspace to access freed up memory.
>>
>> Since we already took a step back from that by always
>> looking at all fences we now go a step further and stop
>> dropping the shared fences when a new exclusive one is
>> added.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 13 -------------
>>   1 file changed, 13 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 9acceabc9399..ecb2ff606bac 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
> No doc update at all!

Scratching my head I'm pretty sure I've updated at least the kerneldoc 
for dma_resv_add_excl_fence(). Must have gone lost in some rebase.

>
> I checked, we're not that shitty with docs,

Well I wouldn't say shitty, but they are not perfect either.

>   Minimally the DOC: section
> header and also the struct dma_resv kerneldoc. Also there's maybe more
> references and stuff I've missed on a quick look, please check for them
> (e.g. dma_buf.resv kerneldoc is rather important to keep correct too).
>
> Code itself does what it says in the commit message, but we really need
> the most accurate docs we can get for this stuff, or the confusion will
> persist :-/

Yeah completely agree, going to fix that.

Thanks,
Christian.

>
> Cheers, Daniel
>
>> @@ -383,29 +383,16 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
>>   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>>   {
>>   	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
>> -	struct dma_resv_list *old;
>> -	u32 i = 0;
>>   
>>   	dma_resv_assert_held(obj);
>>   
>> -	old = dma_resv_shared_list(obj);
>> -	if (old)
>> -		i = old->shared_count;
>> -
>>   	dma_fence_get(fence);
>>   
>>   	write_seqcount_begin(&obj->seq);
>>   	/* write_seqcount_begin provides the necessary memory barrier */
>>   	RCU_INIT_POINTER(obj->fence_excl, fence);
>> -	if (old)
>> -		old->shared_count = 0;
>>   	write_seqcount_end(&obj->seq);
>>   
>> -	/* inplace update, no shared fences */
>> -	while (i--)
>> -		dma_fence_put(rcu_dereference_protected(old->shared[i],
>> -						dma_resv_held(obj)));
>> -
>>   	dma_fence_put(old_fence);
>>   }
>>   EXPORT_SYMBOL(dma_resv_add_excl_fence);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object
@ 2022-01-04 15:08       ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-04 15:08 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, linux-media, linaro-mm-sig

Am 22.12.21 um 22:43 schrieb Daniel Vetter:
> On Tue, Dec 07, 2021 at 01:34:00PM +0100, Christian König wrote:
>> So far we had the approach of using a directed acyclic
>> graph with the dma_resv obj.
>>
>> This turned out to have many downsides, especially it means
>> that every single driver and user of this interface needs
>> to be aware of this restriction when adding fences. If the
>> rules for the DAG are not followed then we end up with
>> potential hard to debug memory corruption, information
>> leaks or even elephant big security holes because we allow
>> userspace to access freed up memory.
>>
>> Since we already took a step back from that by always
>> looking at all fences we now go a step further and stop
>> dropping the shared fences when a new exclusive one is
>> added.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 13 -------------
>>   1 file changed, 13 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 9acceabc9399..ecb2ff606bac 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
> No doc update at all!

Scratching my head I'm pretty sure I've updated at least the kerneldoc 
for dma_resv_add_excl_fence(). Must have gone lost in some rebase.

>
> I checked, we're not that shitty with docs,

Well I wouldn't say shitty, but they are not perfect either.

>   Minimally the DOC: section
> header and also the struct dma_resv kerneldoc. Also there's maybe more
> references and stuff I've missed on a quick look, please check for them
> (e.g. dma_buf.resv kerneldoc is rather important to keep correct too).
>
> Code itself does what it says in the commit message, but we really need
> the most accurate docs we can get for this stuff, or the confusion will
> persist :-/

Yeah completely agree, going to fix that.

Thanks,
Christian.

>
> Cheers, Daniel
>
>> @@ -383,29 +383,16 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
>>   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>>   {
>>   	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
>> -	struct dma_resv_list *old;
>> -	u32 i = 0;
>>   
>>   	dma_resv_assert_held(obj);
>>   
>> -	old = dma_resv_shared_list(obj);
>> -	if (old)
>> -		i = old->shared_count;
>> -
>>   	dma_fence_get(fence);
>>   
>>   	write_seqcount_begin(&obj->seq);
>>   	/* write_seqcount_begin provides the necessary memory barrier */
>>   	RCU_INIT_POINTER(obj->fence_excl, fence);
>> -	if (old)
>> -		old->shared_count = 0;
>>   	write_seqcount_end(&obj->seq);
>>   
>> -	/* inplace update, no shared fences */
>> -	while (i--)
>> -		dma_fence_put(rcu_dereference_protected(old->shared[i],
>> -						dma_resv_held(obj)));
>> -
>>   	dma_fence_put(old_fence);
>>   }
>>   EXPORT_SYMBOL(dma_resv_add_excl_fence);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 01/24] dma-buf: add dma_resv_replace_fences
  2022-01-03 10:48       ` Christian König
@ 2022-01-14 16:28         ` Daniel Vetter
  -1 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:28 UTC (permalink / raw)
  To: Christian König; +Cc: Daniel Vetter, dri-devel, linux-media, linaro-mm-sig

On Mon, Jan 03, 2022 at 11:48:25AM +0100, Christian König wrote:
> Am 22.12.21 um 22:05 schrieb Daniel Vetter:
> > On Tue, Dec 07, 2021 at 01:33:48PM +0100, Christian König wrote:
> > > This function allows to replace fences from the shared fence list when
> > > we can gurantee that the operation represented by the original fence has
> > > finished or no accesses to the resources protected by the dma_resv
> > > object any more when the new fence finishes.
> > > 
> > > Then use this function in the amdkfd code when BOs are unmapped from the
> > > process.
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > > ---
> > >   drivers/dma-buf/dma-resv.c                    | 43 ++++++++++++++++
> > >   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
> > >   include/linux/dma-resv.h                      |  2 +
> > >   3 files changed, 52 insertions(+), 42 deletions(-)
> > > 
> > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > > index 4deea75c0b9c..a688dbded3d3 100644
> > > --- a/drivers/dma-buf/dma-resv.c
> > > +++ b/drivers/dma-buf/dma-resv.c
> > > @@ -284,6 +284,49 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
> > >   }
> > >   EXPORT_SYMBOL(dma_resv_add_shared_fence);
> > > +/**
> > > + * dma_resv_replace_fences - replace fences in the dma_resv obj
> > > + * @obj: the reservation object
> > > + * @context: the context of the fences to replace
> > > + * @replacement: the new fence to use instead
> > > + *
> > > + * Replace fences with a specified context with a new fence. Only valid if the
> > > + * operation represented by the original fences is completed or has no longer
> > > + * access to the resources protected by the dma_resv object when the new fence
> > > + * completes.
> > > + */
> > > +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> > > +			     struct dma_fence *replacement)
> > > +{
> > > +	struct dma_resv_list *list;
> > > +	struct dma_fence *old;
> > > +	unsigned int i;
> > > +
> > > +	dma_resv_assert_held(obj);
> > > +
> > > +	write_seqcount_begin(&obj->seq);
> > > +
> > > +	old = dma_resv_excl_fence(obj);
> > > +	if (old->context == context) {
> > > +		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
> > > +		dma_fence_put(old);
> > > +	}
> > > +
> > > +	list = dma_resv_shared_list(obj);
> > > +	for (i = 0; list && i < list->shared_count; ++i) {
> > > +		old = rcu_dereference_protected(list->shared[i],
> > > +						dma_resv_held(obj));
> > > +		if (old->context != context)
> > > +			continue;
> > > +
> > > +		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
> > > +		dma_fence_put(old);
> > Since the fences are all guaranteed to be from the same context, maybe we
> > should have a WARN_ON(__dma_fence_is_later()); here just to be safe?
> 
> First of all happy new year!

Happy new year to you too!

Also I'm only still catching up.

> Then to answer your question, no :)
> 
> This here is the case where we replace an preemption fence with a VM page
> table update fence. So both fences are not from the same context.
> 
> But since you ask that means that I somehow need to improve the
> documentation.

Hm yeah then I'm confused, since right above you have the context check.
And I thought if the contexts are equal, then the fences must be ordered,
and since you're adding a new one it must be a later fence.

But now you're saying this is to replace a fence with a totally different
context one (which can totally make sense for the special fences compute
mode contexts all need), but then I honestly don't get why you even check
for the context.

Maybe more docs help explain what's going on, or maybe we should have the
is_later check only if the new fence is from the same context. amdkfd
might not benefit, but this is a new generic interface and other drivers
might horrendously screw this up :-) Plus then a big comment that if it's
a different fence timeline context the driver must guarantee that the new
fence is guaranteed to signal after anything we're replacing here.

I think it might also be good to just include the specific amdkfd use case
with a short intro to wth are preempt-ctx and page table fences, to
explain when this function is actually useful.

It's definitely a very special case function, and I'm worried driver
authors might come up with creative abuses for it that cause trouble.
-Daniel

> 
> Regards,
> Christian.
> 
> > 
> > With that added:
> > 
> > Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
> > 
> > > +	}
> > > +
> > > +	write_seqcount_end(&obj->seq);
> > > +}
> > > +EXPORT_SYMBOL(dma_resv_replace_fences);
> > > +
> > >   /**
> > >    * dma_resv_add_excl_fence - Add an exclusive fence.
> > >    * @obj: the reservation object
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > > index 71acd577803e..b558ef0f8c4a 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > > @@ -236,53 +236,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
> > >   static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
> > >   					struct amdgpu_amdkfd_fence *ef)
> > >   {
> > > -	struct dma_resv *resv = bo->tbo.base.resv;
> > > -	struct dma_resv_list *old, *new;
> > > -	unsigned int i, j, k;
> > > +	struct dma_fence *replacement;
> > >   	if (!ef)
> > >   		return -EINVAL;
> > > -	old = dma_resv_shared_list(resv);
> > > -	if (!old)
> > > -		return 0;
> > > -
> > > -	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
> > > -	if (!new)
> > > -		return -ENOMEM;
> > > -
> > > -	/* Go through all the shared fences in the resevation object and sort
> > > -	 * the interesting ones to the end of the list.
> > > +	/* TODO: Instead of block before we should use the fence of the page
> > > +	 * table update and TLB flush here directly.
> > >   	 */
> > > -	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
> > > -		struct dma_fence *f;
> > > -
> > > -		f = rcu_dereference_protected(old->shared[i],
> > > -					      dma_resv_held(resv));
> > > -
> > > -		if (f->context == ef->base.context)
> > > -			RCU_INIT_POINTER(new->shared[--j], f);
> > > -		else
> > > -			RCU_INIT_POINTER(new->shared[k++], f);
> > > -	}
> > > -	new->shared_max = old->shared_max;
> > > -	new->shared_count = k;
> > > -
> > > -	/* Install the new fence list, seqcount provides the barriers */
> > > -	write_seqcount_begin(&resv->seq);
> > > -	RCU_INIT_POINTER(resv->fence, new);
> > > -	write_seqcount_end(&resv->seq);
> > > -
> > > -	/* Drop the references to the removed fences or move them to ef_list */
> > > -	for (i = j; i < old->shared_count; ++i) {
> > > -		struct dma_fence *f;
> > > -
> > > -		f = rcu_dereference_protected(new->shared[i],
> > > -					      dma_resv_held(resv));
> > > -		dma_fence_put(f);
> > > -	}
> > > -	kfree_rcu(old, rcu);
> > > -
> > > +	replacement = dma_fence_get_stub();
> > > +	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
> > > +				replacement);
> > > +	dma_fence_put(replacement);
> > >   	return 0;
> > >   }
> > > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > > index eebf04325b34..e0be34265eae 100644
> > > --- a/include/linux/dma-resv.h
> > > +++ b/include/linux/dma-resv.h
> > > @@ -457,6 +457,8 @@ void dma_resv_init(struct dma_resv *obj);
> > >   void dma_resv_fini(struct dma_resv *obj);
> > >   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> > >   void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
> > > +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> > > +			     struct dma_fence *fence);
> > >   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> > >   int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> > >   			unsigned *pshared_count, struct dma_fence ***pshared);
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 01/24] dma-buf: add dma_resv_replace_fences
@ 2022-01-14 16:28         ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:28 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Mon, Jan 03, 2022 at 11:48:25AM +0100, Christian König wrote:
> Am 22.12.21 um 22:05 schrieb Daniel Vetter:
> > On Tue, Dec 07, 2021 at 01:33:48PM +0100, Christian König wrote:
> > > This function allows to replace fences from the shared fence list when
> > > we can gurantee that the operation represented by the original fence has
> > > finished or no accesses to the resources protected by the dma_resv
> > > object any more when the new fence finishes.
> > > 
> > > Then use this function in the amdkfd code when BOs are unmapped from the
> > > process.
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > > ---
> > >   drivers/dma-buf/dma-resv.c                    | 43 ++++++++++++++++
> > >   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 49 +++----------------
> > >   include/linux/dma-resv.h                      |  2 +
> > >   3 files changed, 52 insertions(+), 42 deletions(-)
> > > 
> > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > > index 4deea75c0b9c..a688dbded3d3 100644
> > > --- a/drivers/dma-buf/dma-resv.c
> > > +++ b/drivers/dma-buf/dma-resv.c
> > > @@ -284,6 +284,49 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
> > >   }
> > >   EXPORT_SYMBOL(dma_resv_add_shared_fence);
> > > +/**
> > > + * dma_resv_replace_fences - replace fences in the dma_resv obj
> > > + * @obj: the reservation object
> > > + * @context: the context of the fences to replace
> > > + * @replacement: the new fence to use instead
> > > + *
> > > + * Replace fences with a specified context with a new fence. Only valid if the
> > > + * operation represented by the original fences is completed or has no longer
> > > + * access to the resources protected by the dma_resv object when the new fence
> > > + * completes.
> > > + */
> > > +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> > > +			     struct dma_fence *replacement)
> > > +{
> > > +	struct dma_resv_list *list;
> > > +	struct dma_fence *old;
> > > +	unsigned int i;
> > > +
> > > +	dma_resv_assert_held(obj);
> > > +
> > > +	write_seqcount_begin(&obj->seq);
> > > +
> > > +	old = dma_resv_excl_fence(obj);
> > > +	if (old->context == context) {
> > > +		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
> > > +		dma_fence_put(old);
> > > +	}
> > > +
> > > +	list = dma_resv_shared_list(obj);
> > > +	for (i = 0; list && i < list->shared_count; ++i) {
> > > +		old = rcu_dereference_protected(list->shared[i],
> > > +						dma_resv_held(obj));
> > > +		if (old->context != context)
> > > +			continue;
> > > +
> > > +		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
> > > +		dma_fence_put(old);
> > Since the fences are all guaranteed to be from the same context, maybe we
> > should have a WARN_ON(__dma_fence_is_later()); here just to be safe?
> 
> First of all happy new year!

Happy new year to you too!

Also I'm only still catching up.

> Then to answer your question, no :)
> 
> This here is the case where we replace an preemption fence with a VM page
> table update fence. So both fences are not from the same context.
> 
> But since you ask that means that I somehow need to improve the
> documentation.

Hm yeah then I'm confused, since right above you have the context check.
And I thought if the contexts are equal, then the fences must be ordered,
and since you're adding a new one it must be a later fence.

But now you're saying this is to replace a fence with a totally different
context one (which can totally make sense for the special fences compute
mode contexts all need), but then I honestly don't get why you even check
for the context.

Maybe more docs help explain what's going on, or maybe we should have the
is_later check only if the new fence is from the same context. amdkfd
might not benefit, but this is a new generic interface and other drivers
might horrendously screw this up :-) Plus then a big comment that if it's
a different fence timeline context the driver must guarantee that the new
fence is guaranteed to signal after anything we're replacing here.

I think it might also be good to just include the specific amdkfd use case
with a short intro to wth are preempt-ctx and page table fences, to
explain when this function is actually useful.

It's definitely a very special case function, and I'm worried driver
authors might come up with creative abuses for it that cause trouble.
-Daniel

> 
> Regards,
> Christian.
> 
> > 
> > With that added:
> > 
> > Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
> > 
> > > +	}
> > > +
> > > +	write_seqcount_end(&obj->seq);
> > > +}
> > > +EXPORT_SYMBOL(dma_resv_replace_fences);
> > > +
> > >   /**
> > >    * dma_resv_add_excl_fence - Add an exclusive fence.
> > >    * @obj: the reservation object
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > > index 71acd577803e..b558ef0f8c4a 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > > @@ -236,53 +236,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
> > >   static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
> > >   					struct amdgpu_amdkfd_fence *ef)
> > >   {
> > > -	struct dma_resv *resv = bo->tbo.base.resv;
> > > -	struct dma_resv_list *old, *new;
> > > -	unsigned int i, j, k;
> > > +	struct dma_fence *replacement;
> > >   	if (!ef)
> > >   		return -EINVAL;
> > > -	old = dma_resv_shared_list(resv);
> > > -	if (!old)
> > > -		return 0;
> > > -
> > > -	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
> > > -	if (!new)
> > > -		return -ENOMEM;
> > > -
> > > -	/* Go through all the shared fences in the resevation object and sort
> > > -	 * the interesting ones to the end of the list.
> > > +	/* TODO: Instead of block before we should use the fence of the page
> > > +	 * table update and TLB flush here directly.
> > >   	 */
> > > -	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
> > > -		struct dma_fence *f;
> > > -
> > > -		f = rcu_dereference_protected(old->shared[i],
> > > -					      dma_resv_held(resv));
> > > -
> > > -		if (f->context == ef->base.context)
> > > -			RCU_INIT_POINTER(new->shared[--j], f);
> > > -		else
> > > -			RCU_INIT_POINTER(new->shared[k++], f);
> > > -	}
> > > -	new->shared_max = old->shared_max;
> > > -	new->shared_count = k;
> > > -
> > > -	/* Install the new fence list, seqcount provides the barriers */
> > > -	write_seqcount_begin(&resv->seq);
> > > -	RCU_INIT_POINTER(resv->fence, new);
> > > -	write_seqcount_end(&resv->seq);
> > > -
> > > -	/* Drop the references to the removed fences or move them to ef_list */
> > > -	for (i = j; i < old->shared_count; ++i) {
> > > -		struct dma_fence *f;
> > > -
> > > -		f = rcu_dereference_protected(new->shared[i],
> > > -					      dma_resv_held(resv));
> > > -		dma_fence_put(f);
> > > -	}
> > > -	kfree_rcu(old, rcu);
> > > -
> > > +	replacement = dma_fence_get_stub();
> > > +	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
> > > +				replacement);
> > > +	dma_fence_put(replacement);
> > >   	return 0;
> > >   }
> > > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > > index eebf04325b34..e0be34265eae 100644
> > > --- a/include/linux/dma-resv.h
> > > +++ b/include/linux/dma-resv.h
> > > @@ -457,6 +457,8 @@ void dma_resv_init(struct dma_resv *obj);
> > >   void dma_resv_fini(struct dma_resv *obj);
> > >   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> > >   void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
> > > +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> > > +			     struct dma_fence *fence);
> > >   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> > >   int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> > >   			unsigned *pshared_count, struct dma_fence ***pshared);
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
  2022-01-03 11:13       ` Christian König
@ 2022-01-14 16:31         ` Daniel Vetter
  -1 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:31 UTC (permalink / raw)
  To: Christian König; +Cc: Daniel Vetter, dri-devel, linux-media, linaro-mm-sig

On Mon, Jan 03, 2022 at 12:13:41PM +0100, Christian König wrote:
> Am 22.12.21 um 22:21 schrieb Daniel Vetter:
> > On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
> > > Add a function to simplify getting a single fence for all the fences in
> > > the dma_resv object.
> > > 
> > > v2: fix ref leak in error handling
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > > ---
> > >   drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
> > >   include/linux/dma-resv.h   |  2 ++
> > >   2 files changed, 54 insertions(+)
> > > 
> > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > > index 480c305554a1..694716a3d66d 100644
> > > --- a/drivers/dma-buf/dma-resv.c
> > > +++ b/drivers/dma-buf/dma-resv.c
> > > @@ -34,6 +34,7 @@
> > >    */
> > >   #include <linux/dma-resv.h>
> > > +#include <linux/dma-fence-array.h>
> > >   #include <linux/export.h>
> > >   #include <linux/mm.h>
> > >   #include <linux/sched/mm.h>
> > > @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
> > >   }
> > >   EXPORT_SYMBOL_GPL(dma_resv_get_fences);
> > > +/**
> > > + * dma_resv_get_singleton - Get a single fence for all the fences
> > > + * @obj: the reservation object
> > > + * @write: true if we should return all fences
> > > + * @fence: the resulting fence
> > > + *
> > > + * Get a single fence representing all the fences inside the resv object.
> > > + * Returns either 0 for success or -ENOMEM.
> > > + *
> > > + * Warning: This can't be used like this when adding the fence back to the resv
> > > + * object since that can lead to stack corruption when finalizing the
> > > + * dma_fence_array.
> > Uh I don't get this one? I thought the only problem with nested fences is
> > the signalling recursion, which we work around with the irq_work?
> 
> Nope, the main problem is finalizing the dma_fence_array.
> 
> E.g. imagine that you build up a chain of dma_fence_array objects like this:
> a<-b<-c<-d<-e<-f.....
> 
> With each one referencing the previous dma_fence_array and then you call
> dma_fence_put() on the last one. That in turn will cause calling
> dma_fence_put() on the previous one, which in turn will cause
> dma_fence_put() one the one before the previous one etc....
> 
> In other words you recurse because each dma_fence_array instance drops the
> last reference of it's predecessor.
> 
> What we could do is to delegate dropping the reference to the containing
> fences in a dma_fence_array as well, but that would require some changes to
> the irq_work_run_list() function to be halfway efficient.
> 
> > Also if there's really an issue with dma_fence_array fences, then that
> > warning should be on the dma_resv kerneldoc, not somewhere hidden like
> > this. And finally I really don't see what can go wrong, sure we'll end up
> > with the same fence once in the dma_resv_list and then once more in the
> > fence array. But they're all refcounted, so really shouldn't matter.
> > 
> > The code itself looks correct, but me not understanding what even goes
> > wrong here freaks me out a bit.
> 
> Yeah, IIRC we already discussed that with Jason in length as well.
> 
> Essentially what you can't do is to put a dma_fence_array into another
> dma_fence_array without causing issues.
> 
> So I think we should maybe just add a WARN_ON() into dma_fence_array_init()
> to make sure that this never happens.

Yeah I think this would be much clearer instead of sprinkling half the
story as a scary&confusing warning over all kinds of users which
internally use dma fence arrays.

And then if it goes boom I guess we could fix it internally in
dma_fence_array_init by flattening fences down again. But only if actually
needed.

What confused me is why dma_resv is special, and from your reply it sounds
like it really isn't.
-Daniel


> 
> Regards,
> Christian.
> 
> > 
> > I guess something to figure out next year, I kinda hoped I could squeeze a
> > review in before I disappear :-/
> > -Daniel
> > 
> > > + */
> > > +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> > > +			   struct dma_fence **fence)
> > > +{
> > > +	struct dma_fence_array *array;
> > > +	struct dma_fence **fences;
> > > +	unsigned count;
> > > +	int r;
> > > +
> > > +	r = dma_resv_get_fences(obj, write, &count, &fences);
> > > +        if (r)
> > > +		return r;
> > > +
> > > +	if (count == 0) {
> > > +		*fence = NULL;
> > > +		return 0;
> > > +	}
> > > +
> > > +	if (count == 1) {
> > > +		*fence = fences[0];
> > > +		kfree(fences);
> > > +		return 0;
> > > +	}
> > > +
> > > +	array = dma_fence_array_create(count, fences,
> > > +				       dma_fence_context_alloc(1),
> > > +				       1, false);
> > > +	if (!array) {
> > > +		while (count--)
> > > +			dma_fence_put(fences[count]);
> > > +		kfree(fences);
> > > +		return -ENOMEM;
> > > +	}
> > > +
> > > +	*fence = &array->base;
> > > +	return 0;
> > > +}
> > > +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
> > > +
> > >   /**
> > >    * dma_resv_wait_timeout - Wait on reservation's objects
> > >    * shared and/or exclusive fences.
> > > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > > index fa2002939b19..cdfbbda6f600 100644
> > > --- a/include/linux/dma-resv.h
> > > +++ b/include/linux/dma-resv.h
> > > @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> > >   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> > >   int dma_resv_get_fences(struct dma_resv *obj, bool write,
> > >   			unsigned int *num_fences, struct dma_fence ***fences);
> > > +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> > > +			   struct dma_fence **fence);
> > >   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
> > >   long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> > >   			   unsigned long timeout);
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
@ 2022-01-14 16:31         ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:31 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Mon, Jan 03, 2022 at 12:13:41PM +0100, Christian König wrote:
> Am 22.12.21 um 22:21 schrieb Daniel Vetter:
> > On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
> > > Add a function to simplify getting a single fence for all the fences in
> > > the dma_resv object.
> > > 
> > > v2: fix ref leak in error handling
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > > ---
> > >   drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
> > >   include/linux/dma-resv.h   |  2 ++
> > >   2 files changed, 54 insertions(+)
> > > 
> > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > > index 480c305554a1..694716a3d66d 100644
> > > --- a/drivers/dma-buf/dma-resv.c
> > > +++ b/drivers/dma-buf/dma-resv.c
> > > @@ -34,6 +34,7 @@
> > >    */
> > >   #include <linux/dma-resv.h>
> > > +#include <linux/dma-fence-array.h>
> > >   #include <linux/export.h>
> > >   #include <linux/mm.h>
> > >   #include <linux/sched/mm.h>
> > > @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
> > >   }
> > >   EXPORT_SYMBOL_GPL(dma_resv_get_fences);
> > > +/**
> > > + * dma_resv_get_singleton - Get a single fence for all the fences
> > > + * @obj: the reservation object
> > > + * @write: true if we should return all fences
> > > + * @fence: the resulting fence
> > > + *
> > > + * Get a single fence representing all the fences inside the resv object.
> > > + * Returns either 0 for success or -ENOMEM.
> > > + *
> > > + * Warning: This can't be used like this when adding the fence back to the resv
> > > + * object since that can lead to stack corruption when finalizing the
> > > + * dma_fence_array.
> > Uh I don't get this one? I thought the only problem with nested fences is
> > the signalling recursion, which we work around with the irq_work?
> 
> Nope, the main problem is finalizing the dma_fence_array.
> 
> E.g. imagine that you build up a chain of dma_fence_array objects like this:
> a<-b<-c<-d<-e<-f.....
> 
> With each one referencing the previous dma_fence_array and then you call
> dma_fence_put() on the last one. That in turn will cause calling
> dma_fence_put() on the previous one, which in turn will cause
> dma_fence_put() one the one before the previous one etc....
> 
> In other words you recurse because each dma_fence_array instance drops the
> last reference of it's predecessor.
> 
> What we could do is to delegate dropping the reference to the containing
> fences in a dma_fence_array as well, but that would require some changes to
> the irq_work_run_list() function to be halfway efficient.
> 
> > Also if there's really an issue with dma_fence_array fences, then that
> > warning should be on the dma_resv kerneldoc, not somewhere hidden like
> > this. And finally I really don't see what can go wrong, sure we'll end up
> > with the same fence once in the dma_resv_list and then once more in the
> > fence array. But they're all refcounted, so really shouldn't matter.
> > 
> > The code itself looks correct, but me not understanding what even goes
> > wrong here freaks me out a bit.
> 
> Yeah, IIRC we already discussed that with Jason in length as well.
> 
> Essentially what you can't do is to put a dma_fence_array into another
> dma_fence_array without causing issues.
> 
> So I think we should maybe just add a WARN_ON() into dma_fence_array_init()
> to make sure that this never happens.

Yeah I think this would be much clearer instead of sprinkling half the
story as a scary&confusing warning over all kinds of users which
internally use dma fence arrays.

And then if it goes boom I guess we could fix it internally in
dma_fence_array_init by flattening fences down again. But only if actually
needed.

What confused me is why dma_resv is special, and from your reply it sounds
like it really isn't.
-Daniel


> 
> Regards,
> Christian.
> 
> > 
> > I guess something to figure out next year, I kinda hoped I could squeeze a
> > review in before I disappear :-/
> > -Daniel
> > 
> > > + */
> > > +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> > > +			   struct dma_fence **fence)
> > > +{
> > > +	struct dma_fence_array *array;
> > > +	struct dma_fence **fences;
> > > +	unsigned count;
> > > +	int r;
> > > +
> > > +	r = dma_resv_get_fences(obj, write, &count, &fences);
> > > +        if (r)
> > > +		return r;
> > > +
> > > +	if (count == 0) {
> > > +		*fence = NULL;
> > > +		return 0;
> > > +	}
> > > +
> > > +	if (count == 1) {
> > > +		*fence = fences[0];
> > > +		kfree(fences);
> > > +		return 0;
> > > +	}
> > > +
> > > +	array = dma_fence_array_create(count, fences,
> > > +				       dma_fence_context_alloc(1),
> > > +				       1, false);
> > > +	if (!array) {
> > > +		while (count--)
> > > +			dma_fence_put(fences[count]);
> > > +		kfree(fences);
> > > +		return -ENOMEM;
> > > +	}
> > > +
> > > +	*fence = &array->base;
> > > +	return 0;
> > > +}
> > > +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
> > > +
> > >   /**
> > >    * dma_resv_wait_timeout - Wait on reservation's objects
> > >    * shared and/or exclusive fences.
> > > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > > index fa2002939b19..cdfbbda6f600 100644
> > > --- a/include/linux/dma-resv.h
> > > +++ b/include/linux/dma-resv.h
> > > @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> > >   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
> > >   int dma_resv_get_fences(struct dma_resv *obj, bool write,
> > >   			unsigned int *num_fences, struct dma_fence ***fences);
> > > +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> > > +			   struct dma_fence **fence);
> > >   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
> > >   long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
> > >   			   unsigned long timeout);
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object
  2022-01-04 15:08       ` Christian König
@ 2022-01-14 16:33         ` Daniel Vetter
  -1 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:33 UTC (permalink / raw)
  To: Christian König; +Cc: Daniel Vetter, dri-devel, linux-media, linaro-mm-sig

On Tue, Jan 04, 2022 at 04:08:20PM +0100, Christian König wrote:
> Am 22.12.21 um 22:43 schrieb Daniel Vetter:
> > On Tue, Dec 07, 2021 at 01:34:00PM +0100, Christian König wrote:
> > > So far we had the approach of using a directed acyclic
> > > graph with the dma_resv obj.
> > > 
> > > This turned out to have many downsides, especially it means
> > > that every single driver and user of this interface needs
> > > to be aware of this restriction when adding fences. If the
> > > rules for the DAG are not followed then we end up with
> > > potential hard to debug memory corruption, information
> > > leaks or even elephant big security holes because we allow
> > > userspace to access freed up memory.
> > > 
> > > Since we already took a step back from that by always
> > > looking at all fences we now go a step further and stop
> > > dropping the shared fences when a new exclusive one is
> > > added.
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > > ---
> > >   drivers/dma-buf/dma-resv.c | 13 -------------
> > >   1 file changed, 13 deletions(-)
> > > 
> > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > > index 9acceabc9399..ecb2ff606bac 100644
> > > --- a/drivers/dma-buf/dma-resv.c
> > > +++ b/drivers/dma-buf/dma-resv.c
> > No doc update at all!
> 
> Scratching my head I'm pretty sure I've updated at least the kerneldoc for
> dma_resv_add_excl_fence(). Must have gone lost in some rebase.
> 
> > 
> > I checked, we're not that shitty with docs,
> 
> Well I wouldn't say shitty, but they are not perfect either.

This was sarcasm, I meant to say that despite the struggles the docs
in-tree are pretty good nowadays. Email just sucks sometimes for
communication.

> >   Minimally the DOC: section
> > header and also the struct dma_resv kerneldoc. Also there's maybe more
> > references and stuff I've missed on a quick look, please check for them
> > (e.g. dma_buf.resv kerneldoc is rather important to keep correct too).
> > 
> > Code itself does what it says in the commit message, but we really need
> > the most accurate docs we can get for this stuff, or the confusion will
> > persist :-/
> 
> Yeah completely agree, going to fix that.

Awesome!

Cheers, Daniel

> 
> Thanks,
> Christian.
> 
> > 
> > Cheers, Daniel
> > 
> > > @@ -383,29 +383,16 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
> > >   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
> > >   {
> > >   	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
> > > -	struct dma_resv_list *old;
> > > -	u32 i = 0;
> > >   	dma_resv_assert_held(obj);
> > > -	old = dma_resv_shared_list(obj);
> > > -	if (old)
> > > -		i = old->shared_count;
> > > -
> > >   	dma_fence_get(fence);
> > >   	write_seqcount_begin(&obj->seq);
> > >   	/* write_seqcount_begin provides the necessary memory barrier */
> > >   	RCU_INIT_POINTER(obj->fence_excl, fence);
> > > -	if (old)
> > > -		old->shared_count = 0;
> > >   	write_seqcount_end(&obj->seq);
> > > -	/* inplace update, no shared fences */
> > > -	while (i--)
> > > -		dma_fence_put(rcu_dereference_protected(old->shared[i],
> > > -						dma_resv_held(obj)));
> > > -
> > >   	dma_fence_put(old_fence);
> > >   }
> > >   EXPORT_SYMBOL(dma_resv_add_excl_fence);
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object
@ 2022-01-14 16:33         ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:33 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Tue, Jan 04, 2022 at 04:08:20PM +0100, Christian König wrote:
> Am 22.12.21 um 22:43 schrieb Daniel Vetter:
> > On Tue, Dec 07, 2021 at 01:34:00PM +0100, Christian König wrote:
> > > So far we had the approach of using a directed acyclic
> > > graph with the dma_resv obj.
> > > 
> > > This turned out to have many downsides, especially it means
> > > that every single driver and user of this interface needs
> > > to be aware of this restriction when adding fences. If the
> > > rules for the DAG are not followed then we end up with
> > > potential hard to debug memory corruption, information
> > > leaks or even elephant big security holes because we allow
> > > userspace to access freed up memory.
> > > 
> > > Since we already took a step back from that by always
> > > looking at all fences we now go a step further and stop
> > > dropping the shared fences when a new exclusive one is
> > > added.
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > > ---
> > >   drivers/dma-buf/dma-resv.c | 13 -------------
> > >   1 file changed, 13 deletions(-)
> > > 
> > > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > > index 9acceabc9399..ecb2ff606bac 100644
> > > --- a/drivers/dma-buf/dma-resv.c
> > > +++ b/drivers/dma-buf/dma-resv.c
> > No doc update at all!
> 
> Scratching my head I'm pretty sure I've updated at least the kerneldoc for
> dma_resv_add_excl_fence(). Must have gone lost in some rebase.
> 
> > 
> > I checked, we're not that shitty with docs,
> 
> Well I wouldn't say shitty, but they are not perfect either.

This was sarcasm, I meant to say that despite the struggles the docs
in-tree are pretty good nowadays. Email just sucks sometimes for
communication.

> >   Minimally the DOC: section
> > header and also the struct dma_resv kerneldoc. Also there's maybe more
> > references and stuff I've missed on a quick look, please check for them
> > (e.g. dma_buf.resv kerneldoc is rather important to keep correct too).
> > 
> > Code itself does what it says in the commit message, but we really need
> > the most accurate docs we can get for this stuff, or the confusion will
> > persist :-/
> 
> Yeah completely agree, going to fix that.

Awesome!

Cheers, Daniel

> 
> Thanks,
> Christian.
> 
> > 
> > Cheers, Daniel
> > 
> > > @@ -383,29 +383,16 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
> > >   void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
> > >   {
> > >   	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
> > > -	struct dma_resv_list *old;
> > > -	u32 i = 0;
> > >   	dma_resv_assert_held(obj);
> > > -	old = dma_resv_shared_list(obj);
> > > -	if (old)
> > > -		i = old->shared_count;
> > > -
> > >   	dma_fence_get(fence);
> > >   	write_seqcount_begin(&obj->seq);
> > >   	/* write_seqcount_begin provides the necessary memory barrier */
> > >   	RCU_INIT_POINTER(obj->fence_excl, fence);
> > > -	if (old)
> > > -		old->shared_count = 0;
> > >   	write_seqcount_end(&obj->seq);
> > > -	/* inplace update, no shared fences */
> > > -	while (i--)
> > > -		dma_fence_put(rcu_dereference_protected(old->shared[i],
> > > -						dma_resv_held(obj)));
> > > -
> > >   	dma_fence_put(old_fence);
> > >   }
> > >   EXPORT_SYMBOL(dma_resv_add_excl_fence);
> > > -- 
> > > 2.25.1
> > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: completely rework the dma_resv semantic
  2021-12-23  9:11       ` Christian König
@ 2022-01-14 16:35         ` Daniel Vetter
  -1 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:35 UTC (permalink / raw)
  To: Christian König; +Cc: Daniel Vetter, dri-devel, linux-media, linaro-mm-sig

On Thu, Dec 23, 2021 at 10:11:20AM +0100, Christian König wrote:
> Am 22.12.21 um 23:17 schrieb Daniel Vetter:
> > On Fri, Dec 17, 2021 at 03:39:52PM +0100, Christian König wrote:
> > > Hi Daniel,
> > > 
> > > looks like this is going nowhere and you don't seem to have time to review.
> > > 
> > > What can we do?
> > cc more people, you didn't cc any of the driver folks :-)
> 
> Well I've CCed more people and lists and the first round of the patches.
> Just wanted to get some more comments from you first before widening the
> audience.

IME it's good to just always spam driver authors on big stuff like this,
increases the odds more folks get involved. And in the end we need the
entire subsystem to understand this (or at least not accidentally break
the rules you roll out now like we've done in the past).

Plus you'll get the driver acks faster that way :-)
-Daniel


> > Also I did find some review before I disappeared, back on 10th Jan.
> 
> Good, then I have at least something to do for the first week of January.
> 
> Happy holidays,
> Christian.
> 
> > 
> > Cheers, Daniel
> > 
> > > Thanks,
> > > Christian.
> > > 
> > > Am 07.12.21 um 13:33 schrieb Christian König:
> > > > Hi Daniel,
> > > > 
> > > > just a gentle ping that you wanted to take a look at this.
> > > > 
> > > > Not much changed compared to the last version, only a minor bugfix in
> > > > the dma_resv_get_singleton error handling.
> > > > 
> > > > Regards,
> > > > Christian.
> > > > 
> > > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: completely rework the dma_resv semantic
@ 2022-01-14 16:35         ` Daniel Vetter
  0 siblings, 0 replies; 95+ messages in thread
From: Daniel Vetter @ 2022-01-14 16:35 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, dri-devel, linux-media

On Thu, Dec 23, 2021 at 10:11:20AM +0100, Christian König wrote:
> Am 22.12.21 um 23:17 schrieb Daniel Vetter:
> > On Fri, Dec 17, 2021 at 03:39:52PM +0100, Christian König wrote:
> > > Hi Daniel,
> > > 
> > > looks like this is going nowhere and you don't seem to have time to review.
> > > 
> > > What can we do?
> > cc more people, you didn't cc any of the driver folks :-)
> 
> Well I've CCed more people and lists and the first round of the patches.
> Just wanted to get some more comments from you first before widening the
> audience.

IME it's good to just always spam driver authors on big stuff like this,
increases the odds more folks get involved. And in the end we need the
entire subsystem to understand this (or at least not accidentally break
the rules you roll out now like we've done in the past).

Plus you'll get the driver acks faster that way :-)
-Daniel


> > Also I did find some review before I disappeared, back on 10th Jan.
> 
> Good, then I have at least something to do for the first week of January.
> 
> Happy holidays,
> Christian.
> 
> > 
> > Cheers, Daniel
> > 
> > > Thanks,
> > > Christian.
> > > 
> > > Am 07.12.21 um 13:33 schrieb Christian König:
> > > > Hi Daniel,
> > > > 
> > > > just a gentle ping that you wanted to take a look at this.
> > > > 
> > > > Not much changed compared to the last version, only a minor bugfix in
> > > > the dma_resv_get_singleton error handling.
> > > > 
> > > > Regards,
> > > > Christian.
> > > > 
> > > > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
  2022-01-14 16:31         ` Daniel Vetter
@ 2022-01-17 11:26           ` Christian König
  -1 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-17 11:26 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel, linux-media, linaro-mm-sig

Am 14.01.22 um 17:31 schrieb Daniel Vetter:
> On Mon, Jan 03, 2022 at 12:13:41PM +0100, Christian König wrote:
>> Am 22.12.21 um 22:21 schrieb Daniel Vetter:
>>> On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
>>>> Add a function to simplify getting a single fence for all the fences in
>>>> the dma_resv object.
>>>>
>>>> v2: fix ref leak in error handling
>>>>
>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> ---
>>>>    drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
>>>>    include/linux/dma-resv.h   |  2 ++
>>>>    2 files changed, 54 insertions(+)
>>>>
>>>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>>>> index 480c305554a1..694716a3d66d 100644
>>>> --- a/drivers/dma-buf/dma-resv.c
>>>> +++ b/drivers/dma-buf/dma-resv.c
>>>> @@ -34,6 +34,7 @@
>>>>     */
>>>>    #include <linux/dma-resv.h>
>>>> +#include <linux/dma-fence-array.h>
>>>>    #include <linux/export.h>
>>>>    #include <linux/mm.h>
>>>>    #include <linux/sched/mm.h>
>>>> @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>>>    }
>>>>    EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>>>> +/**
>>>> + * dma_resv_get_singleton - Get a single fence for all the fences
>>>> + * @obj: the reservation object
>>>> + * @write: true if we should return all fences
>>>> + * @fence: the resulting fence
>>>> + *
>>>> + * Get a single fence representing all the fences inside the resv object.
>>>> + * Returns either 0 for success or -ENOMEM.
>>>> + *
>>>> + * Warning: This can't be used like this when adding the fence back to the resv
>>>> + * object since that can lead to stack corruption when finalizing the
>>>> + * dma_fence_array.
>>> Uh I don't get this one? I thought the only problem with nested fences is
>>> the signalling recursion, which we work around with the irq_work?
>> Nope, the main problem is finalizing the dma_fence_array.
>>
>> E.g. imagine that you build up a chain of dma_fence_array objects like this:
>> a<-b<-c<-d<-e<-f.....
>>
>> With each one referencing the previous dma_fence_array and then you call
>> dma_fence_put() on the last one. That in turn will cause calling
>> dma_fence_put() on the previous one, which in turn will cause
>> dma_fence_put() one the one before the previous one etc....
>>
>> In other words you recurse because each dma_fence_array instance drops the
>> last reference of its predecessor.
>>
>> What we could do is to delegate dropping the reference to the containing
>> fences in a dma_fence_array as well, but that would require some changes to
>> the irq_work_run_list() function to be halfway efficient.
>>
>>> Also if there's really an issue with dma_fence_array fences, then that
>>> warning should be on the dma_resv kerneldoc, not somewhere hidden like
>>> this. And finally I really don't see what can go wrong, sure we'll end up
>>> with the same fence once in the dma_resv_list and then once more in the
>>> fence array. But they're all refcounted, so really shouldn't matter.
>>>
>>> The code itself looks correct, but me not understanding what even goes
>>> wrong here freaks me out a bit.
>> Yeah, IIRC we already discussed that with Jason in length as well.
>>
>> Essentially what you can't do is to put a dma_fence_array into another
>> dma_fence_array without causing issues.
>>
>> So I think we should maybe just add a WARN_ON() into dma_fence_array_init()
>> to make sure that this never happens.
> Yeah I think this would be much clearer instead of sprinkling half the
> story as a scary&confusing warning over all kinds of users which
> internally use dma fence arrays.
>
> And then if it goes boom I guess we could fix it internally in
> dma_fence_array_init by flattening fences down again. But only if actually
> needed.

Ok, going to do that first then.

>
> What confused me is why dma_resv is special, and from your reply it sounds
> like it really isn't.

Well, it isn't special in any way. It's just something very obvious 
which could go wrong.

Regards,
Christian.

> -Daniel
>
>
>> Regards,
>> Christian.
>>
>>> I guess something to figure out next year, I kinda hoped I could squeeze a
>>> review in before I disappear :-/
>>> -Daniel
>>>
>>>> + */
>>>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>>>> +			   struct dma_fence **fence)
>>>> +{
>>>> +	struct dma_fence_array *array;
>>>> +	struct dma_fence **fences;
>>>> +	unsigned count;
>>>> +	int r;
>>>> +
>>>> +	r = dma_resv_get_fences(obj, write, &count, &fences);
>>>> +        if (r)
>>>> +		return r;
>>>> +
>>>> +	if (count == 0) {
>>>> +		*fence = NULL;
>>>> +		return 0;
>>>> +	}
>>>> +
>>>> +	if (count == 1) {
>>>> +		*fence = fences[0];
>>>> +		kfree(fences);
>>>> +		return 0;
>>>> +	}
>>>> +
>>>> +	array = dma_fence_array_create(count, fences,
>>>> +				       dma_fence_context_alloc(1),
>>>> +				       1, false);
>>>> +	if (!array) {
>>>> +		while (count--)
>>>> +			dma_fence_put(fences[count]);
>>>> +		kfree(fences);
>>>> +		return -ENOMEM;
>>>> +	}
>>>> +
>>>> +	*fence = &array->base;
>>>> +	return 0;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
>>>> +
>>>>    /**
>>>>     * dma_resv_wait_timeout - Wait on reservation's objects
>>>>     * shared and/or exclusive fences.
>>>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>>>> index fa2002939b19..cdfbbda6f600 100644
>>>> --- a/include/linux/dma-resv.h
>>>> +++ b/include/linux/dma-resv.h
>>>> @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>>>>    void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>>>>    int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>>>    			unsigned int *num_fences, struct dma_fence ***fences);
>>>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>>>> +			   struct dma_fence **fence);
>>>>    int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>>>>    long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>>>>    			   unsigned long timeout);
>>>> -- 
>>>> 2.25.1
>>>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
@ 2022-01-17 11:26           ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-01-17 11:26 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media

Am 14.01.22 um 17:31 schrieb Daniel Vetter:
> On Mon, Jan 03, 2022 at 12:13:41PM +0100, Christian König wrote:
>> Am 22.12.21 um 22:21 schrieb Daniel Vetter:
>>> On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
>>>> Add a function to simplify getting a single fence for all the fences in
>>>> the dma_resv object.
>>>>
>>>> v2: fix ref leak in error handling
>>>>
>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> ---
>>>>    drivers/dma-buf/dma-resv.c | 52 ++++++++++++++++++++++++++++++++++++++
>>>>    include/linux/dma-resv.h   |  2 ++
>>>>    2 files changed, 54 insertions(+)
>>>>
>>>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>>>> index 480c305554a1..694716a3d66d 100644
>>>> --- a/drivers/dma-buf/dma-resv.c
>>>> +++ b/drivers/dma-buf/dma-resv.c
>>>> @@ -34,6 +34,7 @@
>>>>     */
>>>>    #include <linux/dma-resv.h>
>>>> +#include <linux/dma-fence-array.h>
>>>>    #include <linux/export.h>
>>>>    #include <linux/mm.h>
>>>>    #include <linux/sched/mm.h>
>>>> @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>>>    }
>>>>    EXPORT_SYMBOL_GPL(dma_resv_get_fences);
>>>> +/**
>>>> + * dma_resv_get_singleton - Get a single fence for all the fences
>>>> + * @obj: the reservation object
>>>> + * @write: true if we should return all fences
>>>> + * @fence: the resulting fence
>>>> + *
>>>> + * Get a single fence representing all the fences inside the resv object.
>>>> + * Returns either 0 for success or -ENOMEM.
>>>> + *
>>>> + * Warning: This can't be used like this when adding the fence back to the resv
>>>> + * object since that can lead to stack corruption when finalizing the
>>>> + * dma_fence_array.
>>> Uh I don't get this one? I thought the only problem with nested fences is
>>> the signalling recursion, which we work around with the irq_work?
>> Nope, the main problem is finalizing the dma_fence_array.
>>
>> E.g. imagine that you build up a chain of dma_fence_array objects like this:
>> a<-b<-c<-d<-e<-f.....
>>
>> With each one referencing the previous dma_fence_array and then you call
>> dma_fence_put() on the last one. That in turn will cause calling
>> dma_fence_put() on the previous one, which in turn will cause
>> dma_fence_put() one the one before the previous one etc....
>>
>> In other words you recurse because each dma_fence_array instance drops the
>> last reference of its predecessor.
>>
>> What we could do is to delegate dropping the reference to the containing
>> fences in a dma_fence_array as well, but that would require some changes to
>> the irq_work_run_list() function to be halfway efficient.
>>
>>> Also if there's really an issue with dma_fence_array fences, then that
>>> warning should be on the dma_resv kerneldoc, not somewhere hidden like
>>> this. And finally I really don't see what can go wrong, sure we'll end up
>>> with the same fence once in the dma_resv_list and then once more in the
>>> fence array. But they're all refcounted, so really shouldn't matter.
>>>
>>> The code itself looks correct, but me not understanding what even goes
>>> wrong here freaks me out a bit.
>> Yeah, IIRC we already discussed that with Jason in length as well.
>>
>> Essentially what you can't do is to put a dma_fence_array into another
>> dma_fence_array without causing issues.
>>
>> So I think we should maybe just add a WARN_ON() into dma_fence_array_init()
>> to make sure that this never happens.
> Yeah I think this would be much clearer instead of sprinkling half the
> story as a scary&confusing warning over all kinds of users which
> internally use dma fence arrays.
>
> And then if it goes boom I guess we could fix it internally in
> dma_fence_array_init by flattening fences down again. But only if actually
> needed.

Ok, going to do that first then.

>
> What confused me is why dma_resv is special, and from your reply it sounds
> like it really isn't.

Well, it isn't special in any way. It's just something very obvious 
which could go wrong.

Regards,
Christian.

> -Daniel
>
>
>> Regards,
>> Christian.
>>
>>> I guess something to figure out next year, I kinda hoped I could squeeze a
>>> review in before I disappear :-/
>>> -Daniel
>>>
>>>> + */
>>>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>>>> +			   struct dma_fence **fence)
>>>> +{
>>>> +	struct dma_fence_array *array;
>>>> +	struct dma_fence **fences;
>>>> +	unsigned count;
>>>> +	int r;
>>>> +
>>>> +	r = dma_resv_get_fences(obj, write, &count, &fences);
>>>> +        if (r)
>>>> +		return r;
>>>> +
>>>> +	if (count == 0) {
>>>> +		*fence = NULL;
>>>> +		return 0;
>>>> +	}
>>>> +
>>>> +	if (count == 1) {
>>>> +		*fence = fences[0];
>>>> +		kfree(fences);
>>>> +		return 0;
>>>> +	}
>>>> +
>>>> +	array = dma_fence_array_create(count, fences,
>>>> +				       dma_fence_context_alloc(1),
>>>> +				       1, false);
>>>> +	if (!array) {
>>>> +		while (count--)
>>>> +			dma_fence_put(fences[count]);
>>>> +		kfree(fences);
>>>> +		return -ENOMEM;
>>>> +	}
>>>> +
>>>> +	*fence = &array->base;
>>>> +	return 0;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
>>>> +
>>>>    /**
>>>>     * dma_resv_wait_timeout - Wait on reservation's objects
>>>>     * shared and/or exclusive fences.
>>>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>>>> index fa2002939b19..cdfbbda6f600 100644
>>>> --- a/include/linux/dma-resv.h
>>>> +++ b/include/linux/dma-resv.h
>>>> @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
>>>>    void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
>>>>    int dma_resv_get_fences(struct dma_resv *obj, bool write,
>>>>    			unsigned int *num_fences, struct dma_fence ***fences);
>>>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>>>> +			   struct dma_fence **fence);
>>>>    int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
>>>>    long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>>>>    			   unsigned long timeout);
>>>> -- 
>>>> 2.25.1
>>>>


^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2
  2022-01-17 11:26           ` Christian König
  (?)
@ 2022-03-02 17:39           ` Jason Ekstrand
  -1 siblings, 0 replies; 95+ messages in thread
From: Jason Ekstrand @ 2022-03-02 17:39 UTC (permalink / raw)
  To: Christian König
  Cc: moderated list:DMA BUFFER SHARING FRAMEWORK,
	Maling list - DRI developers,
	open list:DMA BUFFER SHARING FRAMEWORK

[-- Attachment #1: Type: text/plain, Size: 7521 bytes --]

On Mon, Jan 17, 2022 at 5:26 AM Christian König <
ckoenig.leichtzumerken@gmail.com> wrote:

> Am 14.01.22 um 17:31 schrieb Daniel Vetter:
> > On Mon, Jan 03, 2022 at 12:13:41PM +0100, Christian König wrote:
> >> Am 22.12.21 um 22:21 schrieb Daniel Vetter:
> >>> On Tue, Dec 07, 2021 at 01:33:51PM +0100, Christian König wrote:
> >>>> Add a function to simplify getting a single fence for all the fences
> in
> >>>> the dma_resv object.
> >>>>
> >>>> v2: fix ref leak in error handling
> >>>>
> >>>> Signed-off-by: Christian König <christian.koenig@amd.com>
> >>>> ---
> >>>>    drivers/dma-buf/dma-resv.c | 52
> ++++++++++++++++++++++++++++++++++++++
> >>>>    include/linux/dma-resv.h   |  2 ++
> >>>>    2 files changed, 54 insertions(+)
> >>>>
> >>>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> >>>> index 480c305554a1..694716a3d66d 100644
> >>>> --- a/drivers/dma-buf/dma-resv.c
> >>>> +++ b/drivers/dma-buf/dma-resv.c
> >>>> @@ -34,6 +34,7 @@
> >>>>     */
> >>>>    #include <linux/dma-resv.h>
> >>>> +#include <linux/dma-fence-array.h>
> >>>>    #include <linux/export.h>
> >>>>    #include <linux/mm.h>
> >>>>    #include <linux/sched/mm.h>
> >>>> @@ -657,6 +658,57 @@ int dma_resv_get_fences(struct dma_resv *obj,
> bool write,
> >>>>    }
> >>>>    EXPORT_SYMBOL_GPL(dma_resv_get_fences);
> >>>> +/**
> >>>> + * dma_resv_get_singleton - Get a single fence for all the fences
> >>>> + * @obj: the reservation object
> >>>> + * @write: true if we should return all fences
> >>>> + * @fence: the resulting fence
> >>>> + *
> >>>> + * Get a single fence representing all the fences inside the resv
> object.
> >>>> + * Returns either 0 for success or -ENOMEM.
> >>>> + *
> >>>> + * Warning: This can't be used like this when adding the fence back
> to the resv
> >>>> + * object since that can lead to stack corruption when finalizing the
> >>>> + * dma_fence_array.
> >>> Uh I don't get this one? I thought the only problem with nested fences
> is
> >>> the signalling recursion, which we work around with the irq_work?
> >> Nope, the main problem is finalizing the dma_fence_array.
> >>
> >> E.g. imagine that you build up a chain of dma_fence_array objects like
> this:
> >> a<-b<-c<-d<-e<-f.....
> >>
> >> With each one referencing the previous dma_fence_array and then you call
> >> dma_fence_put() on the last one. That in turn will cause calling
> >> dma_fence_put() on the previous one, which in turn will cause
> >> dma_fence_put() one the one before the previous one etc....
> >>
> >> In other words you recurse because each dma_fence_array instance drops
> the
> >> last reference of its predecessor.
> >>
> >> What we could do is to delegate dropping the reference to the containing
> >> fences in a dma_fence_array as well, but that would require some
> changes to
> >> the irq_work_run_list() function to be halfway efficient.
> >>
> >>> Also if there's really an issue with dma_fence_array fences, then that
> >>> warning should be on the dma_resv kerneldoc, not somewhere hidden like
> >>> this. And finally I really don't see what can go wrong, sure we'll end
> up
> >>> with the same fence once in the dma_resv_list and then once more in the
> >>> fence array. But they're all refcounted, so really shouldn't matter.
> >>>
> >>> The code itself looks correct, but me not understanding what even goes
> >>> wrong here freaks me out a bit.
> >> Yeah, IIRC we already discussed that with Jason in length as well.
> >>
> >> Essentially what you can't do is to put a dma_fence_array into another
> >> dma_fence_array without causing issues.
> >>
> >> So I think we should maybe just add a WARN_ON() into
> dma_fence_array_init()
> >> to make sure that this never happens.
> > Yeah I think this would be much clearer instead of sprinkling half the
> > story as a scary&confusing warning over all kinds of users which
> > internally use dma fence arrays.
>

Agreed.  WARN_ON in dma_fence_array_init() would be better for everyone, I
think.


> > And then if it goes boom I guess we could fix it internally in
> > dma_fence_array_init by flattening fences down again. But only if
> actually
> > needed.
>
> Ok, going to do that first then.
>

Sounds good.  This patch looks pretty reasonable to me.  I do have a bit of
a concern with how it's being used to replace calls to
dma_resv_excl_fence() in later patches, though.  In particular, this may
allocate memory whereas dma_resv_excl_fence() does not so we need to be
really careful in each of the replacements that doing so is safe.  That's a
job for the per-driver reviewers but I thought I'd drop a note here so
we're all aware of and watching for it.

--Jason


> >
> > What confused me is why dma_resv is special, and from your reply it
> sounds
> > like it really isn't.
>
> Well, it isn't special in any way. It's just something very obvious
> which could go wrong.
>
> Regards,
> Christian.
>
> > -Daniel
> >
> >
> >> Regards,
> >> Christian.
> >>
> >>> I guess something to figure out next year, I kinda hoped I could
> squeeze a
> >>> review in before I disappear :-/
> >>> -Daniel
> >>>
> >>>> + */
> >>>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> >>>> +                     struct dma_fence **fence)
> >>>> +{
> >>>> +  struct dma_fence_array *array;
> >>>> +  struct dma_fence **fences;
> >>>> +  unsigned count;
> >>>> +  int r;
> >>>> +
> >>>> +  r = dma_resv_get_fences(obj, write, &count, &fences);
> >>>> +        if (r)
> >>>> +          return r;
> >>>> +
> >>>> +  if (count == 0) {
> >>>> +          *fence = NULL;
> >>>> +          return 0;
> >>>> +  }
> >>>> +
> >>>> +  if (count == 1) {
> >>>> +          *fence = fences[0];
> >>>> +          kfree(fences);
> >>>> +          return 0;
> >>>> +  }
> >>>> +
> >>>> +  array = dma_fence_array_create(count, fences,
> >>>> +                                 dma_fence_context_alloc(1),
> >>>> +                                 1, false);
> >>>> +  if (!array) {
> >>>> +          while (count--)
> >>>> +                  dma_fence_put(fences[count]);
> >>>> +          kfree(fences);
> >>>> +          return -ENOMEM;
> >>>> +  }
> >>>> +
> >>>> +  *fence = &array->base;
> >>>> +  return 0;
> >>>> +}
> >>>> +EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
> >>>> +
> >>>>    /**
> >>>>     * dma_resv_wait_timeout - Wait on reservation's objects
> >>>>     * shared and/or exclusive fences.
> >>>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> >>>> index fa2002939b19..cdfbbda6f600 100644
> >>>> --- a/include/linux/dma-resv.h
> >>>> +++ b/include/linux/dma-resv.h
> >>>> @@ -438,6 +438,8 @@ void dma_resv_replace_fences(struct dma_resv
> *obj, uint64_t context,
> >>>>    void dma_resv_add_excl_fence(struct dma_resv *obj, struct
> dma_fence *fence);
> >>>>    int dma_resv_get_fences(struct dma_resv *obj, bool write,
> >>>>                            unsigned int *num_fences, struct dma_fence
> ***fences);
> >>>> +int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> >>>> +                     struct dma_fence **fence);
> >>>>    int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv
> *src);
> >>>>    long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all,
> bool intr,
> >>>>                               unsigned long timeout);
> >>>> --
> >>>> 2.25.1
> >>>>
>
>

[-- Attachment #2: Type: text/html, Size: 10310 bytes --]

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 18/24] dma-buf: add enum dma_resv_usage v3
  2021-12-22 22:00     ` Daniel Vetter
  (?)
@ 2022-03-02 17:55     ` Jason Ekstrand
  2022-03-03 13:13       ` Christian König
  -1 siblings, 1 reply; 95+ messages in thread
From: Jason Ekstrand @ 2022-03-02 17:55 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: moderated list:DMA BUFFER SHARING FRAMEWORK,
	Christian König, Maling list - DRI developers,
	open list:DMA BUFFER SHARING FRAMEWORK

[-- Attachment #1: Type: text/plain, Size: 9325 bytes --]

On Wed, Dec 22, 2021 at 4:00 PM Daniel Vetter <daniel@ffwll.ch> wrote:

> On Tue, Dec 07, 2021 at 01:34:05PM +0100, Christian König wrote:
> > This change adds the dma_resv_usage enum and allows us to specify why a
> > dma_resv object is queried for its containing fences.
> >
> > Additional to that a dma_resv_usage_rw() helper function is added to aid
> > retrieving the fences for a read or write userspace submission.
> >
> > This is then deployed to the different query functions of the dma_resv
> > object and all of their users. When the write parameter was previously
> > true we now use DMA_RESV_USAGE_WRITE and DMA_RESV_USAGE_READ otherwise.
> >
> > v2: add KERNEL/OTHER in separate patch
> > v3: some kerneldoc suggestions by Daniel
> >
> > Signed-off-by: Christian König <christian.koenig@amd.com>
>
> Just commenting on the kerneldoc here.
>
> > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > index ecb2ff606bac..33a17db89fb4 100644
> > --- a/drivers/dma-buf/dma-resv.c
> > +++ b/drivers/dma-buf/dma-resv.c
> > @@ -408,7 +408,7 @@ static void dma_resv_iter_restart_unlocked(struct
> dma_resv_iter *cursor)
> >         cursor->seq = read_seqcount_begin(&cursor->obj->seq);
> >         cursor->index = -1;
> >         cursor->shared_count = 0;
> > -       if (cursor->all_fences) {
> > +       if (cursor->usage >= DMA_RESV_USAGE_READ) {
>

If we're going to do this....


> >                 cursor->fences = dma_resv_shared_list(cursor->obj);
> >                 if (cursor->fences)
> >                         cursor->shared_count =
> cursor->fences->shared_count;


> > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > index 40ac9d486f8f..d96d8ca9af56 100644
> > --- a/include/linux/dma-resv.h
> > +++ b/include/linux/dma-resv.h
> > @@ -49,6 +49,49 @@ extern struct ww_class reservation_ww_class;
> >
> >  struct dma_resv_list;
> >
> > +/**
> > + * enum dma_resv_usage - how the fences from a dma_resv obj are used
> > + *
>

We probably want a note in here about the ordering of this enum.  I'm not
even sure that comparing enum values is good or that all values will have a
strict ordering that can be useful.  It would definitely make me nervous if
anything outside dma-resv.c is doing comparisons on these values.

--Jason


> > + * This enum describes the different use cases for a dma_resv object and
> > + * controls which fences are returned when queried.
>
> We need to link here to both dma_buf.resv and from there to here.
>
> Also we had a fair amount of text in the old dma_resv fields which should
> probably be included here.
>
> > + */
> > +enum dma_resv_usage {
> > +     /**
> > +      * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
> > +      *
> > +      * This should only be used for userspace command submissions
> which add
> > +      * an implicit write dependency.
> > +      */
> > +     DMA_RESV_USAGE_WRITE,
> > +
> > +     /**
> > +      * @DMA_RESV_USAGE_READ: Implicit read synchronization.
> > +      *
> > +      * This should only be used for userspace command submissions
> which add
> > +      * an implicit read dependency.
>
> I think the above would benefit from at least a link each to &dma_buf.resv
> for further discusion.
>
> Plus the READ flag needs a huge warning that in general it does _not_
> guarantee that there are no writes possible, nor that the writes can
> be assumed to be mistakes and dropped (on buffer moves, e.g.).
>
> Drivers can only make further assumptions for driver-internal dma_resv
> objects (e.g. on vm/pagetables) or when the fences are all fences of the
> same driver (e.g. the special sync rules amd has that takes the fence
> owner into account).
>
> We have this documented in the dma_buf.resv rules, but since it came up
> again in a discussion with Thomas H. somewhere, it's better to hammer this
> in a few more times. Specifically, in general, ignoring READ fences for
> buffer moves (well the copy job, memory freeing still has to wait for all
> of them) is a correctness bug.
>
> Maybe include a big warning that really the difference between READ and
> WRITE should only matter for implicit sync, and _not_ for anything else
> the kernel does.
>
> I'm assuming the actual replacement is all mechanical, so I skipped that
> one for now, that's for next year :-)
> -Daniel
>
> > +      */
> > +     DMA_RESV_USAGE_READ,
> > +};
> > +
> > +/**
> > + * dma_resv_usage_rw - helper for implicit sync
> > + * @write: true if we create a new implicit sync write
> > + *
> > + * This returns the implicit synchronization usage for write or read
> accesses,
> > + * see enum dma_resv_usage.
> > + */
> > +static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
> > +{
> > +     /* This looks confusing at first sight, but is indeed correct.
> > +      *
> > +      * The rational is that new write operations needs to wait for the
> > +      * existing read and write operations to finish.
> > +      * But a new read operation only needs to wait for the existing
> write
> > +      * operations to finish.
> > +      */
> > +     return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
> > +}
> > +
> >  /**
> >   * struct dma_resv - a reservation object manages fences for a buffer
> >   *
> > @@ -147,8 +190,8 @@ struct dma_resv_iter {
> >       /** @obj: The dma_resv object we iterate over */
> >       struct dma_resv *obj;
> >
> > -     /** @all_fences: If all fences should be returned */
> > -     bool all_fences;
> > +     /** @usage: Controls which fences are returned */
> > +     enum dma_resv_usage usage;
> >
> >       /** @fence: the currently handled fence */
> >       struct dma_fence *fence;
> > @@ -178,14 +221,14 @@ struct dma_fence *dma_resv_iter_next(struct
> dma_resv_iter *cursor);
> >   * dma_resv_iter_begin - initialize a dma_resv_iter object
> >   * @cursor: The dma_resv_iter object to initialize
> >   * @obj: The dma_resv object which we want to iterate over
> > - * @all_fences: If all fences should be returned or just the exclusive
> one
> > + * @usage: controls which fences to include, see enum dma_resv_usage.
> >   */
> >  static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
> >                                      struct dma_resv *obj,
> > -                                    bool all_fences)
> > +                                    enum dma_resv_usage usage)
> >  {
> >       cursor->obj = obj;
> > -     cursor->all_fences = all_fences;
> > +     cursor->usage = usage;
> >       cursor->fence = NULL;
> >  }
> >
> > @@ -242,7 +285,7 @@ static inline bool dma_resv_iter_is_restarted(struct
> dma_resv_iter *cursor)
> >   * dma_resv_for_each_fence - fence iterator
> >   * @cursor: a struct dma_resv_iter pointer
> >   * @obj: a dma_resv object pointer
> > - * @all_fences: true if all fences should be returned
> > + * @usage: controls which fences to return
> >   * @fence: the current fence
> >   *
> >   * Iterate over the fences in a struct dma_resv object while holding the
> > @@ -251,8 +294,8 @@ static inline bool dma_resv_iter_is_restarted(struct
> dma_resv_iter *cursor)
> >   * valid as long as the lock is held and so no extra reference to the
> fence is
> >   * taken.
> >   */
> > -#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)      \
> > -     for (dma_resv_iter_begin(cursor, obj, all_fences),      \
> > +#define dma_resv_for_each_fence(cursor, obj, usage, fence)   \
> > +     for (dma_resv_iter_begin(cursor, obj, usage),   \
> >            fence = dma_resv_iter_first(cursor); fence;        \
> >            fence = dma_resv_iter_next(cursor))
> >
> > @@ -419,14 +462,14 @@ void dma_resv_add_shared_fence(struct dma_resv
> *obj, struct dma_fence *fence);
> >  void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
> >                            struct dma_fence *fence);
> >  void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence
> *fence);
> > -int dma_resv_get_fences(struct dma_resv *obj, bool write,
> > +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
> >                       unsigned int *num_fences, struct dma_fence
> ***fences);
> > -int dma_resv_get_singleton(struct dma_resv *obj, bool write,
> > +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage
> usage,
> >                          struct dma_fence **fence);
> >  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
> > -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool
> intr,
> > -                        unsigned long timeout);
> > -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
> > +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage
> usage,
> > +                        bool intr, unsigned long timeout);
> > +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage
> usage);
> >  void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
> >
> >  #endif /* _LINUX_RESERVATION_H */
> > --
> > 2.25.1
> >
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
>

[-- Attachment #2: Type: text/html, Size: 11623 bytes --]

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL
  2021-12-22 22:05     ` Daniel Vetter
  (?)
@ 2022-03-02 18:11     ` Jason Ekstrand
  2022-03-03 13:49       ` Christian König
  -1 siblings, 1 reply; 95+ messages in thread
From: Jason Ekstrand @ 2022-03-02 18:11 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: moderated list:DMA BUFFER SHARING FRAMEWORK,
	Christian König, Maling list - DRI developers,
	open list:DMA BUFFER SHARING FRAMEWORK

[-- Attachment #1: Type: text/plain, Size: 5097 bytes --]

On Wed, Dec 22, 2021 at 4:05 PM Daniel Vetter <daniel@ffwll.ch> wrote:

> On Tue, Dec 07, 2021 at 01:34:07PM +0100, Christian König wrote:
> > Add an usage for kernel submissions. Waiting for those
> > are mandatory for dynamic DMA-bufs.
> >
> > Signed-off-by: Christian König <christian.koenig@amd.com>
>
> Again just skipping to the doc bikeshedding, maybe with more cc others
> help with some code review too.
>
> >  EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
> > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > index 4f3a6abf43c4..29d799991496 100644
> > --- a/include/linux/dma-resv.h
> > +++ b/include/linux/dma-resv.h
> > @@ -54,8 +54,30 @@ struct dma_resv_list;
> >   *
> >   * This enum describes the different use cases for a dma_resv object and
> >   * controls which fences are returned when queried.
> > + *
> > + * An important fact is that there is the order KERNEL<WRITE<READ and
> > + * when the dma_resv object is asked for fences for one use case the
> fences
> > + * for the lower use case are returned as well.
> > + *
> > + * For example when asking for WRITE fences then the KERNEL fences are
> returned
> > + * as well. Similar when asked for READ fences then both WRITE and
> KERNEL
> > + * fences are returned as well.
> >   */
> >  enum dma_resv_usage {
> > +     /**
> > +      * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
> > +      *
> > +      * This should only be used for things like copying or clearing
> memory
> > +      * with a DMA hardware engine for the purpose of kernel memory
> > +      * management.
> > +      *
> > +         * Drivers *always* need to wait for those fences before
> accessing the
>

super-nit: Your whitespace is wrong here.


> s/need to/must/ to stay with usual RFC wording. It's a hard requirement or
> there's a security bug somewhere.
>

Yeah, probably.  I like *must* but that's because that's what we use in the
VK spec.  Do whatever's usual for kernel docs.

Not sure where to put this comment but I feel like the way things are
framed is a bit the wrong way around.  Specifically, I don't think we
should be talking about what fences you must wait on so much as what fences
you can safely skip.  In the previous model, the exclusive fence had to be
waited on at all times and the shared fences could be skipped unless you
were doing something that would result in a new exclusive fence.  In this
new world of "it's just a bucket of fences", we need to be very sure the
waiting is happening on the right things.  It sounds (I could be wrong)
like USAGE_KERNEL is the new exclusive fence.  If so, we need to make it
virtually impossible to ignore.

Sorry if that's a bit of a ramble.  I think what I'm saying is this:  In
whatever helpers or iterators we have, be that get_singleton or iter_begin
or whatever, we need to be sure we specify things in terms of exclusion and
not inclusion.  "Give me everything except implicit sync read fences"
rather than "give me implicit sync write fences".  If having a single,
well-ordered enum is sufficient for that, great.  If we think we'll ever
end up with something other than a strict ordering, we may need to re-think
a bit.

Concerning well-ordering... I'm a bit surprised to only see three values
here.  I expected 4:

 - kernel exclusive, used for memory moves and the like
 - kernel shared, used for "I'm using this right now, don't yank it out
from under me" which may not have any implicit sync implications whatsoever
 - implicit sync write
 - implicit sync read

If we had those four, I don't think the strict ordering works anymore.
From the POV of implicit sync, they would look at the implicit sync
read/write fences and maybe not even kernel exclusive.  From the POV of
some doing a BO move, they'd look at all of them.  From the POV of holding
on to memory while Vulkan is using it, you want to set a kernel shared
fence but it doesn't need to interact with implicit sync at all.  Am I
missing something obvious here?

--Jason



> > +      * resource protected by the dma_resv object. The only exception
> for
> > +      * that is when the resource is known to be locked down in place by
> > +      * pinning it previously.
>
> Is this true? This sounds more confusing than helpful, because afaik in
> general our pin interfaces do not block for any kernel fences. dma_buf_pin
> doesn't do that for sure. And I don't think ttm does that either.
>
> I think the only safe thing here is to state that it's safe if a) the
> resource is pinned down and b) the callers has previously waited for the
> kernel fences.
>
> I also think we should put that wait for kernel fences into dma_buf_pin(),
> but that's maybe a later patch.
> -Daniel
>
>
>
> > +      */
> > +     DMA_RESV_USAGE_KERNEL,
> > +
> >       /**
> >        * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
> >        *
> > --
> > 2.25.1
> >
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
>

[-- Attachment #2: Type: text/html, Size: 6466 bytes --]

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 18/24] dma-buf: add enum dma_resv_usage v3
  2022-03-02 17:55     ` Jason Ekstrand
@ 2022-03-03 13:13       ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-03-03 13:13 UTC (permalink / raw)
  To: Jason Ekstrand, Daniel Vetter
  Cc: moderated list:DMA BUFFER SHARING FRAMEWORK,
	Maling list - DRI developers,
	open list:DMA BUFFER SHARING FRAMEWORK

[-- Attachment #1: Type: text/plain, Size: 10621 bytes --]

Am 02.03.22 um 18:55 schrieb Jason Ekstrand:
>
> On Wed, Dec 22, 2021 at 4:00 PM Daniel Vetter <daniel@ffwll.ch> wrote:
>
>     On Tue, Dec 07, 2021 at 01:34:05PM +0100, Christian König wrote:
>     > This change adds the dma_resv_usage enum and allows us to
>     specify why a
>     > dma_resv object is queried for its containing fences.
>     >
>     > Additional to that a dma_resv_usage_rw() helper function is
>     added to aid
>     > retrieving the fences for a read or write userspace submission.
>     >
>     > This is then deployed to the different query functions of the
>     dma_resv
>     > object and all of their users. When the write parameter was
>     previously
>     > true we now use DMA_RESV_USAGE_WRITE and DMA_RESV_USAGE_READ
>     otherwise.
>     >
>     > v2: add KERNEL/OTHER in separate patch
>     > v3: some kerneldoc suggestions by Daniel
>     >
>     > Signed-off-by: Christian König <christian.koenig@amd.com>
>
>     Just commenting on the kerneldoc here.
>
>     > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>     > index ecb2ff606bac..33a17db89fb4 100644
>     > --- a/drivers/dma-buf/dma-resv.c
>     > +++ b/drivers/dma-buf/dma-resv.c
>     > @@ -408,7 +408,7 @@ static void
>     dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
>     >         cursor->seq = read_seqcount_begin(&cursor->obj->seq);
>     >         cursor->index = -1;
>     >         cursor->shared_count = 0;
>     > -       if (cursor->all_fences) {
>     > +       if (cursor->usage >= DMA_RESV_USAGE_READ) {
>
>
> If we're going to do this....
>
>     >                 cursor->fences = dma_resv_shared_list(cursor->obj);
>     >                 if (cursor->fences)
>     >                         cursor->shared_count =
>     cursor->fences->shared_count; 
>
>
>     > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>     > index 40ac9d486f8f..d96d8ca9af56 100644
>     > --- a/include/linux/dma-resv.h
>     > +++ b/include/linux/dma-resv.h
>     > @@ -49,6 +49,49 @@ extern struct ww_class reservation_ww_class;
>     >
>     >  struct dma_resv_list;
>     >
>     > +/**
>     > + * enum dma_resv_usage - how the fences from a dma_resv obj are
>     used
>     > + *
>
>
> We probably want a note in here about the ordering of this enum.  I'm 
> not even sure that comparing enum values is good or that all values 
> will have a strict ordering that can be useful.  It would definitely 
> make me nervous if anything outside dma-resv.c is doing comparisons on 
> these values.

Exactly that is added in the follow up patch which adds 
DMA_RESV_USAGE_KERNEL.

I've just had everything in one single patch originally.

Christian.

>
> --Jason
>
>     > + * This enum describes the different use cases for a dma_resv
>     object and
>     > + * controls which fences are returned when queried.
>
>     We need to link here to both dma_buf.resv and from there to here.
>
>     Also we had a fair amount of text in the old dma_resv fields which
>     should
>     probably be included here.
>
>     > + */
>     > +enum dma_resv_usage {
>     > +     /**
>     > +      * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
>     > +      *
>     > +      * This should only be used for userspace command
>     submissions which add
>     > +      * an implicit write dependency.
>     > +      */
>     > +     DMA_RESV_USAGE_WRITE,
>     > +
>     > +     /**
>     > +      * @DMA_RESV_USAGE_READ: Implicit read synchronization.
>     > +      *
>     > +      * This should only be used for userspace command
>     submissions which add
>     > +      * an implicit read dependency.
>
>     I think the above would benefit from at least a link each to
>     &dma_buf.resv
>     for further discussion.
>
>     Plus the READ flag needs a huge warning that in general it does _not_
>     guarantee that there are no writes possible, nor that the writes can
>     be assumed to be mistakes and dropped (on buffer moves, e.g.).
>
>     Drivers can only make further assumptions for driver-internal dma_resv
>     objects (e.g. on vm/pagetables) or when the fences are all fences
>     of the
>     same driver (e.g. the special sync rules amd has that takes the fence
>     owner into account).
>
>     We have this documented in the dma_buf.resv rules, but since it
>     came up
>     again in a discussion with Thomas H. somewhere, it's better to
>     hammer this
>     in a few more times. Specifically, in general, ignoring READ fences for
>     buffer moves (well the copy job, memory freeing still has to wait
>     for all
>     of them) is a correctness bug.
>
>     Maybe include a big warning that really the difference between
>     READ and
>     WRITE should only matter for implicit sync, and _not_ for anything
>     else
>     the kernel does.
>
>     I'm assuming the actual replacement is all mechanical, so I
>     skipped that
>     one for now, that's for next year :-)
>     -Daniel
>
>     > +      */
>     > +     DMA_RESV_USAGE_READ,
>     > +};
>     > +
>     > +/**
>     > + * dma_resv_usage_rw - helper for implicit sync
>     > + * @write: true if we create a new implicit sync write
>     > + *
>     > + * This returns the implicit synchronization usage for write or
>     read accesses,
>     > + * see enum dma_resv_usage.
>     > + */
>     > +static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
>     > +{
>     > +     /* This looks confusing at first sight, but is indeed correct.
>     > +      *
>     > +      * The rational is that new write operations needs to wait
>     for the
>     > +      * existing read and write operations to finish.
>     > +      * But a new read operation only needs to wait for the
>     existing write
>     > +      * operations to finish.
>     > +      */
>     > +     return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
>     > +}
>     > +
>     >  /**
>     >   * struct dma_resv - a reservation object manages fences for a
>     buffer
>     >   *
>     > @@ -147,8 +190,8 @@ struct dma_resv_iter {
>     >       /** @obj: The dma_resv object we iterate over */
>     >       struct dma_resv *obj;
>     >
>     > -     /** @all_fences: If all fences should be returned */
>     > -     bool all_fences;
>     > +     /** @usage: Controls which fences are returned */
>     > +     enum dma_resv_usage usage;
>     >
>     >       /** @fence: the currently handled fence */
>     >       struct dma_fence *fence;
>     > @@ -178,14 +221,14 @@ struct dma_fence
>     *dma_resv_iter_next(struct dma_resv_iter *cursor);
>     >   * dma_resv_iter_begin - initialize a dma_resv_iter object
>     >   * @cursor: The dma_resv_iter object to initialize
>     >   * @obj: The dma_resv object which we want to iterate over
>     > - * @all_fences: If all fences should be returned or just the
>     exclusive one
>     > + * @usage: controls which fences to include, see enum
>     dma_resv_usage.
>     >   */
>     >  static inline void dma_resv_iter_begin(struct dma_resv_iter
>     *cursor,
>     >                                      struct dma_resv *obj,
>     > -                                    bool all_fences)
>     > +                                    enum dma_resv_usage usage)
>     >  {
>     >       cursor->obj = obj;
>     > -     cursor->all_fences = all_fences;
>     > +     cursor->usage = usage;
>     >       cursor->fence = NULL;
>     >  }
>     >
>     > @@ -242,7 +285,7 @@ static inline bool
>     dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
>     >   * dma_resv_for_each_fence - fence iterator
>     >   * @cursor: a struct dma_resv_iter pointer
>     >   * @obj: a dma_resv object pointer
>     > - * @all_fences: true if all fences should be returned
>     > + * @usage: controls which fences to return
>     >   * @fence: the current fence
>     >   *
>     >   * Iterate over the fences in a struct dma_resv object while
>     holding the
>     > @@ -251,8 +294,8 @@ static inline bool
>     dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
>     >   * valid as long as the lock is held and so no extra reference
>     to the fence is
>     >   * taken.
>     >   */
>     > -#define dma_resv_for_each_fence(cursor, obj, all_fences,
>     fence)      \
>     > -     for (dma_resv_iter_begin(cursor, obj, all_fences),      \
>     > +#define dma_resv_for_each_fence(cursor, obj, usage, fence)   \
>     > +     for (dma_resv_iter_begin(cursor, obj, usage),   \
>     >            fence = dma_resv_iter_first(cursor); fence;       \
>     >            fence = dma_resv_iter_next(cursor))
>     >
>     > @@ -419,14 +462,14 @@ void dma_resv_add_shared_fence(struct
>     dma_resv *obj, struct dma_fence *fence);
>     >  void dma_resv_replace_fences(struct dma_resv *obj, uint64_t
>     context,
>     >                            struct dma_fence *fence);
>     >  void dma_resv_add_excl_fence(struct dma_resv *obj, struct
>     dma_fence *fence);
>     > -int dma_resv_get_fences(struct dma_resv *obj, bool write,
>     > +int dma_resv_get_fences(struct dma_resv *obj, enum
>     dma_resv_usage usage,
>     >                       unsigned int *num_fences, struct dma_fence
>     ***fences);
>     > -int dma_resv_get_singleton(struct dma_resv *obj, bool write,
>     > +int dma_resv_get_singleton(struct dma_resv *obj, enum
>     dma_resv_usage usage,
>     >                          struct dma_fence **fence);
>     >  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv
>     *src);
>     > -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all,
>     bool intr,
>     > -                        unsigned long timeout);
>     > -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
>     > +long dma_resv_wait_timeout(struct dma_resv *obj, enum
>     dma_resv_usage usage,
>     > +                        bool intr, unsigned long timeout);
>     > +bool dma_resv_test_signaled(struct dma_resv *obj, enum
>     dma_resv_usage usage);
>     >  void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
>     >
>     >  #endif /* _LINUX_RESERVATION_H */
>     > --
>     > 2.25.1
>     >
>
>     -- 
>     Daniel Vetter
>     Software Engineer, Intel Corporation
>     http://blog.ffwll.ch
>

[-- Attachment #2: Type: text/html, Size: 16177 bytes --]

^ permalink raw reply	[flat|nested] 95+ messages in thread

* Re: [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL
  2022-03-02 18:11     ` Jason Ekstrand
@ 2022-03-03 13:49       ` Christian König
  0 siblings, 0 replies; 95+ messages in thread
From: Christian König @ 2022-03-03 13:49 UTC (permalink / raw)
  To: Jason Ekstrand, Daniel Vetter
  Cc: moderated list:DMA BUFFER SHARING FRAMEWORK,
	Maling list - DRI developers,
	open list:DMA BUFFER SHARING FRAMEWORK

[-- Attachment #1: Type: text/plain, Size: 7605 bytes --]

Am 02.03.22 um 19:11 schrieb Jason Ekstrand:
> On Wed, Dec 22, 2021 at 4:05 PM Daniel Vetter <daniel@ffwll.ch> wrote:
>
>     On Tue, Dec 07, 2021 at 01:34:07PM +0100, Christian König wrote:
>     > Add an usage for kernel submissions. Waiting for those
>     > are mandatory for dynamic DMA-bufs.
>     >
>     > Signed-off-by: Christian König <christian.koenig@amd.com>
>
>     Again just skipping to the doc bikeshedding, maybe with more cc others
>     help with some code review too.
>
>     >  EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
>     > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>     > index 4f3a6abf43c4..29d799991496 100644
>     > --- a/include/linux/dma-resv.h
>     > +++ b/include/linux/dma-resv.h
>     > @@ -54,8 +54,30 @@ struct dma_resv_list;
>     >   *
>     >   * This enum describes the different use cases for a dma_resv
>     object and
>     >   * controls which fences are returned when queried.
>     > + *
>     > + * An important fact is that there is the order
>     KERNEL<WRITE<READ and
>     > + * when the dma_resv object is asked for fences for one use
>     case the fences
>     > + * for the lower use case are returned as well.
>     > + *
>     > + * For example when asking for WRITE fences then the KERNEL
>     fences are returned
>     > + * as well. Similar when asked for READ fences then both WRITE
>     and KERNEL
>     > + * fences are returned as well.
>     >   */
>     >  enum dma_resv_usage {
>     > +     /**
>     > +      * @DMA_RESV_USAGE_KERNEL: For in kernel memory management
>     only.
>     > +      *
>     > +      * This should only be used for things like copying or
>     clearing memory
>     > +      * with a DMA hardware engine for the purpose of kernel memory
>     > +      * management.
>     > +      *
>     > +         * Drivers *always* need to wait for those fences
>     before accessing the
>
>
> super-nit: Your whitespace is wrong here.

Fixed, thanks.

>     s/need to/must/ to stay with usual RFC wording. It's a hard
>     requirement or
>     there's a security bug somewhere.
>
>
> Yeah, probably.  I like *must* but that's because that's what we use 
> in the VK spec.  Do whatever's usual for kernel docs.

I agree, must sounds better and is already fixed.

>
> Not sure where to put this comment but I feel like the way things are 
> framed is a bit the wrong way around. Specifically, I don't think we 
> should be talking about what fences you must wait on so much as what 
> fences you can safely skip.  In the previous model, the exclusive 
> fence had to be waited on at all times and the shared fences could be 
> skipped unless you were doing something that would result in a new 
> exclusive fence.

Well exactly that's what we unfortunately didn't do, as Daniel explained 
some drivers just ignored the exclusive fence sometimes.

> In this new world of "it's just a bucket of fences", we need to be 
> very sure the waiting is happening on the right things.  It sounds (I 
> could be wrong) like USAGE_KERNEL is the new exclusive fence.  If so, 
> we need to make it virtually impossible to ignore.

Yes, exactly that's the goal here.

>
> Sorry if that's a bit of a ramble.  I think what I'm saying is this:  
> In whatever helpers or iterators we have, be that get_singleton or 
> iter_begin or whatever, we need to be sure we specify things in terms 
> of exclusion and not inclusion.  "Give me everything except implicit 
> sync read fences" rather than "give me implicit sync write fences".

Mhm, exactly that's what I tried to avoid. The basic idea here is that 
the driver and memory management components tell the framework what use 
case they have and the framework returns the appropriate fences for that.

So when the use case is mmap()ing the buffer on the CPU without any 
further sync (for example), you only get the kernel fences.

When the use case is you want to add a CS which is an implicit read you 
get all kernel fences plus all writers (see function dma_resv_usage_rw).

When the use case is you want to add a CS which is an implicit write you 
get all kernel fences, other writers as well as readers.

And lastly, when you are the memory management component which wants to 
move a buffer around, you get everything.

>   If having a single, well-ordered enum is sufficient for that, 
> great.  If we think we'll ever end up with something other than a 
> strict ordering, we may need to re-think a bit.

I actually started with a matrix which gives you an indicator when to 
sync with what, but at least for now the well-ordered enum seems to get 
the job done as well and is far less complex.

> Concerning well-ordering... I'm a bit surprised to only see three 
> values here.  I expected 4:
>
>  - kernel exclusive, used for memory moves and the like
>  - kernel shared, used for "I'm using this right now, don't yank it 
> out from under me" which may not have any implicit sync implications 
> whatsoever
>  - implicit sync write
>  - implicit sync read

See the follow up patch which adds DMA_RESV_USAGE_BOOKKEEP. That's the 
4th one you are missing.

> If we had those four, I don't think the strict ordering works 
> anymore.  From the POV of implicit sync, they would look at the 
> implicit sync read/write fences and maybe not even kernel exclusive.  
> From the POV of some doing a BO move, they'd look at all of them.  
> From the POV of holding on to memory while Vulkan is using it, you 
> want to set a kernel shared fence but it doesn't need to interact with 
> implicit sync at all.  Am I missing something obvious here?

Yeah, sounds like you didn't look at patch 21 :)

My thinking is more or less exactly the same. Only difference is that 
I've put the BOOKKEEP usage after the implicit read and write usages. 
This way you can keep the strict ordering since the implicit submissions 
won't ask for the BOOKKEEP usage.

The order is then KERNEL<WRITE<READ<BOOKKEEP. See the final 
documentation here as well:

  * An important fact is that there is the order 
KERNEL<WRITE<READ<BOOKKEEP and
  * when the dma_resv object is asked for fences for one use case, the fences
  * for the lower use cases are returned as well.
  *
  * For example, when asking for WRITE fences, the KERNEL fences are 
returned
  * as well. Similarly, when asked for READ fences, both WRITE and KERNEL
  * fences are returned as well.

Regards,
Christian.


>
> --Jason
>
>
>     > +      * resource protected by the dma_resv object. The only
>     exception for
>     > +      * that is when the resource is known to be locked down in
>     place by
>     > +      * pinning it previously.
>
>     Is this true? This sounds more confusing than helpful, because
>     afaik in
>     general our pin interfaces do not block for any kernel fences.
>     dma_buf_pin
>     doesn't do that for sure. And I don't think ttm does that either.
>
>     I think the only safe thing here is to state that it's safe if a) the
>     resource is pinned down and b) the callers has previously waited
>     for the
>     kernel fences.
>
>     I also think we should put that wait for kernel fences into
>     dma_buf_pin(),
>     but that's maybe a later patch.
>     -Daniel
>
>
>
>     > +      */
>     > +     DMA_RESV_USAGE_KERNEL,
>     > +
>     >       /**
>     >        * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
>     >        *
>     > --
>     > 2.25.1
>     >
>
>     -- 
>     Daniel Vetter
>     Software Engineer, Intel Corporation
>     http://blog.ffwll.ch
>

[-- Attachment #2: Type: text/html, Size: 13122 bytes --]

^ permalink raw reply	[flat|nested] 95+ messages in thread

end of thread, other threads:[~2022-03-03 13:49 UTC | newest]

Thread overview: 95+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-12-07 12:33 completely rework the dma_resv semantic Christian König
2021-12-07 12:33 ` [PATCH 01/24] dma-buf: add dma_resv_replace_fences Christian König
2021-12-22 21:05   ` Daniel Vetter
2021-12-22 21:05     ` Daniel Vetter
2022-01-03 10:48     ` Christian König
2022-01-03 10:48       ` Christian König
2022-01-14 16:28       ` Daniel Vetter
2022-01-14 16:28         ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 02/24] dma-buf: finally make the dma_resv_list private Christian König
2021-12-22 21:08   ` Daniel Vetter
2021-12-22 21:08     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 03/24] dma-buf: drop excl_fence parameter from dma_resv_get_fences Christian König
2021-12-22 21:13   ` Daniel Vetter
2021-12-22 21:13     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 04/24] dma-buf: add dma_resv_get_singleton v2 Christian König
2021-12-22 21:21   ` Daniel Vetter
2021-12-22 21:21     ` Daniel Vetter
2022-01-03 11:13     ` Christian König
2022-01-03 11:13       ` Christian König
2022-01-14 16:31       ` Daniel Vetter
2022-01-14 16:31         ` Daniel Vetter
2022-01-17 11:26         ` Christian König
2022-01-17 11:26           ` Christian König
2022-03-02 17:39           ` Jason Ekstrand
2021-12-07 12:33 ` [PATCH 05/24] RDMA: use dma_resv_wait() instead of extracting the fence Christian König
2021-12-22 21:23   ` Daniel Vetter
2021-12-22 21:23     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 06/24] drm/etnaviv: stop using dma_resv_excl_fence Christian König
2021-12-22 21:26   ` Daniel Vetter
2021-12-22 21:26     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 07/24] drm/nouveau: " Christian König
2021-12-22 21:26   ` Daniel Vetter
2021-12-22 21:26     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 08/24] drm/vmwgfx: " Christian König
2021-12-22 21:31   ` Daniel Vetter
2021-12-22 21:31     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 09/24] drm/radeon: " Christian König
2021-12-22 21:30   ` Daniel Vetter
2021-12-22 21:30     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 10/24] drm/amdgpu: remove excl as shared workarounds Christian König
2021-12-22 21:34   ` Daniel Vetter
2021-12-22 21:34     ` Daniel Vetter
2021-12-07 12:33 ` [PATCH 11/24] drm/amdgpu: use dma_resv_for_each_fence for CS workaround Christian König
2021-12-22 21:37   ` Daniel Vetter
2021-12-22 21:37     ` Daniel Vetter
2022-01-03 12:24     ` Christian König
2022-01-03 12:24       ` Christian König
2021-12-07 12:33 ` [PATCH 12/24] dma-buf: finally make dma_resv_excl_fence private Christian König
2021-12-22 21:39   ` Daniel Vetter
2021-12-22 21:39     ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 13/24] dma-buf: drop the DAG approach for the dma_resv object Christian König
2021-12-22 21:43   ` Daniel Vetter
2021-12-22 21:43     ` Daniel Vetter
2022-01-04 15:08     ` Christian König
2022-01-04 15:08       ` Christian König
2022-01-14 16:33       ` Daniel Vetter
2022-01-14 16:33         ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 14/24] dma-buf/drivers: make reserving a shared slot mandatory v2 Christian König
2021-12-22 21:50   ` Daniel Vetter
2021-12-22 21:50     ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 15/24] drm: support more than one write fence in drm_gem_plane_helper_prepare_fb Christian König
2021-12-22 21:51   ` Daniel Vetter
2021-12-22 21:51     ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 16/24] drm/nouveau: support more than one write fence in fenv50_wndw_prepare_fb Christian König
2021-12-22 21:52   ` Daniel Vetter
2021-12-22 21:52     ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 17/24] drm/amdgpu: use dma_resv_get_singleton in amdgpu_pasid_free_cb Christian König
2021-12-22 21:53   ` Daniel Vetter
2021-12-22 21:53     ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 18/24] dma-buf: add enum dma_resv_usage v3 Christian König
2021-12-22 22:00   ` Daniel Vetter
2021-12-22 22:00     ` Daniel Vetter
2022-03-02 17:55     ` Jason Ekstrand
2022-03-03 13:13       ` Christian König
2021-12-07 12:34 ` [PATCH 19/24] dma-buf: specify usage while adding fences to dma_resv obj v2 Christian König
2021-12-07 12:34 ` [PATCH 20/24] dma-buf: add DMA_RESV_USAGE_KERNEL Christian König
2021-12-22 22:05   ` Daniel Vetter
2021-12-22 22:05     ` Daniel Vetter
2022-03-02 18:11     ` Jason Ekstrand
2022-03-03 13:49       ` Christian König
2021-12-07 12:34 ` [PATCH 21/24] dma-buf: add DMA_RESV_USAGE_BOOKKEEP Christian König
2021-12-22 22:10   ` Daniel Vetter
2021-12-22 22:10     ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 22/24] dma-buf: wait for map to complete for static attachments Christian König
2021-12-22 22:16   ` Daniel Vetter
2021-12-22 22:16     ` Daniel Vetter
2021-12-07 12:34 ` [PATCH 23/24] amdgpu: remove DMA-buf fence workaround Christian König
2021-12-07 12:34 ` [PATCH 24/24] drm/ttm: remove bo->moving Christian König
2021-12-17 14:39 ` completely rework the dma_resv semantic Christian König
2021-12-22 22:17   ` Daniel Vetter
2021-12-22 22:17     ` Daniel Vetter
2021-12-23  9:11     ` Christian König
2021-12-23  9:11       ` Christian König
2022-01-14 16:35       ` Daniel Vetter
2022-01-14 16:35         ` Daniel Vetter

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.