* Deploying new iterator interface for dma-buf
@ 2021-09-17 12:34 ` Christian König
  0 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Hopefully the last round for this.

Added dma_resv_iter_begin/end as requested by Daniel. Fixed a bunch of
problems pointed out by the CI systems and found a few more myself.

Please review and/or comment,
Christian.



^ permalink raw reply	[flat|nested] 115+ messages in thread


* [PATCH 01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Abstract the complexity of iterating over all the fences
in a dma_resv object.

The new loop handles the whole RCU and retry dance and
returns only fences where we can be sure we grabbed the
right one.

v2: fix accessing the shared fences while they might be freed,
    improve kerneldoc, rename _cursor to _iter, add
    dma_resv_iter_is_exclusive, add dma_resv_iter_begin/end
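
For reviewers, a minimal usage sketch of the new unlocked iterator. This is
illustrative only: apart from dma_resv_iter_begin/end and the
dma_resv_for_each_fence_unlocked macro added below, the surrounding code
(obj, the collected state) is hypothetical.

        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        rcu_read_lock();
        dma_resv_iter_begin(&cursor, obj, true);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                if (cursor.is_first) {
                        /* A concurrent modification restarted the walk,
                         * throw away any state collected from previous
                         * fences before looking at this one.
                         */
                }
                /* fence holds a reference here and is not yet signaled */
        }
        dma_resv_iter_end(&cursor);
        rcu_read_unlock();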

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 61 +++++++++++++++++++++++++++
 include/linux/dma-resv.h   | 84 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 145 insertions(+)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 84fbe60629e3..3e77cad2c9d4 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -323,6 +323,67 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 }
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
+/**
+ * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
+ * @cursor: cursor to record the current position
+ * @first: if we should start over
+ *
+ * Return all the fences in the dma_resv object which are not yet signaled.
+ * The returned fence has an extra local reference so it will stay alive.
+ * If a concurrent modification is detected, the whole iteration is restarted.
+ */
+struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
+					      bool first)
+{
+	struct dma_resv *obj = cursor->obj;
+
+	first |= read_seqcount_retry(&obj->seq, cursor->seq);
+	do {
+		/* Drop the reference from the previous round */
+		dma_fence_put(cursor->fence);
+
+		cursor->is_first = first;
+		if (first) {
+			cursor->seq = read_seqcount_begin(&obj->seq);
+			cursor->index = -1;
+			cursor->fences = dma_resv_shared_list(obj);
+
+			cursor->fence = dma_resv_excl_fence(obj);
+			if (cursor->fence &&
+			    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &cursor->fence->flags))
+				cursor->fence = NULL;
+		} else {
+			cursor->fence = NULL;
+		}
+
+		if (cursor->fence) {
+			cursor->fence = dma_fence_get_rcu(cursor->fence);
+		} else if (cursor->all_fences && cursor->fences) {
+			struct dma_resv_list *fences = cursor->fences;
+
+			while (++cursor->index < fences->shared_count) {
+				cursor->fence = rcu_dereference(
+					fences->shared[cursor->index]);
+				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+					      &cursor->fence->flags))
+					break;
+			}
+			if (cursor->index < fences->shared_count)
+				cursor->fence =
+					dma_fence_get_rcu(cursor->fence);
+			else
+				cursor->fence = NULL;
+		}
+
+		/* Restart from the beginning if a retry is needed */
+		first = true;
+	} while (read_seqcount_retry(&obj->seq, cursor->seq));
+
+	return cursor->fence;
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_walk_unlocked);
+
 /**
  * dma_resv_copy_fences - Copy all fences from src to dst.
  * @dst: the destination reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 9100dd3dc21f..693d16117153 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -149,6 +149,90 @@ struct dma_resv {
 	struct dma_resv_list __rcu *fence;
 };
 
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor functions instead.
+ */
+struct dma_resv_iter {
+	/** @obj: The dma_resv object we iterate over */
+	struct dma_resv *obj;
+
+	/** @all_fences: If all fences should be returned */
+	bool all_fences;
+
+	/** @fence: the currently handled fence */
+	struct dma_fence *fence;
+
+	/** @seq: sequence number to check for modifications */
+	unsigned int seq;
+
+	/** @index: index into the shared fences */
+	unsigned int index;
+
+	/** @fences: the shared fences */
+	struct dma_resv_list *fences;
+
+	/** @is_first: true if this is the first returned fence */
+	bool is_first;
+};
+
+struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
+					      bool first);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @all_fences: If all fences should be returned or just the exclusive one
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+					struct dma_resv *obj,
+					bool all_fences)
+{
+	cursor->obj = obj;
+	cursor->all_fences = all_fences;
+	cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+	dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * @cursor: the cursor of the current position
+ *
+ * Returns true if the currently returned fence is the exclusive one.
+ */
+static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+{
+	return cursor->index == -1;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * dma_resv::lock. The RCU read side lock must be held when using this, but it
+ * can be dropped and re-taken as necessary inside the loop. The cursor needs
+ * to be initialized with dma_resv_iter_begin() and cleaned up with
+ * dma_resv_iter_end().
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence)			\
+	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
+	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))
+
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 02/26] dma-buf: add dma_resv_for_each_fence
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

A simpler version of the iterator to be used when the dma_resv object is
locked.
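
For comparison with the unlocked variant, a minimal usage sketch
(illustrative only; apart from the helpers added in this series, obj and the
surrounding code are hypothetical):

        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        /* The caller must hold obj's reservation lock here. */
        dma_resv_for_each_fence(&cursor, obj, true, fence) {
                /* No RCU, no restart handling and no extra fence
                 * references are needed while the lock is held.
                 */
        }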

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 33 +++++++++++++++++++++++++++++++++
 include/linux/dma-resv.h   | 17 +++++++++++++++++
 2 files changed, 50 insertions(+)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 3e77cad2c9d4..a3c79a99fb44 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -384,6 +384,39 @@ struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
 }
 EXPORT_SYMBOL_GPL(dma_resv_iter_walk_unlocked);
 
+/**
+ * dma_resv_iter_walk - walk over fences in a dma_resv obj
+ * @cursor: cursor to record the current position
+ * @first: if we should start over
+ *
+ * Return all the fences in the dma_resv object while holding the
+ * dma_resv::lock.
+ */
+struct dma_fence *dma_resv_iter_walk(struct dma_resv_iter *cursor, bool first)
+{
+	dma_resv_assert_held(cursor->obj);
+
+	cursor->is_first = first;
+	if (first) {
+		struct dma_fence *fence;
+
+		cursor->index = -1;
+		cursor->fences = dma_resv_shared_list(cursor->obj);
+
+		fence = dma_resv_excl_fence(cursor->obj);
+		if (fence)
+			return fence;
+	}
+
+	if (!cursor->all_fences || !cursor->fences ||
+	    ++cursor->index >= cursor->fences->shared_count)
+		return NULL;
+
+	return rcu_dereference_protected(cursor->fences->shared[cursor->index],
+					 dma_resv_held(cursor->obj));
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
+
 /**
  * dma_resv_copy_fences - Copy all fences from src to dst.
  * @dst: the destination reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 693d16117153..8c968f8c9d33 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -179,6 +179,7 @@ struct dma_resv_iter {
 
 struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
 					      bool first);
+struct dma_fence *dma_resv_iter_walk(struct dma_resv_iter *cursor, bool first);
 
 /**
  * dma_resv_iter_begin - initialize a dma_resv_iter object
@@ -233,6 +234,22 @@ static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
 	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
 	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))
 
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @all_fences: true if all fences should be returned
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * dma_resv::lock. @all_fences controls if the shared fences are returned as
+ * well. The cursor initialisation is part of the iterator.
+ */
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+	     fence = dma_resv_iter_walk(cursor, true); fence;	\
+	     fence = dma_resv_iter_walk(cursor, false))
+
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 03/26] dma-buf: use new iterator in dma_resv_copy_fences
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

This makes the function much simpler since the complex
retry logic is now handled elsewhere.
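
The conversion relies on cursor.is_first to throw away partially collected
state whenever the unlocked walk has to restart. Schematically (the two
helpers below are hypothetical placeholders for the real bookkeeping):

        dma_resv_for_each_fence_unlocked(&cursor, f) {
                if (cursor.is_first) {
                        /* the walk restarted, discard partial results */
                        free_collected_fences();
                }
                collect_fence(f);
        }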

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 86 ++++++++++++++++----------------------
 1 file changed, 35 insertions(+), 51 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index a3c79a99fb44..406150dea5e4 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -426,74 +426,58 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
  */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
-	struct dma_resv_list *src_list, *dst_list;
-	struct dma_fence *old, *new;
-	unsigned int i;
+	struct dma_resv_iter cursor;
+	struct dma_resv_list *list;
+	struct dma_fence *f, *excl;
 
 	dma_resv_assert_held(dst);
 
-	rcu_read_lock();
-	src_list = dma_resv_shared_list(src);
-
-retry:
-	if (src_list) {
-		unsigned int shared_count = src_list->shared_count;
-
-		rcu_read_unlock();
+	list = NULL;
+	excl = NULL;
 
-		dst_list = dma_resv_list_alloc(shared_count);
-		if (!dst_list)
-			return -ENOMEM;
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, src, true);
+	dma_resv_for_each_fence_unlocked(&cursor, f) {
 
-		rcu_read_lock();
-		src_list = dma_resv_shared_list(src);
-		if (!src_list || src_list->shared_count > shared_count) {
-			kfree(dst_list);
-			goto retry;
-		}
+		if (cursor.is_first) {
+			dma_resv_list_free(list);
+			dma_fence_put(excl);
 
-		dst_list->shared_count = 0;
-		for (i = 0; i < src_list->shared_count; ++i) {
-			struct dma_fence __rcu **dst;
-			struct dma_fence *fence;
+			if (cursor.fences) {
+				unsigned int cnt = cursor.fences->shared_count;
 
-			fence = rcu_dereference(src_list->shared[i]);
-			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				     &fence->flags))
-				continue;
+				rcu_read_unlock();
+				list = dma_resv_list_alloc(cnt);
+				if (!list) {
+					dma_resv_iter_end(&cursor);
+					return -ENOMEM;
+				}
 
-			if (!dma_fence_get_rcu(fence)) {
-				dma_resv_list_free(dst_list);
-				src_list = dma_resv_shared_list(src);
-				goto retry;
-			}
+				list->shared_count = 0;
+				rcu_read_lock();
 
-			if (dma_fence_is_signaled(fence)) {
-				dma_fence_put(fence);
-				continue;
+			} else {
+				list = NULL;
 			}
-
-			dst = &dst_list->shared[dst_list->shared_count++];
-			rcu_assign_pointer(*dst, fence);
+			excl = NULL;
 		}
-	} else {
-		dst_list = NULL;
-	}
 
-	new = dma_fence_get_rcu_safe(&src->fence_excl);
+		dma_fence_get(f);
+		if (dma_resv_iter_is_exclusive(&cursor))
+			excl = f;
+		else
+			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+	}
+	dma_resv_iter_end(&cursor);
 	rcu_read_unlock();
 
-	src_list = dma_resv_shared_list(dst);
-	old = dma_resv_excl_fence(dst);
-
 	write_seqcount_begin(&dst->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
-	RCU_INIT_POINTER(dst->fence_excl, new);
-	RCU_INIT_POINTER(dst->fence, dst_list);
+	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
+	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
 	write_seqcount_end(&dst->seq);
 
-	dma_resv_list_free(src_list);
-	dma_fence_put(old);
+	dma_resv_list_free(list);
+	dma_fence_put(excl);
 
 	return 0;
 }
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 04/26] dma-buf: use new iterator in dma_resv_get_fences v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

This makes the function much simpler since the complex
retry logic is now handled elsewhere.

v2: use sizeof(void*) instead
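
The calling convention stays the same; for reference, a sketch of a typical
caller (resv and the error handling around it are hypothetical):

        struct dma_fence *excl, **shared;
        unsigned int count, i;
        int r;

        r = dma_resv_get_fences(resv, &excl, &count, &shared);
        if (r)
                return r;       /* only -ENOMEM is possible */

        /* ... use excl and shared[0..count-1] ... */

        for (i = 0; i < count; ++i)
                dma_fence_put(shared[i]);
        kfree(shared);
        dma_fence_put(excl);    /* NULL-safe */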

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 112 +++++++++++++------------------------
 1 file changed, 40 insertions(+), 72 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 406150dea5e4..9b90bd9ac018 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -487,99 +487,67 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * @fence_excl: the returned exclusive fence (or NULL)
+ * @shared_count: the number of shared fences returned
+ * @shared: the array of shared fence ptrs returned (array is krealloc'd to
  * the required size, and must be freed by caller)
  *
  * Retrieve all fences from the reservation object. If the pointer for the
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
-			unsigned int *pshared_count,
-			struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
+			unsigned int *shared_count, struct dma_fence ***shared)
 {
-	struct dma_fence **shared = NULL;
-	struct dma_fence *fence_excl;
-	unsigned int shared_count;
-	int ret = 1;
-
-	do {
-		struct dma_resv_list *fobj;
-		unsigned int i, seq;
-		size_t sz = 0;
-
-		shared_count = i = 0;
-
-		rcu_read_lock();
-		seq = read_seqcount_begin(&obj->seq);
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 
-		fence_excl = dma_resv_excl_fence(obj);
-		if (fence_excl && !dma_fence_get_rcu(fence_excl))
-			goto unlock;
+	*shared_count = 0;
+	*shared = NULL;
 
-		fobj = dma_resv_shared_list(obj);
-		if (fobj)
-			sz += sizeof(*shared) * fobj->shared_max;
+	if (fence_excl)
+		*fence_excl = NULL;
 
-		if (!pfence_excl && fence_excl)
-			sz += sizeof(*shared);
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, obj, true);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
-		if (sz) {
-			struct dma_fence **nshared;
+		if (cursor.is_first) {
+			unsigned int count;
 
-			nshared = krealloc(shared, sz,
-					   GFP_NOWAIT | __GFP_NOWARN);
-			if (!nshared) {
-				rcu_read_unlock();
+			while (*shared_count)
+				dma_fence_put((*shared)[--(*shared_count)]);
 
-				dma_fence_put(fence_excl);
-				fence_excl = NULL;
+			if (fence_excl)
+				dma_fence_put(*fence_excl);
 
-				nshared = krealloc(shared, sz, GFP_KERNEL);
-				if (nshared) {
-					shared = nshared;
-					continue;
-				}
+			count = cursor.fences ? cursor.fences->shared_count : 0;
+			count += fence_excl ? 0 : 1;
+			rcu_read_unlock();
 
-				ret = -ENOMEM;
-				break;
-			}
-			shared = nshared;
-			shared_count = fobj ? fobj->shared_count : 0;
-			for (i = 0; i < shared_count; ++i) {
-				shared[i] = rcu_dereference(fobj->shared[i]);
-				if (!dma_fence_get_rcu(shared[i]))
-					break;
+			/* Re-allocate the array if necessary */
+			*shared = krealloc_array(*shared, count,
+						 sizeof(void *),
+						 GFP_KERNEL);
+			if (count && !*shared) {
+				dma_resv_iter_end(&cursor);
+				return -ENOMEM;
 			}
+			rcu_read_lock();
 		}
 
-		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
-			while (i--)
-				dma_fence_put(shared[i]);
-			dma_fence_put(fence_excl);
-			goto unlock;
-		}
-
-		ret = 0;
-unlock:
-		rcu_read_unlock();
-	} while (ret);
-
-	if (pfence_excl)
-		*pfence_excl = fence_excl;
-	else if (fence_excl)
-		shared[shared_count++] = fence_excl;
+		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
+			*fence_excl = fence;
+		else
+			(*shared)[(*shared_count)++] = fence;
 
-	if (!shared_count) {
-		kfree(shared);
-		shared = NULL;
+		/* Don't drop the reference */
+		fence = NULL;
 	}
+	dma_resv_iter_end(&cursor);
+	rcu_read_unlock();
 
-	*pshared_count = shared_count;
-	*pshared = shared;
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 05/26] dma-buf: use new iterator in dma_resv_wait_timeout
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

This makes the function much simpler since the complex
retry logic is now handled elsewhere.
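
The calling convention is unchanged; a sketch of a typical caller, assuming
resv is a valid dma_resv object and the usual dma_fence_wait_timeout return
convention:

        long ret;

        /* wait for all fences, interruptible, for at most one second */
        ret = dma_resv_wait_timeout(resv, true, true, HZ);
        if (ret == 0)
                return -ETIMEDOUT;      /* timed out */
        if (ret < 0)
                return ret;             /* e.g. -ERESTARTSYS */
        /* otherwise ret is the remaining timeout in jiffies */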

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 68 ++++++--------------------------------
 1 file changed, 10 insertions(+), 58 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 9b90bd9ac018..c7db553ab115 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -569,74 +569,26 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
 			   unsigned long timeout)
 {
 	long ret = timeout ? timeout : 1;
-	unsigned int seq, shared_count;
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	int i;
 
-retry:
-	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
 	rcu_read_lock();
-	i = -1;
-
-	fence = dma_resv_excl_fence(obj);
-	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-		if (!dma_fence_get_rcu(fence))
-			goto unlock_retry;
+	dma_resv_iter_begin(&cursor, obj, wait_all);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		rcu_read_unlock();
 
-		if (dma_fence_is_signaled(fence)) {
-			dma_fence_put(fence);
-			fence = NULL;
+		ret = dma_fence_wait_timeout(fence, intr, ret);
+		if (ret <= 0) {
+			dma_resv_iter_end(&cursor);
+			return ret;
 		}
 
-	} else {
-		fence = NULL;
-	}
-
-	if (wait_all) {
-		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-
-		if (fobj)
-			shared_count = fobj->shared_count;
-
-		for (i = 0; !fence && i < shared_count; ++i) {
-			struct dma_fence *lfence;
-
-			lfence = rcu_dereference(fobj->shared[i]);
-			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				     &lfence->flags))
-				continue;
-
-			if (!dma_fence_get_rcu(lfence))
-				goto unlock_retry;
-
-			if (dma_fence_is_signaled(lfence)) {
-				dma_fence_put(lfence);
-				continue;
-			}
-
-			fence = lfence;
-			break;
-		}
+		rcu_read_lock();
 	}
-
+	dma_resv_iter_end(&cursor);
 	rcu_read_unlock();
-	if (fence) {
-		if (read_seqcount_retry(&obj->seq, seq)) {
-			dma_fence_put(fence);
-			goto retry;
-		}
 
-		ret = dma_fence_wait_timeout(fence, intr, ret);
-		dma_fence_put(fence);
-		if (ret > 0 && wait_all && (i + 1 < shared_count))
-			goto retry;
-	}
 	return ret;
-
-unlock_retry:
-	rcu_read_unlock();
-	goto retry;
 }
 EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 06/26] dma-buf: use new iterator in dma_resv_test_signaled
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

This makes the function much simpler since the complex
retry logic is now handled elsewhere.
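
Again the calling convention is unchanged; a one-line reminder of typical use
(resv is a hypothetical caller-side object):

        /* With test_all == true this checks the exclusive and all shared
         * fences, with false only the exclusive one.
         */
        bool idle = dma_resv_test_signaled(resv, true);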

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 56 ++++++--------------------------------
 1 file changed, 9 insertions(+), 47 deletions(-)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index c7db553ab115..d8f428ddaedd 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -593,22 +593,6 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
 EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 
 
-static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
-{
-	struct dma_fence *fence, *lfence = passed_fence;
-	int ret = 1;
-
-	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-		fence = dma_fence_get_rcu(lfence);
-		if (!fence)
-			return -1;
-
-		ret = !!dma_fence_is_signaled(fence);
-		dma_fence_put(fence);
-	}
-	return ret;
-}
-
 /**
  * dma_resv_test_signaled - Test if a reservation object's fences have been
  * signaled.
@@ -625,43 +609,21 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	unsigned int seq;
-	int ret;
 
 	rcu_read_lock();
-retry:
-	ret = true;
-	seq = read_seqcount_begin(&obj->seq);
-
-	if (test_all) {
-		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-		unsigned int i, shared_count;
-
-		shared_count = fobj ? fobj->shared_count : 0;
-		for (i = 0; i < shared_count; ++i) {
-			fence = rcu_dereference(fobj->shared[i]);
-			ret = dma_resv_test_signaled_single(fence);
-			if (ret < 0)
-				goto retry;
-			else if (!ret)
-				break;
+	dma_resv_iter_begin(&cursor, obj, test_all);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (!dma_fence_is_signaled(fence)) {
+			dma_resv_iter_end(&cursor);
+			rcu_read_unlock();
+			return false;
 		}
 	}
-
-	fence = dma_resv_excl_fence(obj);
-	if (ret && fence) {
-		ret = dma_resv_test_signaled_single(fence);
-		if (ret < 0)
-			goto retry;
-
-	}
-
-	if (read_seqcount_retry(&obj->seq, seq))
-		goto retry;
-
+	dma_resv_iter_end(&cursor);
 	rcu_read_unlock();
-	return ret;
+	return true;
 }
 EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 07/26] drm/ttm: use the new iterator in ttm_bo_flush_all_fences
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

This is probably a fix, since previously we didn't even grab a reference to
the fences.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3b22c0013dbf..7d804c0c69b0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -269,22 +269,16 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
 	struct dma_resv *resv = &bo->base._resv;
-	struct dma_resv_list *fobj;
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	int i;
 
 	rcu_read_lock();
-	fobj = dma_resv_shared_list(resv);
-	fence = dma_resv_excl_fence(resv);
-	if (fence && !fence->ops->signaled)
-		dma_fence_enable_sw_signaling(fence);
-
-	for (i = 0; fobj && i < fobj->shared_count; ++i) {
-		fence = rcu_dereference(fobj->shared[i]);
-
+	dma_resv_iter_begin(&cursor, resv, true);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (!fence->ops->signaled)
 			dma_fence_enable_sw_signaling(fence);
 	}
+	dma_resv_iter_end(&cursor);
 	rcu_read_unlock();
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 08/26] drm/amdgpu: use the new iterator in amdgpu_sync_resv
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 44 ++++++++----------------
 1 file changed, 14 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 862eb3c1c4c5..f7d8487799b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -252,41 +252,25 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
 		     void *owner)
 {
-	struct dma_resv_list *flist;
+	struct dma_resv_iter cursor;
 	struct dma_fence *f;
-	unsigned i;
-	int r = 0;
+	int r;
 
 	if (resv == NULL)
 		return -EINVAL;
 
-	/* always sync to the exclusive fence */
-	f = dma_resv_excl_fence(resv);
-	dma_fence_chain_for_each(f, f) {
-		struct dma_fence_chain *chain = to_dma_fence_chain(f);
-
-		if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
-					   chain->fence : f)) {
-			r = amdgpu_sync_fence(sync, f);
-			dma_fence_put(f);
-			if (r)
-				return r;
-			break;
-		}
-	}
-
-	flist = dma_resv_shared_list(resv);
-	if (!flist)
-		return 0;
-
-	for (i = 0; i < flist->shared_count; ++i) {
-		f = rcu_dereference_protected(flist->shared[i],
-					      dma_resv_held(resv));
-
-		if (amdgpu_sync_test_fence(adev, mode, owner, f)) {
-			r = amdgpu_sync_fence(sync, f);
-			if (r)
-				return r;
+	dma_resv_for_each_fence(&cursor, resv, true, f) {
+		dma_fence_chain_for_each(f, f) {
+			struct dma_fence_chain *chain = to_dma_fence_chain(f);
+
+			if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
+						   chain->fence : f)) {
+				r = amdgpu_sync_fence(sync, f);
+				dma_fence_put(f);
+				if (r)
+					return r;
+				break;
+			}
 		}
 	}
 	return 0;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread


* [PATCH 09/26] drm/amdgpu: use new iterator in amdgpu_ttm_bo_eviction_valuable
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1129e17e9f09..4511cd15c3a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1332,10 +1332,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 					    const struct ttm_place *place)
 {
 	unsigned long num_pages = bo->resource->num_pages;
+	struct dma_resv_iter resv_cursor;
 	struct amdgpu_res_cursor cursor;
-	struct dma_resv_list *flist;
 	struct dma_fence *f;
-	int i;
 
 	/* Swapout? */
 	if (bo->resource->mem_type == TTM_PL_SYSTEM)
@@ -1349,14 +1348,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = dma_resv_shared_list(bo->base.resv);
-	if (flist) {
-		for (i = 0; i < flist->shared_count; ++i) {
-			f = rcu_dereference_protected(flist->shared[i],
-				dma_resv_held(bo->base.resv));
-			if (amdkfd_fence_check_mm(f, current->mm))
-				return false;
-		}
+	dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
+		if (amdkfd_fence_check_mm(f, current->mm))
+			return false;
 	}
 
 	switch (bo->resource->mem_type) {
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 10/26] drm/msm: use new iterator in msm_gem_describe
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit. Also drop the RCU read side lock since the
object is locked anyway.

Untested since I can't get the driver to compile on !ARM.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/msm/msm_gem.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 22308a1b66fc..14907622769f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -880,7 +880,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct dma_resv *robj = obj->resv;
-	struct dma_resv_list *fobj;
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -955,22 +955,13 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 		seq_puts(m, "\n");
 	}
 
-	rcu_read_lock();
-	fobj = dma_resv_shared_list(robj);
-	if (fobj) {
-		unsigned int i, shared_count = fobj->shared_count;
-
-		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference(fobj->shared[i]);
+	dma_resv_for_each_fence(&cursor, robj, true, fence) {
+		if (dma_resv_iter_is_exclusive(&cursor))
+			describe_fence(fence, "Exclusive", m);
+		else
 			describe_fence(fence, "Shared", m);
-		}
 	}
 
-	fence = dma_resv_excl_fence(robj);
-	if (fence)
-		describe_fence(fence, "Exclusive", m);
-	rcu_read_unlock();
-
 	msm_gem_unlock(obj);
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread
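
When the caller needs to tell the exclusive fence apart from the shared ones, the cursor exposes that through dma_resv_iter_is_exclusive(), as this patch and patch 13 use it. A minimal sketch of the idiom; print_fence() is a placeholder:

        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, robj, true, fence) {
                if (dma_resv_iter_is_exclusive(&cursor))
                        print_fence("Exclusive", fence);        /* placeholder */
                else
                        print_fence("Shared", fence);           /* placeholder */
        }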

* [PATCH 11/26] drm/radeon: use new iterator in radeon_sync_resv
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon_sync.c | 22 +++-------------------
 1 file changed, 3 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 9257b60144c4..b991ba1bcd51 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -91,33 +91,17 @@ int radeon_sync_resv(struct radeon_device *rdev,
 		     struct dma_resv *resv,
 		     bool shared)
 {
-	struct dma_resv_list *flist;
-	struct dma_fence *f;
+	struct dma_resv_iter cursor;
 	struct radeon_fence *fence;
-	unsigned i;
+	struct dma_fence *f;
 	int r = 0;
 
-	/* always sync to the exclusive fence */
-	f = dma_resv_excl_fence(resv);
-	fence = f ? to_radeon_fence(f) : NULL;
-	if (fence && fence->rdev == rdev)
-		radeon_sync_fence(sync, fence);
-	else if (f)
-		r = dma_fence_wait(f, true);
-
-	flist = dma_resv_shared_list(resv);
-	if (shared || !flist || r)
-		return r;
-
-	for (i = 0; i < flist->shared_count; ++i) {
-		f = rcu_dereference_protected(flist->shared[i],
-					      dma_resv_held(resv));
+	dma_resv_for_each_fence(&cursor, resv, shared, f) {
 		fence = to_radeon_fence(f);
 		if (fence && fence->rdev == rdev)
 			radeon_sync_fence(sync, fence);
 		else
 			r = dma_fence_wait(f, true);
-
 		if (r)
 			break;
 	}
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:34   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:34 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

v2: use dma_resv_for_each_fence

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
 1 file changed, 6 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 042c16b5d54a..5bc5f775abe1 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
 					    struct drm_gem_object *obj,
 					    bool write)
 {
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	int ret;
-	struct dma_fence **fences;
-	unsigned int i, fence_count;
-
-	if (!write) {
-		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
-
-		return drm_sched_job_add_dependency(job, fence);
-	}
-
-	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
-	if (ret || !fence_count)
-		return ret;
 
-	for (i = 0; i < fence_count; i++) {
-		ret = drm_sched_job_add_dependency(job, fences[i]);
+	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+		ret = drm_sched_job_add_dependency(job, fence);
 		if (ret)
-			break;
+			return ret;
 	}
-
-	for (; i < fence_count; i++)
-		dma_fence_put(fences[i]);
-	kfree(fences);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread
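
For drivers built on the GPU scheduler nothing changes on the consumer side; a submit path keeps calling the helper once per buffer object. A hedged sketch of such a caller, with a hypothetical submit/bos structure and a hypothetical BO_WRITE flag:

        unsigned int i;
        int ret;

        for (i = 0; i < submit->nr_bos; ++i) {
                struct drm_gem_object *obj = submit->bos[i].obj;        /* hypothetical */
                bool write = submit->bos[i].flags & BO_WRITE;           /* hypothetical */

                ret = drm_sched_job_add_implicit_dependencies(&submit->job, obj, write);
                if (ret)
                        return ret;
        }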

* [PATCH 13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

This makes the function much simpler since the complex
retry logic is now handled elsewhere.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
 1 file changed, 11 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6234e17259c1..b1cb7ba688da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
-	struct dma_resv_list *list;
-	unsigned int seq;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	int err;
 
 	err = -ENOENT;
@@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-retry:
-	seq = raw_read_seqcount(&obj->base.resv->seq);
-
-	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
-
-	/* Translate shared fences to READ set of engines */
-	list = dma_resv_shared_list(obj->base.resv);
-	if (list) {
-		unsigned int shared_count = list->shared_count, i;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence =
-				rcu_dereference(list->shared[i]);
-
+	args->busy = false;
+	dma_resv_iter_begin(&cursor, obj->base.resv, true);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (dma_resv_iter_is_exclusive(&cursor))
+			/* Translate the exclusive fence to the READ *and* WRITE engine */
+			args->busy = busy_check_writer(fence);
+		else
+			/* Translate shared fences to READ set of engines */
 			args->busy |= busy_check_reader(fence);
-		}
 	}
-
-	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
-		goto retry;
+	dma_resv_iter_end(&cursor);
 
 	err = 0;
 out:
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread
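
Callers that cannot take the reservation lock use the unlocked variant instead; the seqcount retry that i915_gem_busy_ioctl() used to open-code is handled inside the iterator, and the returned fence holds a reference for the duration of the loop body. A minimal sketch of that pattern; check_fence() is a placeholder:

        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        u32 busy = 0;

        dma_resv_iter_begin(&cursor, obj->base.resv, true);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                busy |= check_fence(fence);     /* placeholder */
        }
        dma_resv_iter_end(&cursor);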

* [PATCH 14/26] drm/i915: use the new iterator in i915_sw_fence_await_reservation v3
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

v2: use dma_resv_for_each_fence instead; according to Tvrtko the lock is
    held here anyway.
v3: back to using dma_resv_for_each_fence_unlocked.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/i915_sw_fence.c | 57 ++++++++--------------------
 1 file changed, 15 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index c589a681da77..7635b0478ea5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -572,56 +572,29 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    unsigned long timeout,
 				    gfp_t gfp)
 {
-	struct dma_fence *excl;
+	struct dma_resv_iter cursor;
+	struct dma_fence *f;
 	int ret = 0, pending;
 
 	debug_fence_assert(fence);
 	might_sleep_if(gfpflags_allow_blocking(gfp));
 
-	if (write) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-
-		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			if (shared[i]->ops == exclude)
-				continue;
-
-			pending = i915_sw_fence_await_dma_fence(fence,
-								shared[i],
-								timeout,
-								gfp);
-			if (pending < 0) {
-				ret = pending;
-				break;
-			}
-
-			ret |= pending;
-		}
-
-		for (i = 0; i < count; i++)
-			dma_fence_put(shared[i]);
-		kfree(shared);
-	} else {
-		excl = dma_resv_get_excl_unlocked(resv);
-	}
-
-	if (ret >= 0 && excl && excl->ops != exclude) {
-		pending = i915_sw_fence_await_dma_fence(fence,
-							excl,
-							timeout,
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, resv, write);
+	dma_resv_for_each_fence_unlocked(&cursor, f) {
+		rcu_read_unlock();
+		pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
 							gfp);
-		if (pending < 0)
+		rcu_read_lock();
+		if (pending < 0) {
 			ret = pending;
-		else
-			ret |= pending;
-	}
-
-	dma_fence_put(excl);
+			break;
+		}
 
+		ret |= pending;
+	}
+	dma_resv_iter_end(&cursor);
+	rcu_read_unlock();
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread
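
Where the loop body may sleep, this version of the series drops the RCU read lock around the blocking call and re-acquires it before continuing, exactly as the i915 conversions above and below do. A condensed sketch of that dance; wait_on_fence() is a placeholder for any call that may block:

        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        int ret = 0;

        rcu_read_lock();
        dma_resv_iter_begin(&cursor, resv, write);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                rcu_read_unlock();
                ret = wait_on_fence(fence);     /* placeholder, may sleep */
                rcu_read_lock();
                if (ret)
                        break;
        }
        dma_resv_iter_end(&cursor);
        rcu_read_unlock();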

* [PATCH 15/26] drm/i915: use the new iterator in i915_request_await_object v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

v2: add missing rcu_read_lock()/rcu_read_unlock()
v3: use dma_resv_for_each_fence instead

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/i915_request.c | 34 +++++------------------------
 1 file changed, 5 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index ce446716d092..3839712ebd23 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1509,38 +1509,14 @@ i915_request_await_object(struct i915_request *to,
 			  struct drm_i915_gem_object *obj,
 			  bool write)
 {
-	struct dma_fence *excl;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	int ret = 0;
 
-	if (write) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-
-		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
-					  &shared);
+	dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
+		ret = i915_request_await_dma_fence(to, fence);
 		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			ret = i915_request_await_dma_fence(to, shared[i]);
-			if (ret)
-				break;
-
-			dma_fence_put(shared[i]);
-		}
-
-		for (; i < count; i++)
-			dma_fence_put(shared[i]);
-		kfree(shared);
-	} else {
-		excl = dma_resv_get_excl_unlocked(obj->base.resv);
-	}
-
-	if (excl) {
-		if (ret == 0)
-			ret = i915_request_await_dma_fence(to, excl);
-
-		dma_fence_put(excl);
+			break;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 16/26] drm/i915: use new iterator in i915_gem_object_wait_reservation v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

v2: add missing rcu read unlock.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_wait.c | 57 ++++++------------------
 1 file changed, 14 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index f909aaa09d9c..e416cf528635 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -37,55 +37,26 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 				 unsigned int flags,
 				 long timeout)
 {
-	struct dma_fence *excl;
-	bool prune_fences = false;
-
-	if (flags & I915_WAIT_ALL) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-		int ret;
-
-		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			timeout = i915_gem_object_wait_fence(shared[i],
-							     flags, timeout);
-			if (timeout < 0)
-				break;
-
-			dma_fence_put(shared[i]);
-		}
-
-		for (; i < count; i++)
-			dma_fence_put(shared[i]);
-		kfree(shared);
-
-		/*
-		 * If both shared fences and an exclusive fence exist,
-		 * then by construction the shared fences must be later
-		 * than the exclusive fence. If we successfully wait for
-		 * all the shared fences, we know that the exclusive fence
-		 * must all be signaled. If all the shared fences are
-		 * signaled, we can prune the array and recover the
-		 * floating references on the fences/requests.
-		 */
-		prune_fences = count && timeout >= 0;
-	} else {
-		excl = dma_resv_get_excl_unlocked(resv);
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		rcu_read_unlock();
+		timeout = i915_gem_object_wait_fence(fence, flags, timeout);
+		rcu_read_lock();
+		if (timeout < 0)
+			break;
 	}
-
-	if (excl && timeout >= 0)
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
-
-	dma_fence_put(excl);
+	dma_resv_iter_end(&cursor);
+	rcu_read_unlock();
 
 	/*
 	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled.
 	 */
-	if (prune_fences)
+	if (timeout > 0)
 		dma_resv_prune(resv);
 
 	return timeout;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 17/26] drm/i915: use new iterator in i915_gem_object_wait_priority v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

v2: add missing rcu_read_lock()/unlock()

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_wait.c | 34 +++++++-----------------
 1 file changed, 10 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index e416cf528635..de8084b6af42 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -122,32 +122,18 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
 			      const struct i915_sched_attr *attr)
 {
-	struct dma_fence *excl;
-
-	if (flags & I915_WAIT_ALL) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-		int ret;
-
-		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
-					  &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			i915_gem_fence_wait_priority(shared[i], attr);
-			dma_fence_put(shared[i]);
-		}
-
-		kfree(shared);
-	} else {
-		excl = dma_resv_get_excl_unlocked(obj->base.resv);
-	}
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 
-	if (excl) {
-		i915_gem_fence_wait_priority(excl, attr);
-		dma_fence_put(excl);
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		rcu_read_unlock();
+		i915_gem_fence_wait_priority(fence, attr);
+		rcu_read_lock();
 	}
+	dma_resv_iter_end(&cursor);
+	rcu_read_unlock();
 	return 0;
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 18/26] drm/i915: use new iterator in i915_gem_object_last_write_engine v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

This may even be a fix, since the RCU usage here looks incorrect.

v2: add missing rcu_read_lock()/unlock()

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 48112b9d76df..487329a96e92 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -507,16 +507,18 @@ static inline struct intel_engine_cs *
 i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 {
 	struct intel_engine_cs *engine = NULL;
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
 	rcu_read_lock();
-	fence = dma_resv_get_excl_unlocked(obj->base.resv);
+	dma_resv_iter_begin(&cursor, obj->base.resv, false);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (fence && dma_fence_is_i915(fence) &&
+		    !dma_fence_is_signaled(fence))
+			engine = to_request(fence)->engine;
+	}
+	dma_resv_iter_end(&cursor);
 	rcu_read_unlock();
-
-	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
-		engine = to_request(fence)->engine;
-	dma_fence_put(fence);
-
 	return engine;
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 19/26] drm/i915: use new cursor in intel_prepare_plane_fb v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

v2: add rcu_read_lock()/unlock()

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/display/intel_display.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 134a6acbd8fb..51e3df0de1ce 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11290,6 +11290,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
+		struct dma_resv_iter cursor;
 		struct dma_fence *fence;
 
 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
@@ -11300,12 +11301,16 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 		if (ret < 0)
 			goto unpin_fb;
 
-		fence = dma_resv_get_excl_unlocked(obj->base.resv);
-		if (fence) {
+		rcu_read_lock();
+		dma_resv_iter_begin(&cursor, obj->base.resv, false);
+		dma_resv_for_each_fence_unlocked(&cursor, fence) {
+			rcu_read_unlock();
 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 						   fence);
-			dma_fence_put(fence);
+			rcu_read_lock();
 		}
+		dma_resv_iter_end(&cursor);
+		rcu_read_unlock();
 	} else {
 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 					   new_plane_state->uapi.fence);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 20/26] drm: use new iterator in drm_gem_fence_array_add_implicit v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

v2: add missing rcu_read_lock()/unlock()

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_gem.c | 34 ++++++++++++----------------------
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 09c820045859..c2c41b668f40 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1340,31 +1340,21 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 				     struct drm_gem_object *obj,
 				     bool write)
 {
-	int ret;
-	struct dma_fence **fences;
-	unsigned int i, fence_count;
-
-	if (!write) {
-		struct dma_fence *fence =
-			dma_resv_get_excl_unlocked(obj->resv);
-
-		return drm_gem_fence_array_add(fence_array, fence);
-	}
-
-	ret = dma_resv_get_fences(obj->resv, NULL,
-						&fence_count, &fences);
-	if (ret || !fence_count)
-		return ret;
-
-	for (i = 0; i < fence_count; i++) {
-		ret = drm_gem_fence_array_add(fence_array, fences[i]);
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+	int ret = 0;
+
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, obj->resv, write);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		rcu_read_unlock();
+		ret = drm_gem_fence_array_add(fence_array, fence);
+		rcu_read_lock();
 		if (ret)
 			break;
 	}
-
-	for (; i < fence_count; i++)
-		dma_fence_put(fences[i]);
-	kfree(fences);
+	dma_resv_iter_end(&cursor);
+	rcu_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 21/26] drm: use new iterator in drm_gem_plane_helper_prepare_fb v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Makes the handling a bit more complex, but avoids the use of
dma_resv_get_excl_unlocked().

v2: add missing rcu_read_lock()/unlock()

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_gem_atomic_helper.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e570398abd78..d8f9c6432544 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -143,6 +143,7 @@
  */
 int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 {
+	struct dma_resv_iter cursor;
 	struct drm_gem_object *obj;
 	struct dma_fence *fence;
 
@@ -150,9 +151,18 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	fence = dma_resv_get_excl_unlocked(obj->resv);
-	drm_atomic_set_fence_for_plane(state, fence);
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, obj->resv, false);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		rcu_read_unlock();
+		/* TODO: We only use the first write fence here */
+		drm_atomic_set_fence_for_plane(state, fence);
+		return 0;
+	}
+	dma_resv_iter_end(&cursor);
+	rcu_read_unlock();
 
+	drm_atomic_set_fence_for_plane(state, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread
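
Passing false as the last argument to dma_resv_iter_begin() restricts the walk to the exclusive (write) fence, which is how the prepare_fb style helpers above and below use it when only the first returned fence matters. A minimal sketch under that assumption; consume_write_fence() is a placeholder:

        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj->resv, false /* exclusive fence only */);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                consume_write_fence(dma_fence_get(fence));      /* placeholder */
                break;
        }
        dma_resv_iter_end(&cursor);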

* [PATCH 22/26] drm/nouveau: use the new iterator in nouveau_fence_sync
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Simplifying the code a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/nouveau/nouveau_fence.c | 48 +++++++------------------
 1 file changed, 12 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 05d0b3eb3690..26f9299df881 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,14 +339,15 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 }
 
 int
-nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+		   bool exclusive, bool intr)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
-	struct dma_fence *fence;
 	struct dma_resv *resv = nvbo->bo.base.resv;
-	struct dma_resv_list *fobj;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	struct nouveau_fence *f;
-	int ret = 0, i;
+	int ret;
 
 	if (!exclusive) {
 		ret = dma_resv_reserve_shared(resv, 1);
@@ -355,10 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 			return ret;
 	}
 
-	fobj = dma_resv_shared_list(resv);
-	fence = dma_resv_excl_fence(resv);
-
-	if (fence) {
+	dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
 		struct nouveau_channel *prev = NULL;
 		bool must_wait = true;
 
@@ -366,41 +364,19 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		if (f) {
 			rcu_read_lock();
 			prev = rcu_dereference(f->channel);
-			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+			if (prev && (prev == chan ||
+				     fctx->sync(f, prev, chan) == 0))
 				must_wait = false;
 			rcu_read_unlock();
 		}
 
-		if (must_wait)
+		if (must_wait) {
 			ret = dma_fence_wait(fence, intr);
-
-		return ret;
-	}
-
-	if (!exclusive || !fobj)
-		return ret;
-
-	for (i = 0; i < fobj->shared_count && !ret; ++i) {
-		struct nouveau_channel *prev = NULL;
-		bool must_wait = true;
-
-		fence = rcu_dereference_protected(fobj->shared[i],
-						dma_resv_held(resv));
-
-		f = nouveau_local_fence(fence, chan->drm);
-		if (f) {
-			rcu_read_lock();
-			prev = rcu_dereference(f->channel);
-			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
-				must_wait = false;
-			rcu_read_unlock();
+			if (ret)
+				return ret;
 		}
-
-		if (must_wait)
-			ret = dma_fence_wait(fence, intr);
 	}
-
-	return ret;
+	return 0;
 }
 
 void
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 23/26] drm/nouveau: use the new iterator in nv50_wndw_prepare_fb v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Makes the handling a bit more complex, but avoids the use of
dma_resv_get_excl_unlocked().

v2: add missing rcu_read_lock()/unlock()

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/nouveau/dispnv50/wndw.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 8d048bacd6f0..566f50f53f24 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -539,6 +539,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 	struct nouveau_bo *nvbo;
 	struct nv50_head_atom *asyh;
 	struct nv50_wndw_ctxdma *ctxdma;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	int ret;
 
 	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
@@ -561,7 +563,15 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 			asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
+	rcu_read_lock();
+	dma_resv_iter_begin(&cursor, nvbo->bo.base.resv, false);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		/* TODO: We only use the first writer here */
+		asyw->state.fence = dma_fence_get(fence);
+		break;
+	}
+	dma_resv_iter_end(&cursor);
+	rcu_read_unlock();
 	asyw->image.offset[0] = nvbo->offset;
 
 	if (wndw->func->prepare) {
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 24/26] drm/etnaviv: use new iterator in etnaviv_gem_describe
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Instead of hand rolling the logic.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.c | 29 ++++++++++-----------------
 1 file changed, 11 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 8f1b5af47dd6..16f5991446c8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -428,19 +428,17 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
 static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 	const char *type, struct seq_file *m)
 {
-	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		seq_printf(m, "\t%9s: %s %s seq %llu\n",
-			   type,
-			   fence->ops->get_driver_name(fence),
-			   fence->ops->get_timeline_name(fence),
-			   fence->seqno);
+	seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
+		   fence->ops->get_driver_name(fence),
+		   fence->ops->get_timeline_name(fence),
+		   fence->seqno);
 }
 
 static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct dma_resv *robj = obj->resv;
-	struct dma_resv_list *fobj;
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 	unsigned long off = drm_vma_node_start(&obj->vma_node);
 
@@ -450,19 +448,14 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			off, etnaviv_obj->vaddr, obj->size);
 
 	rcu_read_lock();
-	fobj = dma_resv_shared_list(robj);
-	if (fobj) {
-		unsigned int i, shared_count = fobj->shared_count;
-
-		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference(fobj->shared[i]);
+	dma_resv_iter_begin(&cursor, robj, true);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (dma_resv_iter_is_exclusive(&cursor))
+			etnaviv_gem_describe_fence(fence, "Exclusive", m);
+		else
 			etnaviv_gem_describe_fence(fence, "Shared", m);
-		}
 	}
-
-	fence = dma_resv_excl_fence(robj);
-	if (fence)
-		etnaviv_gem_describe_fence(fence, "Exclusive", m);
+	dma_resv_iter_end(&cursor);
 	rcu_read_unlock();
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 25/26] drm/etnaviv: replace dma_resv_get_excl_unlocked
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

We certainly hold the reservation lock here, no need for the RCU dance.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 4dd7d9d541c0..7e17bc2b5df1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -195,7 +195,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			if (ret)
 				return ret;
 		} else {
-			bo->excl = dma_resv_get_excl_unlocked(robj);
+			bo->excl = dma_fence_get(dma_resv_excl_fence(robj));
 		}
 
 	}
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* [PATCH 26/26] dma-buf: nuke dma_resv_get_excl_unlocked
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
@ 2021-09-17 12:35   ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-17 12:35 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Eureka, that's finally not used any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 include/linux/dma-resv.h | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 8c968f8c9d33..f42ca254acb5 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -423,32 +423,6 @@ dma_resv_excl_fence(struct dma_resv *obj)
 	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
 }
 
-/**
- * dma_resv_get_excl_unlocked - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_unlocked(struct dma_resv *obj)
-{
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
-}
-
 /**
  * dma_resv_shared_list - get the reservation object's shared fence list
  * @obj: the reservation object
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread

* Re: [PATCH 01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 13:23     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 13:23 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:48PM +0200, Christian König wrote:
> Abstract the complexity of iterating over all the fences
> in a dma_resv object.
> 
> The new loop handles the whole RCU and retry dance and
> returns only fences where we can be sure we grabbed the
> right one.
> 
> v2: fix accessing the shared fences while they might be freed,
>     improve kerneldoc, rename _cursor to _iter, add
>     dma_resv_iter_is_exclusive, add dma_resv_iter_begin/end
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 61 +++++++++++++++++++++++++++
>  include/linux/dma-resv.h   | 84 ++++++++++++++++++++++++++++++++++++++
>  2 files changed, 145 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 84fbe60629e3..3e77cad2c9d4 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -323,6 +323,67 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>  }
>  EXPORT_SYMBOL(dma_resv_add_excl_fence);
>  
> +/**
> + * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
> + * @cursor: cursor to record the current position
> + * @first: if we should start over
> + *
> + * Return all the fences in the dma_resv object which are not yet signaled.
> + * The returned fence has an extra local reference so will stay alive.
> + * If a concurrent modify is detected the whole iterration is started over again.
> + */
> +struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,

Bit ocd, but I'd still just call that iter_next.

> +					      bool first)

Hm I'd put all the init code into iter_begin ...

> +{
> +	struct dma_resv *obj = cursor->obj;

Aren't we missing rcu_read_lock() around the entire thing here?

> +
> +	first |= read_seqcount_retry(&obj->seq, cursor->seq);
> +	do {
> +		/* Drop the reference from the previous round */
> +		dma_fence_put(cursor->fence);
> +
> +		cursor->is_first = first;
> +		if (first) {
> +			cursor->seq = read_seqcount_begin(&obj->seq);
> +			cursor->index = -1;
> +			cursor->fences = dma_resv_shared_list(obj);

And then also call iter_begin from here. That way we guarantee that
read_seqcount_begin is always called before _retry(). It's not a problem
with the seqcount implementation (I think at least), but it definitely
looks funny.

Calling iter_begin here also makes it clear that we're essentially
restarting.
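
Roughly something like this (untested sketch only, reusing the fields from
this patch; whether dma_resv_shared_list() can be called like that from both
the locked and the unlocked path still needs checking):

static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
				       struct dma_resv *obj,
				       bool all_fences)
{
	cursor->obj = obj;
	cursor->all_fences = all_fences;
	cursor->fence = NULL;
	/* also the restart point when a concurrent modification is detected */
	cursor->seq = read_seqcount_begin(&obj->seq);
	cursor->index = -1;
	cursor->fences = dma_resv_shared_list(obj);
}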

> +
> +			cursor->fence = dma_resv_excl_fence(obj);
> +			if (cursor->fence &&
> +			    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,

Please use the right dma_fence wrapper here for this and don't look at the
bits/flags outside of dma_fence.[hc] code. I just realized that we don't
have the right amount of barriers in there for the fastpath, i.e. if we
have:

x = 0; /* static initializer */

thread a
	x = 1;
	dma_fence_signal(fence);


thread b;
	if (dma_fence_is_signaled(fence))
		printk("%i\n", x);

Then you might actually be able to observe x == 0 in thread b. Which is
not what we want at all.

So no open-coding of dma_fence flag bits outside of dma_fence.[hc],
please. And yes, the i915-gem code is unfortunately a disaster.
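
For the exclusive fence here that would be something like this (sketch only,
and whether the check is better done only after the dma_fence_get_rcu() is a
separate question):

	cursor->fence = dma_resv_excl_fence(obj);
	if (cursor->fence && dma_fence_is_signaled(cursor->fence))
		cursor->fence = NULL;

That way the flag test, and the barrier fix once we have it, live in one
place in dma_fence.[hc].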

> +				     &cursor->fence->flags))
> +				cursor->fence = NULL;
> +		} else {
> +			cursor->fence = NULL;
> +		}
> +
> +		if (cursor->fence) {
> +			cursor->fence = dma_fence_get_rcu(cursor->fence);
> +		} else if (cursor->all_fences && cursor->fences) {
> +			struct dma_resv_list *fences = cursor->fences;
> +
> +			while (++cursor->index < fences->shared_count) {
> +				cursor->fence = rcu_dereference(
> +					fences->shared[cursor->index]);
> +				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> +					      &cursor->fence->flags))
> +					break;
> +			}
> +			if (cursor->index < fences->shared_count)
> +				cursor->fence =
> +					dma_fence_get_rcu(cursor->fence);
> +			else
> +				cursor->fence = NULL;
> +		}

The control flow here is very hairy, but I'm not sure how to best do this.
With my suggestion to move the read_seqcount_begin into iter_begin maybe
something like this:

iter_next()
{
	do {
		dma_fence_put(cursor->fence);
		cursor->fence = NULL;

		if (cursor->index == -1) { /* reset by iter_begin() */
			cursor->fence = get_exclusive();
			cursor->index++;
		} else {
			cursor->fence = shared_fences[++cursor->index];
		}

		if (dma_fence_is_signaled(cursor->fence))
			continue; /* already signaled, just grab the next fence. */

		cursor->fence = dma_fence_get_rcu(cursor->fence);

		if (!cursor->fence || read_seqcount_retry()) {
			/* we lost the race, restart completely */
			iter_begin(); /* ->fence will be cleaned up at beginning of the loop */
			continue;
		}

		return cursor->fence;
	} while (true);
}

Maybe I missed something, but that avoids duplicating all the tricky
code, i.e. the signalling check, the RCU-protected conditional
fence_get, and the retry is also nicely at the end.
> +
> +		/* For the eventually next round */
> +		first = true;
> +	} while (read_seqcount_retry(&obj->seq, cursor->seq));
> +
> +	return cursor->fence;
> +}
> +EXPORT_SYMBOL_GPL(dma_resv_iter_walk_unlocked);
> +
>  /**
>   * dma_resv_copy_fences - Copy all fences from src to dst.
>   * @dst: the destination reservation object
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 9100dd3dc21f..693d16117153 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -149,6 +149,90 @@ struct dma_resv {
>  	struct dma_resv_list __rcu *fence;
>  };
>  
> +/**
> + * struct dma_resv_iter - current position into the dma_resv fences
> + *
> + * Don't touch this directly in the driver, use the accessor function instead.
> + */
> +struct dma_resv_iter {
> +	/** @obj: The dma_resv object we iterate over */
> +	struct dma_resv *obj;
> +
> +	/** @all_fences: If all fences should be returned */
> +	bool all_fences;
> +
> +	/** @fence: the currently handled fence */
> +	struct dma_fence *fence;
> +
> +	/** @seq: sequence number to check for modifications */
> +	unsigned int seq;
> +
> +	/** @index: index into the shared fences */

If you go with my suggestion (assuming it works): Please add "-1 indicates
to pick the exclusive fence instead."

> +	unsigned int index;
> +
> +	/** @fences: the shared fences */
> +	struct dma_resv_list *fences;
> +
> +	/** @is_first: true if this is the first returned fence */
> +	bool is_first;

I think if we just rely on -1 == exclusive fence/is_first we don't need
this one here?

> +};
> +
> +struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
> +					      bool first);
> +
> +/**
> + * dma_resv_iter_begin - initialize a dma_resv_iter object
> + * @cursor: The dma_resv_iter object to initialize
> + * @obj: The dma_resv object which we want to iterator over
> + * @all_fences: If all fences should be returned or just the exclusive one

Please add: "Callers must clean up the iterator with dma_resv_iter_end()."

> + */
> +static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
> +					struct dma_resv *obj,
> +					bool all_fences)
> +{
> +	cursor->obj = obj;
> +	cursor->all_fences = all_fences;
> +	cursor->fence = NULL;
> +}
> +
> +/**
> + * dma_resv_iter_end - cleanup a dma_resv_iter object
> + * @cursor: the dma_resv_iter object which should be cleaned up
> + *
> + * Make sure that the reference to the fence in the cursor is properly
> + * dropped.

Please add:

"This function must be called every time dma_resv_iter_begin() was called
to clean up any references."
> + */
> +static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
> +{
> +	dma_fence_put(cursor->fence);
> +}
> +
> +/**
> + * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
> + * @cursor: the cursor of the current position
> + *
> + * Returns true if the currently returned fence is the exclusive one.
> + */
> +static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
> +{
> +	return cursor->index == -1;
> +}
> +
> +/**
> + * dma_resv_for_each_fence_unlocked - unlocked fence iterator
> + * @cursor: a struct dma_resv_iter pointer
> + * @fence: the current fence
> + *
> + * Iterate over the fences in a struct dma_resv object without holding the
> + * dma_resv::lock. The RCU read side lock must be hold when using this, but can
> + * be dropped and re-taken as necessary inside the loop. The cursor needs to be
> + * initialized with dma_resv_iter_begin_unlocked() and cleaned up with

We don't have an _unlocked version?
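
A tiny usage example in here would help too, along these lines (illustrative
only, do_something() is a stand-in for the driver code):

	rcu_read_lock();
	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* the iterator holds a reference on fence here */
		do_something(fence);
	}
	dma_resv_iter_end(&cursor);
	rcu_read_unlock();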

> + * dma_resv_iter_end_unlocked().
> + */
> +#define dma_resv_for_each_fence_unlocked(cursor, fence)			\
> +	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
> +	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))
> +
>  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 13:27     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 13:27 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:49PM +0200, Christian König wrote:
> A simpler version of the iterator to be used when the dma_resv object is
> locked.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 33 +++++++++++++++++++++++++++++++++
>  include/linux/dma-resv.h   | 17 +++++++++++++++++
>  2 files changed, 50 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 3e77cad2c9d4..a3c79a99fb44 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -384,6 +384,39 @@ struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
>  }
>  EXPORT_SYMBOL_GPL(dma_resv_iter_walk_unlocked);
>  
> +/**
> + * dma_resv_iter_walk - walk over fences in a dma_resv obj
> + * @cursor: cursor to record the current position
> + * @first: if we should start over
> + *
> + * Return all the fences in the dma_resv object while holding the
> + * dma_resv::lock.
> + */
> +struct dma_fence *dma_resv_iter_walk(struct dma_resv_iter *cursor, bool first)
> +{
> +	dma_resv_assert_held(cursor->obj);
> +
> +	cursor->is_first = first;
> +	if (first) {
> +		struct dma_fence *fence;
> +
> +		cursor->index = -1;
> +		cursor->fences = dma_resv_shared_list(cursor->obj);
> +
> +		fence = dma_resv_excl_fence(cursor->obj);
> +		if (fence)
> +			return fence;
> +	}

I think you can still use the shared iter_begin/end functions even with my
suggestions for patch 1, but that would mean changes here too.

> +
> +	if (!cursor->all_fences || !cursor->fences ||
> +	    ++cursor->index >= cursor->fences->shared_count)
> +		return NULL;
> +
> +	return rcu_dereference_protected(cursor->fences->shared[cursor->index],
> +					 dma_resv_held(cursor->obj));
> +}
> +EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
> +
>  /**
>   * dma_resv_copy_fences - Copy all fences from src to dst.
>   * @dst: the destination reservation object
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 693d16117153..8c968f8c9d33 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -179,6 +179,7 @@ struct dma_resv_iter {
>  
>  struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
>  					      bool first);
> +struct dma_fence *dma_resv_iter_walk(struct dma_resv_iter *cursor, bool first);
>  
>  /**
>   * dma_resv_iter_begin - initialize a dma_resv_iter object
> @@ -233,6 +234,22 @@ static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
>  	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
>  	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))
>  
> +/**
> + * dma_resv_for_each_fence - fence iterator
> + * @cursor: a struct dma_resv_iter pointer
> + * @obj: a dma_resv object pointer
> + * @all_fences: true if all fences should be returned
> + * @fence: the current fence
> + *
> + * Iterate over the fences in a struct dma_resv object while holding the
> + * dma_resv::lock. @all_fences controls if the shared fences are returned as

&dma_resv.lock is how you reference struct members in kerneldoc. I think
you had this also in patch 1.

> + * well. The cursor initialisation is part of the iterator.

Please also link to the iter_begin/end functions here.
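
Maybe with a short usage example as well, something like (illustrative only,
do_something() is a stand-in for the driver code):

	dma_resv_for_each_fence(&cursor, obj, true, fence) {
		/* the dma_resv lock is held, no extra fence reference is taken */
		do_something(fence);
	}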

Aside from doc nits and the obvious changes following from patch 1 (if we
do them), this looks good.
-Daniel

> + */
> +#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
> +	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
> +	     fence = dma_resv_iter_walk(cursor, true); fence;	\
> +	     fence = dma_resv_iter_walk(cursor, false))
> +
>  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
                   ` (26 preceding siblings ...)
  (?)
@ 2021-09-17 14:01 ` Patchwork
  -1 siblings, 0 replies; 115+ messages in thread
From: Patchwork @ 2021-09-17 14:01 UTC (permalink / raw)
  To: Christian König; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
URL   : https://patchwork.freedesktop.org/series/94805/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
d9db5bae923c dma-buf: add dma_resv_for_each_fence_unlocked v2
-:70: CHECK:OPEN_ENDED_LINE: Lines should not end with a '('
#70: FILE: drivers/dma-buf/dma-resv.c:366:
+				cursor->fence = rcu_dereference(

-:140: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#140: FILE: include/linux/dma-resv.h:190:
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+					struct dma_resv *obj,

-:182: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'cursor' - possible side-effects?
#182: FILE: include/linux/dma-resv.h:232:
+#define dma_resv_for_each_fence_unlocked(cursor, fence)			\
+	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
+	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))

-:182: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'fence' - possible side-effects?
#182: FILE: include/linux/dma-resv.h:232:
+#define dma_resv_for_each_fence_unlocked(cursor, fence)			\
+	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
+	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))

-:188: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 4 checks, 157 lines checked
a903605ee308 dma-buf: add dma_resv_for_each_fence
-:85: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'cursor' - possible side-effects?
#85: FILE: include/linux/dma-resv.h:248:
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+	     fence = dma_resv_iter_walk(cursor, true); fence;	\
+	     fence = dma_resv_iter_walk(cursor, false))

-:85: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'fence' - possible side-effects?
#85: FILE: include/linux/dma-resv.h:248:
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+	     fence = dma_resv_iter_walk(cursor, true); fence;	\
+	     fence = dma_resv_iter_walk(cursor, false))

-:92: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 2 checks, 68 lines checked
61845118125a dma-buf: use new iterator in dma_resv_copy_fences
-:127: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 109 lines checked
3d471d53ec2b dma-buf: use new iterator in dma_resv_get_fences v2
-:159: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 139 lines checked
600bac37a0f1 dma-buf: use new iterator in dma_resv_wait_timeout
-:102: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 84 lines checked
b93771325f9f dma-buf: use new iterator in dma_resv_test_signaled
-:93: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 74 lines checked
05ffba354ecc drm/ttm: use the new iterator in ttm_bo_flush_all_fences
-:44: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 26 lines checked
bbdc9793743a drm/amdgpu: use the new iterator in amdgpu_sync_resv
-:72: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 55 lines checked
511232e4b5ca drm/amdgpu: use new iterator in amdgpu_ttm_bo_eviction_valuable
-:47: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 28 lines checked
58e24849df68 drm/msm: use new iterator in msm_gem_describe
-:55: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 34 lines checked
ab00e34c795a drm/radeon: use new iterator in radeon_sync_resv
-:53: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 36 lines checked
749c88a44413 drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
-:56: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 36 lines checked
aa613855df42 drm/i915: use the new iterator in i915_gem_busy_ioctl
-:65: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 46 lines checked
5fac734e6483 drm/i915: use the new iterator in i915_sw_fence_await_reservation v3
-:93: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 71 lines checked
59ae4d4e8254 drm/i915: use the new iterator in i915_request_await_object v2
-:64: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 43 lines checked
c7c263d3cc20 drm/i915: use new iterator in i915_gem_object_wait_reservation v2
-:89: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 69 lines checked
a2269c6c31d4 drm/i915: use new iterator in i915_gem_object_wait_priority v2
-:62: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 42 lines checked
1b7252cd01a4 drm/i915: use new iterator in i915_gem_object_last_write_engine v2
-:44: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 24 lines checked
cd25bcfa7b95 drm/i915: use new cursor in intel_prepare_plane_fb v2
-:46: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 26 lines checked
82c3476a212c drm: use new iterator in drm_gem_fence_array_add_implicit v2
-:62: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 43 lines checked
8ad97e523761 drm: use new iterator in drm_gem_plane_helper_prepare_fb v2
-:48: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 27 lines checked
a6220344b108 drm/nouveau: use the new iterator in nouveau_fence_sync
-:96: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 77 lines checked
2be87d3cd2c8 drm/nouveau: use the new interator in nv50_wndw_prepare_fb v2
-:45: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 24 lines checked
6851abceef53 drm/etnaviv: use new iterator in etnaviv_gem_describe
-:67: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 49 lines checked
f7eb003ad813 drm/etnaviv: replace dma_resv_get_excl_unlocked
-:25: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 8 lines checked
3ed2dc6e4f12 dma-buf: nuke dma_resv_get_excl_unlocked
-:49: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 0 checks, 32 lines checked



^ permalink raw reply	[flat|nested] 115+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
                   ` (27 preceding siblings ...)
  (?)
@ 2021-09-17 14:29 ` Patchwork
  -1 siblings, 0 replies; 115+ messages in thread
From: Patchwork @ 2021-09-17 14:29 UTC (permalink / raw)
  To: Christian König; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 10206 bytes --]

== Series Details ==

Series: series starting with [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
URL   : https://patchwork.freedesktop.org/series/94805/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_10604 -> Patchwork_21084
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_21084:

### IGT changes ###

#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@i915_module_load@reload:
    - {fi-ehl-2}:         [INCOMPLETE][1] ([i915#4136]) -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-ehl-2/igt@i915_module_load@reload.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-ehl-2/igt@i915_module_load@reload.html

  
Known issues
------------

  Here are the changes found in Patchwork_21084 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@amdgpu/amd_basic@cs-gfx:
    - fi-rkl-guc:         NOTRUN -> [SKIP][3] ([fdo#109315]) +17 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-rkl-guc/igt@amdgpu/amd_basic@cs-gfx.html

  * igt@amdgpu/amd_basic@cs-sdma:
    - fi-kbl-7500u:       NOTRUN -> [SKIP][4] ([fdo#109271]) +17 similar issues
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-kbl-7500u/igt@amdgpu/amd_basic@cs-sdma.html

  * igt@core_hotunplug@unbind-rebind:
    - fi-tgl-1115g4:      NOTRUN -> [INCOMPLETE][5] ([i915#4130])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@core_hotunplug@unbind-rebind.html
    - fi-cfl-8700k:       [PASS][6] -> [INCOMPLETE][7] ([i915#4130])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-cfl-8700k/igt@core_hotunplug@unbind-rebind.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-cfl-8700k/igt@core_hotunplug@unbind-rebind.html
    - fi-skl-guc:         [PASS][8] -> [INCOMPLETE][9] ([i915#4130])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-skl-guc/igt@core_hotunplug@unbind-rebind.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-skl-guc/igt@core_hotunplug@unbind-rebind.html

  * igt@gem_huc_copy@huc-copy:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][10] ([i915#2190])
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@gem_huc_copy@huc-copy.html

  * igt@i915_pm_backlight@basic-brightness:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][11] ([i915#1155])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@i915_pm_backlight@basic-brightness.html

  * igt@kms_chamelium@common-hpd-after-suspend:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][12] ([fdo#111827]) +8 similar issues
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@kms_chamelium@common-hpd-after-suspend.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][13] ([i915#4103]) +1 similar issue
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html

  * igt@kms_force_connector_basic@force-load-detect:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][14] ([fdo#109285])
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@kms_force_connector_basic@force-load-detect.html

  * igt@kms_psr@primary_mmap_gtt:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][15] ([i915#1072]) +3 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@kms_psr@primary_mmap_gtt.html

  * igt@prime_vgem@basic-userptr:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][16] ([i915#3301])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@prime_vgem@basic-userptr.html

  * igt@runner@aborted:
    - fi-tgl-1115g4:      NOTRUN -> [FAIL][17] ([i915#1602] / [i915#2722])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-1115g4/igt@runner@aborted.html

  
#### Possible fixes ####

  * igt@core_hotunplug@unbind-rebind:
    - fi-rkl-guc:         [INCOMPLETE][18] ([i915#4130]) -> [PASS][19]
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-rkl-guc/igt@core_hotunplug@unbind-rebind.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-rkl-guc/igt@core_hotunplug@unbind-rebind.html
    - fi-kbl-7500u:       [INCOMPLETE][20] ([i915#4130]) -> [PASS][21]
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-kbl-7500u/igt@core_hotunplug@unbind-rebind.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-kbl-7500u/igt@core_hotunplug@unbind-rebind.html

  
#### Warnings ####

  * igt@i915_module_load@reload:
    - fi-kbl-8809g:       [INCOMPLETE][22] -> [INCOMPLETE][23] ([i915#4130])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-kbl-8809g/igt@i915_module_load@reload.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-kbl-8809g/igt@i915_module_load@reload.html
    - fi-icl-u2:          [INCOMPLETE][24] ([i915#4130] / [i915#4136]) -> [INCOMPLETE][25] ([i915#4130])
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-icl-u2/igt@i915_module_load@reload.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-icl-u2/igt@i915_module_load@reload.html
    - fi-cml-u2:          [INCOMPLETE][26] ([i915#4130] / [i915#4136]) -> [INCOMPLETE][27] ([i915#4130])
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-cml-u2/igt@i915_module_load@reload.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-cml-u2/igt@i915_module_load@reload.html
    - fi-kbl-soraka:      [INCOMPLETE][28] ([i915#4130] / [i915#4136]) -> [INCOMPLETE][29] ([i915#4130])
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-kbl-soraka/igt@i915_module_load@reload.html
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-kbl-soraka/igt@i915_module_load@reload.html
    - fi-tgl-u2:          [INCOMPLETE][30] ([i915#4130] / [i915#4136]) -> [INCOMPLETE][31] ([i915#4130])
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/fi-tgl-u2/igt@i915_module_load@reload.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/fi-tgl-u2/igt@i915_module_load@reload.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
  [fdo#109315]: https://bugs.freedesktop.org/show_bug.cgi?id=109315
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [i915#1072]: https://gitlab.freedesktop.org/drm/intel/issues/1072
  [i915#1155]: https://gitlab.freedesktop.org/drm/intel/issues/1155
  [i915#1602]: https://gitlab.freedesktop.org/drm/intel/issues/1602
  [i915#2190]: https://gitlab.freedesktop.org/drm/intel/issues/2190
  [i915#2722]: https://gitlab.freedesktop.org/drm/intel/issues/2722
  [i915#3301]: https://gitlab.freedesktop.org/drm/intel/issues/3301
  [i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103
  [i915#4130]: https://gitlab.freedesktop.org/drm/intel/issues/4130
  [i915#4136]: https://gitlab.freedesktop.org/drm/intel/issues/4136


Participating hosts (38 -> 33)
------------------------------

  Additional (1): fi-tgl-1115g4 
  Missing    (6): fi-ilk-m540 bat-dg1-6 fi-hsw-4200u fi-ctg-p8600 bat-jsl-2 fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_10604 -> Patchwork_21084

  CI-20190529: 20190529
  CI_DRM_10604: febea2142ec3332a63b3a0afaee75163207e7060 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6211: 7b275b3eb17ddf6e7c5b7b9ba359b7f5345a5311 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_21084: 3ed2dc6e4f12718aee4634ee3aa1263a75af0fda @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

3ed2dc6e4f12 dma-buf: nuke dma_resv_get_excl_unlocked
f7eb003ad813 drm/etnaviv: replace dma_resv_get_excl_unlocked
6851abceef53 drm/etnaviv: use new iterator in etnaviv_gem_describe
2be87d3cd2c8 drm/nouveau: use the new interator in nv50_wndw_prepare_fb v2
a6220344b108 drm/nouveau: use the new iterator in nouveau_fence_sync
8ad97e523761 drm: use new iterator in drm_gem_plane_helper_prepare_fb v2
82c3476a212c drm: use new iterator in drm_gem_fence_array_add_implicit v2
cd25bcfa7b95 drm/i915: use new cursor in intel_prepare_plane_fb v2
1b7252cd01a4 drm/i915: use new iterator in i915_gem_object_last_write_engine v2
a2269c6c31d4 drm/i915: use new iterator in i915_gem_object_wait_priority v2
c7c263d3cc20 drm/i915: use new iterator in i915_gem_object_wait_reservation v2
59ae4d4e8254 drm/i915: use the new iterator in i915_request_await_object v2
5fac734e6483 drm/i915: use the new iterator in i915_sw_fence_await_reservation v3
aa613855df42 drm/i915: use the new iterator in i915_gem_busy_ioctl
749c88a44413 drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
ab00e34c795a drm/radeon: use new iterator in radeon_sync_resv
58e24849df68 drm/msm: use new iterator in msm_gem_describe
511232e4b5ca drm/amdgpu: use new iterator in amdgpu_ttm_bo_eviction_valuable
bbdc9793743a drm/amdgpu: use the new iterator in amdgpu_sync_resv
05ffba354ecc drm/ttm: use the new iterator in ttm_bo_flush_all_fences
b93771325f9f dma-buf: use new iterator in dma_resv_test_signaled
600bac37a0f1 dma-buf: use new iterator in dma_resv_wait_timeout
3d471d53ec2b dma-buf: use new iterator in dma_resv_get_fences v2
61845118125a dma-buf: use new iterator in dma_resv_copy_fences
a903605ee308 dma-buf: add dma_resv_for_each_fence
d9db5bae923c dma-buf: add dma_resv_for_each_fence_unlocked v2

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/index.html


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 02/26] dma-buf: add dma_resv_for_each_fence
  2021-09-17 13:27     ` [Intel-gfx] " Daniel Vetter
@ 2021-09-17 14:30       ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:30 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 03:27:55PM +0200, Daniel Vetter wrote:
> On Fri, Sep 17, 2021 at 02:34:49PM +0200, Christian König wrote:
> > A simpler version of the iterator to be used when the dma_resv object is
> > locked.
> > 
> > Signed-off-by: Christian König <christian.koenig@amd.com>
> > ---
> >  drivers/dma-buf/dma-resv.c | 33 +++++++++++++++++++++++++++++++++
> >  include/linux/dma-resv.h   | 17 +++++++++++++++++
> >  2 files changed, 50 insertions(+)
> > 
> > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > index 3e77cad2c9d4..a3c79a99fb44 100644
> > --- a/drivers/dma-buf/dma-resv.c
> > +++ b/drivers/dma-buf/dma-resv.c
> > @@ -384,6 +384,39 @@ struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
> >  }
> >  EXPORT_SYMBOL_GPL(dma_resv_iter_walk_unlocked);
> >  
> > +/**
> > + * dma_resv_iter_walk - walk over fences in a dma_resv obj
> > + * @cursor: cursor to record the current position
> > + * @first: if we should start over
> > + *
> > + * Return all the fences in the dma_resv object while holding the
> > + * dma_resv::lock.

I think we should document here that the fence is valid for as long as the
dma_resv_lock is held, unlike the _unlocked version, where the fence stops
being valid either on the next call to iter_next() or on the call to
iter_end() that cleans up everything.

It might be good to clarify that for the unlocked version too, to be really
precise about when the fence is valid and when not.
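
To spell out the difference, a minimal usage sketch of the two variants as
posted in this series; use_fence() is just a stand-in for whatever the
caller does with the fence:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* locked: the fence stays valid for as long as the dma_resv
	 * lock is held, no local reference is taken */
	dma_resv_lock(obj, NULL);
	dma_resv_for_each_fence(&cursor, obj, true, fence)
		use_fence(fence);
	dma_resv_unlock(obj);

	/* unlocked: each fence carries a local reference which is
	 * dropped on the next walk or on dma_resv_iter_end(), so it
	 * must not be used beyond that point */
	rcu_read_lock();
	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		use_fence(fence);
	dma_resv_iter_end(&cursor);
	rcu_read_unlock();
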
-Daniel

> > + */
> > +struct dma_fence *dma_resv_iter_walk(struct dma_resv_iter *cursor, bool first)
> > +{
> > +	dma_resv_assert_held(cursor->obj);
> > +
> > +	cursor->is_first = first;
> > +	if (first) {
> > +		struct dma_fence *fence;
> > +
> > +		cursor->index = -1;
> > +		cursor->fences = dma_resv_shared_list(cursor->obj);
> > +
> > +		fence = dma_resv_excl_fence(cursor->obj);
> > +		if (fence)
> > +			return fence;
> > +	}
> 
> I think you can still use the shared iter_begin/end functions even with my
> suggestions for patch 1, but would mean changes here too.
> 
> > +
> > +	if (!cursor->all_fences || !cursor->fences ||
> > +	    ++cursor->index >= cursor->fences->shared_count)
> > +		return NULL;
> > +
> > +	return rcu_dereference_protected(cursor->fences->shared[cursor->index],
> > +					 dma_resv_held(cursor->obj));
> > +}
> > +EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
> > +
> >  /**
> >   * dma_resv_copy_fences - Copy all fences from src to dst.
> >   * @dst: the destination reservation object
> > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > index 693d16117153..8c968f8c9d33 100644
> > --- a/include/linux/dma-resv.h
> > +++ b/include/linux/dma-resv.h
> > @@ -179,6 +179,7 @@ struct dma_resv_iter {
> >  
> >  struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
> >  					      bool first);
> > +struct dma_fence *dma_resv_iter_walk(struct dma_resv_iter *cursor, bool first);
> >  
> >  /**
> >   * dma_resv_iter_begin - initialize a dma_resv_iter object
> > @@ -233,6 +234,22 @@ static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
> >  	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
> >  	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))
> >  
> > +/**
> > + * dma_resv_for_each_fence - fence iterator
> > + * @cursor: a struct dma_resv_iter pointer
> > + * @obj: a dma_resv object pointer
> > + * @all_fences: true if all fences should be returned
> > + * @fence: the current fence
> > + *
> > + * Iterate over the fences in a struct dma_resv object while holding the
> > + * dma_resv::lock. @all_fences controls if the shared fences are returned as
> 
> &dma_resv.lock is how you reference struct members in kerneldoc. I think
> you had this also in patch 1.
> 
> > + * well. The cursor initialisation is part of the iterator.
> 
> Please also link to the iter_begin/end functions here.
> 
> Aside from doc nits and obviously changes due to changes in patch 1 (if we
> do them), this looks good.
> -Daniel
> 
> > + */
> > +#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
> > +	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
> > +	     fence = dma_resv_iter_walk(cursor, true); fence;	\
> > +	     fence = dma_resv_iter_walk(cursor, false))
> > +
> >  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
> >  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
> >  
> > -- 
> > 2.25.1
> > 
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 03/26] dma-buf: use new iterator in dma_resv_copy_fences
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:35     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:35 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:50PM +0200, Christian König wrote:
> This makes the function much simpler since the complex
> retry logic is now handled else where.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 86 ++++++++++++++++----------------------
>  1 file changed, 35 insertions(+), 51 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index a3c79a99fb44..406150dea5e4 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -426,74 +426,58 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
>   */
>  int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  {
> -	struct dma_resv_list *src_list, *dst_list;
> -	struct dma_fence *old, *new;
> -	unsigned int i;
> +	struct dma_resv_iter cursor;
> +	struct dma_resv_list *list;
> +	struct dma_fence *f, *excl;
>  
>  	dma_resv_assert_held(dst);
>  
> -	rcu_read_lock();
> -	src_list = dma_resv_shared_list(src);
> -
> -retry:
> -	if (src_list) {
> -		unsigned int shared_count = src_list->shared_count;
> -
> -		rcu_read_unlock();
> +	list = NULL;
> +	excl = NULL;
>  
> -		dst_list = dma_resv_list_alloc(shared_count);
> -		if (!dst_list)
> -			return -ENOMEM;
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, src, true);
> +	dma_resv_for_each_fence_unlocked(&cursor, f) {
>  
> -		rcu_read_lock();
> -		src_list = dma_resv_shared_list(src);
> -		if (!src_list || src_list->shared_count > shared_count) {
> -			kfree(dst_list);
> -			goto retry;
> -		}
> +		if (cursor.is_first) {

Maybe have a wrapper for this, like dma_resv_iter_is_reset or is_first or
is_restart (my preference), with some nice docs that this returns true
every time we had to restart the sequence?
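
Something as small as this would probably do, if we go that way (name as
suggested above, purely a sketch at this point):

	static inline bool dma_resv_iter_is_restart(struct dma_resv_iter *cursor)
	{
		return cursor->is_first;
	}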

Otherwise I fully agree, this is so much better with all the hairy
restarting and get_rcu and test_bit shovelled away somewhere.

Either way (but I much prefer a wrapper for is_first):

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +			dma_resv_list_free(list);
> +			dma_fence_put(excl);
>  
> -		dst_list->shared_count = 0;
> -		for (i = 0; i < src_list->shared_count; ++i) {
> -			struct dma_fence __rcu **dst;
> -			struct dma_fence *fence;
> +			if (cursor.fences) {
> +				unsigned int cnt = cursor.fences->shared_count;
>  
> -			fence = rcu_dereference(src_list->shared[i]);
> -			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> -				     &fence->flags))
> -				continue;
> +				rcu_read_unlock();
> +				list = dma_resv_list_alloc(cnt);
> +				if (!list) {
> +					dma_resv_iter_end(&cursor);
> +					return -ENOMEM;
> +				}
>  
> -			if (!dma_fence_get_rcu(fence)) {
> -				dma_resv_list_free(dst_list);
> -				src_list = dma_resv_shared_list(src);
> -				goto retry;
> -			}
> +				list->shared_count = 0;
> +				rcu_read_lock();
>  
> -			if (dma_fence_is_signaled(fence)) {
> -				dma_fence_put(fence);
> -				continue;
> +			} else {
> +				list = NULL;
>  			}
> -
> -			dst = &dst_list->shared[dst_list->shared_count++];
> -			rcu_assign_pointer(*dst, fence);
> +			excl = NULL;
>  		}
> -	} else {
> -		dst_list = NULL;
> -	}
>  
> -	new = dma_fence_get_rcu_safe(&src->fence_excl);
> +		dma_fence_get(f);
> +		if (dma_resv_iter_is_exclusive(&cursor))
> +			excl = f;
> +		else
> +			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
> +	}
> +	dma_resv_iter_end(&cursor);
>  	rcu_read_unlock();
>  
> -	src_list = dma_resv_shared_list(dst);
> -	old = dma_resv_excl_fence(dst);
> -
>  	write_seqcount_begin(&dst->seq);
> -	/* write_seqcount_begin provides the necessary memory barrier */
> -	RCU_INIT_POINTER(dst->fence_excl, new);
> -	RCU_INIT_POINTER(dst->fence, dst_list);
> +	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
> +	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
>  	write_seqcount_end(&dst->seq);
>  
> -	dma_resv_list_free(src_list);
> -	dma_fence_put(old);
> +	dma_resv_list_free(list);
> +	dma_fence_put(excl);
>  
>  	return 0;
>  }
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 04/26] dma-buf: use new iterator in dma_resv_get_fences v2
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:39     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:39 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:51PM +0200, Christian König wrote:
> This makes the function much simpler since the complex
> retry logic is now handled elsewhere.
> 
> v2: use sizeof(void*) instead
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 112 +++++++++++++------------------------
>  1 file changed, 40 insertions(+), 72 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 406150dea5e4..9b90bd9ac018 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -487,99 +487,67 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
>   * dma_resv_get_fences - Get an object's shared and exclusive
>   * fences without update side lock held
>   * @obj: the reservation object
> - * @pfence_excl: the returned exclusive fence (or NULL)
> - * @pshared_count: the number of shared fences returned
> - * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
> + * @fence_excl: the returned exclusive fence (or NULL)
> + * @shared_count: the number of shared fences returned
> + * @shared: the array of shared fence ptrs returned (array is krealloc'd to
>   * the required size, and must be freed by caller)
>   *
>   * Retrieve all fences from the reservation object. If the pointer for the
>   * exclusive fence is not specified the fence is put into the array of the
>   * shared fences as well. Returns either zero or -ENOMEM.
>   */
> -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
> -			unsigned int *pshared_count,
> -			struct dma_fence ***pshared)
> +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
> +			unsigned int *shared_count, struct dma_fence ***shared)
>  {
> -	struct dma_fence **shared = NULL;
> -	struct dma_fence *fence_excl;
> -	unsigned int shared_count;
> -	int ret = 1;
> -
> -	do {
> -		struct dma_resv_list *fobj;
> -		unsigned int i, seq;
> -		size_t sz = 0;
> -
> -		shared_count = i = 0;
> -
> -		rcu_read_lock();
> -		seq = read_seqcount_begin(&obj->seq);
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>  
> -		fence_excl = dma_resv_excl_fence(obj);
> -		if (fence_excl && !dma_fence_get_rcu(fence_excl))
> -			goto unlock;
> +	*shared_count = 0;
> +	*shared = NULL;
>  
> -		fobj = dma_resv_shared_list(obj);
> -		if (fobj)
> -			sz += sizeof(*shared) * fobj->shared_max;
> +	if (fence_excl)
> +		*fence_excl = NULL;
>  
> -		if (!pfence_excl && fence_excl)
> -			sz += sizeof(*shared);
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, obj, true);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>  
> -		if (sz) {
> -			struct dma_fence **nshared;
> +		if (cursor.is_first) {

Yeah with the second one here I definitely think we need a
dma_resv_iter_is_restart() helper. I'm not sure whether that should have
is_first or restart_only semantics, but I guess gcc won't see through the
maze anyway, and hence initializing everything to NULL/0 is required.

Also is_first is a bit confusing naming imo. You mean "is this the first
fence" but readers could equally read this as "is this the first time
we're in the loop", which is rather confusing. Hence I think an
iter_is_restart() or maybe iter_restarted() naming is a notch clearer.


> +			unsigned int count;
>  
> -			nshared = krealloc(shared, sz,
> -					   GFP_NOWAIT | __GFP_NOWARN);
> -			if (!nshared) {
> -				rcu_read_unlock();
> +			while (*shared_count)
> +				dma_fence_put((*shared)[--(*shared_count)]);
>  
> -				dma_fence_put(fence_excl);
> -				fence_excl = NULL;
> +			if (fence_excl)
> +				dma_fence_put(*fence_excl);
>  
> -				nshared = krealloc(shared, sz, GFP_KERNEL);
> -				if (nshared) {
> -					shared = nshared;
> -					continue;
> -				}
> +			count = cursor.fences ? cursor.fences->shared_count : 0;
> +			count += fence_excl ? 0 : 1;
> +			rcu_read_unlock();
>  
> -				ret = -ENOMEM;
> -				break;
> -			}
> -			shared = nshared;
> -			shared_count = fobj ? fobj->shared_count : 0;
> -			for (i = 0; i < shared_count; ++i) {
> -				shared[i] = rcu_dereference(fobj->shared[i]);
> -				if (!dma_fence_get_rcu(shared[i]))
> -					break;
> +			/* Eventually re-allocate the array */
> +			*shared = krealloc_array(*shared, count,
> +						 sizeof(void *),
> +						 GFP_KERNEL);
> +			if (count && !*shared) {
> +				dma_resv_iter_end(&cursor);
> +				return -ENOMEM;
>  			}
> +			rcu_read_lock();
>  		}
>  
> -		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
> -			while (i--)
> -				dma_fence_put(shared[i]);
> -			dma_fence_put(fence_excl);
> -			goto unlock;
> -		}
> -
> -		ret = 0;
> -unlock:
> -		rcu_read_unlock();
> -	} while (ret);
> -
> -	if (pfence_excl)
> -		*pfence_excl = fence_excl;
> -	else if (fence_excl)
> -		shared[shared_count++] = fence_excl;
> +		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
> +			*fence_excl = fence;
> +		else
> +			(*shared)[(*shared_count)++] = fence;
>  
> -	if (!shared_count) {
> -		kfree(shared);
> -		shared = NULL;
> +		/* Don't drop the reference */
> +		fence = NULL;
>  	}
> +	dma_resv_iter_end(&cursor);
> +	rcu_read_unlock();
>  
> -	*pshared_count = shared_count;
> -	*pshared = shared;
> -	return ret;
> +	return 0;
>  }
>  EXPORT_SYMBOL_GPL(dma_resv_get_fences);

With the wrapper I'd like to have:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 05/26] dma-buf: use new iterator in dma_resv_wait_timeout
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:43     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:43 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:52PM +0200, Christian König wrote:
> This makes the function much simpler since the complex
> retry logic is now handled elsewhere.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 68 ++++++--------------------------------
>  1 file changed, 10 insertions(+), 58 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 9b90bd9ac018..c7db553ab115 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -569,74 +569,26 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>  			   unsigned long timeout)
>  {
>  	long ret = timeout ? timeout : 1;
> -	unsigned int seq, shared_count;
> +	struct dma_resv_iter cursor;
>  	struct dma_fence *fence;
> -	int i;
>  
> -retry:
> -	shared_count = 0;
> -	seq = read_seqcount_begin(&obj->seq);
>  	rcu_read_lock();

I missed this in my previous conversion reviews, but pls move the
rcu_read_lock into the iterator. That should simplify the flow in all of
these quite a bit more, and since the iter_next_unlocked grabs a full
reference for the iteration body we really don't need that protected by
rcu.

We can't toss rcu protection for dma_resv anytime soon (if ever), but we
can at least make it an implementation detail.
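
With the rcu handling pulled into iter_begin/walk/end, the wait loop here
would shrink to something like this (sketch only, not what the posted
patch does):

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0)
			break;
	}
	dma_resv_iter_end(&cursor);

	return ret;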

> -	i = -1;
> -
> -	fence = dma_resv_excl_fence(obj);
> -	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
> -		if (!dma_fence_get_rcu(fence))
> -			goto unlock_retry;
> +	dma_resv_iter_begin(&cursor, obj, wait_all);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		rcu_read_unlock();
>  
> -		if (dma_fence_is_signaled(fence)) {
> -			dma_fence_put(fence);
> -			fence = NULL;
> +		ret = dma_fence_wait_timeout(fence, intr, ret);
> +		if (ret <= 0) {
> +			dma_resv_iter_end(&cursor);
> +			return ret;
>  		}
>  
> -	} else {
> -		fence = NULL;
> -	}
> -
> -	if (wait_all) {
> -		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
> -
> -		if (fobj)
> -			shared_count = fobj->shared_count;
> -
> -		for (i = 0; !fence && i < shared_count; ++i) {
> -			struct dma_fence *lfence;
> -
> -			lfence = rcu_dereference(fobj->shared[i]);
> -			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> -				     &lfence->flags))
> -				continue;
> -
> -			if (!dma_fence_get_rcu(lfence))
> -				goto unlock_retry;
> -
> -			if (dma_fence_is_signaled(lfence)) {
> -				dma_fence_put(lfence);
> -				continue;
> -			}
> -
> -			fence = lfence;
> -			break;
> -		}
> +		rcu_read_lock();
>  	}
> -
> +	dma_resv_iter_end(&cursor);
>  	rcu_read_unlock();
> -	if (fence) {
> -		if (read_seqcount_retry(&obj->seq, seq)) {
> -			dma_fence_put(fence);
> -			goto retry;
> -		}
>  
> -		ret = dma_fence_wait_timeout(fence, intr, ret);
> -		dma_fence_put(fence);
> -		if (ret > 0 && wait_all && (i + 1 < shared_count))
> -			goto retry;
> -	}
>  	return ret;
> -
> -unlock_retry:
> -	rcu_read_unlock();
> -	goto retry;

I think we still have the same semantics, and it's so much tidier.

With the rcu_read_unlock stuff moved into the iterators (this also applies
to the previous two patches):

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  }
>  EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 06/26] dma-buf: use new iterator in dma_resv_test_signaled
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:45     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:45 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:53PM +0200, Christian König wrote:
> This makes the function much simpler since the complex
> retry logic is now handled elsewhere.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 56 ++++++--------------------------------
>  1 file changed, 9 insertions(+), 47 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index c7db553ab115..d8f428ddaedd 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -593,22 +593,6 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>  EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
>  
>  
> -static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
> -{
> -	struct dma_fence *fence, *lfence = passed_fence;
> -	int ret = 1;
> -
> -	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
> -		fence = dma_fence_get_rcu(lfence);
> -		if (!fence)
> -			return -1;
> -
> -		ret = !!dma_fence_is_signaled(fence);
> -		dma_fence_put(fence);
> -	}
> -	return ret;
> -}
> -
>  /**
>   * dma_resv_test_signaled - Test if a reservation object's fences have been
>   * signaled.
> @@ -625,43 +609,21 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>   */
>  bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
>  {
> +	struct dma_resv_iter cursor;
>  	struct dma_fence *fence;
> -	unsigned int seq;
> -	int ret;
>  
>  	rcu_read_lock();
> -retry:
> -	ret = true;
> -	seq = read_seqcount_begin(&obj->seq);
> -
> -	if (test_all) {
> -		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
> -		unsigned int i, shared_count;
> -
> -		shared_count = fobj ? fobj->shared_count : 0;
> -		for (i = 0; i < shared_count; ++i) {
> -			fence = rcu_dereference(fobj->shared[i]);
> -			ret = dma_resv_test_signaled_single(fence);
> -			if (ret < 0)
> -				goto retry;
> -			else if (!ret)
> -				break;
> +	dma_resv_iter_begin(&cursor, obj, test_all);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		if (!dma_fence_is_signaled(fence)) {

Should we be extremely clever and document that the iterator already
filters out signalled fences? We could rely on that here :-) Otoh we
don't want to do the full is_signalled check in that iterator, so this
makes sense.
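
If we do document it, something along these lines in the kerneldoc of the
unlocked walk might be enough (wording is only a suggestion):

 * Fences whose DMA_FENCE_FLAG_SIGNALED_BIT is already set are skipped, but
 * no full dma_fence_is_signaled() check is done, so a returned fence may
 * still signal at any point and callers needing an authoritative answer
 * have to test it themselves.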

Again rcu_read_lock into the iterators pls. With that:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +			dma_resv_iter_end(&cursor);
> +			rcu_read_unlock();
> +			return false;
>  		}
>  	}
> -
> -	fence = dma_resv_excl_fence(obj);
> -	if (ret && fence) {
> -		ret = dma_resv_test_signaled_single(fence);
> -		if (ret < 0)
> -			goto retry;
> -
> -	}
> -
> -	if (read_seqcount_retry(&obj->seq, seq))
> -		goto retry;
> -
> +	dma_resv_iter_end(&cursor);
>  	rcu_read_unlock();
> -	return ret;
> +	return true;
>  }
>  EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 07/26] drm/ttm: use the new iterator in ttm_bo_flush_all_fences
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:50     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:50 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:54PM +0200, Christian König wrote:
> This is probably a fix since we didn't even grab a reference to the
> fences.

It's rcu protected, and we only care about speeding things up a bit. I
think this won't have any impact on correctness, and I don't think any
driver could blow up?

But yeah maybe we should have a few asserts sprinkled into various
dma_fence functions to make sure we never call them when the refcount has
dropped to 0. That would catch stuff like this, and help lock down the
dma-fence api quite a bit.
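
Something like this in dma-fence.h could cover the common entry points (an
illustrative sketch of what such a guard could look like, the name is made
up):

	static inline void dma_fence_assert_alive(struct dma_fence *fence)
	{
		/* complain if the fence is used after its refcount hit zero */
		WARN_ON_ONCE(!kref_read(&fence->refcount));
	}
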
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/ttm/ttm_bo.c | 14 ++++----------
>  1 file changed, 4 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 3b22c0013dbf..7d804c0c69b0 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -269,22 +269,16 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
>  static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
>  {
>  	struct dma_resv *resv = &bo->base._resv;
> -	struct dma_resv_list *fobj;
> +	struct dma_resv_iter cursor;
>  	struct dma_fence *fence;
> -	int i;
>  
>  	rcu_read_lock();
> -	fobj = dma_resv_shared_list(resv);
> -	fence = dma_resv_excl_fence(resv);
> -	if (fence && !fence->ops->signaled)
> -		dma_fence_enable_sw_signaling(fence);
> -
> -	for (i = 0; fobj && i < fobj->shared_count; ++i) {
> -		fence = rcu_dereference(fobj->shared[i]);
> -
> +	dma_resv_iter_begin(&cursor, resv, true);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>  		if (!fence->ops->signaled)

Imo delete this check here. If that really matters for performance we
should have it in the core dma_fence function, not replicated all over the
place like this. Noodling around in dma_fence internals like this isn't
cool.
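
I.e. keeping the surrounding rcu_read_lock as-is, the loop here would then
simply be (sketch):

	dma_resv_iter_begin(&cursor, resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);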

With that removal included:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  			dma_fence_enable_sw_signaling(fence);
>  	}
> +	dma_resv_iter_end(&cursor);
>  	rcu_read_unlock();
>  }
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 07/26] drm/ttm: use the new iterator in ttm_bo_flush_all_fences
@ 2021-09-17 14:50     ` Daniel Vetter
  0 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:50 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:54PM +0200, Christian König wrote:
> This is probably a fix since we didn't even grabed a reference to the
> fences.

It's rcu protected, and we only care about speeding things up a bit. I
think this wont have any impact on correctness, and I don't think any
driver could blow up?

But yeah maybe we should have a few assert sprinkled into various
dma_fence functions to make sure we never call them when the refcount has
dropped to 0. That would catch stuff like this, and help lock down the
dma-fence api quite a bit.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/ttm/ttm_bo.c | 14 ++++----------
>  1 file changed, 4 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 3b22c0013dbf..7d804c0c69b0 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -269,22 +269,16 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
>  static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
>  {
>  	struct dma_resv *resv = &bo->base._resv;
> -	struct dma_resv_list *fobj;
> +	struct dma_resv_iter cursor;
>  	struct dma_fence *fence;
> -	int i;
>  
>  	rcu_read_lock();
> -	fobj = dma_resv_shared_list(resv);
> -	fence = dma_resv_excl_fence(resv);
> -	if (fence && !fence->ops->signaled)
> -		dma_fence_enable_sw_signaling(fence);
> -
> -	for (i = 0; fobj && i < fobj->shared_count; ++i) {
> -		fence = rcu_dereference(fobj->shared[i]);
> -
> +	dma_resv_iter_begin(&cursor, resv, true);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>  		if (!fence->ops->signaled)

Imo delete this check here. If that really matters for performance we
should have it in the core dma_fence function, not replicated all over the
place like this. Noodling around in dma_fence internals like this isn't
cool.
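
Rough sketch of what I mean, i.e. let the core helper short-circuit
already signaled fences so callers never have to peek at fence->ops
(assumed shape only, not a patch against any particular tree):

/* sketch: with the early return here, ttm_bo_flush_all_fences() can call
 * dma_fence_enable_sw_signaling() unconditionally from the iterator loop
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return;

	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
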

With that removal included:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>  			dma_fence_enable_sw_signaling(fence);
>  	}
> +	dma_resv_iter_end(&cursor);
>  	rcu_read_unlock();
>  }
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:52     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:52 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: use dma_resv_for_each_fence
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>  1 file changed, 6 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 042c16b5d54a..5bc5f775abe1 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>  					    struct drm_gem_object *obj,
>  					    bool write)
>  {
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>  	int ret;
> -	struct dma_fence **fences;
> -	unsigned int i, fence_count;
> -
> -	if (!write) {
> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> -
> -		return drm_sched_job_add_dependency(job, fence);
> -	}
> -
> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> -	if (ret || !fence_count)
> -		return ret;
>  
> -	for (i = 0; i < fence_count; i++) {
> -		ret = drm_sched_job_add_dependency(job, fences[i]);
> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {

Hah, I got tricked reading your 2nd patch: the iter_begin() is included
and we don't need iter_end() for this. Please correct my comments for
patch 2 :-)
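
To spell out why that's fine: with the locked iterator nothing holds
extra fence references, so an early return out of the loop needs no
iter_end() style cleanup. Quick illustration (untested sketch, the helper
name is made up, and this assumes that's really how patch 2 implements it):

/* hypothetical helper, caller must hold the reservation lock */
static bool all_fences_signaled(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, true, fence) {
		if (!dma_fence_is_signaled(fence))
			return false;
	}
	return true;
}
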

On this as-is:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +		ret = drm_sched_job_add_dependency(job, fence);
>  		if (ret)
> -			break;
> +			return ret;
>  	}
> -
> -	for (; i < fence_count; i++)
> -		dma_fence_put(fences[i]);
> -	kfree(fences);
> -	return ret;
> +	return 0;
>  }
>  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
@ 2021-09-17 14:52     ` Daniel Vetter
  0 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:52 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: use dma_resv_for_each_fence
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>  1 file changed, 6 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 042c16b5d54a..5bc5f775abe1 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>  					    struct drm_gem_object *obj,
>  					    bool write)
>  {
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>  	int ret;
> -	struct dma_fence **fences;
> -	unsigned int i, fence_count;
> -
> -	if (!write) {
> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> -
> -		return drm_sched_job_add_dependency(job, fence);
> -	}
> -
> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> -	if (ret || !fence_count)
> -		return ret;
>  
> -	for (i = 0; i < fence_count; i++) {
> -		ret = drm_sched_job_add_dependency(job, fences[i]);
> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {

Hah, I got tricked reading your 2nd patch: the iter_begin() is included
and we don't need iter_end() for this. Please correct my comments for
patch 2 :-)

On this as-is:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> +		ret = drm_sched_job_add_dependency(job, fence);
>  		if (ret)
> -			break;
> +			return ret;
>  	}
> -
> -	for (; i < fence_count; i++)
> -		dma_fence_put(fences[i]);
> -	kfree(fences);
> -	return ret;
> +	return 0;
>  }
>  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>  
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 20/26] drm: use new iterator in drm_gem_fence_array_add_implicit v2
  2021-09-17 12:35   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:53     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:53 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:35:07PM +0200, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: add missing rcu_read_lock()/unlock()
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

This will be gone as soon as I can land the last conversion patches. Plus
it's always called with dma_resv_lock held.

I wouldn't bother tbh.
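
If we do keep the conversion around until then, imo it should just use
the locked iterator directly. Untested sketch, assuming patch 2's
dma_resv_for_each_fence() and that drm_gem_fence_array_add() consumes a
fence reference:

int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/* the array add consumes the extra reference we take here */
		ret = drm_gem_fence_array_add(fence_array,
					      dma_fence_get(fence));
		if (ret)
			break;
	}
	return ret;
}
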
-Daniel

> ---
>  drivers/gpu/drm/drm_gem.c | 34 ++++++++++++----------------------
>  1 file changed, 12 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 09c820045859..c2c41b668f40 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -1340,31 +1340,21 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>  				     struct drm_gem_object *obj,
>  				     bool write)
>  {
> -	int ret;
> -	struct dma_fence **fences;
> -	unsigned int i, fence_count;
> -
> -	if (!write) {
> -		struct dma_fence *fence =
> -			dma_resv_get_excl_unlocked(obj->resv);
> -
> -		return drm_gem_fence_array_add(fence_array, fence);
> -	}
> -
> -	ret = dma_resv_get_fences(obj->resv, NULL,
> -						&fence_count, &fences);
> -	if (ret || !fence_count)
> -		return ret;
> -
> -	for (i = 0; i < fence_count; i++) {
> -		ret = drm_gem_fence_array_add(fence_array, fences[i]);
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
> +	int ret = 0;
> +
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, obj->resv, write);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		rcu_read_unlock();
> +		ret = drm_gem_fence_array_add(fence_array, fence);
> +		rcu_read_lock();
>  		if (ret)
>  			break;
>  	}
> -
> -	for (; i < fence_count; i++)
> -		dma_fence_put(fences[i]);
> -	kfree(fences);
> +	dma_resv_iter_end(&cursor);
> +	rcu_read_unlock();
>  	return ret;
>  }
>  EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 20/26] drm: use new iterator in drm_gem_fence_array_add_implicit v2
@ 2021-09-17 14:53     ` Daniel Vetter
  0 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:53 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:35:07PM +0200, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: add missing rcu_read_lock()/unlock()
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

This will be gone as soon as I can land the last conversion patches. Plus
it's always called with dma_resv_lock held.

I wouldn't bother tbh.
-Daniel

> ---
>  drivers/gpu/drm/drm_gem.c | 34 ++++++++++++----------------------
>  1 file changed, 12 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 09c820045859..c2c41b668f40 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -1340,31 +1340,21 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>  				     struct drm_gem_object *obj,
>  				     bool write)
>  {
> -	int ret;
> -	struct dma_fence **fences;
> -	unsigned int i, fence_count;
> -
> -	if (!write) {
> -		struct dma_fence *fence =
> -			dma_resv_get_excl_unlocked(obj->resv);
> -
> -		return drm_gem_fence_array_add(fence_array, fence);
> -	}
> -
> -	ret = dma_resv_get_fences(obj->resv, NULL,
> -						&fence_count, &fences);
> -	if (ret || !fence_count)
> -		return ret;
> -
> -	for (i = 0; i < fence_count; i++) {
> -		ret = drm_gem_fence_array_add(fence_array, fences[i]);
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
> +	int ret = 0;
> +
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, obj->resv, write);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		rcu_read_unlock();
> +		ret = drm_gem_fence_array_add(fence_array, fence);
> +		rcu_read_lock();
>  		if (ret)
>  			break;
>  	}
> -
> -	for (; i < fence_count; i++)
> -		dma_fence_put(fences[i]);
> -	kfree(fences);
> +	dma_resv_iter_end(&cursor);
> +	rcu_read_unlock();
>  	return ret;
>  }
>  EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 21/26] drm: use new iterator in drm_gem_plane_helper_prepare_fb v2
  2021-09-17 12:35   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:55     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:55 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:35:08PM +0200, Christian König wrote:
> Makes the handling a bit more complex, but avoids the use of
> dma_resv_get_excl_unlocked().
> 
> v2: add missing rcu_read_lock()/unlock()
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/drm_gem_atomic_helper.c | 14 ++++++++++++--
>  1 file changed, 12 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
> index e570398abd78..d8f9c6432544 100644
> --- a/drivers/gpu/drm/drm_gem_atomic_helper.c
> +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
> @@ -143,6 +143,7 @@
>   */
>  int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  {
> +	struct dma_resv_iter cursor;
>  	struct drm_gem_object *obj;
>  	struct dma_fence *fence;
>  
> @@ -150,9 +151,18 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
>  		return 0;
>  
>  	obj = drm_gem_fb_get_obj(state->fb, 0);
> -	fence = dma_resv_get_excl_unlocked(obj->resv);
> -	drm_atomic_set_fence_for_plane(state, fence);
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, obj->resv, false);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		rcu_read_unlock();
> +		/* TODO: We only use the first write fence here */
> +		drm_atomic_set_fence_for_plane(state, fence);

Yeah, I wonder whether we should/need to collate them all together. But I
guess whoever hits that first with their funny multi-plane YUV or
whatever gets to do that. Or I'm not clear on what exactly your TODO here
means?

> +		return 0;
> +	}
> +	dma_resv_iter_end(&cursor);
> +	rcu_read_unlock();

Imo we should do a full dma_resv_lock here. The atomic helpers are
designed to allow this, and it simplifies things. Also it really doesn't
matter for atomic; we should be able to do 60fps * a few planes easily :-)
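
Rough sketch of that variant (untested, assumes patch 2's locked
iterator and that drm_atomic_set_fence_for_plane() takes over the fence
reference):

int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct dma_resv_iter cursor;
	struct drm_gem_object *obj;
	struct dma_fence *fence;
	int ret;

	if (!state->fb)
		return 0;

	obj = drm_gem_fb_get_obj(state->fb, 0);
	ret = dma_resv_lock(obj->resv, NULL);
	if (ret)
		return ret;

	dma_resv_for_each_fence(&cursor, obj->resv, false, fence) {
		/* TODO: still only the first write fence is used */
		drm_atomic_set_fence_for_plane(state, dma_fence_get(fence));
		dma_resv_unlock(obj->resv);
		return 0;
	}
	dma_resv_unlock(obj->resv);

	drm_atomic_set_fence_for_plane(state, NULL);
	return 0;
}
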
-Daniel

>  
> +	drm_atomic_set_fence_for_plane(state, NULL);
>  	return 0;
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 21/26] drm: use new iterator in drm_gem_plane_helper_prepare_fb v2
@ 2021-09-17 14:55     ` Daniel Vetter
  0 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:55 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:35:08PM +0200, Christian König wrote:
> Makes the handling a bit more complex, but avoids the use of
> dma_resv_get_excl_unlocked().
> 
> v2: add missing rcu_read_lock()/unlock()
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/drm_gem_atomic_helper.c | 14 ++++++++++++--
>  1 file changed, 12 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
> index e570398abd78..d8f9c6432544 100644
> --- a/drivers/gpu/drm/drm_gem_atomic_helper.c
> +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
> @@ -143,6 +143,7 @@
>   */
>  int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>  {
> +	struct dma_resv_iter cursor;
>  	struct drm_gem_object *obj;
>  	struct dma_fence *fence;
>  
> @@ -150,9 +151,18 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
>  		return 0;
>  
>  	obj = drm_gem_fb_get_obj(state->fb, 0);
> -	fence = dma_resv_get_excl_unlocked(obj->resv);
> -	drm_atomic_set_fence_for_plane(state, fence);
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, obj->resv, false);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		rcu_read_unlock();
> +		/* TODO: We only use the first write fence here */
> +		drm_atomic_set_fence_for_plane(state, fence);

Yeah, I wonder whether we should/need to collate them all together. But I
guess whoever hits that first with their funny multi-plane YUV or
whatever gets to do that. Or I'm not clear on what exactly your TODO here
means?

> +		return 0;
> +	}
> +	dma_resv_iter_end(&cursor);
> +	rcu_read_unlock();

Imo we should do a full dma_resv_lock here. The atomic helpers are
designed to allow this, and it simplifies things. Also it really doesn't
matter for atomic; we should be able to do 60fps * a few planes easily :-)
-Daniel

>  
> +	drm_atomic_set_fence_for_plane(state, NULL);
>  	return 0;
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 26/26] dma-buf: nuke dma_resv_get_excl_unlocked
  2021-09-17 12:35   ` [Intel-gfx] " Christian König
@ 2021-09-17 14:56     ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:56 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:35:13PM +0200, Christian König wrote:
> Eureka, that's finally not used any more.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
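
For anyone following along, what the converted callers do instead now
looks roughly like this (sketch based on the earlier patches in this
series; get_first_fence_unlocked() is a made-up name for illustration):

/* grab the first unsignaled fence (the exclusive one, if present)
 * without holding the reservation lock
 */
static struct dma_fence *get_first_fence_unlocked(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence, *ret = NULL;

	rcu_read_lock();
	dma_resv_iter_begin(&cursor, resv, false);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* take our own reference, iter_end drops the cursor's */
		ret = dma_fence_get(fence);
		break;
	}
	dma_resv_iter_end(&cursor);
	rcu_read_unlock();
	return ret;
}
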

> ---
>  include/linux/dma-resv.h | 26 --------------------------
>  1 file changed, 26 deletions(-)
> 
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 8c968f8c9d33..f42ca254acb5 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -423,32 +423,6 @@ dma_resv_excl_fence(struct dma_resv *obj)
>  	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
>  }
>  
> -/**
> - * dma_resv_get_excl_unlocked - get the reservation object's
> - * exclusive fence, without lock held.
> - * @obj: the reservation object
> - *
> - * If there is an exclusive fence, this atomically increments it's
> - * reference count and returns it.
> - *
> - * RETURNS
> - * The exclusive fence or NULL if none
> - */
> -static inline struct dma_fence *
> -dma_resv_get_excl_unlocked(struct dma_resv *obj)
> -{
> -	struct dma_fence *fence;
> -
> -	if (!rcu_access_pointer(obj->fence_excl))
> -		return NULL;
> -
> -	rcu_read_lock();
> -	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
> -	rcu_read_unlock();
> -
> -	return fence;
> -}
> -
>  /**
>   * dma_resv_shared_list - get the reservation object's shared fence list
>   * @obj: the reservation object
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 26/26] dma-buf: nuke dma_resv_get_excl_unlocked
@ 2021-09-17 14:56     ` Daniel Vetter
  0 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-09-17 14:56 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

On Fri, Sep 17, 2021 at 02:35:13PM +0200, Christian König wrote:
> Eureka, that's finally not used any more.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

> ---
>  include/linux/dma-resv.h | 26 --------------------------
>  1 file changed, 26 deletions(-)
> 
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 8c968f8c9d33..f42ca254acb5 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -423,32 +423,6 @@ dma_resv_excl_fence(struct dma_resv *obj)
>  	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
>  }
>  
> -/**
> - * dma_resv_get_excl_unlocked - get the reservation object's
> - * exclusive fence, without lock held.
> - * @obj: the reservation object
> - *
> - * If there is an exclusive fence, this atomically increments it's
> - * reference count and returns it.
> - *
> - * RETURNS
> - * The exclusive fence or NULL if none
> - */
> -static inline struct dma_fence *
> -dma_resv_get_excl_unlocked(struct dma_resv *obj)
> -{
> -	struct dma_fence *fence;
> -
> -	if (!rcu_access_pointer(obj->fence_excl))
> -		return NULL;
> -
> -	rcu_read_lock();
> -	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
> -	rcu_read_unlock();
> -
> -	return fence;
> -}
> -
>  /**
>   * dma_resv_shared_list - get the reservation object's shared fence list
>   * @obj: the reservation object
> -- 
> 2.25.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-17 12:34 ` [Intel-gfx] " Christian König
                   ` (28 preceding siblings ...)
  (?)
@ 2021-09-17 15:43 ` Patchwork
  -1 siblings, 0 replies; 115+ messages in thread
From: Patchwork @ 2021-09-17 15:43 UTC (permalink / raw)
  To: Christian König; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 30300 bytes --]

== Series Details ==

Series: series starting with [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
URL   : https://patchwork.freedesktop.org/series/94805/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_10604_full -> Patchwork_21084_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_21084_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_21084_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_21084_full:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_pm_rpm@gem-mmap-type@wb:
    - shard-iclb:         NOTRUN -> [FAIL][1] +3 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb6/igt@i915_pm_rpm@gem-mmap-type@wb.html

  
Known issues
------------

  Here are the changes found in Patchwork_21084_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_sseu@invalid-args:
    - shard-tglb:         NOTRUN -> [SKIP][2] ([i915#280])
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb3/igt@gem_ctx_sseu@invalid-args.html

  * igt@gem_eio@unwedge-stress:
    - shard-tglb:         [PASS][3] -> [TIMEOUT][4] ([i915#2369] / [i915#3063] / [i915#3648])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb7/igt@gem_eio@unwedge-stress.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@gem_eio@unwedge-stress.html

  * igt@gem_exec_fair@basic-none-rrul@rcs0:
    - shard-tglb:         NOTRUN -> [FAIL][5] ([i915#2842])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@gem_exec_fair@basic-none-rrul@rcs0.html

  * igt@gem_exec_fair@basic-none-solo@rcs0:
    - shard-kbl:          NOTRUN -> [FAIL][6] ([i915#2842])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@gem_exec_fair@basic-none-solo@rcs0.html

  * igt@gem_exec_fair@basic-none@vcs1:
    - shard-iclb:         NOTRUN -> [FAIL][7] ([i915#2842])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb4/igt@gem_exec_fair@basic-none@vcs1.html

  * igt@gem_exec_fair@basic-pace@vcs1:
    - shard-tglb:         [PASS][8] -> [FAIL][9] ([i915#2842])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb8/igt@gem_exec_fair@basic-pace@vcs1.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb3/igt@gem_exec_fair@basic-pace@vcs1.html

  * igt@gem_exec_params@secure-non-root:
    - shard-tglb:         NOTRUN -> [SKIP][10] ([fdo#112283])
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb3/igt@gem_exec_params@secure-non-root.html

  * igt@gem_pread@exhaustion:
    - shard-kbl:          NOTRUN -> [WARN][11] ([i915#2658])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@gem_pread@exhaustion.html

  * igt@gem_userptr_blits@input-checking:
    - shard-tglb:         NOTRUN -> [DMESG-WARN][12] ([i915#3002])
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb3/igt@gem_userptr_blits@input-checking.html

  * igt@gem_userptr_blits@unsync-unmap-cycles:
    - shard-tglb:         NOTRUN -> [SKIP][13] ([i915#3297])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@gem_userptr_blits@unsync-unmap-cycles.html

  * igt@gen9_exec_parse@allowed-single:
    - shard-skl:          [PASS][14] -> [DMESG-WARN][15] ([i915#1436] / [i915#716])
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl1/igt@gen9_exec_parse@allowed-single.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl1/igt@gen9_exec_parse@allowed-single.html

  * igt@gen9_exec_parse@bb-start-far:
    - shard-tglb:         NOTRUN -> [SKIP][16] ([i915#2856])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@gen9_exec_parse@bb-start-far.html

  * igt@i915_pm_rpm@system-suspend-execbuf:
    - shard-tglb:         [PASS][17] -> [INCOMPLETE][18] ([i915#2411] / [i915#456] / [i915#750])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb3/igt@i915_pm_rpm@system-suspend-execbuf.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb7/igt@i915_pm_rpm@system-suspend-execbuf.html

  * igt@kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-hflip:
    - shard-kbl:          NOTRUN -> [SKIP][19] ([fdo#109271] / [i915#3777])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-hflip.html

  * igt@kms_big_fb@y-tiled-8bpp-rotate-270:
    - shard-tglb:         NOTRUN -> [SKIP][20] ([fdo#111614])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_big_fb@y-tiled-8bpp-rotate-270.html

  * igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip:
    - shard-kbl:          NOTRUN -> [SKIP][21] ([fdo#109271]) +78 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip.html

  * igt@kms_ccs@pipe-a-missing-ccs-buffer-y_tiled_gen12_mc_ccs:
    - shard-iclb:         NOTRUN -> [SKIP][22] ([fdo#109278] / [i915#3886])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb5/igt@kms_ccs@pipe-a-missing-ccs-buffer-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-c-random-ccs-data-y_tiled_gen12_rc_ccs_cc:
    - shard-kbl:          NOTRUN -> [SKIP][23] ([fdo#109271] / [i915#3886]) +5 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@kms_ccs@pipe-c-random-ccs-data-y_tiled_gen12_rc_ccs_cc.html

  * igt@kms_ccs@pipe-d-missing-ccs-buffer-yf_tiled_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][24] ([i915#3689]) +5 similar issues
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_ccs@pipe-d-missing-ccs-buffer-yf_tiled_ccs.html

  * igt@kms_chamelium@dp-hpd-storm:
    - shard-skl:          NOTRUN -> [SKIP][25] ([fdo#109271] / [fdo#111827])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl6/igt@kms_chamelium@dp-hpd-storm.html

  * igt@kms_chamelium@vga-frame-dump:
    - shard-tglb:         NOTRUN -> [SKIP][26] ([fdo#109284] / [fdo#111827]) +4 similar issues
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_chamelium@vga-frame-dump.html

  * igt@kms_color_chamelium@pipe-c-ctm-limited-range:
    - shard-kbl:          NOTRUN -> [SKIP][27] ([fdo#109271] / [fdo#111827]) +8 similar issues
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@kms_color_chamelium@pipe-c-ctm-limited-range.html

  * igt@kms_content_protection@lic:
    - shard-tglb:         NOTRUN -> [SKIP][28] ([fdo#111828])
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_content_protection@lic.html

  * igt@kms_cursor_crc@pipe-a-cursor-512x512-onscreen:
    - shard-iclb:         NOTRUN -> [SKIP][29] ([fdo#109278] / [fdo#109279]) +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb5/igt@kms_cursor_crc@pipe-a-cursor-512x512-onscreen.html

  * igt@kms_cursor_crc@pipe-a-cursor-max-size-rapid-movement:
    - shard-tglb:         NOTRUN -> [SKIP][30] ([i915#3359]) +2 similar issues
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb5/igt@kms_cursor_crc@pipe-a-cursor-max-size-rapid-movement.html

  * igt@kms_cursor_crc@pipe-b-cursor-32x32-onscreen:
    - shard-tglb:         NOTRUN -> [SKIP][31] ([i915#3319])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_cursor_crc@pipe-b-cursor-32x32-onscreen.html

  * igt@kms_cursor_crc@pipe-d-cursor-64x21-sliding:
    - shard-iclb:         NOTRUN -> [SKIP][32] ([fdo#109278]) +1 similar issue
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb5/igt@kms_cursor_crc@pipe-d-cursor-64x21-sliding.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
    - shard-tglb:         NOTRUN -> [SKIP][33] ([i915#4103])
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html

  * igt@kms_cursor_legacy@cursora-vs-flipb-atomic:
    - shard-iclb:         NOTRUN -> [SKIP][34] ([fdo#109274] / [fdo#109278])
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb5/igt@kms_cursor_legacy@cursora-vs-flipb-atomic.html

  * igt@kms_cursor_legacy@flip-vs-cursor-legacy:
    - shard-skl:          [PASS][35] -> [FAIL][36] ([i915#2346])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl5/igt@kms_cursor_legacy@flip-vs-cursor-legacy.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-legacy.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1:
    - shard-skl:          [PASS][37] -> [FAIL][38] ([i915#79])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl2/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1.html

  * igt@kms_flip@flip-vs-suspend-interruptible@a-dp1:
    - shard-kbl:          [PASS][39] -> [DMESG-WARN][40] ([i915#180]) +6 similar issues
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl2/igt@kms_flip@flip-vs-suspend-interruptible@a-dp1.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl7/igt@kms_flip@flip-vs-suspend-interruptible@a-dp1.html

  * igt@kms_flip@flip-vs-suspend-interruptible@a-edp1:
    - shard-tglb:         [PASS][41] -> [INCOMPLETE][42] ([i915#2411] / [i915#456]) +1 similar issue
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb1/igt@kms_flip@flip-vs-suspend-interruptible@a-edp1.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb7/igt@kms_flip@flip-vs-suspend-interruptible@a-edp1.html

  * igt@kms_flip@flip-vs-suspend@a-edp1:
    - shard-tglb:         [PASS][43] -> [INCOMPLETE][44] ([i915#456])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb6/igt@kms_flip@flip-vs-suspend@a-edp1.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb7/igt@kms_flip@flip-vs-suspend@a-edp1.html

  * igt@kms_flip@plain-flip-fb-recreate@b-edp1:
    - shard-tglb:         [PASS][45] -> [FAIL][46] ([i915#2122])
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb5/igt@kms_flip@plain-flip-fb-recreate@b-edp1.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_flip@plain-flip-fb-recreate@b-edp1.html

  * igt@kms_flip@plain-flip-fb-recreate@c-edp1:
    - shard-skl:          [PASS][47] -> [FAIL][48] ([i915#2122])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl3/igt@kms_flip@plain-flip-fb-recreate@c-edp1.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl6/igt@kms_flip@plain-flip-fb-recreate@c-edp1.html

  * igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-onoff:
    - shard-skl:          NOTRUN -> [SKIP][49] ([fdo#109271]) +7 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl6/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-onoff.html

  * igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-blt:
    - shard-iclb:         NOTRUN -> [SKIP][50] ([fdo#109280])
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb5/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-blt.html

  * igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-gtt:
    - shard-tglb:         NOTRUN -> [SKIP][51] ([fdo#111825]) +9 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb3/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-gtt.html

  * igt@kms_hdr@bpc-switch-dpms:
    - shard-skl:          [PASS][52] -> [FAIL][53] ([i915#1188])
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl10/igt@kms_hdr@bpc-switch-dpms.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl4/igt@kms_hdr@bpc-switch-dpms.html

  * igt@kms_plane_alpha_blend@pipe-c-constant-alpha-max:
    - shard-kbl:          NOTRUN -> [FAIL][54] ([fdo#108145] / [i915#265]) +1 similar issue
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-max.html

  * igt@kms_psr2_sf@cursor-plane-update-sf:
    - shard-tglb:         NOTRUN -> [SKIP][55] ([i915#2920])
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_psr2_sf@cursor-plane-update-sf.html

  * igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-2:
    - shard-kbl:          NOTRUN -> [SKIP][56] ([fdo#109271] / [i915#658])
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl4/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-2.html

  * igt@kms_psr@psr2_cursor_mmap_gtt:
    - shard-tglb:         NOTRUN -> [FAIL][57] ([i915#132] / [i915#3467]) +1 similar issue
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_psr@psr2_cursor_mmap_gtt.html

  * igt@kms_psr@psr2_sprite_plane_move:
    - shard-iclb:         [PASS][58] -> [SKIP][59] ([fdo#109441]) +2 similar issues
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb5/igt@kms_psr@psr2_sprite_plane_move.html

  * igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0:
    - shard-tglb:         NOTRUN -> [SKIP][60] ([fdo#111615]) +2 similar issues
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html

  * igt@kms_sysfs_edid_timing:
    - shard-kbl:          NOTRUN -> [FAIL][61] ([IGT#2])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl4/igt@kms_sysfs_edid_timing.html

  * igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend:
    - shard-skl:          [PASS][62] -> [INCOMPLETE][63] ([i915#198])
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl8/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl3/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html

  * igt@kms_vrr@flip-dpms:
    - shard-tglb:         NOTRUN -> [SKIP][64] ([fdo#109502])
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb3/igt@kms_vrr@flip-dpms.html

  * igt@nouveau_crc@pipe-a-source-rg:
    - shard-iclb:         NOTRUN -> [SKIP][65] ([i915#2530])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb6/igt@nouveau_crc@pipe-a-source-rg.html

  * igt@nouveau_crc@pipe-c-ctx-flip-skip-current-frame:
    - shard-tglb:         NOTRUN -> [SKIP][66] ([i915#2530])
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@nouveau_crc@pipe-c-ctx-flip-skip-current-frame.html

  * igt@prime_nv_pcopy@test3_4:
    - shard-tglb:         NOTRUN -> [SKIP][67] ([fdo#109291])
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@prime_nv_pcopy@test3_4.html

  * igt@sysfs_clients@fair-1:
    - shard-tglb:         NOTRUN -> [SKIP][68] ([i915#2994])
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@sysfs_clients@fair-1.html

  
#### Possible fixes ####

  * igt@core_hotunplug@unbind-rebind:
    - shard-iclb:         [INCOMPLETE][69] ([i915#4130]) -> [PASS][70]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb4/igt@core_hotunplug@unbind-rebind.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb6/igt@core_hotunplug@unbind-rebind.html

  * igt@gem_ctx_isolation@preservation-s3@bcs0:
    - shard-tglb:         [INCOMPLETE][71] -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb7/igt@gem_ctx_isolation@preservation-s3@bcs0.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb2/igt@gem_ctx_isolation@preservation-s3@bcs0.html

  * igt@gem_exec_fair@basic-pace-share@rcs0:
    - shard-tglb:         [FAIL][73] ([i915#2842]) -> [PASS][74] +2 similar issues
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb6/igt@gem_exec_fair@basic-pace-share@rcs0.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb1/igt@gem_exec_fair@basic-pace-share@rcs0.html

  * igt@gem_exec_fair@basic-pace@vecs0:
    - shard-kbl:          [FAIL][75] ([i915#2842]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl1/igt@gem_exec_fair@basic-pace@vecs0.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl3/igt@gem_exec_fair@basic-pace@vecs0.html

  * igt@i915_suspend@debugfs-reader:
    - shard-tglb:         [INCOMPLETE][77] ([i915#456]) -> [PASS][78] +1 similar issue
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-tglb8/igt@i915_suspend@debugfs-reader.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-tglb6/igt@i915_suspend@debugfs-reader.html

  * igt@kms_color@pipe-b-ctm-0-75:
    - shard-skl:          [DMESG-WARN][79] ([i915#1982]) -> [PASS][80] +1 similar issue
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl4/igt@kms_color@pipe-b-ctm-0-75.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl2/igt@kms_color@pipe-b-ctm-0-75.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size:
    - shard-skl:          [FAIL][81] ([i915#2346] / [i915#533]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl8/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl7/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@a-edp1:
    - shard-skl:          [FAIL][83] ([i915#79]) -> [PASS][84]
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl2/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-edp1.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-edp1.html

  * igt@kms_flip@flip-vs-suspend@c-dp1:
    - shard-kbl:          [DMESG-WARN][85] ([i915#180]) -> [PASS][86] +6 similar issues
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl4/igt@kms_flip@flip-vs-suspend@c-dp1.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl1/igt@kms_flip@flip-vs-suspend@c-dp1.html

  * igt@kms_flip@plain-flip-ts-check-interruptible@a-edp1:
    - shard-skl:          [FAIL][87] ([i915#2122]) -> [PASS][88]
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl1/igt@kms_flip@plain-flip-ts-check-interruptible@a-edp1.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl10/igt@kms_flip@plain-flip-ts-check-interruptible@a-edp1.html

  * igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile:
    - shard-iclb:         [SKIP][89] ([i915#3701]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb2/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb5/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile.html

  * igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min:
    - shard-skl:          [FAIL][91] ([fdo#108145] / [i915#265]) -> [PASS][92]
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl8/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl7/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html

  * igt@kms_psr@psr2_primary_mmap_gtt:
    - shard-iclb:         [SKIP][93] ([fdo#109441]) -> [PASS][94] +1 similar issue
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb3/igt@kms_psr@psr2_primary_mmap_gtt.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb2/igt@kms_psr@psr2_primary_mmap_gtt.html

  * igt@perf@polling-parameterized:
    - shard-skl:          [FAIL][95] ([i915#1542]) -> [PASS][96]
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl8/igt@perf@polling-parameterized.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl5/igt@perf@polling-parameterized.html

  * igt@perf@polling-small-buf:
    - shard-skl:          [FAIL][97] ([i915#1722]) -> [PASS][98]
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl1/igt@perf@polling-small-buf.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl10/igt@perf@polling-small-buf.html

  
#### Warnings ####

  * igt@kms_psr2_sf@plane-move-sf-dmg-area-0:
    - shard-iclb:         [SKIP][99] ([i915#2920]) -> [SKIP][100] ([i915#658]) +1 similar issue
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb2/igt@kms_psr2_sf@plane-move-sf-dmg-area-0.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb8/igt@kms_psr2_sf@plane-move-sf-dmg-area-0.html

  * igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-1:
    - shard-iclb:         [SKIP][101] ([i915#658]) -> [SKIP][102] ([i915#2920]) +3 similar issues
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb6/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-1.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb2/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-1.html

  * igt@runner@aborted:
    - shard-kbl:          ([FAIL][103], [FAIL][104], [FAIL][105], [FAIL][106], [FAIL][107], [FAIL][108], [FAIL][109], [FAIL][110], [FAIL][111], [FAIL][112], [FAIL][113], [FAIL][114], [FAIL][115]) ([fdo#109271] / [i915#1436] / [i915#180] / [i915#1814] / [i915#3002] / [i915#3363] / [i915#602]) -> ([FAIL][116], [FAIL][117], [FAIL][118], [FAIL][119], [FAIL][120], [FAIL][121], [FAIL][122], [FAIL][123], [FAIL][124], [FAIL][125], [FAIL][126], [FAIL][127]) ([i915#1436] / [i915#180] / [i915#1814] / [i915#3002] / [i915#3363] / [i915#602])
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl1/igt@runner@aborted.html
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl1/igt@runner@aborted.html
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl3/igt@runner@aborted.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl1/igt@runner@aborted.html
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl7/igt@runner@aborted.html
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl6/igt@runner@aborted.html
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl7/igt@runner@aborted.html
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl4/igt@runner@aborted.html
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl6/igt@runner@aborted.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl4/igt@runner@aborted.html
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl6/igt@runner@aborted.html
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl7/igt@runner@aborted.html
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-kbl6/igt@runner@aborted.html
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl6/igt@runner@aborted.html
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl7/igt@runner@aborted.html
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl7/igt@runner@aborted.html
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl6/igt@runner@aborted.html
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl1/igt@runner@aborted.html
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl6/igt@runner@aborted.html
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl7/igt@runner@aborted.html
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl1/igt@runner@aborted.html
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl1/igt@runner@aborted.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl4/igt@runner@aborted.html
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl7/igt@runner@aborted.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-kbl1/igt@runner@aborted.html
    - shard-iclb:         ([FAIL][128], [FAIL][129]) ([i915#1814] / [i915#3002]) -> ([FAIL][130], [FAIL][131], [FAIL][132]) ([i915#1814] / [i915#2722] / [i915#3002])
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb7/igt@runner@aborted.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-iclb8/igt@runner@aborted.html
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb8/igt@runner@aborted.html
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb6/igt@runner@aborted.html
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-iclb1/igt@runner@aborted.html
    - shard-skl:          ([FAIL][133], [FAIL][134]) ([i915#1814] / [i915#3002] / [i915#3363]) -> ([FAIL][135], [FAIL][136], [FAIL][137]) ([i915#1436] / [i915#1814] / [i915#3002] / [i915#3363])
   [133]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl6/igt@runner@aborted.html
   [134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10604/shard-skl6/igt@runner@aborted.html
   [135]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl3/igt@runner@aborted.html
   [136]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl5/igt@runner@aborted.html
   [137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/shard-skl1/igt@runner@aborted.html

  
  [IGT#2]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/2
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109274]: https://bugs.freedesktop.org/show_bug.cgi?id=109274
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
  [fdo#109279]: https://bugs.freedesktop.org/show_bug.cgi?id=109279
  [fdo#109280]: https://bugs.freedesktop.org/show_bug.cgi?id=109280
  [fdo#109284]: https://bugs.freedesktop.org/show_bug.cgi?id=109284
  [fdo#109291]: https://bugs.freedesktop.org/show_bug.cgi?id=109291
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109502]: https://bugs.freedesktop.org/show_bug.cgi?id=109502
  [fdo#111614]: https://bugs.freedesktop.org/show_bug.cgi?id=111614
  [fdo#111615]: https://bugs.freedesktop.org/show_bug.cgi?id=111615
  [fdo#111825]: https://bugs.freedesktop.org/show_bug.cgi?id=111825
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [fdo#111828]: https://bugs.freedesktop.org/show_bug.cgi?id=111828
  [fdo#112283]: https://bugs.freedesktop.org/show_bug.cgi?id=112283
  [i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
  [i915#132]: https://gitlab.freedesktop.org/drm/intel/issues/132
  [i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
  [i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
  [i915#1722]: https://gitlab.freedesktop.org/drm/intel/issues/1722
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1814]: https://gitlab.freedesktop.org/drm/intel/issues/1814
  [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
  [i915#2346]: https://gitlab.freedesktop.org/drm/intel/issues/2346
  [i915#2369]: https://gitlab.freedesktop.org/drm/intel/issues/2369
  [i915#2411]: https://gitlab.freedesktop.org/drm/intel/issues/2411
  [i915#2530]: https://gitlab.freedesktop.org/drm/intel/issues/2530
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#2658]: https://gitlab.freedesktop.org/drm/intel/issues/2658
  [i915#2722]: https://gitlab.freedesktop.org/drm/intel/issues/2722
  [i915#280]: https://gitlab.freedesktop.org/drm/intel/issues/280
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
  [i915#2856]: https://gitlab.freedesktop.org/drm/intel/issues/2856
  [i915#2920]: https://gitlab.freedesktop.org/drm/intel/issues/2920
  [i915#2994]: https://gitlab.freedesktop.org/drm/intel/issues/2994
  [i915#3002]: https://gitlab.freedesktop.org/drm/intel/issues/3002
  [i915#3063]: https://gitlab.freedesktop.org/drm/intel/issues/3063
  [i915#3297]: https://gitlab.freedesktop.org/drm/intel/issues/3297
  [i915#3319]: https://gitlab.freedesktop.org/drm/intel/issues/3319
  [i915#3359]: https://gitlab.freedesktop.org/drm/intel/issues/3359
  [i915#3363]: https://gitlab.freedesktop.org/drm/intel/issues/3363
  [i915#3467]: https://gitlab.freedesktop.org/drm/intel/issues/3467
  [i915#3648]: https://gitlab.freedesktop.org/drm/intel/issues/3648
  [i915#3689]: https://gitlab.freedesktop.org/drm/intel/issues/3689
  [i915#3701]: https://gitlab.freedesktop.org/drm/intel/issues/3701
  [i915#3777]: https://gitl

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21084/index.html

[-- Attachment #2: Type: text/html, Size: 36233 bytes --]

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 03/26] dma-buf: use new iterator in dma_resv_copy_fences
  2021-09-17 14:35     ` [Intel-gfx] " Daniel Vetter
@ 2021-09-20  7:23       ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-20  7:23 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx

On 17.09.21 at 16:35, Daniel Vetter wrote:
> On Fri, Sep 17, 2021 at 02:34:50PM +0200, Christian König wrote:
>> This makes the function much simpler since the complex
>> retry logic is now handled elsewhere.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 86 ++++++++++++++++----------------------
>>   1 file changed, 35 insertions(+), 51 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index a3c79a99fb44..406150dea5e4 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -426,74 +426,58 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
>>    */
>>   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   {
>> -	struct dma_resv_list *src_list, *dst_list;
>> -	struct dma_fence *old, *new;
>> -	unsigned int i;
>> +	struct dma_resv_iter cursor;
>> +	struct dma_resv_list *list;
>> +	struct dma_fence *f, *excl;
>>   
>>   	dma_resv_assert_held(dst);
>>   
>> -	rcu_read_lock();
>> -	src_list = dma_resv_shared_list(src);
>> -
>> -retry:
>> -	if (src_list) {
>> -		unsigned int shared_count = src_list->shared_count;
>> -
>> -		rcu_read_unlock();
>> +	list = NULL;
>> +	excl = NULL;
>>   
>> -		dst_list = dma_resv_list_alloc(shared_count);
>> -		if (!dst_list)
>> -			return -ENOMEM;
>> +	rcu_read_lock();
>> +	dma_resv_iter_begin(&cursor, src, true);
>> +	dma_resv_for_each_fence_unlocked(&cursor, f) {
>>   
>> -		rcu_read_lock();
>> -		src_list = dma_resv_shared_list(src);
>> -		if (!src_list || src_list->shared_count > shared_count) {
>> -			kfree(dst_list);
>> -			goto retry;
>> -		}
>> +		if (cursor.is_first) {
> Maybe have a wrapper for this, like dma_resv_iter_is_reset or is_first or
> is_restart (my preference), with some nice docs that this returns true
> every time we had to restart the sequence?

That's exactly what I wanted to avoid, since the is_first (or whatever)
function should only be used inside the dma_resv.c code.

On the other hand, I can just make that static here and document that it
should never be exported.
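
Something like this, kept local to dma-resv.c (just a sketch):

/* only dma-resv.c itself should care about iteration restarts */
static bool dma_resv_iter_restarted(struct dma_resv_iter *cursor)
{
	return cursor->is_first;
}

and then dma_resv_copy_fences() tests that instead of poking at
cursor.is_first directly.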

Christian.

>
> Otherwise I fully agree, this is so much better with all the hairy
> restarting and get_rcu and test_bit shovelled away somewhere.
>
> Either way (but I much prefer a wrapper for is_first):
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
>> +			dma_resv_list_free(list);
>> +			dma_fence_put(excl);
>>   
>> -		dst_list->shared_count = 0;
>> -		for (i = 0; i < src_list->shared_count; ++i) {
>> -			struct dma_fence __rcu **dst;
>> -			struct dma_fence *fence;
>> +			if (cursor.fences) {
>> +				unsigned int cnt = cursor.fences->shared_count;
>>   
>> -			fence = rcu_dereference(src_list->shared[i]);
>> -			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>> -				     &fence->flags))
>> -				continue;
>> +				rcu_read_unlock();
>> +				list = dma_resv_list_alloc(cnt);
>> +				if (!list) {
>> +					dma_resv_iter_end(&cursor);
>> +					return -ENOMEM;
>> +				}
>>   
>> -			if (!dma_fence_get_rcu(fence)) {
>> -				dma_resv_list_free(dst_list);
>> -				src_list = dma_resv_shared_list(src);
>> -				goto retry;
>> -			}
>> +				list->shared_count = 0;
>> +				rcu_read_lock();
>>   
>> -			if (dma_fence_is_signaled(fence)) {
>> -				dma_fence_put(fence);
>> -				continue;
>> +			} else {
>> +				list = NULL;
>>   			}
>> -
>> -			dst = &dst_list->shared[dst_list->shared_count++];
>> -			rcu_assign_pointer(*dst, fence);
>> +			excl = NULL;
>>   		}
>> -	} else {
>> -		dst_list = NULL;
>> -	}
>>   
>> -	new = dma_fence_get_rcu_safe(&src->fence_excl);
>> +		dma_fence_get(f);
>> +		if (dma_resv_iter_is_exclusive(&cursor))
>> +			excl = f;
>> +		else
>> +			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
>> +	}
>> +	dma_resv_iter_end(&cursor);
>>   	rcu_read_unlock();
>>   
>> -	src_list = dma_resv_shared_list(dst);
>> -	old = dma_resv_excl_fence(dst);
>> -
>>   	write_seqcount_begin(&dst->seq);
>> -	/* write_seqcount_begin provides the necessary memory barrier */
>> -	RCU_INIT_POINTER(dst->fence_excl, new);
>> -	RCU_INIT_POINTER(dst->fence, dst_list);
>> +	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
>> +	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
>>   	write_seqcount_end(&dst->seq);
>>   
>> -	dma_resv_list_free(src_list);
>> -	dma_fence_put(old);
>> +	dma_resv_list_free(list);
>> +	dma_fence_put(excl);
>>   
>>   	return 0;
>>   }
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 03/26] dma-buf: use new iterator in dma_resv_copy_fences
@ 2021-09-20  7:23       ` Christian König
  0 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-20  7:23 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx

On 17.09.21 at 16:35, Daniel Vetter wrote:
> On Fri, Sep 17, 2021 at 02:34:50PM +0200, Christian König wrote:
>> This makes the function much simpler since the complex
>> retry logic is now handled elsewhere.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 86 ++++++++++++++++----------------------
>>   1 file changed, 35 insertions(+), 51 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index a3c79a99fb44..406150dea5e4 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -426,74 +426,58 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_walk);
>>    */
>>   int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   {
>> -	struct dma_resv_list *src_list, *dst_list;
>> -	struct dma_fence *old, *new;
>> -	unsigned int i;
>> +	struct dma_resv_iter cursor;
>> +	struct dma_resv_list *list;
>> +	struct dma_fence *f, *excl;
>>   
>>   	dma_resv_assert_held(dst);
>>   
>> -	rcu_read_lock();
>> -	src_list = dma_resv_shared_list(src);
>> -
>> -retry:
>> -	if (src_list) {
>> -		unsigned int shared_count = src_list->shared_count;
>> -
>> -		rcu_read_unlock();
>> +	list = NULL;
>> +	excl = NULL;
>>   
>> -		dst_list = dma_resv_list_alloc(shared_count);
>> -		if (!dst_list)
>> -			return -ENOMEM;
>> +	rcu_read_lock();
>> +	dma_resv_iter_begin(&cursor, src, true);
>> +	dma_resv_for_each_fence_unlocked(&cursor, f) {
>>   
>> -		rcu_read_lock();
>> -		src_list = dma_resv_shared_list(src);
>> -		if (!src_list || src_list->shared_count > shared_count) {
>> -			kfree(dst_list);
>> -			goto retry;
>> -		}
>> +		if (cursor.is_first) {
> Maybe have a wrapper for this, like dma_resv_iter_is_reset or is_first or
> is_restart (my preference), with some nice docs that this returns true
> every time we had to restart the sequence?

That's exactly what I wanted to avoid, since the is_first (or whatever)
function should only be used inside the dma_resv.c code.

On the other hand, I can just make that static here and document that it
should never be exported.

Christian.

>
> Otherwise I fully agree, this is so much better with all the hairy
> restarting and get_rcu and test_bit shovelled away somewhere.
>
> Either way (but I much prefer a wrapper for is_first):
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
>> +			dma_resv_list_free(list);
>> +			dma_fence_put(excl);
>>   
>> -		dst_list->shared_count = 0;
>> -		for (i = 0; i < src_list->shared_count; ++i) {
>> -			struct dma_fence __rcu **dst;
>> -			struct dma_fence *fence;
>> +			if (cursor.fences) {
>> +				unsigned int cnt = cursor.fences->shared_count;
>>   
>> -			fence = rcu_dereference(src_list->shared[i]);
>> -			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>> -				     &fence->flags))
>> -				continue;
>> +				rcu_read_unlock();
>> +				list = dma_resv_list_alloc(cnt);
>> +				if (!list) {
>> +					dma_resv_iter_end(&cursor);
>> +					return -ENOMEM;
>> +				}
>>   
>> -			if (!dma_fence_get_rcu(fence)) {
>> -				dma_resv_list_free(dst_list);
>> -				src_list = dma_resv_shared_list(src);
>> -				goto retry;
>> -			}
>> +				list->shared_count = 0;
>> +				rcu_read_lock();
>>   
>> -			if (dma_fence_is_signaled(fence)) {
>> -				dma_fence_put(fence);
>> -				continue;
>> +			} else {
>> +				list = NULL;
>>   			}
>> -
>> -			dst = &dst_list->shared[dst_list->shared_count++];
>> -			rcu_assign_pointer(*dst, fence);
>> +			excl = NULL;
>>   		}
>> -	} else {
>> -		dst_list = NULL;
>> -	}
>>   
>> -	new = dma_fence_get_rcu_safe(&src->fence_excl);
>> +		dma_fence_get(f);
>> +		if (dma_resv_iter_is_exclusive(&cursor))
>> +			excl = f;
>> +		else
>> +			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
>> +	}
>> +	dma_resv_iter_end(&cursor);
>>   	rcu_read_unlock();
>>   
>> -	src_list = dma_resv_shared_list(dst);
>> -	old = dma_resv_excl_fence(dst);
>> -
>>   	write_seqcount_begin(&dst->seq);
>> -	/* write_seqcount_begin provides the necessary memory barrier */
>> -	RCU_INIT_POINTER(dst->fence_excl, new);
>> -	RCU_INIT_POINTER(dst->fence, dst_list);
>> +	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
>> +	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
>>   	write_seqcount_end(&dst->seq);
>>   
>> -	dma_resv_list_free(src_list);
>> -	dma_fence_put(old);
>> +	dma_resv_list_free(list);
>> +	dma_fence_put(excl);
>>   
>>   	return 0;
>>   }
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 05/26] dma-buf: use new iterator in dma_resv_wait_timeout
  2021-09-17 14:43     ` [Intel-gfx] " Daniel Vetter
@ 2021-09-20  7:27       ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-20  7:27 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx

Am 17.09.21 um 16:43 schrieb Daniel Vetter:
> On Fri, Sep 17, 2021 at 02:34:52PM +0200, Christian König wrote:
>> This makes the function much simpler since the complex
>> retry logic is now handled elsewhere.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 68 ++++++--------------------------------
>>   1 file changed, 10 insertions(+), 58 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 9b90bd9ac018..c7db553ab115 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -569,74 +569,26 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
>>   			   unsigned long timeout)
>>   {
>>   	long ret = timeout ? timeout : 1;
>> -	unsigned int seq, shared_count;
>> +	struct dma_resv_iter cursor;
>>   	struct dma_fence *fence;
>> -	int i;
>>   
>> -retry:
>> -	shared_count = 0;
>> -	seq = read_seqcount_begin(&obj->seq);
>>   	rcu_read_lock();
> I missed this in my previous conversion reviews, but pls move the
> rcu_read_lock into the iterator. That should simplify the flow in all of
> these quite a bit more, and since the iter_next_unlocked grabs a full
> reference for the iteration body we really don't need that protected by
> rcu.

I intentionally didn't do that because it makes it much clearer that 
we are using RCU here and that there is absolutely no guarantee that the 
collection won't change.

But I'm fine if we go down that route instead if you think that's the 
way to go.

Thanks,
Christian.
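
As a rough sketch of what the callers would look like if the iterator took
the RCU read lock internally (assuming dma_resv_iter_walk_unlocked() is
changed to enter and leave the RCU read side itself - not what the posted
patch does):

long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* the iterator already holds a reference to the fence,
		 * so sleeping here without the RCU read lock is fine */
		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0)
			break;
	}
	dma_resv_iter_end(&cursor);

	return ret;
}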

>
> We can't toss rcu protection for dma_resv anytime soon (if ever), but we
> can at least make it an implementation detail.
>
>> -	i = -1;
>> -
>> -	fence = dma_resv_excl_fence(obj);
>> -	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
>> -		if (!dma_fence_get_rcu(fence))
>> -			goto unlock_retry;
>> +	dma_resv_iter_begin(&cursor, obj, wait_all);
>> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>> +		rcu_read_unlock();
>>   
>> -		if (dma_fence_is_signaled(fence)) {
>> -			dma_fence_put(fence);
>> -			fence = NULL;
>> +		ret = dma_fence_wait_timeout(fence, intr, ret);
>> +		if (ret <= 0) {
>> +			dma_resv_iter_end(&cursor);
>> +			return ret;
>>   		}
>>   
>> -	} else {
>> -		fence = NULL;
>> -	}
>> -
>> -	if (wait_all) {
>> -		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
>> -
>> -		if (fobj)
>> -			shared_count = fobj->shared_count;
>> -
>> -		for (i = 0; !fence && i < shared_count; ++i) {
>> -			struct dma_fence *lfence;
>> -
>> -			lfence = rcu_dereference(fobj->shared[i]);
>> -			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>> -				     &lfence->flags))
>> -				continue;
>> -
>> -			if (!dma_fence_get_rcu(lfence))
>> -				goto unlock_retry;
>> -
>> -			if (dma_fence_is_signaled(lfence)) {
>> -				dma_fence_put(lfence);
>> -				continue;
>> -			}
>> -
>> -			fence = lfence;
>> -			break;
>> -		}
>> +		rcu_read_lock();
>>   	}
>> -
>> +	dma_resv_iter_end(&cursor);
>>   	rcu_read_unlock();
>> -	if (fence) {
>> -		if (read_seqcount_retry(&obj->seq, seq)) {
>> -			dma_fence_put(fence);
>> -			goto retry;
>> -		}
>>   
>> -		ret = dma_fence_wait_timeout(fence, intr, ret);
>> -		dma_fence_put(fence);
>> -		if (ret > 0 && wait_all && (i + 1 < shared_count))
>> -			goto retry;
>> -	}
>>   	return ret;
>> -
>> -unlock_retry:
>> -	rcu_read_unlock();
>> -	goto retry;
> I think we still have the same semantics, and it's so much tidier.
>
> With the rcu_read_unlock stuff into iterators (also applies to previous
> two patches):
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
>>   }
>>   EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
>>   
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 20/26] drm: use new iterator in drm_gem_fence_array_add_implicit v2
  2021-09-17 14:53     ` [Intel-gfx] " Daniel Vetter
@ 2021-09-20  7:31       ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-20  7:31 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx

Am 17.09.21 um 16:53 schrieb Daniel Vetter:
> On Fri, Sep 17, 2021 at 02:35:07PM +0200, Christian König wrote:
>> Simplifying the code a bit.
>>
>> v2: add missing rcu_read_lock()/unlock()
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
> This will be gone as soon as I can land the last conversion patches. Plus
> it's always called with dma_resv_lock held.

Yeah, I already thought so as well. I will just keep that around for now to 
get rid of dma_resv_get_excl_unlocked() until your patch lands.

Regards,
Christian.

>
> I wouldn't bother tbh.
> -Daniel
>
>> ---
>>   drivers/gpu/drm/drm_gem.c | 34 ++++++++++++----------------------
>>   1 file changed, 12 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
>> index 09c820045859..c2c41b668f40 100644
>> --- a/drivers/gpu/drm/drm_gem.c
>> +++ b/drivers/gpu/drm/drm_gem.c
>> @@ -1340,31 +1340,21 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
>>   				     struct drm_gem_object *obj,
>>   				     bool write)
>>   {
>> -	int ret;
>> -	struct dma_fence **fences;
>> -	unsigned int i, fence_count;
>> -
>> -	if (!write) {
>> -		struct dma_fence *fence =
>> -			dma_resv_get_excl_unlocked(obj->resv);
>> -
>> -		return drm_gem_fence_array_add(fence_array, fence);
>> -	}
>> -
>> -	ret = dma_resv_get_fences(obj->resv, NULL,
>> -						&fence_count, &fences);
>> -	if (ret || !fence_count)
>> -		return ret;
>> -
>> -	for (i = 0; i < fence_count; i++) {
>> -		ret = drm_gem_fence_array_add(fence_array, fences[i]);
>> +	struct dma_resv_iter cursor;
>> +	struct dma_fence *fence;
>> +	int ret = 0;
>> +
>> +	rcu_read_lock();
>> +	dma_resv_iter_begin(&cursor, obj->resv, write);
>> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>> +		rcu_read_unlock();
>> +		ret = drm_gem_fence_array_add(fence_array, fence);
>> +		rcu_read_lock();
>>   		if (ret)
>>   			break;
>>   	}
>> -
>> -	for (; i < fence_count; i++)
>> -		dma_fence_put(fences[i]);
>> -	kfree(fences);
>> +	dma_resv_iter_end(&cursor);
>> +	rcu_read_unlock();
>>   	return ret;
>>   }
>>   EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 21/26] drm: use new iterator in drm_gem_plane_helper_prepare_fb v2
  2021-09-17 14:55     ` [Intel-gfx] " Daniel Vetter
@ 2021-09-20  7:35       ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-20  7:35 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx

Am 17.09.21 um 16:55 schrieb Daniel Vetter:
> On Fri, Sep 17, 2021 at 02:35:08PM +0200, Christian König wrote:
>> Makes the handling a bit more complex, but avoids the use of
>> dma_resv_get_excl_unlocked().
>>
>> v2: add missing rcu_read_lock()/unlock()
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/drm_gem_atomic_helper.c | 14 ++++++++++++--
>>   1 file changed, 12 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
>> index e570398abd78..d8f9c6432544 100644
>> --- a/drivers/gpu/drm/drm_gem_atomic_helper.c
>> +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
>> @@ -143,6 +143,7 @@
>>    */
>>   int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
>>   {
>> +	struct dma_resv_iter cursor;
>>   	struct drm_gem_object *obj;
>>   	struct dma_fence *fence;
>>   
>> @@ -150,9 +151,18 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
>>   		return 0;
>>   
>>   	obj = drm_gem_fb_get_obj(state->fb, 0);
>> -	fence = dma_resv_get_excl_unlocked(obj->resv);
>> -	drm_atomic_set_fence_for_plane(state, fence);
>> +	rcu_read_lock();
>> +	dma_resv_iter_begin(&cursor, obj->resv, false);
>> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
>> +		rcu_read_unlock();
>> +		/* TODO: We only use the first write fence here */
>> +		drm_atomic_set_fence_for_plane(state, fence);
> Yeah I wonder whether we should/need to collate them all together. But I
> guesss whomever hits that first with their funny multi-plane yuv or
> whatever gets to do that. Or I'm not clear on what exactly your TODO here
> means?

Yeah, exactly that. Basically we have use cases where we have more 
than one fence to wait for.

The TODO is here because adding that to the atomic helper is just not my 
construction site at the moment.

Regards,
Christian.
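
One possible shape of that collating, purely as a sketch (assuming a
dma_fence_array is an acceptable container for the plane fence; names and
error handling here are illustrative only, not part of the posted series):

/* Merge a snapshot of fences into a single fence the plane can wait on.
 * dma_fence_array_create() takes ownership of the fences[] array and the
 * references it contains; on failure the caller still owns both. */
static struct dma_fence *merge_fences(struct dma_fence **fences,
				      unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 1,
				       false);
	return array ? &array->base : NULL;
}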

>
>> +		return 0;
>> +	}
>> +	dma_resv_iter_end(&cursor);
>> +	rcu_read_unlock();
> Imo we should do full dma_resv_lock here. atomic helpers are designed to
> allow this, and it simplifies things. Also it really doesn't matter for
> atomic, we should be able to do 60fps*a few planes easily :-)
> -Daniel
>
>>   
>> +	drm_atomic_set_fence_for_plane(state, NULL);
>>   	return 0;
>>   }
>>   EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
>> -- 
>> 2.25.1
>>


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-17 13:23     ` [Intel-gfx] " Daniel Vetter
  (?)
@ 2021-09-20  8:43     ` Tvrtko Ursulin
  2021-09-20 10:09       ` Christian König
  -1 siblings, 1 reply; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-20  8:43 UTC (permalink / raw)
  To: Daniel Vetter, Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx


On 17/09/2021 14:23, Daniel Vetter wrote:
> On Fri, Sep 17, 2021 at 02:34:48PM +0200, Christian König wrote:
>> Abstract the complexity of iterating over all the fences
>> in a dma_resv object.
>>
>> The new loop handles the whole RCU and retry dance and
>> returns only fences where we can be sure we grabbed the
>> right one.
>>
>> v2: fix accessing the shared fences while they might be freed,
>>      improve kerneldoc, rename _cursor to _iter, add
>>      dma_resv_iter_is_exclusive, add dma_resv_iter_begin/end
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/dma-resv.c | 61 +++++++++++++++++++++++++++
>>   include/linux/dma-resv.h   | 84 ++++++++++++++++++++++++++++++++++++++
>>   2 files changed, 145 insertions(+)
>>
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index 84fbe60629e3..3e77cad2c9d4 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -323,6 +323,67 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>>   }
>>   EXPORT_SYMBOL(dma_resv_add_excl_fence);
>>   
>> +/**
>> + * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
>> + * @cursor: cursor to record the current position
>> + * @first: if we should start over
>> + *
>> + * Return all the fences in the dma_resv object which are not yet signaled.
>> + * The returned fence has an extra local reference so will stay alive.
>> + * If a concurrent modify is detected the whole iterration is started over again.
>> + */
>> +struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
> 
> Bit ocd, but I'd still just call that iter_next.
> 
>> +					      bool first)
> 
> Hm I'd put all the init code into iter_begin ...

@Christian:

Could you engineer something in here which would, at least in debug 
builds, catch failures to call "iter begin" before using the iterator macro?

> 
>> +{
>> +	struct dma_resv *obj = cursor->obj;
> 
> Aren't we missing rcu_read_lock() around the entire thing here?
> 
>> +
>> +	first |= read_seqcount_retry(&obj->seq, cursor->seq);
>> +	do {
>> +		/* Drop the reference from the previous round */
>> +		dma_fence_put(cursor->fence);
>> +
>> +		cursor->is_first = first;
>> +		if (first) {
>> +			cursor->seq = read_seqcount_begin(&obj->seq);
>> +			cursor->index = -1;
>> +			cursor->fences = dma_resv_shared_list(obj);
> 
> And then also call iter_begin from here. That way we guarantee that
> read_seqcount_begin is always called before _retry(). It's not a problem
> with the seqcount implementation (I think at least), but it definitely
> looks funny.
> 
> Calling iter_begin here also makes it clear that we're essentially
> restarting.
> 
>> +
>> +			cursor->fence = dma_resv_excl_fence(obj);
>> +			if (cursor->fence &&
>> +			    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> 
> Please use the right dma_fence wrapper here for this and don't look at the
> bits/flags outside of dma_fence.[hc] code. I just realized that we don't
> have the right amount of barriers in there for the fastpath, i.e. if we
> have:
> 
> x = 0; /* static initializer */
> 
> thread a
> 	x = 1;
> 	dma_fence_signal(fence);
> 
> 
> thread b;
> 	if (dma_fence_is_signalled(fence))
> 		printk("%i\n", x);
> 
> Then you might actually be able to observe x == 0 in thread b. Which is
> not what we want at all.

@Daniel:

What do you mean here - in terms of if 'x' is "external" (not part of 
dma-fence), then are you suggesting dma-fence code should serialise it 
by using barriers?

That would sound incorrect to me, or in other words, I think it's fine 
if x == 0 is observed in your example thread B since that code is mixing 
external data with dma-fence.

Hm also, there is that annoying bit where by using dma_fence_is_signaled 
any code becomes a fence signaling critical path, which I never bought 
into. There should be a way to test the signaled status without actually 
doing the signaling. Or am I misunderstanding something so badly that it 
really has to be like this?

> So no open-coding of dma_fence flag bits code outside of drm_fence.[hc]
> please. And yes i915-gem code is unfortunately a disaster.

You never miss an opportunity for some good trashing, no? :D

But yes, I thought we were supposed to add a deconstructed dma_fence_signal 
to core. Or at least propose it - I don't exactly remember how that went.

> 
>> +				     &cursor->fence->flags))
>> +				cursor->fence = NULL;
>> +		} else {
>> +			cursor->fence = NULL;
>> +		}
>> +
>> +		if (cursor->fence) {
>> +			cursor->fence = dma_fence_get_rcu(cursor->fence);
>> +		} else if (cursor->all_fences && cursor->fences) {
>> +			struct dma_resv_list *fences = cursor->fences;
>> +
>> +			while (++cursor->index < fences->shared_count) {
>> +				cursor->fence = rcu_dereference(
>> +					fences->shared[cursor->index]);
>> +				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>> +					      &cursor->fence->flags))
>> +					break;
>> +			}
>> +			if (cursor->index < fences->shared_count)
>> +				cursor->fence =
>> +					dma_fence_get_rcu(cursor->fence);
>> +			else
>> +				cursor->fence = NULL;
>> +		}
> 
> The control flow here is very hairy, but I'm not sure how to best do this.
> With my suggestion to move the read_seqcount_begin into iter_begin maybe
> something like this:
> 
> iter_next()
> {
> 	do {
> 		dma_fence_put(cursor->fence)
> 		cursor->fence = NULL;
> 
> 		if (cursor->index == -1) { /* reset by iter_begin()
> 			cursor->fence = get_exclusive();
> 			cusor->index++;
> 		} else {
> 			cursor->fence = shared_fences[++cursor->index]
> 		}
> 
> 		if (!dma_fence_is_signalled(cursor->fence))
> 			continue; /* just grab the next fence. */
> 
> 		cursor->fence =  dma_fence_get_rcu(cursor->fence);
> 
> 		if (!cursor->fence || read_seqcount_retry()) {
> 			/* we lost the race, restart completely */
> 			iter_begin(); /* ->fence will be cleaned up at beginning of the loop */
> 			continue;
> 		}
> 
> 		return cursor->fence;
> 	} while (true);
> }
> 
> Maybe I missed something, but that avoids the duplication of all the
> tricky code, i.e. checking for signalling, rcu protected conditional
> fence_get, and the retry is also nicely at the end.
>> +
>> +		/* For the eventually next round */
>> +		first = true;
>> +	} while (read_seqcount_retry(&obj->seq, cursor->seq));
>> +
>> +	return cursor->fence;
>> +}
>> +EXPORT_SYMBOL_GPL(dma_resv_iter_walk_unlocked);
>> +
>>   /**
>>    * dma_resv_copy_fences - Copy all fences from src to dst.
>>    * @dst: the destination reservation object
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index 9100dd3dc21f..693d16117153 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -149,6 +149,90 @@ struct dma_resv {
>>   	struct dma_resv_list __rcu *fence;
>>   };
>>   
>> +/**
>> + * struct dma_resv_iter - current position into the dma_resv fences
>> + *
>> + * Don't touch this directly in the driver, use the accessor function instead.
>> + */
>> +struct dma_resv_iter {
>> +	/** @obj: The dma_resv object we iterate over */
>> +	struct dma_resv *obj;
>> +
>> +	/** @all_fences: If all fences should be returned */
>> +	bool all_fences;
>> +
>> +	/** @fence: the currently handled fence */
>> +	struct dma_fence *fence;
>> +
>> +	/** @seq: sequence number to check for modifications */
>> +	unsigned int seq;
>> +
>> +	/** @index: index into the shared fences */
> 
> If you go with my suggestion (assuming it works): Please add "-1 indicates
> to pick the exclusive fence instead."
> 
>> +	unsigned int index;
>> +
>> +	/** @fences: the shared fences */
>> +	struct dma_resv_list *fences;
>> +
>> +	/** @is_first: true if this is the first returned fence */
>> +	bool is_first;
> 
> I think if we just rely on -1 == exclusive fence/is_first we don't need
> this one here?
> 
>> +};
>> +
>> +struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor,
>> +					      bool first);
>> +
>> +/**
>> + * dma_resv_iter_begin - initialize a dma_resv_iter object
>> + * @cursor: The dma_resv_iter object to initialize
>> + * @obj: The dma_resv object which we want to iterator over
>> + * @all_fences: If all fences should be returned or just the exclusive one
> 
> Please add: "Callers must clean up the iterator with dma_resv_iter_end()."
> 
>> + */
>> +static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
>> +					struct dma_resv *obj,
>> +					bool all_fences)
>> +{
>> +	cursor->obj = obj;
>> +	cursor->all_fences = all_fences;
>> +	cursor->fence = NULL;
>> +}
>> +
>> +/**
>> + * dma_resv_iter_end - cleanup a dma_resv_iter object
>> + * @cursor: the dma_resv_iter object which should be cleaned up
>> + *
>> + * Make sure that the reference to the fence in the cursor is properly
>> + * dropped.
> 
> Please add:
> 
> "This function must be called every time dma_resv_iter_begin() was called
> to clean up any references."
>> + */
>> +static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
>> +{
>> +	dma_fence_put(cursor->fence);
>> +}
>> +
>> +/**
>> + * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
>> + * @cursor: the cursor of the current position
>> + *
>> + * Returns true if the currently returned fence is the exclusive one.
>> + */
>> +static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
>> +{
>> +	return cursor->index == -1;
>> +}
>> +
>> +/**
>> + * dma_resv_for_each_fence_unlocked - unlocked fence iterator
>> + * @cursor: a struct dma_resv_iter pointer
>> + * @fence: the current fence
>> + *
>> + * Iterate over the fences in a struct dma_resv object without holding the
>> + * dma_resv::lock. The RCU read side lock must be hold when using this, but can
>> + * be dropped and re-taken as necessary inside the loop. The cursor needs to be
>> + * initialized with dma_resv_iter_begin_unlocked() and cleaned up with
> 
> We don't have an _unlocked version?

@Christian:

I'd also mention that the fence reference is held during the walk so 
someone is less likely to grab extra ones.

> 
>> + * dma_resv_iter_end_unlocked().
>> + */
>> +#define dma_resv_for_each_fence_unlocked(cursor, fence)			\
>> +	for (fence = dma_resv_iter_walk_unlocked(cursor, true);		\
>> +	     fence; fence = dma_resv_iter_walk_unlocked(cursor, false))
>> +
>>   #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>>   #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>>   
>> -- 
>> 2.25.1
>>
> 

Regards,

Tvrtko


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl
  2021-09-17 12:35   ` [Intel-gfx] " Christian König
  (?)
@ 2021-09-20  8:45   ` Tvrtko Ursulin
  2021-09-20 10:13     ` Christian König
  -1 siblings, 1 reply; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-20  8:45 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media, intel-gfx
  Cc: daniel


On 17/09/2021 13:35, Christian König wrote:
> This makes the function much simpler since the complex
> retry logic is now handled else where.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
>   1 file changed, 11 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index 6234e17259c1..b1cb7ba688da 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>   {
>   	struct drm_i915_gem_busy *args = data;
>   	struct drm_i915_gem_object *obj;
> -	struct dma_resv_list *list;
> -	unsigned int seq;
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>   	int err;
>   
>   	err = -ENOENT;
> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>   	 * to report the overall busyness. This is what the wait-ioctl does.
>   	 *
>   	 */
> -retry:
> -	seq = raw_read_seqcount(&obj->base.resv->seq);
> -
> -	/* Translate the exclusive fence to the READ *and* WRITE engine */
> -	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
> -
> -	/* Translate shared fences to READ set of engines */
> -	list = dma_resv_shared_list(obj->base.resv);
> -	if (list) {
> -		unsigned int shared_count = list->shared_count, i;
> -
> -		for (i = 0; i < shared_count; ++i) {
> -			struct dma_fence *fence =
> -				rcu_dereference(list->shared[i]);
> -
> +	args->busy = false;
> +	dma_resv_iter_begin(&cursor, obj->base.resv, true);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {

You did not agree with my suggestion to reset args->busy on restart and 
so preserve current behaviour?

Regards,

Tvrtko

> +		if (dma_resv_iter_is_exclusive(&cursor))
> +			/* Translate the exclusive fence to the READ *and* WRITE engine */
> +			args->busy = busy_check_writer(fence);
> +		else
> +			/* Translate shared fences to READ set of engines */
>   			args->busy |= busy_check_reader(fence);
> -		}
>   	}
> -
> -	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
> -		goto retry;
> +	dma_resv_iter_end(&cursor);
>   
>   	err = 0;
>   out:
> 

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 14/26] drm/i915: use the new iterator in i915_sw_fence_await_reservation v3
  2021-09-17 12:35   ` [Intel-gfx] " Christian König
  (?)
@ 2021-09-20  8:45   ` Tvrtko Ursulin
  2021-09-20  8:47     ` Tvrtko Ursulin
  -1 siblings, 1 reply; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-20  8:45 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media, intel-gfx
  Cc: daniel


On 17/09/2021 13:35, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: use dma_resv_for_each_fence instead, according to Tvrtko the lock is
>      held here anyway.
> v3: back to using dma_resv_for_each_fence_unlocked.

It did not work out - what happened?

Regards,

Tvrtko

> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/i915/i915_sw_fence.c | 57 ++++++++--------------------
>   1 file changed, 15 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
> index c589a681da77..7635b0478ea5 100644
> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
> @@ -572,56 +572,29 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
>   				    unsigned long timeout,
>   				    gfp_t gfp)
>   {
> -	struct dma_fence *excl;
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *f;
>   	int ret = 0, pending;
>   
>   	debug_fence_assert(fence);
>   	might_sleep_if(gfpflags_allow_blocking(gfp));
>   
> -	if (write) {
> -		struct dma_fence **shared;
> -		unsigned int count, i;
> -
> -		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
> -		if (ret)
> -			return ret;
> -
> -		for (i = 0; i < count; i++) {
> -			if (shared[i]->ops == exclude)
> -				continue;
> -
> -			pending = i915_sw_fence_await_dma_fence(fence,
> -								shared[i],
> -								timeout,
> -								gfp);
> -			if (pending < 0) {
> -				ret = pending;
> -				break;
> -			}
> -
> -			ret |= pending;
> -		}
> -
> -		for (i = 0; i < count; i++)
> -			dma_fence_put(shared[i]);
> -		kfree(shared);
> -	} else {
> -		excl = dma_resv_get_excl_unlocked(resv);
> -	}
> -
> -	if (ret >= 0 && excl && excl->ops != exclude) {
> -		pending = i915_sw_fence_await_dma_fence(fence,
> -							excl,
> -							timeout,
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, resv, write);
> +	dma_resv_for_each_fence_unlocked(&cursor, f) {
> +		rcu_read_unlock();
> +		pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
>   							gfp);
> -		if (pending < 0)
> +		rcu_read_lock();
> +		if (pending < 0) {
>   			ret = pending;
> -		else
> -			ret |= pending;
> -	}
> -
> -	dma_fence_put(excl);
> +			break;
> +		}
>   
> +		ret |= pending;
> +	}
> +	dma_resv_iter_end(&cursor);
> +	rcu_read_unlock();
>   	return ret;
>   }
>   
> 

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 14/26] drm/i915: use the new iterator in i915_sw_fence_await_reservation v3
  2021-09-20  8:45   ` Tvrtko Ursulin
@ 2021-09-20  8:47     ` Tvrtko Ursulin
  2021-09-20 10:14       ` Christian König
  0 siblings, 1 reply; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-20  8:47 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media, intel-gfx
  Cc: daniel


On 20/09/2021 09:45, Tvrtko Ursulin wrote:
> 
> On 17/09/2021 13:35, Christian König wrote:
>> Simplifying the code a bit.
>>
>> v2: use dma_resv_for_each_fence instead, according to Tvrtko the lock is
>>      held here anyway.
>> v3: back to using dma_resv_for_each_fence_unlocked.
> 
> It did not work out - what happened?

Wait, my suggestion to try the locked iterator was against 
i915_request_await_object. I hadn't looked at this one at the time, and 
still haven't now.

Regards,

Tvrtko


> Regards,
> 
> Tvrtko
> 
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/i915/i915_sw_fence.c | 57 ++++++++--------------------
>>   1 file changed, 15 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c 
>> b/drivers/gpu/drm/i915/i915_sw_fence.c
>> index c589a681da77..7635b0478ea5 100644
>> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
>> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
>> @@ -572,56 +572,29 @@ int i915_sw_fence_await_reservation(struct 
>> i915_sw_fence *fence,
>>                       unsigned long timeout,
>>                       gfp_t gfp)
>>   {
>> -    struct dma_fence *excl;
>> +    struct dma_resv_iter cursor;
>> +    struct dma_fence *f;
>>       int ret = 0, pending;
>>       debug_fence_assert(fence);
>>       might_sleep_if(gfpflags_allow_blocking(gfp));
>> -    if (write) {
>> -        struct dma_fence **shared;
>> -        unsigned int count, i;
>> -
>> -        ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>> -        if (ret)
>> -            return ret;
>> -
>> -        for (i = 0; i < count; i++) {
>> -            if (shared[i]->ops == exclude)
>> -                continue;
>> -
>> -            pending = i915_sw_fence_await_dma_fence(fence,
>> -                                shared[i],
>> -                                timeout,
>> -                                gfp);
>> -            if (pending < 0) {
>> -                ret = pending;
>> -                break;
>> -            }
>> -
>> -            ret |= pending;
>> -        }
>> -
>> -        for (i = 0; i < count; i++)
>> -            dma_fence_put(shared[i]);
>> -        kfree(shared);
>> -    } else {
>> -        excl = dma_resv_get_excl_unlocked(resv);
>> -    }
>> -
>> -    if (ret >= 0 && excl && excl->ops != exclude) {
>> -        pending = i915_sw_fence_await_dma_fence(fence,
>> -                            excl,
>> -                            timeout,
>> +    rcu_read_lock();
>> +    dma_resv_iter_begin(&cursor, resv, write);
>> +    dma_resv_for_each_fence_unlocked(&cursor, f) {
>> +        rcu_read_unlock();
>> +        pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
>>                               gfp);
>> -        if (pending < 0)
>> +        rcu_read_lock();
>> +        if (pending < 0) {
>>               ret = pending;
>> -        else
>> -            ret |= pending;
>> -    }
>> -
>> -    dma_fence_put(excl);
>> +            break;
>> +        }
>> +        ret |= pending;
>> +    }
>> +    dma_resv_iter_end(&cursor);
>> +    rcu_read_unlock();
>>       return ret;
>>   }
>>

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 16/26] drm/i915: use new iterator in i915_gem_object_wait_reservation v2
  2021-09-17 12:35   ` [Intel-gfx] " Christian König
  (?)
@ 2021-09-20 10:00   ` Tvrtko Ursulin
  2021-09-21 17:35     ` Christian König
  -1 siblings, 1 reply; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-20 10:00 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media, intel-gfx
  Cc: daniel


On 17/09/2021 13:35, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: add missing rcu read unlock.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_wait.c | 57 ++++++------------------
>   1 file changed, 14 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> index f909aaa09d9c..e416cf528635 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> @@ -37,55 +37,26 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
>   				 unsigned int flags,
>   				 long timeout)
>   {
> -	struct dma_fence *excl;
> -	bool prune_fences = false;
> -
> -	if (flags & I915_WAIT_ALL) {
> -		struct dma_fence **shared;
> -		unsigned int count, i;
> -		int ret;
> -
> -		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
> -		if (ret)
> -			return ret;
> -
> -		for (i = 0; i < count; i++) {
> -			timeout = i915_gem_object_wait_fence(shared[i],
> -							     flags, timeout);
> -			if (timeout < 0)
> -				break;
> -
> -			dma_fence_put(shared[i]);
> -		}
> -
> -		for (; i < count; i++)
> -			dma_fence_put(shared[i]);
> -		kfree(shared);
> -
> -		/*
> -		 * If both shared fences and an exclusive fence exist,
> -		 * then by construction the shared fences must be later
> -		 * than the exclusive fence. If we successfully wait for
> -		 * all the shared fences, we know that the exclusive fence
> -		 * must all be signaled. If all the shared fences are
> -		 * signaled, we can prune the array and recover the
> -		 * floating references on the fences/requests.
> -		 */
> -		prune_fences = count && timeout >= 0;
> -	} else {
> -		excl = dma_resv_get_excl_unlocked(resv);
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
> +
> +	rcu_read_lock();
> +	dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		rcu_read_unlock();
> +		timeout = i915_gem_object_wait_fence(fence, flags, timeout);

Converting this one could be problematic. It's the wait ioctl, which used 
to grab an atomic snapshot and wait for that rendering to complete. With 
this change I think it has the potential to run forever if it keeps catching 
new activity against the same object.

I am not sure whether or not the difference is relevant for how 
userspace uses it, but I think it needs discussion.

Hm actually there are internal callers as well, and at least some of 
those have the object locked. Would a wider refactoring to separate 
those into buckets (locked vs unlocked) make sense?

Regards,

Tvrtko
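
A middle ground that keeps the old snapshot semantics for the ioctl path
could look roughly like this, reusing the dma_resv_get_fences() call the
original code was built on (sketch only, with the !I915_WAIT_ALL special
case left out):

	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int ret;

	/* take one consistent snapshot and only ever wait on that */
	ret = dma_resv_get_fences(resv, &excl, &count, &shared);
	if (ret)
		return ret;

	for (i = 0; i < count && timeout >= 0; i++)
		timeout = i915_gem_object_wait_fence(shared[i], flags, timeout);

	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout);

	dma_fence_put(excl);
	while (count--)
		dma_fence_put(shared[count]);
	kfree(shared);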


> +		rcu_read_lock();
> +		if (timeout < 0)
> +			break;
>   	}
> -
> -	if (excl && timeout >= 0)
> -		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
> -
> -	dma_fence_put(excl);
> +	dma_resv_iter_end(&cursor);
> +	rcu_read_unlock();
>   
>   	/*
>   	 * Opportunistically prune the fences iff we know they have *all* been
>   	 * signaled.
>   	 */
> -	if (prune_fences)
> +	if (timeout > 0)
>   		dma_resv_prune(resv);
>   
>   	return timeout;
> 

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-20  8:43     ` Tvrtko Ursulin
@ 2021-09-20 10:09       ` Christian König
  2021-09-20 10:26         ` Tvrtko Ursulin
  0 siblings, 1 reply; 115+ messages in thread
From: Christian König @ 2021-09-20 10:09 UTC (permalink / raw)
  To: Tvrtko Ursulin, Daniel Vetter
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx

Am 20.09.21 um 10:43 schrieb Tvrtko Ursulin:
> On 17/09/2021 14:23, Daniel Vetter wrote:
>> On Fri, Sep 17, 2021 at 02:34:48PM +0200, Christian König wrote:
>>> Abstract the complexity of iterating over all the fences
>>> in a dma_resv object.
>>>
>>> The new loop handles the whole RCU and retry dance and
>>> returns only fences where we can be sure we grabbed the
>>> right one.
>>>
>>> v2: fix accessing the shared fences while they might be freed,
>>>      improve kerneldoc, rename _cursor to _iter, add
>>>      dma_resv_iter_is_exclusive, add dma_resv_iter_begin/end
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/dma-buf/dma-resv.c | 61 +++++++++++++++++++++++++++
>>>   include/linux/dma-resv.h   | 84 
>>> ++++++++++++++++++++++++++++++++++++++
>>>   2 files changed, 145 insertions(+)
>>>
>>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>>> index 84fbe60629e3..3e77cad2c9d4 100644
>>> --- a/drivers/dma-buf/dma-resv.c
>>> +++ b/drivers/dma-buf/dma-resv.c
>>> @@ -323,6 +323,67 @@ void dma_resv_add_excl_fence(struct dma_resv 
>>> *obj, struct dma_fence *fence)
>>>   }
>>>   EXPORT_SYMBOL(dma_resv_add_excl_fence);
>>>   +/**
>>> + * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
>>> + * @cursor: cursor to record the current position
>>> + * @first: if we should start over
>>> + *
>>> + * Return all the fences in the dma_resv object which are not yet 
>>> signaled.
>>> + * The returned fence has an extra local reference so will stay alive.
>>> + * If a concurrent modify is detected the whole iterration is 
>>> started over again.
>>> + */
>>> +struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter 
>>> *cursor,
>>
>> Bit ocd, but I'd still just call that iter_next.
>>
>>> +                          bool first)
>>
>> Hm I'd put all the init code into iter_begin ...
>
> @Christian:
>
> Could you engineer something in here which would, at least in debug 
> builds, catch failures to call "iter begin" before using the iterator 
> macro?

Yeah, I've already played with the thought of somehow teaching lockdep 
that, but then abandoned it as an abuse of lockdep.
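
A cheaper alternative might be a simple debug tag, along these lines (the
magic value, the extra ->magic member and the use of CONFIG_DEBUG_KERNEL are
all made up for this sketch):

#define DMA_RESV_ITER_MAGIC	0x72657376	/* arbitrary */

static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
					struct dma_resv *obj,
					bool all_fences)
{
	cursor->obj = obj;
	cursor->all_fences = all_fences;
	cursor->fence = NULL;
	if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
		cursor->magic = DMA_RESV_ITER_MAGIC;
}

/* ...and at the top of dma_resv_iter_walk_unlocked():
 *
 *	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_KERNEL) &&
 *		     cursor->magic != DMA_RESV_ITER_MAGIC);
 */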

>
>>
>>> +{
>>> +    struct dma_resv *obj = cursor->obj;
>>
>> Aren't we missing rcu_read_lock() around the entire thing here?
>>
>>> +
>>> +    first |= read_seqcount_retry(&obj->seq, cursor->seq);
>>> +    do {
>>> +        /* Drop the reference from the previous round */
>>> +        dma_fence_put(cursor->fence);
>>> +
>>> +        cursor->is_first = first;
>>> +        if (first) {
>>> +            cursor->seq = read_seqcount_begin(&obj->seq);
>>> +            cursor->index = -1;
>>> +            cursor->fences = dma_resv_shared_list(obj);
>>
>> And then also call iter_begin from here. That way we guarantee that
>> read_seqcount_begin is always called before _retry(). It's not a problem
>> with the seqcount implementation (I think at least), but it definitely
>> looks funny.
>>
>> Calling iter_begin here also makes it clear that we're essentially
>> restarting.
>>
>>> +
>>> +            cursor->fence = dma_resv_excl_fence(obj);
>>> +            if (cursor->fence &&
>>> +                test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>>
>> Please use the right dma_fence wrapper here for this and don't look 
>> at the
>> bits/flags outside of dma_fence.[hc] code. I just realized that we don't
>> have the right amount of barriers in there for the fastpath, i.e. if we
>> have:
>>
>> x = 0; /* static initializer */
>>
>> thread a
>>     x = 1;
>>     dma_fence_signal(fence);
>>
>>
>> thread b;
>>     if (dma_fence_is_signalled(fence))
>>         printk("%i\n", x);
>>
>> Then you might actually be able to observe x == 0 in thread b. Which is
>> not what we want at all.
>
> @Daniel:
>
> What do you mean here - in terms of if 'x' is "external" (not part of 
> dma-fence), then are you suggesting dma-fence code should serialise it 
> by using barriers?
>
> That would sound incorrect to me, or in other words, I think it's fine 
> if x == 0 is observed in your example thread B since that code is 
> mixing external data with dma-fence.

No, Daniel is right. The problem is that on architectures other than x86 
barriers are per memory address (or rather cache line in practice).

So you need to be really careful that you see a fully consistent state, 
and not just one variable updated while the others are still in the old state.

But this was buggy before as well. I'm just pulling the existing test 
into the new iterator.
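
In generic terms the missing pairing looks like this (illustration only, not
dma-fence code - smp_store_release()/smp_load_acquire() stand in for whatever
barriers dma_fence_signal()/dma_fence_is_signaled() would need on the fast
path):

static int x;
static int flag;

static void writer(void)
{
	x = 1;				/* payload store */
	smp_store_release(&flag, 1);	/* publish; orders the store to x first */
}

static void reader(void)
{
	if (smp_load_acquire(&flag))	/* pairs with the release above */
		pr_info("x = %d\n", x);	/* can no longer observe x == 0 */
}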

>
> Hm also, there is that annoying bit where by using 
> dma_fence_is_signaled any code becomes a fence signaling critical 
> path, which I never bought into. There should be a way to test the 
> signaled status without actually doing the signaling. Or am I 
> misunderstanding something so badly that it really has to be like this?

You are mixing things up. Testing is unproblematic, signaling is the 
problematic one.

>
>> So no open-coding of dma_fence flag bits code outside of drm_fence.[hc]
>> please. And yes i915-gem code is unfortunately a disaster.
>
> Don't even miss an opportunity for some good trashing no? :D
>
> But yes, deconstructed dma_fence_signal I thought we were supposed to 
> add to core. Or at least propose, don't exactly remember how that went.

The problem is that you need to grab a reference to call 
dma_fence_signal, while testing the flag works without one.

Regards,
Christian.
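
To make the distinction concrete: peeking at the signaled bit takes no
reference and signals nothing, which is roughly what a helper in dma-fence.h
could expose (the name is made up for this sketch, and per Daniel's rule it
would have to live there rather than be open-coded in drivers):

static inline bool dma_fence_is_signaled_peek(struct dma_fence *fence)
{
	/* only reads the flag: no reference taken, nothing gets signaled */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
}

dma_fence_is_signaled(), in contrast, may invoke the ->signaled() callback
and then signal the fence, so its caller needs to hold a reference.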

>
>>
>>> + &cursor->fence->flags))
>>> +                cursor->fence = NULL;
>>> +        } else {
>>> +            cursor->fence = NULL;
>>> +        }
>>> +
>>> +        if (cursor->fence) {
>>> +            cursor->fence = dma_fence_get_rcu(cursor->fence);
>>> +        } else if (cursor->all_fences && cursor->fences) {
>>> +            struct dma_resv_list *fences = cursor->fences;
>>> +
>>> +            while (++cursor->index < fences->shared_count) {
>>> +                cursor->fence = rcu_dereference(
>>> +                    fences->shared[cursor->index]);
>>> +                if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>>> +                          &cursor->fence->flags))
>>> +                    break;
>>> +            }
>>> +            if (cursor->index < fences->shared_count)
>>> +                cursor->fence =
>>> +                    dma_fence_get_rcu(cursor->fence);
>>> +            else
>>> +                cursor->fence = NULL;
>>> +        }
>>
>> The control flow here is very hairy, but I'm not sure how to best do 
>> this.
>> With my suggestion to move the read_seqcount_begin into iter_begin maybe
>> something like this:
>>
>> iter_next()
>> {
>>     do {
>>         dma_fence_put(cursor->fence)
>>         cursor->fence = NULL;
>>
>>         if (cursor->index == -1) { /* reset by iter_begin()
>>             cursor->fence = get_exclusive();
>>             cusor->index++;
>>         } else {
>>             cursor->fence = shared_fences[++cursor->index]
>>         }
>>
>>         if (!dma_fence_is_signalled(cursor->fence))
>>             continue; /* just grab the next fence. */
>>
>>         cursor->fence =  dma_fence_get_rcu(cursor->fence);
>>
>>         if (!cursor->fence || read_seqcount_retry()) {
>>             /* we lost the race, restart completely */
>>             iter_begin(); /* ->fence will be cleaned up at beginning 
>> of the loop */
>>             continue;
>>         }
>>
>>         return cursor->fence;
>>     } while (true);
>> }
>>
>> Maybe I missed something, but that avoids the duplication of all the
>> tricky code, i.e. checking for signalling, rcu protected conditional
>> fence_get, and the retry is also nicely at the end.
>>> +
>>> +        /* For the eventually next round */
>>> +        first = true;
>>> +    } while (read_seqcount_retry(&obj->seq, cursor->seq));
>>> +
>>> +    return cursor->fence;
>>> +}
>>> +EXPORT_SYMBOL_GPL(dma_resv_iter_walk_unlocked);
>>> +
>>>   /**
>>>    * dma_resv_copy_fences - Copy all fences from src to dst.
>>>    * @dst: the destination reservation object
>>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>>> index 9100dd3dc21f..693d16117153 100644
>>> --- a/include/linux/dma-resv.h
>>> +++ b/include/linux/dma-resv.h
>>> @@ -149,6 +149,90 @@ struct dma_resv {
>>>       struct dma_resv_list __rcu *fence;
>>>   };
>>>   +/**
>>> + * struct dma_resv_iter - current position into the dma_resv fences
>>> + *
>>> + * Don't touch this directly in the driver, use the accessor 
>>> function instead.
>>> + */
>>> +struct dma_resv_iter {
>>> +    /** @obj: The dma_resv object we iterate over */
>>> +    struct dma_resv *obj;
>>> +
>>> +    /** @all_fences: If all fences should be returned */
>>> +    bool all_fences;
>>> +
>>> +    /** @fence: the currently handled fence */
>>> +    struct dma_fence *fence;
>>> +
>>> +    /** @seq: sequence number to check for modifications */
>>> +    unsigned int seq;
>>> +
>>> +    /** @index: index into the shared fences */
>>
>> If you go with my suggestion (assuming it works): Please add "-1 
>> indicates
>> to pick the exclusive fence instead."
>>
>>> +    unsigned int index;
>>> +
>>> +    /** @fences: the shared fences */
>>> +    struct dma_resv_list *fences;
>>> +
>>> +    /** @is_first: true if this is the first returned fence */
>>> +    bool is_first;
>>
>> I think if we just rely on -1 == exclusive fence/is_first we don't need
>> this one here?
>>
>>> +};
>>> +
>>> +struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter 
>>> *cursor,
>>> +                          bool first);
>>> +
>>> +/**
>>> + * dma_resv_iter_begin - initialize a dma_resv_iter object
>>> + * @cursor: The dma_resv_iter object to initialize
>>> + * @obj: The dma_resv object which we want to iterator over
>>> + * @all_fences: If all fences should be returned or just the 
>>> exclusive one
>>
>> Please add: "Callers must clean up the iterator with 
>> dma_resv_iter_end()."
>>
>>> + */
>>> +static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
>>> +                    struct dma_resv *obj,
>>> +                    bool all_fences)
>>> +{
>>> +    cursor->obj = obj;
>>> +    cursor->all_fences = all_fences;
>>> +    cursor->fence = NULL;
>>> +}
>>> +
>>> +/**
>>> + * dma_resv_iter_end - cleanup a dma_resv_iter object
>>> + * @cursor: the dma_resv_iter object which should be cleaned up
>>> + *
>>> + * Make sure that the reference to the fence in the cursor is properly
>>> + * dropped.
>>
>> Please add:
>>
>> "This function must be called every time dma_resv_iter_begin() was 
>> called
>> to clean up any references."
>>> + */
>>> +static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
>>> +{
>>> +    dma_fence_put(cursor->fence);
>>> +}
>>> +
>>> +/**
>>> + * dma_resv_iter_is_exclusive - test if the current fence is the 
>>> exclusive one
>>> + * @cursor: the cursor of the current position
>>> + *
>>> + * Returns true if the currently returned fence is the exclusive one.
>>> + */
>>> +static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter 
>>> *cursor)
>>> +{
>>> +    return cursor->index == -1;
>>> +}
>>> +
>>> +/**
>>> + * dma_resv_for_each_fence_unlocked - unlocked fence iterator
>>> + * @cursor: a struct dma_resv_iter pointer
>>> + * @fence: the current fence
>>> + *
>>> + * Iterate over the fences in a struct dma_resv object without 
>>> holding the
>>> + * dma_resv::lock. The RCU read side lock must be hold when using 
>>> this, but can
>>> + * be dropped and re-taken as necessary inside the loop. The cursor 
>>> needs to be
>>> + * initialized with dma_resv_iter_begin_unlocked() and cleaned up with
>>
>> We don't have an _unlocked version?
>
> @Christian:
>
> I'd also mention that the fence reference is held during the walk so 
> someone is less likely to grab extra ones.
>
>>
>>> + * dma_resv_iter_end_unlocked().
>>> + */
>>> +#define dma_resv_for_each_fence_unlocked(cursor, fence)            \
>>> +    for (fence = dma_resv_iter_walk_unlocked(cursor, true);        \
>>> +         fence; fence = dma_resv_iter_walk_unlocked(cursor, false))
>>> +
>>>   #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>>>   #define dma_resv_assert_held(obj) 
>>> lockdep_assert_held(&(obj)->lock.base)
>>>   --
>>> 2.25.1
>>>
>>
>
> Regards,
>
> Tvrtko
>
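
For reference, typical usage of the interface as posted boils down to
something like the following minimal sketch (names taken from the patch
above; resv_is_busy() is a made-up helper and the explicit
rcu_read_lock()/unlock() placement is an assumption based on the
kerneldoc, not something the patch enforces):

	static bool resv_is_busy(struct dma_resv *resv, bool all_fences)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;
		bool busy = false;

		rcu_read_lock();
		dma_resv_iter_begin(&cursor, resv, all_fences);
		dma_resv_for_each_fence_unlocked(&cursor, fence) {
			/* the walk holds a reference on @fence for this step */
			busy = true;
			break;
		}
		dma_resv_iter_end(&cursor);	/* drops any reference still held */
		rcu_read_unlock();

		return busy;
	}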


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl
  2021-09-20  8:45   ` Tvrtko Ursulin
@ 2021-09-20 10:13     ` Christian König
  2021-09-20 10:33       ` Tvrtko Ursulin
  0 siblings, 1 reply; 115+ messages in thread
From: Christian König @ 2021-09-20 10:13 UTC (permalink / raw)
  To: Tvrtko Ursulin, linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>
> On 17/09/2021 13:35, Christian König wrote:
>> This makes the function much simpler since the complex
>> retry logic is now handled else where.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> index 6234e17259c1..b1cb7ba688da 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>> *data,
>>   {
>>       struct drm_i915_gem_busy *args = data;
>>       struct drm_i915_gem_object *obj;
>> -    struct dma_resv_list *list;
>> -    unsigned int seq;
>> +    struct dma_resv_iter cursor;
>> +    struct dma_fence *fence;
>>       int err;
>>         err = -ENOENT;
>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>> void *data,
>>        * to report the overall busyness. This is what the wait-ioctl 
>> does.
>>        *
>>        */
>> -retry:
>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>> -
>> -    /* Translate the exclusive fence to the READ *and* WRITE engine */
>> -    args->busy = 
>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>> -
>> -    /* Translate shared fences to READ set of engines */
>> -    list = dma_resv_shared_list(obj->base.resv);
>> -    if (list) {
>> -        unsigned int shared_count = list->shared_count, i;
>> -
>> -        for (i = 0; i < shared_count; ++i) {
>> -            struct dma_fence *fence =
>> -                rcu_dereference(list->shared[i]);
>> -
>> +    args->busy = false;
>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>
> You did not agree with my suggestion to reset args->busy on restart 
> and so preserve current behaviour?

No, I want to keep the restart behavior internal to the dma_resv 
object, and as far as I can see it should not make a difference here.

Regards,
Christian.

>
> Regards,
>
> Tvrtko
>
>> +        if (dma_resv_iter_is_exclusive(&cursor))
>> +            /* Translate the exclusive fence to the READ *and* WRITE 
>> engine */
>> +            args->busy = busy_check_writer(fence);
>> +        else
>> +            /* Translate shared fences to READ set of engines */
>>               args->busy |= busy_check_reader(fence);
>> -        }
>>       }
>> -
>> -    if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
>> -        goto retry;
>> +    dma_resv_iter_end(&cursor);
>>         err = 0;
>>   out:
>>
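
For illustration, the suggestion being discussed (dropping the
accumulated flags whenever the walk restarted) could look roughly like
this, using the is_first field from the iterator as posted in patch 01;
whether that field stays, and under which name, is still open:

	args->busy = 0;
	dma_resv_iter_begin(&cursor, obj->base.resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* start with a clean set of flags after every restart */
		if (cursor.is_first)
			args->busy = 0;

		if (dma_resv_iter_is_exclusive(&cursor))
			/* Translate the exclusive fence to READ *and* WRITE */
			args->busy = busy_check_writer(fence);
		else
			/* Translate shared fences to the READ set of engines */
			args->busy |= busy_check_reader(fence);
	}
	dma_resv_iter_end(&cursor);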


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 14/26] drm/i915: use the new iterator in i915_sw_fence_await_reservation v3
  2021-09-20  8:47     ` Tvrtko Ursulin
@ 2021-09-20 10:14       ` Christian König
  0 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-20 10:14 UTC (permalink / raw)
  To: Tvrtko Ursulin, linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Am 20.09.21 um 10:47 schrieb Tvrtko Ursulin:
>
> On 20/09/2021 09:45, Tvrtko Ursulin wrote:
>>
>> On 17/09/2021 13:35, Christian König wrote:
>>> Simplifying the code a bit.
>>>
>>> v2: use dma_resv_for_each_fence instead, according to Tvrtko the 
>>> lock is
>>>      held here anyway.
>>> v3: back to using dma_resv_for_each_fence_unlocked.
>>
>> It did not work out - what happened?
> Wait, my suggestion to try the locked iterator was against 
> i915_request_await_object. I haven't looked at this one at the time or 
> even now.

Exactly! I've mixed the two up and this one here is really called 
without holding a lock.

Regards,
Christian.

>
> Regards,
>
> Tvrtko
>
>
>> Regards,
>>
>> Tvrtko
>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/gpu/drm/i915/i915_sw_fence.c | 57 
>>> ++++++++--------------------
>>>   1 file changed, 15 insertions(+), 42 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c 
>>> b/drivers/gpu/drm/i915/i915_sw_fence.c
>>> index c589a681da77..7635b0478ea5 100644
>>> --- a/drivers/gpu/drm/i915/i915_sw_fence.c
>>> +++ b/drivers/gpu/drm/i915/i915_sw_fence.c
>>> @@ -572,56 +572,29 @@ int i915_sw_fence_await_reservation(struct 
>>> i915_sw_fence *fence,
>>>                       unsigned long timeout,
>>>                       gfp_t gfp)
>>>   {
>>> -    struct dma_fence *excl;
>>> +    struct dma_resv_iter cursor;
>>> +    struct dma_fence *f;
>>>       int ret = 0, pending;
>>>       debug_fence_assert(fence);
>>>       might_sleep_if(gfpflags_allow_blocking(gfp));
>>> -    if (write) {
>>> -        struct dma_fence **shared;
>>> -        unsigned int count, i;
>>> -
>>> -        ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>>> -        if (ret)
>>> -            return ret;
>>> -
>>> -        for (i = 0; i < count; i++) {
>>> -            if (shared[i]->ops == exclude)
>>> -                continue;
>>> -
>>> -            pending = i915_sw_fence_await_dma_fence(fence,
>>> -                                shared[i],
>>> -                                timeout,
>>> -                                gfp);
>>> -            if (pending < 0) {
>>> -                ret = pending;
>>> -                break;
>>> -            }
>>> -
>>> -            ret |= pending;
>>> -        }
>>> -
>>> -        for (i = 0; i < count; i++)
>>> -            dma_fence_put(shared[i]);
>>> -        kfree(shared);
>>> -    } else {
>>> -        excl = dma_resv_get_excl_unlocked(resv);
>>> -    }
>>> -
>>> -    if (ret >= 0 && excl && excl->ops != exclude) {
>>> -        pending = i915_sw_fence_await_dma_fence(fence,
>>> -                            excl,
>>> -                            timeout,
>>> +    rcu_read_lock();
>>> +    dma_resv_iter_begin(&cursor, resv, write);
>>> +    dma_resv_for_each_fence_unlocked(&cursor, f) {
>>> +        rcu_read_unlock();
>>> +        pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
>>>                               gfp);
>>> -        if (pending < 0)
>>> +        rcu_read_lock();
>>> +        if (pending < 0) {
>>>               ret = pending;
>>> -        else
>>> -            ret |= pending;
>>> -    }
>>> -
>>> -    dma_fence_put(excl);
>>> +            break;
>>> +        }
>>> +        ret |= pending;
>>> +    }
>>> +    dma_resv_iter_end(&cursor);
>>> +    rcu_read_unlock();
>>>       return ret;
>>>   }
>>>
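
As a side note, the drop-and-retake pattern in the hunk above can be
sketched generically like this (assuming the iterator semantics from
patch 01: the walk keeps its own reference on the current fence and
restarts via the seqcount if the object changed while unlocked;
do_sleeping_work() is a placeholder for the real await):

	rcu_read_lock();
	dma_resv_iter_begin(&cursor, resv, write);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* safe to drop RCU here, the cursor holds a fence reference */
		rcu_read_unlock();
		ret = do_sleeping_work(fence);
		rcu_read_lock();
		if (ret < 0)
			break;
	}
	dma_resv_iter_end(&cursor);
	rcu_read_unlock();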


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2
  2021-09-20 10:09       ` Christian König
@ 2021-09-20 10:26         ` Tvrtko Ursulin
  0 siblings, 0 replies; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-20 10:26 UTC (permalink / raw)
  To: Christian König, Daniel Vetter
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx


On 20/09/2021 11:09, Christian König wrote:
> Am 20.09.21 um 10:43 schrieb Tvrtko Ursulin:
>> On 17/09/2021 14:23, Daniel Vetter wrote:
>>> On Fri, Sep 17, 2021 at 02:34:48PM +0200, Christian König wrote:
>>>> Abstract the complexity of iterating over all the fences
>>>> in a dma_resv object.
>>>>
>>>> The new loop handles the whole RCU and retry dance and
>>>> returns only fences where we can be sure we grabbed the
>>>> right one.
>>>>
>>>> v2: fix accessing the shared fences while they might be freed,
>>>>      improve kerneldoc, rename _cursor to _iter, add
>>>>      dma_resv_iter_is_exclusive, add dma_resv_iter_begin/end
>>>>
>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> ---
>>>>   drivers/dma-buf/dma-resv.c | 61 +++++++++++++++++++++++++++
>>>>   include/linux/dma-resv.h   | 84 
>>>> ++++++++++++++++++++++++++++++++++++++
>>>>   2 files changed, 145 insertions(+)
>>>>
>>>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>>>> index 84fbe60629e3..3e77cad2c9d4 100644
>>>> --- a/drivers/dma-buf/dma-resv.c
>>>> +++ b/drivers/dma-buf/dma-resv.c
>>>> @@ -323,6 +323,67 @@ void dma_resv_add_excl_fence(struct dma_resv 
>>>> *obj, struct dma_fence *fence)
>>>>   }
>>>>   EXPORT_SYMBOL(dma_resv_add_excl_fence);
>>>>   +/**
>>>> + * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
>>>> + * @cursor: cursor to record the current position
>>>> + * @first: if we should start over
>>>> + *
>>>> + * Return all the fences in the dma_resv object which are not yet 
>>>> signaled.
>>>> + * The returned fence has an extra local reference so will stay alive.
>>>> + * If a concurrent modify is detected the whole iteration is 
>>>> started over again.
>>>> + */
>>>> +struct dma_fence *dma_resv_iter_walk_unlocked(struct dma_resv_iter 
>>>> *cursor,
>>>
>>> Bit ocd, but I'd still just call that iter_next.
>>>
>>>> +                          bool first)
>>>
>>> Hm I'd put all the init code into iter_begin ...
>>
>> @Christian:
>>
>> Could you engineer something in here which would, at least in debug 
>> builds, catch failures to call "iter begin" before using the iterator 
>> macro?
> 
> Yeah, I've already played with the thought of somehow teaching lockdep 
> that, but then abandoned it as an abuse of lockdep.

Yes, probably not lockdep, but it would need to be a separate build-time 
option akin to DEBUG_WW_MUTEXES and similar.

>>
>>>
>>>> +{
>>>> +    struct dma_resv *obj = cursor->obj;
>>>
>>> Aren't we missing rcu_read_lock() around the entire thing here?
>>>
>>>> +
>>>> +    first |= read_seqcount_retry(&obj->seq, cursor->seq);
>>>> +    do {
>>>> +        /* Drop the reference from the previous round */
>>>> +        dma_fence_put(cursor->fence);
>>>> +
>>>> +        cursor->is_first = first;
>>>> +        if (first) {
>>>> +            cursor->seq = read_seqcount_begin(&obj->seq);
>>>> +            cursor->index = -1;
>>>> +            cursor->fences = dma_resv_shared_list(obj);
>>>
>>> And then also call iter_begin from here. That way we guarantee that
>>> read_seqcount_begin is always called before _retry(). It's not a problem
>>> with the seqcount implementation (I think at least), but it definitely
>>> looks funny.
>>>
>>> Calling iter_begin here also makes it clear that we're essentially
>>> restarting.
>>>
>>>> +
>>>> +            cursor->fence = dma_resv_excl_fence(obj);
>>>> +            if (cursor->fence &&
>>>> +                test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
>>>
>>> Please use the right dma_fence wrapper here for this and don't look 
>>> at the
>>> bits/flags outside of dma_fence.[hc] code. I just realized that we don't
>>> have the right amount of barriers in there for the fastpath, i.e. if we
>>> have:
>>>
>>> x = 0; /* static initializer */
>>>
>>> thread a
>>>     x = 1;
>>>     dma_fence_signal(fence);
>>>
>>>
>>> thread b;
>>>     if (dma_fence_is_signalled(fence))
>>>         printk("%i\n", x);
>>>
>>> Then you might actually be able to observe x == 0 in thread b. Which is
>>> not what we want at all.
>>
>> @Daniel:
>>
>> What do you mean here - if 'x' is "external" (not part of dma-fence), 
>> are you suggesting the dma-fence code should serialise it by using 
>> barriers?
>>
>> That would sound incorrect to me, or in other words, I think it's fine 
>> if x == 0 is observed in your example thread B since that code is 
>> mixing external data with dma-fence.
> 
> No, Daniel is right. The problem is that on architectures other than x86 
> barriers are per memory address (or rather cache line in practice).
> 
> So you need to be really careful that you see a fully consistent state, 
> and not just one variable updated while the others are still in the old 
> state.

I don't see it yet - what are the variables we are talking about here? 
Ordering relating to the iterator code in here or something truly external?

The iterator can obviously race and "return" an already signaled fence 
(one that transitioned from unsignaled to signaled between the iterator 
checking it and deciding to walk it). But I don't think you can, or plan 
to, fix that.

> 
> But this was buggy before as well. I'm just pulling the existing test 
> into the new iterator.

Okay.

> 
>>
>> Hm also, there is that annoying bit where, by using 
>> dma_fence_is_signaled, any code becomes a fence signaling critical 
>> path, which I never bought into. There should be a way to test the 
>> signaled status without actually doing the signaling. Or am I 
>> misunderstanding something so badly that it really, really has to be 
>> like this?
> 
> You are mixing things up. Testing is unproblematic, signaling is the 
> problematic one.

I was pointing out that dma_fence_is_signaled() can call 
dma_fence_signal(), and that has in the past, AFAIR at least, caused 
some fence annotation splats which IMO are questionable.

Regards,

Tvrtko
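
To make the ordering concern in the example above concrete, a
standalone illustration (explicitly not dma-fence code, just the
generic publish/observe pattern the discussion is about):

	static int x;		/* payload written by thread a */
	static int signaled;	/* stands in for the signaled flag */

	/* thread a */
	x = 1;
	smp_store_release(&signaled, 1);	/* orders the store to x before the flag */

	/* thread b */
	if (smp_load_acquire(&signaled))	/* pairs with the release above */
		printk("%d\n", x);		/* can only print 1 */

Without the release/acquire pairing (or equivalent barriers) thread b
may observe the flag set but still read x == 0 on weakly ordered
architectures, which is the situation described above.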

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl
  2021-09-20 10:13     ` Christian König
@ 2021-09-20 10:33       ` Tvrtko Ursulin
  2021-09-21  9:41         ` Christian König
  0 siblings, 1 reply; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-20 10:33 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media, intel-gfx
  Cc: daniel


On 20/09/2021 11:13, Christian König wrote:
> Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>>
>> On 17/09/2021 13:35, Christian König wrote:
>>> This makes the function much simpler since the complex
>>> retry logic is now handled else where.
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 ++++++++----------------
>>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>> index 6234e17259c1..b1cb7ba688da 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>>> *data,
>>>   {
>>>       struct drm_i915_gem_busy *args = data;
>>>       struct drm_i915_gem_object *obj;
>>> -    struct dma_resv_list *list;
>>> -    unsigned int seq;
>>> +    struct dma_resv_iter cursor;
>>> +    struct dma_fence *fence;
>>>       int err;
>>>         err = -ENOENT;
>>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>>> void *data,
>>>        * to report the overall busyness. This is what the wait-ioctl 
>>> does.
>>>        *
>>>        */
>>> -retry:
>>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>>> -
>>> -    /* Translate the exclusive fence to the READ *and* WRITE engine */
>>> -    args->busy = 
>>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>>> -
>>> -    /* Translate shared fences to READ set of engines */
>>> -    list = dma_resv_shared_list(obj->base.resv);
>>> -    if (list) {
>>> -        unsigned int shared_count = list->shared_count, i;
>>> -
>>> -        for (i = 0; i < shared_count; ++i) {
>>> -            struct dma_fence *fence =
>>> -                rcu_dereference(list->shared[i]);
>>> -
>>> +    args->busy = false;
>>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>>
>> You did not agree with my suggestion to reset args->busy on restart 
>> and so preserve current behaviour?
> 
> No, I want to keep the restart behavior internally to the dma_resv 
> object and as far as I can see it should not make a difference here.

To be clear, on paper the difference between the old and new 
implementation shows up if the restart happens while processing the 
shared fences.

The old implementation unconditionally goes back to "args->busy = 
busy_check_writer(dma_resv_excl_fence(obj->base.resv));" and so 
overwrites the set of flags returned to userspace.

The new implementation can merge new read flags into the old set of 
flags and so return a composition of past and current fences.

Maybe it does not matter hugely in this case, it depends on whether 
userspace typically just restarts until the flags are clear. But I am 
not sure.

On the higher level - what do you mean by wanting to keep the restart 
behaviour internal? Not providing iterator users a means of detecting 
it? I think it has to be provided.

Regards,

Tvrtko

> Regards,
> Christian.
> 
>>
>> Regards,
>>
>> Tvrtko
>>
>>> +        if (dma_resv_iter_is_exclusive(&cursor))
>>> +            /* Translate the exclusive fence to the READ *and* WRITE 
>>> engine */
>>> +            args->busy = busy_check_writer(fence);
>>> +        else
>>> +            /* Translate shared fences to READ set of engines */
>>>               args->busy |= busy_check_reader(fence);
>>> -        }
>>>       }
>>> -
>>> -    if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
>>> -        goto retry;
>>> +    dma_resv_iter_end(&cursor);
>>>         err = 0;
>>>   out:
>>>
> 
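
For context, the userspace pattern referred to ("restarts until flags
are clear") would be roughly the following; fd and handle are
placeholders, and whether real userspace actually polls like this is
exactly the open question above:

	struct drm_i915_gem_busy busy = { .handle = handle };

	do {
		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			break;	/* ioctl failed, give up */
	} while (busy.busy);	/* non-zero while engines still use the object */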

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl
  2021-09-20 10:33       ` Tvrtko Ursulin
@ 2021-09-21  9:41         ` Christian König
  2021-09-21 13:10           ` Tvrtko Ursulin
  0 siblings, 1 reply; 115+ messages in thread
From: Christian König @ 2021-09-21  9:41 UTC (permalink / raw)
  To: Tvrtko Ursulin, linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Am 20.09.21 um 12:33 schrieb Tvrtko Ursulin:
> On 20/09/2021 11:13, Christian König wrote:
>> Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>>>
>>> On 17/09/2021 13:35, Christian König wrote:
>>>> This makes the function much simpler since the complex
>>>> retry logic is now handled else where.
>>>>
>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 
>>>> ++++++++----------------
>>>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>>>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>> index 6234e17259c1..b1cb7ba688da 100644
>>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>>>> *data,
>>>>   {
>>>>       struct drm_i915_gem_busy *args = data;
>>>>       struct drm_i915_gem_object *obj;
>>>> -    struct dma_resv_list *list;
>>>> -    unsigned int seq;
>>>> +    struct dma_resv_iter cursor;
>>>> +    struct dma_fence *fence;
>>>>       int err;
>>>>         err = -ENOENT;
>>>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>>>> void *data,
>>>>        * to report the overall busyness. This is what the 
>>>> wait-ioctl does.
>>>>        *
>>>>        */
>>>> -retry:
>>>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>>>> -
>>>> -    /* Translate the exclusive fence to the READ *and* WRITE 
>>>> engine */
>>>> -    args->busy = 
>>>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>>>> -
>>>> -    /* Translate shared fences to READ set of engines */
>>>> -    list = dma_resv_shared_list(obj->base.resv);
>>>> -    if (list) {
>>>> -        unsigned int shared_count = list->shared_count, i;
>>>> -
>>>> -        for (i = 0; i < shared_count; ++i) {
>>>> -            struct dma_fence *fence =
>>>> -                rcu_dereference(list->shared[i]);
>>>> -
>>>> +    args->busy = false;
>>>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>>>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>>>
>>> You did not agree with my suggestion to reset args->busy on restart 
>>> and so preserve current behaviour?
>>
>> No, I want to keep the restart behavior internally to the dma_resv 
>> object and as far as I can see it should not make a difference here.
>
> To be clear, on paper difference between old and new implementation is 
> if the restart happens while processing the shared fences.
>
> Old implementation unconditionally goes to "args->busy =
> >>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));" and so 
> overwrites the set of flags returned to userspace.
>
> New implementation can merge new read flags to the old set of flags 
> and so return a composition of past and current fences.
>
> Maybe it does not matter hugely in this case, depends if userspace 
> typically just restarts until flags are clear. But I am not sure.
>
> On the higher level - what do you mean with wanting to keep the 
> restart behaviour internal? Not providing iterators users means of 
> detecting it? I think it has to be provided.

Ok, I will adjust that for now to get the patch set upstream. But in 
general, when somebody outside of the dma_resv code base depends on the 
restart behavior, that's a bug in the design of that code.

The callers should only care about which unsignaled fences are inside 
the dma_resv container, and it shouldn't matter if those fences are 
presented once or multiple times because of a restart.

When this makes a difference we have a bug in the handling and should 
probably consider taking the dma_resv.lock instead.

Regards,
Christian.

>
> Regards,
>
> Tvrtko
>
>> Regards,
>> Christian.
>>
>>>
>>> Regards,
>>>
>>> Tvrtko
>>>
>>>> +        if (dma_resv_iter_is_exclusive(&cursor))
>>>> +            /* Translate the exclusive fence to the READ *and* 
>>>> WRITE engine */
>>>> +            args->busy = busy_check_writer(fence);
>>>> +        else
>>>> +            /* Translate shared fences to READ set of engines */
>>>>               args->busy |= busy_check_reader(fence);
>>>> -        }
>>>>       }
>>>> -
>>>> -    if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
>>>> -        goto retry;
>>>> +    dma_resv_iter_end(&cursor);
>>>>         err = 0;
>>>>   out:
>>>>
>>
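
For completeness, the alternative mentioned above (taking the
reservation lock so the walk cannot restart at all) would look roughly
like this with the locked iterator from this series; a sketch only,
ignoring whether taking dma_resv.lock is actually acceptable in this
ioctl:

	err = dma_resv_lock_interruptible(obj->base.resv, NULL);
	if (err)
		goto out;

	args->busy = 0;
	dma_resv_for_each_fence(&cursor, obj->base.resv, true, fence) {
		if (dma_resv_iter_is_exclusive(&cursor))
			args->busy = busy_check_writer(fence);
		else
			args->busy |= busy_check_reader(fence);
	}
	dma_resv_unlock(obj->base.resv);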


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl
  2021-09-21  9:41         ` Christian König
@ 2021-09-21 13:10           ` Tvrtko Ursulin
  0 siblings, 0 replies; 115+ messages in thread
From: Tvrtko Ursulin @ 2021-09-21 13:10 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media, intel-gfx
  Cc: daniel


On 21/09/2021 10:41, Christian König wrote:
> Am 20.09.21 um 12:33 schrieb Tvrtko Ursulin:
>> On 20/09/2021 11:13, Christian König wrote:
>>> Am 20.09.21 um 10:45 schrieb Tvrtko Ursulin:
>>>>
>>>> On 17/09/2021 13:35, Christian König wrote:
>>>>> This makes the function much simpler since the complex
>>>>> retry logic is now handled else where.
>>>>>
>>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>>> ---
>>>>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c | 32 
>>>>> ++++++++----------------
>>>>>   1 file changed, 11 insertions(+), 21 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c 
>>>>> b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>>> index 6234e17259c1..b1cb7ba688da 100644
>>>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>>>>> @@ -82,8 +82,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void 
>>>>> *data,
>>>>>   {
>>>>>       struct drm_i915_gem_busy *args = data;
>>>>>       struct drm_i915_gem_object *obj;
>>>>> -    struct dma_resv_list *list;
>>>>> -    unsigned int seq;
>>>>> +    struct dma_resv_iter cursor;
>>>>> +    struct dma_fence *fence;
>>>>>       int err;
>>>>>         err = -ENOENT;
>>>>> @@ -109,27 +109,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, 
>>>>> void *data,
>>>>>        * to report the overall busyness. This is what the 
>>>>> wait-ioctl does.
>>>>>        *
>>>>>        */
>>>>> -retry:
>>>>> -    seq = raw_read_seqcount(&obj->base.resv->seq);
>>>>> -
>>>>> -    /* Translate the exclusive fence to the READ *and* WRITE 
>>>>> engine */
>>>>> -    args->busy = 
>>>>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));
>>>>> -
>>>>> -    /* Translate shared fences to READ set of engines */
>>>>> -    list = dma_resv_shared_list(obj->base.resv);
>>>>> -    if (list) {
>>>>> -        unsigned int shared_count = list->shared_count, i;
>>>>> -
>>>>> -        for (i = 0; i < shared_count; ++i) {
>>>>> -            struct dma_fence *fence =
>>>>> -                rcu_dereference(list->shared[i]);
>>>>> -
>>>>> +    args->busy = false;
>>>>> +    dma_resv_iter_begin(&cursor, obj->base.resv, true);
>>>>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>>>>
>>>> You did not agree with my suggestion to reset args->busy on restart 
>>>> and so preserve current behaviour?
>>>
>>> No, I want to keep the restart behavior internally to the dma_resv 
>>> object and as far as I can see it should not make a difference here.
>>
>> To be clear, on paper difference between old and new implementation is 
>> if the restart happens while processing the shared fences.
>>
>> Old implementation unconditionally goes to "args->busy =
>> >>> busy_check_writer(dma_resv_excl_fence(obj->base.resv));" and so 
>> overwrites the set of flags returned to userspace.
>>
>> New implementation can merge new read flags to the old set of flags 
>> and so return a composition of past and current fences.
>>
>> Maybe it does not matter hugely in this case, depends if userspace 
>> typically just restarts until flags are clear. But I am not sure.
>>
>> On the higher level - what do you mean with wanting to keep the 
>> restart behaviour internal? Not providing iterators users means of 
>> detecting it? I think it has to be provided.
> 
> Ok I will adjust that for now to get the patch set upstream. But in 
> general when somebody outside of the dma_resv code base depends on the 
> restart behavior then that's a bug inside the design of that code.

Thanks, no change in behaviour makes for an easy r-b. :)

> The callers should only care about what unsignaled fences are inside the 
> dma_resv container and it shouldn't matter if those fences are presented 
> once or multiple times because of a reset..
> 
> When this makes a difference we have a bug in the handling and should 
> probably consider taking the dma_resv.lock instead.

I agree, which is why I was mentioning earlier how it would be good to 
completely sort locked from unlocked iterators and avoid situations 
where the unlocked one is called from a path where the object is locked.

Unfortunately for the display code path I cannot easily help with the 
audit of call paths. And I think there are at least two patches in your 
series which need KMS expertise.

Regards,

Tvrtko

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 16/26] drm/i915: use new iterator in i915_gem_object_wait_reservation v2
  2021-09-20 10:00   ` Tvrtko Ursulin
@ 2021-09-21 17:35     ` Christian König
  0 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-21 17:35 UTC (permalink / raw)
  To: Tvrtko Ursulin, linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel

Am 20.09.21 um 12:00 schrieb Tvrtko Ursulin:
>
> On 17/09/2021 13:35, Christian König wrote:
>> Simplifying the code a bit.
>>
>> v2: add missing rcu read unlock.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/i915/gem/i915_gem_wait.c | 57 ++++++------------------
>>   1 file changed, 14 insertions(+), 43 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c 
>> b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
>> index f909aaa09d9c..e416cf528635 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
>> @@ -37,55 +37,26 @@ i915_gem_object_wait_reservation(struct dma_resv 
>> *resv,
>>                    unsigned int flags,
>>                    long timeout)
>>   {
>> -    struct dma_fence *excl;
>> -    bool prune_fences = false;
>> -
>> -    if (flags & I915_WAIT_ALL) {
>> -        struct dma_fence **shared;
>> -        unsigned int count, i;
>> -        int ret;
>> -
>> -        ret = dma_resv_get_fences(resv, &excl, &count, &shared);
>> -        if (ret)
>> -            return ret;
>> -
>> -        for (i = 0; i < count; i++) {
>> -            timeout = i915_gem_object_wait_fence(shared[i],
>> -                                 flags, timeout);
>> -            if (timeout < 0)
>> -                break;
>> -
>> -            dma_fence_put(shared[i]);
>> -        }
>> -
>> -        for (; i < count; i++)
>> -            dma_fence_put(shared[i]);
>> -        kfree(shared);
>> -
>> -        /*
>> -         * If both shared fences and an exclusive fence exist,
>> -         * then by construction the shared fences must be later
>> -         * than the exclusive fence. If we successfully wait for
>> -         * all the shared fences, we know that the exclusive fence
>> -         * must all be signaled. If all the shared fences are
>> -         * signaled, we can prune the array and recover the
>> -         * floating references on the fences/requests.
>> -         */
>> -        prune_fences = count && timeout >= 0;
>> -    } else {
>> -        excl = dma_resv_get_excl_unlocked(resv);
>> +    struct dma_resv_iter cursor;
>> +    struct dma_fence *fence;
>> +
>> +    rcu_read_lock();
>> +    dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
>> +    dma_resv_for_each_fence_unlocked(&cursor, fence) {
>> +        rcu_read_unlock();
>> +        timeout = i915_gem_object_wait_fence(fence, flags, timeout);
>
> Converting this one could be problematic. It's the wait ioctl which 
> used to grab an atomic snapshot and wait for that rendering to 
> complete. With this change I think it has the potential to run forever 
> if it keeps catching new activity against the same object.
>
> I am not sure whether or not the difference is relevant for how 
> userspace uses it, but I think it needs discussion.

It was years ago, but IIRC we had the same discussion for the 
dma_resv_wait_timeout() function, and the result was that this is not a 
valid use case - waiting forever if you keep seeing new work over and 
over again is a valid result.

Let me double check the history of this code here as well.

> Hm actually there are internal callers as well, and at least some of 
> those have the object locked. Would a wider refactoring to separate 
> those into buckets (locked vs unlocked) make sense?

Yes definitely.

Regards,
Christian.

>
> Regards,
>
> Tvrtko
>
>
>> +        rcu_read_lock();
>> +        if (timeout < 0)
>> +            break;
>>       }
>> -
>> -    if (excl && timeout >= 0)
>> -        timeout = i915_gem_object_wait_fence(excl, flags, timeout);
>> -
>> -    dma_fence_put(excl);
>> +    dma_resv_iter_end(&cursor);
>> +    rcu_read_unlock();
>>         /*
>>        * Opportunistically prune the fences iff we know they have 
>> *all* been
>>        * signaled.
>>        */
>> -    if (prune_fences)
>> +    if (timeout > 0)
>>           dma_resv_prune(resv);
>>         return timeout;
>>
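
For comparison, the existing helper mentioned above already provides
the same "wait for everything currently in the object" semantics;
something along these lines, as a sketch that ignores the
i915-specific flag handling and the opportunistic pruning:

	long ret;

	/* second argument mirrors I915_WAIT_ALL, third one the interruptible mode */
	ret = dma_resv_wait_timeout(resv, flags & I915_WAIT_ALL, true, timeout);
	if (ret <= 0)
		return ret;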


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
  2021-09-17 12:34   ` [Intel-gfx] " Christian König
  (?)
@ 2021-11-15 14:03     ` Sascha Hauer
  -1 siblings, 0 replies; 115+ messages in thread
From: Sascha Hauer @ 2021-11-15 14:03 UTC (permalink / raw)
  To: Christian König
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel

Hi,

On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: use dma_resv_for_each_fence
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>  1 file changed, 6 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 042c16b5d54a..5bc5f775abe1 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>  					    struct drm_gem_object *obj,
>  					    bool write)
>  {
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>  	int ret;
> -	struct dma_fence **fences;
> -	unsigned int i, fence_count;
> -
> -	if (!write) {
> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> -
> -		return drm_sched_job_add_dependency(job, fence);
> -	}
> -
> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> -	if (ret || !fence_count)
> -		return ret;
>  
> -	for (i = 0; i < fence_count; i++) {
> -		ret = drm_sched_job_add_dependency(job, fences[i]);
> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> +		ret = drm_sched_job_add_dependency(job, fence);
>  		if (ret)
> -			break;
> +			return ret;
>  	}
> -
> -	for (; i < fence_count; i++)
> -		dma_fence_put(fences[i]);
> -	kfree(fences);
> -	return ret;
> +	return 0;
>  }
>  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>  

This patch lets the panfrost driver explode on v5.16-rc1 with the
following. I didn't bisect it, but it goes away when I revert this
patch. I only started weston, nothing more.

Any idea what goes wrong here?

Sascha

[   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
[   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   12.514056] Modules linked in:
[   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
[   12.514621] ------------[ cut here ]------------
[   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.515049] pc : dma_fence_release+0xac/0xe8
[   12.515056] lr : dma_fence_release+0xac/0xe8
[   12.515061] sp : ffff8000123ebb20
[   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
[   12.515518] refcount_t: addition on 0; use-after-free.
[   12.516015]  x27: 0000000000000000
[   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
[   12.516992] x26: 0000000000000001
[   12.517366] Modules linked in:
[   12.517654]  x25: ffff000004b051c0
[   12.518108]
[   12.518555]  x24: 0000000000000000
[   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
[   12.519576]
[   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.520133] x23: 0000000000000000
[   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.520559]  x22: ffff800010d41b78
[   12.520856] pc : refcount_warn_saturate+0x98/0x140
[   12.521625]  x21: ffff000004b05050
[   12.521755] lr : refcount_warn_saturate+0x98/0x140
[   12.522299]
[   12.522588] sp : ffff8000122b3bc0
[   12.523192] x20: ffff000004b05040
[   12.523489] x29: ffff8000122b3bc0
[   12.523906]  x19: ffff000004b05078
[   12.524203]  x28: 0000000000000000
[   12.524620]  x18: 0000000000000010
[   12.524751]  x27: ffff000003791880
[   12.525040]
[   12.525329]
[   12.525618] x17: 0000000000000000
[   12.525915] x26: ffff8000122b3d30
[   12.526212]  x16: 0000000000000000
[   12.526509]  x25: 0000000000000001
[   12.526806]  x15: ffff0000050e2dc0
[   12.526937]  x24: ffff000003791a10
[   12.527067]
[   12.527357]
[   12.527646] x14: 00000000000001b5
[   12.527942] x23: 0000000000000000
[   12.528240]  x13: ffff0000050e2dc0
[   12.528536]  x22: ffff000003505280
[   12.528833]  x12: 00000000ffffffea
[   12.528964]  x21: ffff000003a2a220
[   12.529095]
[   12.529384]
[   12.529673] x11: ffff800011761ec8
[   12.529970] x20: ffff000004b05078
[   12.530267]  x10: ffff8000115e1e88
[   12.530564]  x19: ffff000004b05000
[   12.530861]  x9 : ffff8000115e1ee0
[   12.530992]  x18: 0000000000000010
[   12.531123]
[   12.531412]
[   12.531701] x8 : 000000000017ffe8
[   12.531998] x17: 0000000000500600
[   12.532294]  x7 : c0000000fffeffff
[   12.532591]  x16: 0000000000000000
[   12.532888]  x6 : 0000000000000001
[   12.533019]  x15: ffff000003505700
[   12.533150]
[   12.533439]
[   12.533728] x5 : ffff00007fb8c9a0
[   12.534025] x14: 0000000000000000
[   12.534322]  x4 : 0000000000000000
[   12.534619]  x13: 292d2d3d45505954
[   12.534914]  x3 : 0000000000000001
[   12.535045]  x12: 4220534253532d20
[   12.535176]
[   12.535465]
[   12.535754] x2 : ffff00007fb8c9a8
[   12.536051] x11: 5449442d204f4354
[   12.536347]  x1 : ea6e0584a53f2200
[   12.536643]  x10: 2d204f41552d204e
[   12.536941]  x0 : 0000000000000000
[   12.537073]  x9 : 4e41502b20666961
[   12.537203]
[   12.537492]
[   12.537782] Call trace:
[   12.538078] x8 : 642076635a6e2820
[   12.538377]  dma_fence_release+0xac/0xe8
[   12.538671]  x7 : 205d343430353135
[   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.539098]  x6 : 352e32312020205b
[   12.539230]  panfrost_job_push+0x1bc/0x200
[   12.539442]
[   12.539732]  panfrost_ioctl_submit+0x358/0x438
[   12.540073] x5 : ffff00007fb539a0
[   12.540370]  drm_ioctl_kernel+0xb8/0x170
[   12.540771]  x4 : 0000000000000000
[   12.541069]  drm_ioctl+0x214/0x450
[   12.541424]  x3 : 0000000000000001
[   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
[   12.541943]
[   12.542233]  invoke_syscall+0x40/0xf8
[   12.542573] x2 : ffff00007fb539a8
[   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.543167]  x1 : 0ac4fb7a0680bb00
[   12.543465]  do_el0_svc+0x20/0x80
[   12.543805]  x0 : 0000000000000000
[   12.543936]  el0_svc+0x1c/0x50
[   12.544255]
[   12.544544]  el0t_64_sync_handler+0xa8/0xb0
[   12.544955] Call trace:
[   12.545250]  el0t_64_sync+0x16c/0x170
[   12.545540]  refcount_warn_saturate+0x98/0x140
[   12.545837] ---[ end trace ba74542f51246288 ]---
[   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
[   12.546285] ------------[ cut here ]------------
[   12.546598]  drm_sched_job_done_cb+0x10/0x18
[   12.546813] refcount_t: underflow; use-after-free.
[   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
[   12.547920]  dma_fence_signal_locked+0x20/0x30
[   12.548336] Modules linked in:
[   12.548737]  panfrost_job_handle_done+0x34/0x50
[   12.549110]
[   12.549525]  panfrost_job_handle_irqs+0x358/0x570
[   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
[   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.551373]  irq_thread_fn+0x28/0x98
[   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.551899]  irq_thread+0x12c/0x230
[   12.552309] pc : refcount_warn_saturate+0xec/0x140
[   12.553131]  kthread+0x174/0x180
[   12.553578] lr : refcount_warn_saturate+0xec/0x140
[   12.554121]  ret_from_fork+0x10/0x20
[   12.554432] sp : ffff8000123ebaa0
[   12.555038] ---[ end trace ba74542f51246289 ]---
[   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
[   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
[   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
[   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
[   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
[   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
[   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.563733] Call trace:
[   12.563950]  refcount_warn_saturate+0xec/0x140
[   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
[   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   12.565216]  dma_fence_release+0xd4/0xe8
[   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.565970]  panfrost_job_push+0x1bc/0x200
[   12.566333]  panfrost_ioctl_submit+0x358/0x438
[   12.566726]  drm_ioctl_kernel+0xb8/0x170
[   12.567072]  drm_ioctl+0x214/0x450
[   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
[   12.567721]  invoke_syscall+0x40/0xf8
[   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.568463]  do_el0_svc+0x20/0x80
[   12.568755]  el0_svc+0x1c/0x50
[   12.569030]  el0t_64_sync_handler+0xa8/0xb0
[   12.569399]  el0t_64_sync+0x16c/0x170
[   12.569724] ---[ end trace ba74542f5124628a ]---
[   12.595086] ------------[ cut here ]------------
[   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
[   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   12.596934] Modules linked in:
[   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.599202] pc : dma_fence_release+0xac/0xe8
[   12.599584] lr : dma_fence_release+0xac/0xe8
[   12.599960] sp : ffff8000123ebb20
[   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
[   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.606542] Call trace:
[   12.606760]  dma_fence_release+0xac/0xe8
[   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.607517]  panfrost_job_push+0x1bc/0x200
[   12.607882]  panfrost_ioctl_submit+0x358/0x438
[   12.608274]  drm_ioctl_kernel+0xb8/0x170
[   12.608622]  drm_ioctl+0x214/0x450
[   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
[   12.609269]  invoke_syscall+0x40/0xf8
[   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.610011]  do_el0_svc+0x20/0x80
[   12.610304]  el0_svc+0x1c/0x50
[   12.610577]  el0t_64_sync_handler+0xa8/0xb0
[   12.610946]  el0t_64_sync+0x16c/0x170
[   12.611276] ---[ end trace ba74542f5124628b ]---
[   12.612869] ------------[ cut here ]------------
[   12.613288] refcount_t: saturated; leaking memory.
[   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
[   12.614476] Modules linked in:
[   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.616773] pc : refcount_warn_saturate+0xc0/0x140
[   12.617200] lr : refcount_warn_saturate+0xc0/0x140
[   12.617622] sp : ffff8000123eba60
[   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
[   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
[   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
[   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
[   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
[   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.624256] Call trace:
[   12.624474]  refcount_warn_saturate+0xc0/0x140
[   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
[   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
[   12.625811]  drm_atomic_helper_commit+0x80/0x360
[   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
[   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
[   12.627050]  drm_ioctl_kernel+0xb8/0x170
[   12.627397]  drm_ioctl+0x214/0x450
[   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
[   12.628046]  invoke_syscall+0x40/0xf8
[   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.628787]  do_el0_svc+0x20/0x80
[   12.629079]  el0_svc+0x1c/0x50
[   12.629354]  el0t_64_sync_handler+0xa8/0xb0
[   12.629723]  el0t_64_sync+0x16c/0x170
[   12.630048] ---[ end trace ba74542f5124628c ]---
[   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
[   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
[   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
[   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
[   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
[   13.296693] ------------[ cut here ]------------
[   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
[   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.298560] Modules linked in:
[   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.300830] pc : dma_fence_release+0xac/0xe8
[   13.301208] lr : dma_fence_release+0xac/0xe8
[   13.301585] sp : ffff8000123ebb20
[   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.308149] Call trace:
[   13.308367]  dma_fence_release+0xac/0xe8
[   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.309119]  panfrost_job_push+0x1bc/0x200
[   13.309483]  panfrost_ioctl_submit+0x358/0x438
[   13.309875]  drm_ioctl_kernel+0xb8/0x170
[   13.310221]  drm_ioctl+0x214/0x450
[   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
[   13.310868]  invoke_syscall+0x40/0xf8
[   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.311609]  do_el0_svc+0x20/0x80
[   13.311903]  el0_svc+0x1c/0x50
[   13.312177]  el0t_64_sync_handler+0xa8/0xb0
[   13.312545]  el0t_64_sync+0x16c/0x170
[   13.312869] ---[ end trace ba74542f5124628d ]---
[   13.340454] ------------[ cut here ]------------
[   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
[   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.342318] Modules linked in:
[   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.344584] pc : dma_fence_release+0xac/0xe8
[   13.344961] lr : dma_fence_release+0xac/0xe8
[   13.345338] sp : ffff8000123ebb20
[   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
[   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.351900] Call trace:
[   13.352116]  dma_fence_release+0xac/0xe8
[   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.352869]  panfrost_job_push+0x1bc/0x200
[   13.353232]  panfrost_ioctl_submit+0x358/0x438
[   13.353624]  drm_ioctl_kernel+0xb8/0x170
[   13.353971]  drm_ioctl+0x214/0x450
[   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
[   13.354616]  invoke_syscall+0x40/0xf8
[   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.355356]  do_el0_svc+0x20/0x80
[   13.355650]  el0_svc+0x1c/0x50
[   13.355925]  el0t_64_sync_handler+0xa8/0xb0
[   13.356293]  el0t_64_sync+0x16c/0x170
[   13.356618] ---[ end trace ba74542f5124628e ]---
[   13.379841] ------------[ cut here ]------------
[   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
[   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.381680] Modules linked in:
[   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.383937] pc : dma_fence_release+0xac/0xe8
[   13.384314] lr : dma_fence_release+0xac/0xe8
[   13.384690] sp : ffff8000123ebb20
[   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.391247] Call trace:
[   13.391464]  dma_fence_release+0xac/0xe8
[   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.392217]  panfrost_job_push+0x1bc/0x200
[   13.392581]  panfrost_ioctl_submit+0x358/0x438
[   13.392972]  drm_ioctl_kernel+0xb8/0x170
[   13.393319]  drm_ioctl+0x214/0x450
[   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
[   13.393967]  invoke_syscall+0x40/0xf8
[   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.394708]  do_el0_svc+0x20/0x80
[   13.395002]  el0_svc+0x1c/0x50
[   13.395275]  el0t_64_sync_handler+0xa8/0xb0
[   13.395643]  el0t_64_sync+0x16c/0x170
[   13.395968] ---[ end trace ba74542f5124628f ]---
[   13.398130] ------------[ cut here ]------------
[   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
[   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.400011] Modules linked in:
[   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.402269] pc : dma_fence_release+0xac/0xe8
[   13.402646] lr : dma_fence_release+0xac/0xe8
[   13.403024] sp : ffff8000123ebb20
[   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
[   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
[   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
[   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
[   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
[   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
[   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
[   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.409583] Call trace:
[   13.409800]  dma_fence_release+0xac/0xe8
[   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.410553]  panfrost_job_push+0x1bc/0x200
[   13.410917]  panfrost_ioctl_submit+0x358/0x438
[   13.411309]  drm_ioctl_kernel+0xb8/0x170
[   13.411656]  drm_ioctl+0x214/0x450
[   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
[   13.412303]  invoke_syscall+0x40/0xf8
[   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.413042]  do_el0_svc+0x20/0x80
[   13.413335]  el0_svc+0x1c/0x50
[   13.413607]  el0t_64_sync_handler+0xa8/0xb0
[   13.413976]  el0t_64_sync+0x16c/0x170
[   13.414298] ---[ end trace ba74542f51246290 ]---
[   13.430129] ------------[ cut here ]------------
[   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
[   13.430557] refcount_t: saturated; leaking memory.
[   13.431321] Mem abort info:
[   13.431324]   ESR = 0x96000044
[   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
[   13.431330]   SET = 0, FnV = 0
[   13.431333]   EA = 0, S1PTW = 0
[   13.431335]   FSC = 0x04: level 0 translation fault
[   13.431337] Data abort info:
[   13.431339]   ISV = 0, ISS = 0x00000044
[   13.431340]   CM = 0, WnR = 1
[   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
[   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
[   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
[   13.431359] Modules linked in:
[   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
[   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
[   13.432059] lr : dma_fence_signal+0x30/0x60
[   13.432327] Modules linked in:
[   13.432789] sp : ffff8000122b3b50
[   13.433057]
[   13.433331] x29: ffff8000122b3b50
[   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.434008]  x28: 0000000000000000
[   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.434601]  x27: ffff000003791880
[   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.435751]
[   13.435753] x26: ffff8000122b3d30
[   13.436237] pc : refcount_warn_saturate+0x6c/0x140
[   13.436504]  x25: 0000000000000001
[   13.437393] lr : refcount_warn_saturate+0x6c/0x140
[   13.437938]  x24: ffff000003791a10
[   13.438542] sp : ffff8000123ebb40
[   13.439042]
[   13.439767] x29: ffff8000123ebb40
[   13.440130] x23: 0000000000000000
[   13.440398]  x28: ffff8000123ebd58
[   13.440687]  x22: ffff000003505280
[   13.440819]  x27: 0000000000000000
[   13.441108]  x21: ffff8000122b3b88
[   13.441931]
[   13.442228]
[   13.442773] x26: 0000000000000001
[   13.443070] x20: ffff000004b051c0
[   13.443674]  x25: ffff000004b051c0
[   13.443806]  x19: ffff000004b051c0
[   13.444095]  x24: 0000000000000000
[   13.444513]  x18: 0000000000000000
[   13.444811]
[   13.445227]
[   13.445524] x23: 0000000000000000
[   13.445814] x17: 3837783028203032
[   13.445945]  x22: ffff000004b051c0
[   13.446236]  x16: 3139323835323120
[   13.446525]  x21: ffff000004d73100
[   13.446822]  x15: 00000205aa24947a
[   13.447120]
[   13.447417]
[   13.447715] x20: ffff000004b05400
[   13.447846] x14: 0000000000000326
[   13.447977]  x19: 00000000ffffffff
[   13.448266]  x13: 0000000000000000
[   13.448555]  x18: 0000000000000010
[   13.448851]  x12: 0000000000000000
[   13.449148]
[   13.449446]
[   13.449743] x17: 0000000000000000
[   13.449874] x11: 0000000000000001
[   13.450006]  x16: 0000000000000000
[   13.450296]  x10: ffff8000122b3d48
[   13.450585]  x15: 000060978994e822
[   13.450882]  x9 : 00000000000019e0
[   13.451179]
[   13.451477]
[   13.451774] x14: 00000000000000b6
[   13.451905] x8 : ffff8000122b3d78
[   13.452037]  x13: 00000000000000b6
[   13.452326]  x7 : 0000000000000000
[   13.452614]  x12: 0000000000000000
[   13.452912]  x6 : 000000001fcf847e
[   13.453209]
[   13.453506]
[   13.453803] x11: 0000000000000001
[   13.453934] x5 : 00ffffffffffffff
[   13.454066]  x10: 00000000000009a0
[   13.454356]  x4 : 0015ef3c03fd7c00
[   13.454643]  x9 : ffff8000123eb8c0
[   13.454941]  x3 : 0000000000000018
[   13.455238]
[   13.455536]
[   13.455833] x8 : ffff0000050e3340
[   13.455965] x2 : ffff000004b051f0
[   13.456096]  x7 : ffff00007fb92a80
[   13.456386]  x1 : 000000032053be4d
[   13.456676]  x6 : 0000000000000115
[   13.456973]  x0 : 0000000000000000
[   13.457271]
[   13.457568]
[   13.457866] x5 : 0000000000000000
[   13.457998] Call trace:
[   13.458128]  x4 : ffff00007fb8c9a0
[   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
[   13.458707]  x3 : ffff00007fb8f950
[   13.459005]  dma_fence_signal+0x30/0x60
[   13.459302]
[   13.459600]  drm_sched_fence_finished+0x10/0x18
[   13.459897] x2 : ffff00007fb8c9a0
[   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
[   13.460159]  x1 : ea6e0584a53f2200
[   13.460449]  drm_sched_job_done_cb+0x10/0x18
[   13.460738]  x0 : 0000000000000000
[   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   13.461333]
[   13.461631]  dma_fence_signal_locked+0x20/0x30
[   13.461929] Call trace:
[   13.462060]  panfrost_job_handle_done+0x34/0x50
[   13.462192]  refcount_warn_saturate+0x6c/0x140
[   13.462481]  panfrost_job_handle_irqs+0x358/0x570
[   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
[   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
[   13.463462]  panfrost_job_push+0x1bc/0x200
[   13.463760]  irq_thread_fn+0x28/0x98
[   13.464094]  panfrost_ioctl_submit+0x358/0x438
[   13.464225]  irq_thread+0x12c/0x230
[   13.464620]  drm_ioctl_kernel+0xb8/0x170
[   13.464909]  kthread+0x174/0x180
[   13.465319]  drm_ioctl+0x214/0x450
[   13.465617]  ret_from_fork+0x10/0x20
[   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
[   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
[   13.466756]  invoke_syscall+0x40/0xf8
[   13.466891] ---[ end trace ba74542f51246291 ]---
[   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
[   13.467883]  do_el0_svc+0x20/0x80
[   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
[   13.468678]  el0_svc+0x1c/0x50
[   13.475908]  el0t_64_sync_handler+0xa8/0xb0
[   13.476277]  el0t_64_sync+0x16c/0x170
[   13.476601] ---[ end trace ba74542f51246292 ]---
[   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
[   14.462959] sched: RT throttling activated
[   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
[   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
[   34.476312]  (t=5250 jiffies g=505 q=301)
[   34.476667] Task dump for CPU 3:
[   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[   34.477820] Call trace:
[   34.478035]  dump_backtrace+0x0/0x198
[   34.478365]  show_stack+0x14/0x60
[   34.478659]  sched_show_task+0x148/0x168
[   34.479008]  dump_cpu_task+0x40/0x4c
[   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
[   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
[   34.480072]  update_process_times+0x94/0xd8
[   34.480440]  tick_sched_handle.isra.0+0x30/0x50
[   34.480840]  tick_sched_timer+0x48/0x98
[   34.481178]  __hrtimer_run_queues+0x110/0x1b0
[   34.481562]  hrtimer_interrupt+0xe4/0x238
[   34.481917]  arch_timer_handler_phys+0x28/0x40
[   34.482310]  handle_percpu_devid_irq+0x80/0x130
[   34.482710]  generic_handle_domain_irq+0x38/0x58
[   34.483116]  gic_handle_irq+0x4c/0x110
[   34.483450]  call_on_irq_stack+0x28/0x3c
[   34.483798]  do_interrupt_handler+0x78/0x80
[   34.484166]  el1_interrupt+0x34/0x80
[   34.484484]  el1h_64_irq_handler+0x14/0x20
[   34.484846]  el1h_64_irq+0x74/0x78
[   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
[   34.485568]  _raw_spin_lock+0x5c/0x68
[   34.485895]  panfrost_job_run+0x24c/0x3f8
[   34.486250]  drm_sched_main+0x130/0x390
[   34.486591]  kthread+0x174/0x180
[   34.486878]  ret_from_fork+0x10/0x20
[   35.810989] vcc3v3_lcd1_n: disabling
[   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
[   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
[   97.488326]  (t=21003 jiffies g=505 q=379)
[   97.488687] Task dump for CPU 3:
[   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[   97.489842] Call trace:
[   97.490056]  dump_backtrace+0x0/0x198
[   97.490388]  show_stack+0x14/0x60
[   97.490682]  sched_show_task+0x148/0x168
[   97.491030]  dump_cpu_task+0x40/0x4c
[   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
[   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
[   97.492095]  update_process_times+0x94/0xd8
[   97.492463]  tick_sched_handle.isra.0+0x30/0x50
[   97.492862]  tick_sched_timer+0x48/0x98
[   97.493200]  __hrtimer_run_queues+0x110/0x1b0
[   97.493582]  hrtimer_interrupt+0xe4/0x238
[   97.493937]  arch_timer_handler_phys+0x28/0x40
[   97.494330]  handle_percpu_devid_irq+0x80/0x130
[   97.494730]  generic_handle_domain_irq+0x38/0x58
[   97.495136]  gic_handle_irq+0x4c/0x110
[   97.495473]  call_on_irq_stack+0x28/0x3c
[   97.495818]  do_interrupt_handler+0x78/0x80
[   97.496186]  el1_interrupt+0x34/0x80
[   97.496503]  el1h_64_irq_handler+0x14/0x20
[   97.496865]  el1h_64_irq+0x74/0x78
[   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
[   97.497588]  _raw_spin_lock+0x5c/0x68
[   97.497912]  panfrost_job_run+0x24c/0x3f8
[   97.498268]  drm_sched_main+0x130/0x390
[   97.498607]  kthread+0x174/0x180
[   97.498895]  ret_from_fork+0x10/0x20
[  140.108141] random: crng init done
[  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
[  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
[  160.500322]  (t=36756 jiffies g=505 q=482)
[  160.500684] Task dump for CPU 3:
[  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  160.501837] Call trace:
[  160.502054]  dump_backtrace+0x0/0x198
[  160.502384]  show_stack+0x14/0x60
[  160.502679]  sched_show_task+0x148/0x168
[  160.503027]  dump_cpu_task+0x40/0x4c
[  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
[  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
[  160.504091]  update_process_times+0x94/0xd8
[  160.504458]  tick_sched_handle.isra.0+0x30/0x50
[  160.504858]  tick_sched_timer+0x48/0x98
[  160.505195]  __hrtimer_run_queues+0x110/0x1b0
[  160.505580]  hrtimer_interrupt+0xe4/0x238
[  160.505934]  arch_timer_handler_phys+0x28/0x40
[  160.506327]  handle_percpu_devid_irq+0x80/0x130
[  160.506727]  generic_handle_domain_irq+0x38/0x58
[  160.507133]  gic_handle_irq+0x4c/0x110
[  160.507467]  call_on_irq_stack+0x28/0x3c
[  160.507813]  do_interrupt_handler+0x78/0x80
[  160.508181]  el1_interrupt+0x34/0x80
[  160.508497]  el1h_64_irq_handler+0x14/0x20
[  160.508858]  el1h_64_irq+0x74/0x78
[  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
[  160.509579]  _raw_spin_lock+0x5c/0x68
[  160.509903]  panfrost_job_run+0x24c/0x3f8
[  160.510259]  drm_sched_main+0x130/0x390
[  160.510599]  kthread+0x174/0x180
[  160.510886]  ret_from_fork+0x10/0x20
[  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
[  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
[  223.512325]  (t=52509 jiffies g=505 q=536)
[  223.512688] Task dump for CPU 3:
[  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  223.513842] Call trace:
[  223.514056]  dump_backtrace+0x0/0x198
[  223.514387]  show_stack+0x14/0x60
[  223.514681]  sched_show_task+0x148/0x168
[  223.515029]  dump_cpu_task+0x40/0x4c
[  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
[  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
[  223.516094]  update_process_times+0x94/0xd8
[  223.516462]  tick_sched_handle.isra.0+0x30/0x50
[  223.516860]  tick_sched_timer+0x48/0x98
[  223.517198]  __hrtimer_run_queues+0x110/0x1b0
[  223.517582]  hrtimer_interrupt+0xe4/0x238
[  223.517935]  arch_timer_handler_phys+0x28/0x40
[  223.518327]  handle_percpu_devid_irq+0x80/0x130
[  223.518727]  generic_handle_domain_irq+0x38/0x58
[  223.519133]  gic_handle_irq+0x4c/0x110
[  223.519466]  call_on_irq_stack+0x28/0x3c
[  223.519812]  do_interrupt_handler+0x78/0x80
[  223.520181]  el1_interrupt+0x34/0x80
[  223.520498]  el1h_64_irq_handler+0x14/0x20
[  223.520860]  el1h_64_irq+0x74/0x78
[  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
[  223.521584]  _raw_spin_lock+0x5c/0x68
[  223.521908]  panfrost_job_run+0x24c/0x3f8
[  223.522264]  drm_sched_main+0x130/0x390
[  223.522605]  kthread+0x174/0x180
[  223.522892]  ret_from_fork+0x10/0x20
[  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
[  286.524325]  (t=68262 jiffies g=505 q=612)
[  286.524687] Task dump for CPU 3:
[  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  286.525840] Call trace:
[  286.526057]  dump_backtrace+0x0/0x198
[  286.526387]  show_stack+0x14/0x60
[  286.526681]  sched_show_task+0x148/0x168
[  286.527029]  dump_cpu_task+0x40/0x4c
[  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
[  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
[  286.528092]  update_process_times+0x94/0xd8
[  286.528459]  tick_sched_handle.isra.0+0x30/0x50
[  286.528859]  tick_sched_timer+0x48/0x98
[  286.529197]  __hrtimer_run_queues+0x110/0x1b0
[  286.529579]  hrtimer_interrupt+0xe4/0x238
[  286.529933]  arch_timer_handler_phys+0x28/0x40
[  286.530326]  handle_percpu_devid_irq+0x80/0x130
[  286.530726]  generic_handle_domain_irq+0x38/0x58
[  286.531132]  gic_handle_irq+0x4c/0x110
[  286.531466]  call_on_irq_stack+0x28/0x3c
[  286.531812]  do_interrupt_handler+0x78/0x80
[  286.532180]  el1_interrupt+0x34/0x80
[  286.532496]  el1h_64_irq_handler+0x14/0x20
[  286.532857]  el1h_64_irq+0x74/0x78
[  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
[  286.533580]  _raw_spin_lock+0x5c/0x68
[  286.533904]  panfrost_job_run+0x24c/0x3f8
[  286.534259]  drm_sched_main+0x130/0x390
[  286.534600]  kthread+0x174/0x180
[  286.534887]  ret_from_fork+0x10/0x20
[  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
[  349.536324]  (t=84015 jiffies g=505 q=716)
[  349.536687] Task dump for CPU 3:
[  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  349.537839] Call trace:
[  349.538055]  dump_backtrace+0x0/0x198
[  349.538387]  show_stack+0x14/0x60
[  349.538681]  sched_show_task+0x148/0x168
[  349.539029]  dump_cpu_task+0x40/0x4c
[  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
[  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
[  349.540094]  update_process_times+0x94/0xd8
[  349.540462]  tick_sched_handle.isra.0+0x30/0x50
[  349.540862]  tick_sched_timer+0x48/0x98
[  349.541201]  __hrtimer_run_queues+0x110/0x1b0
[  349.541585]  hrtimer_interrupt+0xe4/0x238
[  349.541937]  arch_timer_handler_phys+0x28/0x40
[  349.542330]  handle_percpu_devid_irq+0x80/0x130
[  349.542730]  generic_handle_domain_irq+0x38/0x58
[  349.543136]  gic_handle_irq+0x4c/0x110
[  349.543469]  call_on_irq_stack+0x28/0x3c
[  349.543815]  do_interrupt_handler+0x78/0x80
[  349.544183]  el1_interrupt+0x34/0x80
[  349.544500]  el1h_64_irq_handler+0x14/0x20
[  349.544862]  el1h_64_irq+0x74/0x78
[  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
[  349.545586]  _raw_spin_lock+0x5c/0x68
[  349.545910]  panfrost_job_run+0x24c/0x3f8
[  349.546265]  drm_sched_main+0x130/0x390
[  349.546604]  kthread+0x174/0x180
[  349.546891]  ret_from_fork+0x10/0x20
[  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
[  412.548325]  (t=99768 jiffies g=505 q=784)
[  412.548686] Task dump for CPU 3:
[  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  412.549841] Call trace:
[  412.550058]  dump_backtrace+0x0/0x198
[  412.550389]  show_stack+0x14/0x60
[  412.550684]  sched_show_task+0x148/0x168
[  412.551031]  dump_cpu_task+0x40/0x4c
[  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
[  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
[  412.552095]  update_process_times+0x94/0xd8
[  412.552463]  tick_sched_handle.isra.0+0x30/0x50
[  412.552863]  tick_sched_timer+0x48/0x98
[  412.553201]  __hrtimer_run_queues+0x110/0x1b0
[  412.553583]  hrtimer_interrupt+0xe4/0x238
[  412.553936]  arch_timer_handler_phys+0x28/0x40
[  412.554331]  handle_percpu_devid_irq+0x80/0x130
[  412.554732]  generic_handle_domain_irq+0x38/0x58
[  412.555139]  gic_handle_irq+0x4c/0x110
[  412.555471]  call_on_irq_stack+0x28/0x3c
[  412.555817]  do_interrupt_handler+0x78/0x80
[  412.556186]  el1_interrupt+0x34/0x80
[  412.556502]  el1h_64_irq_handler+0x14/0x20
[  412.556864]  el1h_64_irq+0x74/0x78
[  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
[  412.557587]  _raw_spin_lock+0x5c/0x68
[  412.557912]  panfrost_job_run+0x24c/0x3f8
[  412.558267]  drm_sched_main+0x130/0x390
[  412.558607]  kthread+0x174/0x180
[  412.558894]  ret_from_fork+0x10/0x20
[  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
[  475.560329]  (t=115521 jiffies g=505 q=857)
[  475.560697] Task dump for CPU 3:
[  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  475.561850] Call trace:
[  475.562067]  dump_backtrace+0x0/0x198
[  475.562398]  show_stack+0x14/0x60
[  475.562693]  sched_show_task+0x148/0x168
[  475.563041]  dump_cpu_task+0x40/0x4c
[  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
[  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
[  475.564104]  update_process_times+0x94/0xd8
[  475.564472]  tick_sched_handle.isra.0+0x30/0x50
[  475.564871]  tick_sched_timer+0x48/0x98
[  475.565209]  __hrtimer_run_queues+0x110/0x1b0
[  475.565592]  hrtimer_interrupt+0xe4/0x238
[  475.565946]  arch_timer_handler_phys+0x28/0x40
[  475.566339]  handle_percpu_devid_irq+0x80/0x130
[  475.566739]  generic_handle_domain_irq+0x38/0x58
[  475.567145]  gic_handle_irq+0x4c/0x110
[  475.567477]  call_on_irq_stack+0x28/0x3c
[  475.567822]  do_interrupt_handler+0x78/0x80
[  475.568190]  el1_interrupt+0x34/0x80
[  475.568507]  el1h_64_irq_handler+0x14/0x20
[  475.568869]  el1h_64_irq+0x74/0x78
[  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
[  475.569593]  _raw_spin_lock+0x5c/0x68
[  475.569915]  panfrost_job_run+0x24c/0x3f8
[  475.570270]  drm_sched_main+0x130/0x390
[  475.570610]  kthread+0x174/0x180
[  475.570897]  ret_from_fork+0x10/0x20
[  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
[  538.572333]  (t=131274 jiffies g=505 q=947)
[  538.572701] Task dump for CPU 3:
[  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  538.573854] Call trace:
[  538.574070]  dump_backtrace+0x0/0x198
[  538.574402]  show_stack+0x14/0x60
[  538.574696]  sched_show_task+0x148/0x168
[  538.575044]  dump_cpu_task+0x40/0x4c
[  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
[  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
[  538.576109]  update_process_times+0x94/0xd8
[  538.576477]  tick_sched_handle.isra.0+0x30/0x50
[  538.576878]  tick_sched_timer+0x48/0x98
[  538.577216]  __hrtimer_run_queues+0x110/0x1b0
[  538.577599]  hrtimer_interrupt+0xe4/0x238
[  538.577953]  arch_timer_handler_phys+0x28/0x40
[  538.578346]  handle_percpu_devid_irq+0x80/0x130
[  538.578745]  generic_handle_domain_irq+0x38/0x58
[  538.579151]  gic_handle_irq+0x4c/0x110
[  538.579487]  call_on_irq_stack+0x28/0x3c
[  538.579833]  do_interrupt_handler+0x78/0x80
[  538.580201]  el1_interrupt+0x34/0x80
[  538.580518]  el1h_64_irq_handler+0x14/0x20
[  538.580880]  el1h_64_irq+0x74/0x78
[  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
[  538.581603]  _raw_spin_lock+0x5c/0x68
[  538.581927]  panfrost_job_run+0x24c/0x3f8
[  538.582283]  drm_sched_main+0x130/0x390
[  538.582623]  kthread+0x174/0x180
[  538.582910]  ret_from_fork+0x10/0x20
[  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
[  601.584330]  (t=147027 jiffies g=505 q=1018)
[  601.584706] Task dump for CPU 3:
[  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  601.585859] Call trace:
[  601.586075]  dump_backtrace+0x0/0x198
[  601.586406]  show_stack+0x14/0x60
[  601.586701]  sched_show_task+0x148/0x168
[  601.587048]  dump_cpu_task+0x40/0x4c
[  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
[  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
[  601.588112]  update_process_times+0x94/0xd8
[  601.588480]  tick_sched_handle.isra.0+0x30/0x50
[  601.588880]  tick_sched_timer+0x48/0x98
[  601.589218]  __hrtimer_run_queues+0x110/0x1b0
[  601.589602]  hrtimer_interrupt+0xe4/0x238
[  601.589956]  arch_timer_handler_phys+0x28/0x40
[  601.590348]  handle_percpu_devid_irq+0x80/0x130
[  601.590747]  generic_handle_domain_irq+0x38/0x58
[  601.591153]  gic_handle_irq+0x4c/0x110
[  601.591486]  call_on_irq_stack+0x28/0x3c
[  601.591832]  do_interrupt_handler+0x78/0x80
[  601.592201]  el1_interrupt+0x34/0x80
[  601.592517]  el1h_64_irq_handler+0x14/0x20
[  601.592879]  el1h_64_irq+0x74/0x78
[  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
[  601.593603]  _raw_spin_lock+0x5c/0x68
[  601.593927]  panfrost_job_run+0x24c/0x3f8
[  601.594283]  drm_sched_main+0x130/0x390
[  601.594623]  kthread+0x174/0x180
[  601.594910]  ret_from_fork+0x10/0x20
[  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
[  664.596333]  (t=162780 jiffies g=505 q=1086)
[  664.596709] Task dump for CPU 3:
[  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  664.597862] Call trace:
[  664.598078]  dump_backtrace+0x0/0x198
[  664.598409]  show_stack+0x14/0x60
[  664.598704]  sched_show_task+0x148/0x168
[  664.599052]  dump_cpu_task+0x40/0x4c
[  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
[  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
[  664.600114]  update_process_times+0x94/0xd8
[  664.600482]  tick_sched_handle.isra.0+0x30/0x50
[  664.600882]  tick_sched_timer+0x48/0x98
[  664.601220]  __hrtimer_run_queues+0x110/0x1b0
[  664.601604]  hrtimer_interrupt+0xe4/0x238
[  664.601958]  arch_timer_handler_phys+0x28/0x40
[  664.602352]  handle_percpu_devid_irq+0x80/0x130
[  664.602751]  generic_handle_domain_irq+0x38/0x58
[  664.603158]  gic_handle_irq+0x4c/0x110
[  664.603491]  call_on_irq_stack+0x28/0x3c
[  664.603838]  do_interrupt_handler+0x78/0x80
[  664.604206]  el1_interrupt+0x34/0x80
[  664.604522]  el1h_64_irq_handler+0x14/0x20
[  664.604883]  el1h_64_irq+0x74/0x78
[  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
[  664.605609]  _raw_spin_lock+0x5c/0x68
[  664.605934]  panfrost_job_run+0x24c/0x3f8
[  664.606290]  drm_sched_main+0x130/0x390
[  664.606631]  kthread+0x174/0x180
[  664.606918]  ret_from_fork+0x10/0x20
[  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
[  727.608331]  (t=178533 jiffies g=505 q=1152)
[  727.608706] Task dump for CPU 3:
[  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  727.609858] Call trace:
[  727.610074]  dump_backtrace+0x0/0x198
[  727.610403]  show_stack+0x14/0x60
[  727.610698]  sched_show_task+0x148/0x168
[  727.611047]  dump_cpu_task+0x40/0x4c
[  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
[  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
[  727.612112]  update_process_times+0x94/0xd8
[  727.612479]  tick_sched_handle.isra.0+0x30/0x50
[  727.612879]  tick_sched_timer+0x48/0x98
[  727.613216]  __hrtimer_run_queues+0x110/0x1b0
[  727.613601]  hrtimer_interrupt+0xe4/0x238
[  727.613955]  arch_timer_handler_phys+0x28/0x40
[  727.614348]  handle_percpu_devid_irq+0x80/0x130
[  727.614748]  generic_handle_domain_irq+0x38/0x58
[  727.615154]  gic_handle_irq+0x4c/0x110
[  727.615485]  call_on_irq_stack+0x28/0x3c
[  727.615832]  do_interrupt_handler+0x78/0x80
[  727.616200]  el1_interrupt+0x34/0x80
[  727.616517]  el1h_64_irq_handler+0x14/0x20
[  727.616879]  el1h_64_irq+0x74/0x78
[  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
[  727.617602]  _raw_spin_lock+0x5c/0x68
[  727.617926]  panfrost_job_run+0x24c/0x3f8
[  727.618282]  drm_sched_main+0x130/0x390
[  727.618621]  kthread+0x174/0x180
[  727.618908]  ret_from_fork+0x10/0x20
[  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
[  790.620331]  (t=194286 jiffies g=505 q=1219)
[  790.620708] Task dump for CPU 3:
[  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  790.621860] Call trace:
[  790.622075]  dump_backtrace+0x0/0x198
[  790.622405]  show_stack+0x14/0x60
[  790.622699]  sched_show_task+0x148/0x168
[  790.623049]  dump_cpu_task+0x40/0x4c
[  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
[  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
[  790.624113]  update_process_times+0x94/0xd8
[  790.624481]  tick_sched_handle.isra.0+0x30/0x50
[  790.624880]  tick_sched_timer+0x48/0x98
[  790.625218]  __hrtimer_run_queues+0x110/0x1b0
[  790.625603]  hrtimer_interrupt+0xe4/0x238
[  790.625957]  arch_timer_handler_phys+0x28/0x40
[  790.626350]  handle_percpu_devid_irq+0x80/0x130
[  790.626752]  generic_handle_domain_irq+0x38/0x58
[  790.627158]  gic_handle_irq+0x4c/0x110
[  790.627493]  call_on_irq_stack+0x28/0x3c
[  790.627839]  do_interrupt_handler+0x78/0x80
[  790.628208]  el1_interrupt+0x34/0x80
[  790.628526]  el1h_64_irq_handler+0x14/0x20
[  790.628888]  el1h_64_irq+0x74/0x78
[  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
[  790.629613]  _raw_spin_lock+0x5c/0x68
[  790.629937]  panfrost_job_run+0x24c/0x3f8
[  790.630292]  drm_sched_main+0x130/0x390
[  790.630632]  kthread+0x174/0x180
[  790.630919]  ret_from_fork+0x10/0x20
[  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
[  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
[  853.632340]  (t=210039 jiffies g=505 q=1318)
[  853.632716] Task dump for CPU 3:
[  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  853.633869] Call trace:
[  853.634084]  dump_backtrace+0x0/0x198
[  853.634418]  show_stack+0x14/0x60
[  853.634712]  sched_show_task+0x148/0x168
[  853.635061]  dump_cpu_task+0x40/0x4c
[  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
[  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
[  853.636124]  update_process_times+0x94/0xd8
[  853.636492]  tick_sched_handle.isra.0+0x30/0x50
[  853.636892]  tick_sched_timer+0x48/0x98
[  853.637230]  __hrtimer_run_queues+0x110/0x1b0
[  853.637613]  hrtimer_interrupt+0xe4/0x238
[  853.637965]  arch_timer_handler_phys+0x28/0x40
[  853.638358]  handle_percpu_devid_irq+0x80/0x130
[  853.638760]  generic_handle_domain_irq+0x38/0x58
[  853.639166]  gic_handle_irq+0x4c/0x110
[  853.639499]  call_on_irq_stack+0x28/0x3c
[  853.639845]  do_interrupt_handler+0x78/0x80
[  853.640213]  el1_interrupt+0x34/0x80
[  853.640530]  el1h_64_irq_handler+0x14/0x20
[  853.640892]  el1h_64_irq+0x74/0x78
[  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
[  853.641616]  _raw_spin_lock+0x5c/0x68
[  853.641940]  panfrost_job_run+0x24c/0x3f8
[  853.642295]  drm_sched_main+0x130/0x390
[  853.642634]  kthread+0x174/0x180
[  853.642921]  ret_from_fork+0x10/0x20
[  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
[  916.644339]  (t=225792 jiffies g=505 q=1390)
[  916.644715] Task dump for CPU 3:
[  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  916.645868] Call trace:
[  916.646083]  dump_backtrace+0x0/0x198
[  916.646414]  show_stack+0x14/0x60
[  916.646708]  sched_show_task+0x148/0x168
[  916.647055]  dump_cpu_task+0x40/0x4c
[  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
[  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
[  916.648119]  update_process_times+0x94/0xd8
[  916.648488]  tick_sched_handle.isra.0+0x30/0x50
[  916.648887]  tick_sched_timer+0x48/0x98
[  916.649225]  __hrtimer_run_queues+0x110/0x1b0
[  916.649608]  hrtimer_interrupt+0xe4/0x238
[  916.649962]  arch_timer_handler_phys+0x28/0x40
[  916.650355]  handle_percpu_devid_irq+0x80/0x130
[  916.650756]  generic_handle_domain_irq+0x38/0x58
[  916.651162]  gic_handle_irq+0x4c/0x110
[  916.651495]  call_on_irq_stack+0x28/0x3c
[  916.651842]  do_interrupt_handler+0x78/0x80
[  916.652210]  el1_interrupt+0x34/0x80
[  916.652527]  el1h_64_irq_handler+0x14/0x20
[  916.652889]  el1h_64_irq+0x74/0x78
[  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
[  916.653614]  _raw_spin_lock+0x5c/0x68
[  916.653937]  panfrost_job_run+0x24c/0x3f8
[  916.654293]  drm_sched_main+0x130/0x390
[  916.654632]  kthread+0x174/0x180
[  916.654920]  ret_from_fork+0x10/0x20

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
@ 2021-11-15 14:03     ` Sascha Hauer
  0 siblings, 0 replies; 115+ messages in thread
From: Sascha Hauer @ 2021-11-15 14:03 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, intel-gfx, dri-devel, linux-media

Hi,

On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: use dma_resv_for_each_fence
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>  1 file changed, 6 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 042c16b5d54a..5bc5f775abe1 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>  					    struct drm_gem_object *obj,
>  					    bool write)
>  {
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>  	int ret;
> -	struct dma_fence **fences;
> -	unsigned int i, fence_count;
> -
> -	if (!write) {
> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> -
> -		return drm_sched_job_add_dependency(job, fence);
> -	}
> -
> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> -	if (ret || !fence_count)
> -		return ret;
>  
> -	for (i = 0; i < fence_count; i++) {
> -		ret = drm_sched_job_add_dependency(job, fences[i]);
> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> +		ret = drm_sched_job_add_dependency(job, fence);
>  		if (ret)
> -			break;
> +			return ret;
>  	}
> -
> -	for (; i < fence_count; i++)
> -		dma_fence_put(fences[i]);
> -	kfree(fences);
> -	return ret;
> +	return 0;
>  }
>  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>  
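
For readers not following the rest of the series: below is a minimal usage
sketch of the iterator this hunk switches to. It is my own illustration, not
code from the patch, and it assumes the cursor owns the reference for the
fence it currently yields and drops it again when it advances or the walk
ends; the locking rules of the final interface are not shown here.

static void sketch_wait_all_fences(struct drm_gem_object *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Walk every fence in the reservation object, including shared ones. */
	dma_resv_for_each_fence(&cursor, obj->resv, true /* all fences */, fence) {
		/* fence is only guaranteed to stay alive for this iteration;
		 * take dma_fence_get(fence) before stashing it anywhere that
		 * outlives the loop body. */
		dma_fence_wait(fence, false);
	}
}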

This patch makes the panfrost driver explode on v5.16-rc1 with the
splat below. I didn't bisect it, but the problem goes away when I
revert this patch. I had only started weston, nothing more.

Any idea what is going wrong here?

Sascha
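
One way to read the refcount splats in the log below, offered only as a
sketch and not as a claim about what the eventual fix looks like: judging by
the removed lines in the hunk above, drm_sched_job_add_dependency() takes
over the reference it is handed on success, while the new iterator keeps
ownership of the reference for the fence it yields, so the converted loop may
end up dropping one reference too many. A hedged variant that takes its own
reference first (variable names as in the hunk above) would look roughly like
this:

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/* Extra reference for the scheduler; the cursor drops its own
		 * reference once it advances past this fence. */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			/* Assuming the callee does not consume the reference
			 * on failure, drop it ourselves. */
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;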

[   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
[   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   12.514056] Modules linked in:
[   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
[   12.514621] ------------[ cut here ]------------
[   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.515049] pc : dma_fence_release+0xac/0xe8
[   12.515056] lr : dma_fence_release+0xac/0xe8
[   12.515061] sp : ffff8000123ebb20
[   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
[   12.515518] refcount_t: addition on 0; use-after-free.
[   12.516015]  x27: 0000000000000000
[   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
[   12.516992] x26: 0000000000000001
[   12.517366] Modules linked in:
[   12.517654]  x25: ffff000004b051c0
[   12.518108]
[   12.518555]  x24: 0000000000000000
[   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
[   12.519576]
[   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.520133] x23: 0000000000000000
[   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.520559]  x22: ffff800010d41b78
[   12.520856] pc : refcount_warn_saturate+0x98/0x140
[   12.521625]  x21: ffff000004b05050
[   12.521755] lr : refcount_warn_saturate+0x98/0x140
[   12.522299]
[   12.522588] sp : ffff8000122b3bc0
[   12.523192] x20: ffff000004b05040
[   12.523489] x29: ffff8000122b3bc0
[   12.523906]  x19: ffff000004b05078
[   12.524203]  x28: 0000000000000000
[   12.524620]  x18: 0000000000000010
[   12.524751]  x27: ffff000003791880
[   12.525040]
[   12.525329]
[   12.525618] x17: 0000000000000000
[   12.525915] x26: ffff8000122b3d30
[   12.526212]  x16: 0000000000000000
[   12.526509]  x25: 0000000000000001
[   12.526806]  x15: ffff0000050e2dc0
[   12.526937]  x24: ffff000003791a10
[   12.527067]
[   12.527357]
[   12.527646] x14: 00000000000001b5
[   12.527942] x23: 0000000000000000
[   12.528240]  x13: ffff0000050e2dc0
[   12.528536]  x22: ffff000003505280
[   12.528833]  x12: 00000000ffffffea
[   12.528964]  x21: ffff000003a2a220
[   12.529095]
[   12.529384]
[   12.529673] x11: ffff800011761ec8
[   12.529970] x20: ffff000004b05078
[   12.530267]  x10: ffff8000115e1e88
[   12.530564]  x19: ffff000004b05000
[   12.530861]  x9 : ffff8000115e1ee0
[   12.530992]  x18: 0000000000000010
[   12.531123]
[   12.531412]
[   12.531701] x8 : 000000000017ffe8
[   12.531998] x17: 0000000000500600
[   12.532294]  x7 : c0000000fffeffff
[   12.532591]  x16: 0000000000000000
[   12.532888]  x6 : 0000000000000001
[   12.533019]  x15: ffff000003505700
[   12.533150]
[   12.533439]
[   12.533728] x5 : ffff00007fb8c9a0
[   12.534025] x14: 0000000000000000
[   12.534322]  x4 : 0000000000000000
[   12.534619]  x13: 292d2d3d45505954
[   12.534914]  x3 : 0000000000000001
[   12.535045]  x12: 4220534253532d20
[   12.535176]
[   12.535465]
[   12.535754] x2 : ffff00007fb8c9a8
[   12.536051] x11: 5449442d204f4354
[   12.536347]  x1 : ea6e0584a53f2200
[   12.536643]  x10: 2d204f41552d204e
[   12.536941]  x0 : 0000000000000000
[   12.537073]  x9 : 4e41502b20666961
[   12.537203]
[   12.537492]
[   12.537782] Call trace:
[   12.538078] x8 : 642076635a6e2820
[   12.538377]  dma_fence_release+0xac/0xe8
[   12.538671]  x7 : 205d343430353135
[   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.539098]  x6 : 352e32312020205b
[   12.539230]  panfrost_job_push+0x1bc/0x200
[   12.539442]
[   12.539732]  panfrost_ioctl_submit+0x358/0x438
[   12.540073] x5 : ffff00007fb539a0
[   12.540370]  drm_ioctl_kernel+0xb8/0x170
[   12.540771]  x4 : 0000000000000000
[   12.541069]  drm_ioctl+0x214/0x450
[   12.541424]  x3 : 0000000000000001
[   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
[   12.541943]
[   12.542233]  invoke_syscall+0x40/0xf8
[   12.542573] x2 : ffff00007fb539a8
[   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.543167]  x1 : 0ac4fb7a0680bb00
[   12.543465]  do_el0_svc+0x20/0x80
[   12.543805]  x0 : 0000000000000000
[   12.543936]  el0_svc+0x1c/0x50
[   12.544255]
[   12.544544]  el0t_64_sync_handler+0xa8/0xb0
[   12.544955] Call trace:
[   12.545250]  el0t_64_sync+0x16c/0x170
[   12.545540]  refcount_warn_saturate+0x98/0x140
[   12.545837] ---[ end trace ba74542f51246288 ]---
[   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
[   12.546285] ------------[ cut here ]------------
[   12.546598]  drm_sched_job_done_cb+0x10/0x18
[   12.546813] refcount_t: underflow; use-after-free.
[   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
[   12.547920]  dma_fence_signal_locked+0x20/0x30
[   12.548336] Modules linked in:
[   12.548737]  panfrost_job_handle_done+0x34/0x50
[   12.549110]
[   12.549525]  panfrost_job_handle_irqs+0x358/0x570
[   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
[   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.551373]  irq_thread_fn+0x28/0x98
[   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.551899]  irq_thread+0x12c/0x230
[   12.552309] pc : refcount_warn_saturate+0xec/0x140
[   12.553131]  kthread+0x174/0x180
[   12.553578] lr : refcount_warn_saturate+0xec/0x140
[   12.554121]  ret_from_fork+0x10/0x20
[   12.554432] sp : ffff8000123ebaa0
[   12.555038] ---[ end trace ba74542f51246289 ]---
[   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
[   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
[   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
[   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
[   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
[   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
[   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.563733] Call trace:
[   12.563950]  refcount_warn_saturate+0xec/0x140
[   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
[   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   12.565216]  dma_fence_release+0xd4/0xe8
[   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.565970]  panfrost_job_push+0x1bc/0x200
[   12.566333]  panfrost_ioctl_submit+0x358/0x438
[   12.566726]  drm_ioctl_kernel+0xb8/0x170
[   12.567072]  drm_ioctl+0x214/0x450
[   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
[   12.567721]  invoke_syscall+0x40/0xf8
[   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.568463]  do_el0_svc+0x20/0x80
[   12.568755]  el0_svc+0x1c/0x50
[   12.569030]  el0t_64_sync_handler+0xa8/0xb0
[   12.569399]  el0t_64_sync+0x16c/0x170
[   12.569724] ---[ end trace ba74542f5124628a ]---
[   12.595086] ------------[ cut here ]------------
[   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
[   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   12.596934] Modules linked in:
[   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.599202] pc : dma_fence_release+0xac/0xe8
[   12.599584] lr : dma_fence_release+0xac/0xe8
[   12.599960] sp : ffff8000123ebb20
[   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
[   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.606542] Call trace:
[   12.606760]  dma_fence_release+0xac/0xe8
[   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.607517]  panfrost_job_push+0x1bc/0x200
[   12.607882]  panfrost_ioctl_submit+0x358/0x438
[   12.608274]  drm_ioctl_kernel+0xb8/0x170
[   12.608622]  drm_ioctl+0x214/0x450
[   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
[   12.609269]  invoke_syscall+0x40/0xf8
[   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.610011]  do_el0_svc+0x20/0x80
[   12.610304]  el0_svc+0x1c/0x50
[   12.610577]  el0t_64_sync_handler+0xa8/0xb0
[   12.610946]  el0t_64_sync+0x16c/0x170
[   12.611276] ---[ end trace ba74542f5124628b ]---
[   12.612869] ------------[ cut here ]------------
[   12.613288] refcount_t: saturated; leaking memory.
[   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
[   12.614476] Modules linked in:
[   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.616773] pc : refcount_warn_saturate+0xc0/0x140
[   12.617200] lr : refcount_warn_saturate+0xc0/0x140
[   12.617622] sp : ffff8000123eba60
[   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
[   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
[   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
[   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
[   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
[   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.624256] Call trace:
[   12.624474]  refcount_warn_saturate+0xc0/0x140
[   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
[   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
[   12.625811]  drm_atomic_helper_commit+0x80/0x360
[   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
[   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
[   12.627050]  drm_ioctl_kernel+0xb8/0x170
[   12.627397]  drm_ioctl+0x214/0x450
[   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
[   12.628046]  invoke_syscall+0x40/0xf8
[   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.628787]  do_el0_svc+0x20/0x80
[   12.629079]  el0_svc+0x1c/0x50
[   12.629354]  el0t_64_sync_handler+0xa8/0xb0
[   12.629723]  el0t_64_sync+0x16c/0x170
[   12.630048] ---[ end trace ba74542f5124628c ]---
[   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
[   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
[   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
[   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
[   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
[   13.296693] ------------[ cut here ]------------
[   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
[   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.298560] Modules linked in:
[   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.300830] pc : dma_fence_release+0xac/0xe8
[   13.301208] lr : dma_fence_release+0xac/0xe8
[   13.301585] sp : ffff8000123ebb20
[   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.308149] Call trace:
[   13.308367]  dma_fence_release+0xac/0xe8
[   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.309119]  panfrost_job_push+0x1bc/0x200
[   13.309483]  panfrost_ioctl_submit+0x358/0x438
[   13.309875]  drm_ioctl_kernel+0xb8/0x170
[   13.310221]  drm_ioctl+0x214/0x450
[   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
[   13.310868]  invoke_syscall+0x40/0xf8
[   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.311609]  do_el0_svc+0x20/0x80
[   13.311903]  el0_svc+0x1c/0x50
[   13.312177]  el0t_64_sync_handler+0xa8/0xb0
[   13.312545]  el0t_64_sync+0x16c/0x170
[   13.312869] ---[ end trace ba74542f5124628d ]---
[   13.340454] ------------[ cut here ]------------
[   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
[   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.342318] Modules linked in:
[   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.344584] pc : dma_fence_release+0xac/0xe8
[   13.344961] lr : dma_fence_release+0xac/0xe8
[   13.345338] sp : ffff8000123ebb20
[   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
[   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.351900] Call trace:
[   13.352116]  dma_fence_release+0xac/0xe8
[   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.352869]  panfrost_job_push+0x1bc/0x200
[   13.353232]  panfrost_ioctl_submit+0x358/0x438
[   13.353624]  drm_ioctl_kernel+0xb8/0x170
[   13.353971]  drm_ioctl+0x214/0x450
[   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
[   13.354616]  invoke_syscall+0x40/0xf8
[   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.355356]  do_el0_svc+0x20/0x80
[   13.355650]  el0_svc+0x1c/0x50
[   13.355925]  el0t_64_sync_handler+0xa8/0xb0
[   13.356293]  el0t_64_sync+0x16c/0x170
[   13.356618] ---[ end trace ba74542f5124628e ]---
[   13.379841] ------------[ cut here ]------------
[   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
[   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.381680] Modules linked in:
[   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.383937] pc : dma_fence_release+0xac/0xe8
[   13.384314] lr : dma_fence_release+0xac/0xe8
[   13.384690] sp : ffff8000123ebb20
[   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.391247] Call trace:
[   13.391464]  dma_fence_release+0xac/0xe8
[   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.392217]  panfrost_job_push+0x1bc/0x200
[   13.392581]  panfrost_ioctl_submit+0x358/0x438
[   13.392972]  drm_ioctl_kernel+0xb8/0x170
[   13.393319]  drm_ioctl+0x214/0x450
[   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
[   13.393967]  invoke_syscall+0x40/0xf8
[   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.394708]  do_el0_svc+0x20/0x80
[   13.395002]  el0_svc+0x1c/0x50
[   13.395275]  el0t_64_sync_handler+0xa8/0xb0
[   13.395643]  el0t_64_sync+0x16c/0x170
[   13.395968] ---[ end trace ba74542f5124628f ]---
[   13.398130] ------------[ cut here ]------------
[   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
[   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.400011] Modules linked in:
[   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.402269] pc : dma_fence_release+0xac/0xe8
[   13.402646] lr : dma_fence_release+0xac/0xe8
[   13.403024] sp : ffff8000123ebb20
[   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
[   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
[   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
[   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
[   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
[   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
[   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
[   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.409583] Call trace:
[   13.409800]  dma_fence_release+0xac/0xe8
[   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.410553]  panfrost_job_push+0x1bc/0x200
[   13.410917]  panfrost_ioctl_submit+0x358/0x438
[   13.411309]  drm_ioctl_kernel+0xb8/0x170
[   13.411656]  drm_ioctl+0x214/0x450
[   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
[   13.412303]  invoke_syscall+0x40/0xf8
[   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.413042]  do_el0_svc+0x20/0x80
[   13.413335]  el0_svc+0x1c/0x50
[   13.413607]  el0t_64_sync_handler+0xa8/0xb0
[   13.413976]  el0t_64_sync+0x16c/0x170
[   13.414298] ---[ end trace ba74542f51246290 ]---
[   13.430129] ------------[ cut here ]------------
[   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
[   13.430557] refcount_t: saturated; leaking memory.
[   13.431321] Mem abort info:
[   13.431324]   ESR = 0x96000044
[   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
[   13.431330]   SET = 0, FnV = 0
[   13.431333]   EA = 0, S1PTW = 0
[   13.431335]   FSC = 0x04: level 0 translation fault
[   13.431337] Data abort info:
[   13.431339]   ISV = 0, ISS = 0x00000044
[   13.431340]   CM = 0, WnR = 1
[   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
[   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
[   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
[   13.431359] Modules linked in:
[   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
[   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
[   13.432059] lr : dma_fence_signal+0x30/0x60
[   13.432327] Modules linked in:
[   13.432789] sp : ffff8000122b3b50
[   13.433057]
[   13.433331] x29: ffff8000122b3b50
[   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.434008]  x28: 0000000000000000
[   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.434601]  x27: ffff000003791880
[   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.435751]
[   13.435753] x26: ffff8000122b3d30
[   13.436237] pc : refcount_warn_saturate+0x6c/0x140
[   13.436504]  x25: 0000000000000001
[   13.437393] lr : refcount_warn_saturate+0x6c/0x140
[   13.437938]  x24: ffff000003791a10
[   13.438542] sp : ffff8000123ebb40
[   13.439042]
[   13.439767] x29: ffff8000123ebb40
[   13.440130] x23: 0000000000000000
[   13.440398]  x28: ffff8000123ebd58
[   13.440687]  x22: ffff000003505280
[   13.440819]  x27: 0000000000000000
[   13.441108]  x21: ffff8000122b3b88
[   13.441931]
[   13.442228]
[   13.442773] x26: 0000000000000001
[   13.443070] x20: ffff000004b051c0
[   13.443674]  x25: ffff000004b051c0
[   13.443806]  x19: ffff000004b051c0
[   13.444095]  x24: 0000000000000000
[   13.444513]  x18: 0000000000000000
[   13.444811]
[   13.445227]
[   13.445524] x23: 0000000000000000
[   13.445814] x17: 3837783028203032
[   13.445945]  x22: ffff000004b051c0
[   13.446236]  x16: 3139323835323120
[   13.446525]  x21: ffff000004d73100
[   13.446822]  x15: 00000205aa24947a
[   13.447120]
[   13.447417]
[   13.447715] x20: ffff000004b05400
[   13.447846] x14: 0000000000000326
[   13.447977]  x19: 00000000ffffffff
[   13.448266]  x13: 0000000000000000
[   13.448555]  x18: 0000000000000010
[   13.448851]  x12: 0000000000000000
[   13.449148]
[   13.449446]
[   13.449743] x17: 0000000000000000
[   13.449874] x11: 0000000000000001
[   13.450006]  x16: 0000000000000000
[   13.450296]  x10: ffff8000122b3d48
[   13.450585]  x15: 000060978994e822
[   13.450882]  x9 : 00000000000019e0
[   13.451179]
[   13.451477]
[   13.451774] x14: 00000000000000b6
[   13.451905] x8 : ffff8000122b3d78
[   13.452037]  x13: 00000000000000b6
[   13.452326]  x7 : 0000000000000000
[   13.452614]  x12: 0000000000000000
[   13.452912]  x6 : 000000001fcf847e
[   13.453209]
[   13.453506]
[   13.453803] x11: 0000000000000001
[   13.453934] x5 : 00ffffffffffffff
[   13.454066]  x10: 00000000000009a0
[   13.454356]  x4 : 0015ef3c03fd7c00
[   13.454643]  x9 : ffff8000123eb8c0
[   13.454941]  x3 : 0000000000000018
[   13.455238]
[   13.455536]
[   13.455833] x8 : ffff0000050e3340
[   13.455965] x2 : ffff000004b051f0
[   13.456096]  x7 : ffff00007fb92a80
[   13.456386]  x1 : 000000032053be4d
[   13.456676]  x6 : 0000000000000115
[   13.456973]  x0 : 0000000000000000
[   13.457271]
[   13.457568]
[   13.457866] x5 : 0000000000000000
[   13.457998] Call trace:
[   13.458128]  x4 : ffff00007fb8c9a0
[   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
[   13.458707]  x3 : ffff00007fb8f950
[   13.459005]  dma_fence_signal+0x30/0x60
[   13.459302]
[   13.459600]  drm_sched_fence_finished+0x10/0x18
[   13.459897] x2 : ffff00007fb8c9a0
[   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
[   13.460159]  x1 : ea6e0584a53f2200
[   13.460449]  drm_sched_job_done_cb+0x10/0x18
[   13.460738]  x0 : 0000000000000000
[   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   13.461333]
[   13.461631]  dma_fence_signal_locked+0x20/0x30
[   13.461929] Call trace:
[   13.462060]  panfrost_job_handle_done+0x34/0x50
[   13.462192]  refcount_warn_saturate+0x6c/0x140
[   13.462481]  panfrost_job_handle_irqs+0x358/0x570
[   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
[   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
[   13.463462]  panfrost_job_push+0x1bc/0x200
[   13.463760]  irq_thread_fn+0x28/0x98
[   13.464094]  panfrost_ioctl_submit+0x358/0x438
[   13.464225]  irq_thread+0x12c/0x230
[   13.464620]  drm_ioctl_kernel+0xb8/0x170
[   13.464909]  kthread+0x174/0x180
[   13.465319]  drm_ioctl+0x214/0x450
[   13.465617]  ret_from_fork+0x10/0x20
[   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
[   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
[   13.466756]  invoke_syscall+0x40/0xf8
[   13.466891] ---[ end trace ba74542f51246291 ]---
[   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
[   13.467883]  do_el0_svc+0x20/0x80
[   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
[   13.468678]  el0_svc+0x1c/0x50
[   13.475908]  el0t_64_sync_handler+0xa8/0xb0
[   13.476277]  el0t_64_sync+0x16c/0x170
[   13.476601] ---[ end trace ba74542f51246292 ]---
[   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
[   14.462959] sched: RT throttling activated
[   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
[   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
[   34.476312]  (t=5250 jiffies g=505 q=301)
[   34.476667] Task dump for CPU 3:
[   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[   34.477820] Call trace:
[   34.478035]  dump_backtrace+0x0/0x198
[   34.478365]  show_stack+0x14/0x60
[   34.478659]  sched_show_task+0x148/0x168
[   34.479008]  dump_cpu_task+0x40/0x4c
[   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
[   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
[   34.480072]  update_process_times+0x94/0xd8
[   34.480440]  tick_sched_handle.isra.0+0x30/0x50
[   34.480840]  tick_sched_timer+0x48/0x98
[   34.481178]  __hrtimer_run_queues+0x110/0x1b0
[   34.481562]  hrtimer_interrupt+0xe4/0x238
[   34.481917]  arch_timer_handler_phys+0x28/0x40
[   34.482310]  handle_percpu_devid_irq+0x80/0x130
[   34.482710]  generic_handle_domain_irq+0x38/0x58
[   34.483116]  gic_handle_irq+0x4c/0x110
[   34.483450]  call_on_irq_stack+0x28/0x3c
[   34.483798]  do_interrupt_handler+0x78/0x80
[   34.484166]  el1_interrupt+0x34/0x80
[   34.484484]  el1h_64_irq_handler+0x14/0x20
[   34.484846]  el1h_64_irq+0x74/0x78
[   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
[   34.485568]  _raw_spin_lock+0x5c/0x68
[   34.485895]  panfrost_job_run+0x24c/0x3f8
[   34.486250]  drm_sched_main+0x130/0x390
[   34.486591]  kthread+0x174/0x180
[   34.486878]  ret_from_fork+0x10/0x20
[   35.810989] vcc3v3_lcd1_n: disabling
[   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
[   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
[   97.488326]  (t=21003 jiffies g=505 q=379)
[   97.488687] Task dump for CPU 3:
[   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[   97.489842] Call trace:
[   97.490056]  dump_backtrace+0x0/0x198
[   97.490388]  show_stack+0x14/0x60
[   97.490682]  sched_show_task+0x148/0x168
[   97.491030]  dump_cpu_task+0x40/0x4c
[   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
[   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
[   97.492095]  update_process_times+0x94/0xd8
[   97.492463]  tick_sched_handle.isra.0+0x30/0x50
[   97.492862]  tick_sched_timer+0x48/0x98
[   97.493200]  __hrtimer_run_queues+0x110/0x1b0
[   97.493582]  hrtimer_interrupt+0xe4/0x238
[   97.493937]  arch_timer_handler_phys+0x28/0x40
[   97.494330]  handle_percpu_devid_irq+0x80/0x130
[   97.494730]  generic_handle_domain_irq+0x38/0x58
[   97.495136]  gic_handle_irq+0x4c/0x110
[   97.495473]  call_on_irq_stack+0x28/0x3c
[   97.495818]  do_interrupt_handler+0x78/0x80
[   97.496186]  el1_interrupt+0x34/0x80
[   97.496503]  el1h_64_irq_handler+0x14/0x20
[   97.496865]  el1h_64_irq+0x74/0x78
[   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
[   97.497588]  _raw_spin_lock+0x5c/0x68
[   97.497912]  panfrost_job_run+0x24c/0x3f8
[   97.498268]  drm_sched_main+0x130/0x390
[   97.498607]  kthread+0x174/0x180
[   97.498895]  ret_from_fork+0x10/0x20
[  140.108141] random: crng init done
[  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
[  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
[  160.500322]  (t=36756 jiffies g=505 q=482)
[  160.500684] Task dump for CPU 3:
[  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  160.501837] Call trace:
[  160.502054]  dump_backtrace+0x0/0x198
[  160.502384]  show_stack+0x14/0x60
[  160.502679]  sched_show_task+0x148/0x168
[  160.503027]  dump_cpu_task+0x40/0x4c
[  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
[  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
[  160.504091]  update_process_times+0x94/0xd8
[  160.504458]  tick_sched_handle.isra.0+0x30/0x50
[  160.504858]  tick_sched_timer+0x48/0x98
[  160.505195]  __hrtimer_run_queues+0x110/0x1b0
[  160.505580]  hrtimer_interrupt+0xe4/0x238
[  160.505934]  arch_timer_handler_phys+0x28/0x40
[  160.506327]  handle_percpu_devid_irq+0x80/0x130
[  160.506727]  generic_handle_domain_irq+0x38/0x58
[  160.507133]  gic_handle_irq+0x4c/0x110
[  160.507467]  call_on_irq_stack+0x28/0x3c
[  160.507813]  do_interrupt_handler+0x78/0x80
[  160.508181]  el1_interrupt+0x34/0x80
[  160.508497]  el1h_64_irq_handler+0x14/0x20
[  160.508858]  el1h_64_irq+0x74/0x78
[  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
[  160.509579]  _raw_spin_lock+0x5c/0x68
[  160.509903]  panfrost_job_run+0x24c/0x3f8
[  160.510259]  drm_sched_main+0x130/0x390
[  160.510599]  kthread+0x174/0x180
[  160.510886]  ret_from_fork+0x10/0x20
[  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
[  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
[  223.512325]  (t=52509 jiffies g=505 q=536)
[  223.512688] Task dump for CPU 3:
[  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  223.513842] Call trace:
[  223.514056]  dump_backtrace+0x0/0x198
[  223.514387]  show_stack+0x14/0x60
[  223.514681]  sched_show_task+0x148/0x168
[  223.515029]  dump_cpu_task+0x40/0x4c
[  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
[  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
[  223.516094]  update_process_times+0x94/0xd8
[  223.516462]  tick_sched_handle.isra.0+0x30/0x50
[  223.516860]  tick_sched_timer+0x48/0x98
[  223.517198]  __hrtimer_run_queues+0x110/0x1b0
[  223.517582]  hrtimer_interrupt+0xe4/0x238
[  223.517935]  arch_timer_handler_phys+0x28/0x40
[  223.518327]  handle_percpu_devid_irq+0x80/0x130
[  223.518727]  generic_handle_domain_irq+0x38/0x58
[  223.519133]  gic_handle_irq+0x4c/0x110
[  223.519466]  call_on_irq_stack+0x28/0x3c
[  223.519812]  do_interrupt_handler+0x78/0x80
[  223.520181]  el1_interrupt+0x34/0x80
[  223.520498]  el1h_64_irq_handler+0x14/0x20
[  223.520860]  el1h_64_irq+0x74/0x78
[  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
[  223.521584]  _raw_spin_lock+0x5c/0x68
[  223.521908]  panfrost_job_run+0x24c/0x3f8
[  223.522264]  drm_sched_main+0x130/0x390
[  223.522605]  kthread+0x174/0x180
[  223.522892]  ret_from_fork+0x10/0x20
[  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
[  286.524325]  (t=68262 jiffies g=505 q=612)
[  286.524687] Task dump for CPU 3:
[  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  286.525840] Call trace:
[  286.526057]  dump_backtrace+0x0/0x198
[  286.526387]  show_stack+0x14/0x60
[  286.526681]  sched_show_task+0x148/0x168
[  286.527029]  dump_cpu_task+0x40/0x4c
[  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
[  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
[  286.528092]  update_process_times+0x94/0xd8
[  286.528459]  tick_sched_handle.isra.0+0x30/0x50
[  286.528859]  tick_sched_timer+0x48/0x98
[  286.529197]  __hrtimer_run_queues+0x110/0x1b0
[  286.529579]  hrtimer_interrupt+0xe4/0x238
[  286.529933]  arch_timer_handler_phys+0x28/0x40
[  286.530326]  handle_percpu_devid_irq+0x80/0x130
[  286.530726]  generic_handle_domain_irq+0x38/0x58
[  286.531132]  gic_handle_irq+0x4c/0x110
[  286.531466]  call_on_irq_stack+0x28/0x3c
[  286.531812]  do_interrupt_handler+0x78/0x80
[  286.532180]  el1_interrupt+0x34/0x80
[  286.532496]  el1h_64_irq_handler+0x14/0x20
[  286.532857]  el1h_64_irq+0x74/0x78
[  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
[  286.533580]  _raw_spin_lock+0x5c/0x68
[  286.533904]  panfrost_job_run+0x24c/0x3f8
[  286.534259]  drm_sched_main+0x130/0x390
[  286.534600]  kthread+0x174/0x180
[  286.534887]  ret_from_fork+0x10/0x20
[  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
[  349.536324]  (t=84015 jiffies g=505 q=716)
[  349.536687] Task dump for CPU 3:
[  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  349.537839] Call trace:
[  349.538055]  dump_backtrace+0x0/0x198
[  349.538387]  show_stack+0x14/0x60
[  349.538681]  sched_show_task+0x148/0x168
[  349.539029]  dump_cpu_task+0x40/0x4c
[  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
[  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
[  349.540094]  update_process_times+0x94/0xd8
[  349.540462]  tick_sched_handle.isra.0+0x30/0x50
[  349.540862]  tick_sched_timer+0x48/0x98
[  349.541201]  __hrtimer_run_queues+0x110/0x1b0
[  349.541585]  hrtimer_interrupt+0xe4/0x238
[  349.541937]  arch_timer_handler_phys+0x28/0x40
[  349.542330]  handle_percpu_devid_irq+0x80/0x130
[  349.542730]  generic_handle_domain_irq+0x38/0x58
[  349.543136]  gic_handle_irq+0x4c/0x110
[  349.543469]  call_on_irq_stack+0x28/0x3c
[  349.543815]  do_interrupt_handler+0x78/0x80
[  349.544183]  el1_interrupt+0x34/0x80
[  349.544500]  el1h_64_irq_handler+0x14/0x20
[  349.544862]  el1h_64_irq+0x74/0x78
[  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
[  349.545586]  _raw_spin_lock+0x5c/0x68
[  349.545910]  panfrost_job_run+0x24c/0x3f8
[  349.546265]  drm_sched_main+0x130/0x390
[  349.546604]  kthread+0x174/0x180
[  349.546891]  ret_from_fork+0x10/0x20
[  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
[  412.548325]  (t=99768 jiffies g=505 q=784)
[  412.548686] Task dump for CPU 3:
[  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  412.549841] Call trace:
[  412.550058]  dump_backtrace+0x0/0x198
[  412.550389]  show_stack+0x14/0x60
[  412.550684]  sched_show_task+0x148/0x168
[  412.551031]  dump_cpu_task+0x40/0x4c
[  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
[  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
[  412.552095]  update_process_times+0x94/0xd8
[  412.552463]  tick_sched_handle.isra.0+0x30/0x50
[  412.552863]  tick_sched_timer+0x48/0x98
[  412.553201]  __hrtimer_run_queues+0x110/0x1b0
[  412.553583]  hrtimer_interrupt+0xe4/0x238
[  412.553936]  arch_timer_handler_phys+0x28/0x40
[  412.554331]  handle_percpu_devid_irq+0x80/0x130
[  412.554732]  generic_handle_domain_irq+0x38/0x58
[  412.555139]  gic_handle_irq+0x4c/0x110
[  412.555471]  call_on_irq_stack+0x28/0x3c
[  412.555817]  do_interrupt_handler+0x78/0x80
[  412.556186]  el1_interrupt+0x34/0x80
[  412.556502]  el1h_64_irq_handler+0x14/0x20
[  412.556864]  el1h_64_irq+0x74/0x78
[  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
[  412.557587]  _raw_spin_lock+0x5c/0x68
[  412.557912]  panfrost_job_run+0x24c/0x3f8
[  412.558267]  drm_sched_main+0x130/0x390
[  412.558607]  kthread+0x174/0x180
[  412.558894]  ret_from_fork+0x10/0x20
[  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
[  475.560329]  (t=115521 jiffies g=505 q=857)
[  475.560697] Task dump for CPU 3:
[  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  475.561850] Call trace:
[  475.562067]  dump_backtrace+0x0/0x198
[  475.562398]  show_stack+0x14/0x60
[  475.562693]  sched_show_task+0x148/0x168
[  475.563041]  dump_cpu_task+0x40/0x4c
[  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
[  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
[  475.564104]  update_process_times+0x94/0xd8
[  475.564472]  tick_sched_handle.isra.0+0x30/0x50
[  475.564871]  tick_sched_timer+0x48/0x98
[  475.565209]  __hrtimer_run_queues+0x110/0x1b0
[  475.565592]  hrtimer_interrupt+0xe4/0x238
[  475.565946]  arch_timer_handler_phys+0x28/0x40
[  475.566339]  handle_percpu_devid_irq+0x80/0x130
[  475.566739]  generic_handle_domain_irq+0x38/0x58
[  475.567145]  gic_handle_irq+0x4c/0x110
[  475.567477]  call_on_irq_stack+0x28/0x3c
[  475.567822]  do_interrupt_handler+0x78/0x80
[  475.568190]  el1_interrupt+0x34/0x80
[  475.568507]  el1h_64_irq_handler+0x14/0x20
[  475.568869]  el1h_64_irq+0x74/0x78
[  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
[  475.569593]  _raw_spin_lock+0x5c/0x68
[  475.569915]  panfrost_job_run+0x24c/0x3f8
[  475.570270]  drm_sched_main+0x130/0x390
[  475.570610]  kthread+0x174/0x180
[  475.570897]  ret_from_fork+0x10/0x20
[  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
[  538.572333]  (t=131274 jiffies g=505 q=947)
[  538.572701] Task dump for CPU 3:
[  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  538.573854] Call trace:
[  538.574070]  dump_backtrace+0x0/0x198
[  538.574402]  show_stack+0x14/0x60
[  538.574696]  sched_show_task+0x148/0x168
[  538.575044]  dump_cpu_task+0x40/0x4c
[  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
[  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
[  538.576109]  update_process_times+0x94/0xd8
[  538.576477]  tick_sched_handle.isra.0+0x30/0x50
[  538.576878]  tick_sched_timer+0x48/0x98
[  538.577216]  __hrtimer_run_queues+0x110/0x1b0
[  538.577599]  hrtimer_interrupt+0xe4/0x238
[  538.577953]  arch_timer_handler_phys+0x28/0x40
[  538.578346]  handle_percpu_devid_irq+0x80/0x130
[  538.578745]  generic_handle_domain_irq+0x38/0x58
[  538.579151]  gic_handle_irq+0x4c/0x110
[  538.579487]  call_on_irq_stack+0x28/0x3c
[  538.579833]  do_interrupt_handler+0x78/0x80
[  538.580201]  el1_interrupt+0x34/0x80
[  538.580518]  el1h_64_irq_handler+0x14/0x20
[  538.580880]  el1h_64_irq+0x74/0x78
[  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
[  538.581603]  _raw_spin_lock+0x5c/0x68
[  538.581927]  panfrost_job_run+0x24c/0x3f8
[  538.582283]  drm_sched_main+0x130/0x390
[  538.582623]  kthread+0x174/0x180
[  538.582910]  ret_from_fork+0x10/0x20
[  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
[  601.584330]  (t=147027 jiffies g=505 q=1018)
[  601.584706] Task dump for CPU 3:
[  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  601.585859] Call trace:
[  601.586075]  dump_backtrace+0x0/0x198
[  601.586406]  show_stack+0x14/0x60
[  601.586701]  sched_show_task+0x148/0x168
[  601.587048]  dump_cpu_task+0x40/0x4c
[  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
[  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
[  601.588112]  update_process_times+0x94/0xd8
[  601.588480]  tick_sched_handle.isra.0+0x30/0x50
[  601.588880]  tick_sched_timer+0x48/0x98
[  601.589218]  __hrtimer_run_queues+0x110/0x1b0
[  601.589602]  hrtimer_interrupt+0xe4/0x238
[  601.589956]  arch_timer_handler_phys+0x28/0x40
[  601.590348]  handle_percpu_devid_irq+0x80/0x130
[  601.590747]  generic_handle_domain_irq+0x38/0x58
[  601.591153]  gic_handle_irq+0x4c/0x110
[  601.591486]  call_on_irq_stack+0x28/0x3c
[  601.591832]  do_interrupt_handler+0x78/0x80
[  601.592201]  el1_interrupt+0x34/0x80
[  601.592517]  el1h_64_irq_handler+0x14/0x20
[  601.592879]  el1h_64_irq+0x74/0x78
[  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
[  601.593603]  _raw_spin_lock+0x5c/0x68
[  601.593927]  panfrost_job_run+0x24c/0x3f8
[  601.594283]  drm_sched_main+0x130/0x390
[  601.594623]  kthread+0x174/0x180
[  601.594910]  ret_from_fork+0x10/0x20
[  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
[  664.596333]  (t=162780 jiffies g=505 q=1086)
[  664.596709] Task dump for CPU 3:
[  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  664.597862] Call trace:
[  664.598078]  dump_backtrace+0x0/0x198
[  664.598409]  show_stack+0x14/0x60
[  664.598704]  sched_show_task+0x148/0x168
[  664.599052]  dump_cpu_task+0x40/0x4c
[  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
[  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
[  664.600114]  update_process_times+0x94/0xd8
[  664.600482]  tick_sched_handle.isra.0+0x30/0x50
[  664.600882]  tick_sched_timer+0x48/0x98
[  664.601220]  __hrtimer_run_queues+0x110/0x1b0
[  664.601604]  hrtimer_interrupt+0xe4/0x238
[  664.601958]  arch_timer_handler_phys+0x28/0x40
[  664.602352]  handle_percpu_devid_irq+0x80/0x130
[  664.602751]  generic_handle_domain_irq+0x38/0x58
[  664.603158]  gic_handle_irq+0x4c/0x110
[  664.603491]  call_on_irq_stack+0x28/0x3c
[  664.603838]  do_interrupt_handler+0x78/0x80
[  664.604206]  el1_interrupt+0x34/0x80
[  664.604522]  el1h_64_irq_handler+0x14/0x20
[  664.604883]  el1h_64_irq+0x74/0x78
[  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
[  664.605609]  _raw_spin_lock+0x5c/0x68
[  664.605934]  panfrost_job_run+0x24c/0x3f8
[  664.606290]  drm_sched_main+0x130/0x390
[  664.606631]  kthread+0x174/0x180
[  664.606918]  ret_from_fork+0x10/0x20
[  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
[  727.608331]  (t=178533 jiffies g=505 q=1152)
[  727.608706] Task dump for CPU 3:
[  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  727.609858] Call trace:
[  727.610074]  dump_backtrace+0x0/0x198
[  727.610403]  show_stack+0x14/0x60
[  727.610698]  sched_show_task+0x148/0x168
[  727.611047]  dump_cpu_task+0x40/0x4c
[  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
[  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
[  727.612112]  update_process_times+0x94/0xd8
[  727.612479]  tick_sched_handle.isra.0+0x30/0x50
[  727.612879]  tick_sched_timer+0x48/0x98
[  727.613216]  __hrtimer_run_queues+0x110/0x1b0
[  727.613601]  hrtimer_interrupt+0xe4/0x238
[  727.613955]  arch_timer_handler_phys+0x28/0x40
[  727.614348]  handle_percpu_devid_irq+0x80/0x130
[  727.614748]  generic_handle_domain_irq+0x38/0x58
[  727.615154]  gic_handle_irq+0x4c/0x110
[  727.615485]  call_on_irq_stack+0x28/0x3c
[  727.615832]  do_interrupt_handler+0x78/0x80
[  727.616200]  el1_interrupt+0x34/0x80
[  727.616517]  el1h_64_irq_handler+0x14/0x20
[  727.616879]  el1h_64_irq+0x74/0x78
[  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
[  727.617602]  _raw_spin_lock+0x5c/0x68
[  727.617926]  panfrost_job_run+0x24c/0x3f8
[  727.618282]  drm_sched_main+0x130/0x390
[  727.618621]  kthread+0x174/0x180
[  727.618908]  ret_from_fork+0x10/0x20
[  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
[  790.620331]  (t=194286 jiffies g=505 q=1219)
[  790.620708] Task dump for CPU 3:
[  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  790.621860] Call trace:
[  790.622075]  dump_backtrace+0x0/0x198
[  790.622405]  show_stack+0x14/0x60
[  790.622699]  sched_show_task+0x148/0x168
[  790.623049]  dump_cpu_task+0x40/0x4c
[  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
[  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
[  790.624113]  update_process_times+0x94/0xd8
[  790.624481]  tick_sched_handle.isra.0+0x30/0x50
[  790.624880]  tick_sched_timer+0x48/0x98
[  790.625218]  __hrtimer_run_queues+0x110/0x1b0
[  790.625603]  hrtimer_interrupt+0xe4/0x238
[  790.625957]  arch_timer_handler_phys+0x28/0x40
[  790.626350]  handle_percpu_devid_irq+0x80/0x130
[  790.626752]  generic_handle_domain_irq+0x38/0x58
[  790.627158]  gic_handle_irq+0x4c/0x110
[  790.627493]  call_on_irq_stack+0x28/0x3c
[  790.627839]  do_interrupt_handler+0x78/0x80
[  790.628208]  el1_interrupt+0x34/0x80
[  790.628526]  el1h_64_irq_handler+0x14/0x20
[  790.628888]  el1h_64_irq+0x74/0x78
[  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
[  790.629613]  _raw_spin_lock+0x5c/0x68
[  790.629937]  panfrost_job_run+0x24c/0x3f8
[  790.630292]  drm_sched_main+0x130/0x390
[  790.630632]  kthread+0x174/0x180
[  790.630919]  ret_from_fork+0x10/0x20
[  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
[  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
[  853.632340]  (t=210039 jiffies g=505 q=1318)
[  853.632716] Task dump for CPU 3:
[  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  853.633869] Call trace:
[  853.634084]  dump_backtrace+0x0/0x198
[  853.634418]  show_stack+0x14/0x60
[  853.634712]  sched_show_task+0x148/0x168
[  853.635061]  dump_cpu_task+0x40/0x4c
[  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
[  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
[  853.636124]  update_process_times+0x94/0xd8
[  853.636492]  tick_sched_handle.isra.0+0x30/0x50
[  853.636892]  tick_sched_timer+0x48/0x98
[  853.637230]  __hrtimer_run_queues+0x110/0x1b0
[  853.637613]  hrtimer_interrupt+0xe4/0x238
[  853.637965]  arch_timer_handler_phys+0x28/0x40
[  853.638358]  handle_percpu_devid_irq+0x80/0x130
[  853.638760]  generic_handle_domain_irq+0x38/0x58
[  853.639166]  gic_handle_irq+0x4c/0x110
[  853.639499]  call_on_irq_stack+0x28/0x3c
[  853.639845]  do_interrupt_handler+0x78/0x80
[  853.640213]  el1_interrupt+0x34/0x80
[  853.640530]  el1h_64_irq_handler+0x14/0x20
[  853.640892]  el1h_64_irq+0x74/0x78
[  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
[  853.641616]  _raw_spin_lock+0x5c/0x68
[  853.641940]  panfrost_job_run+0x24c/0x3f8
[  853.642295]  drm_sched_main+0x130/0x390
[  853.642634]  kthread+0x174/0x180
[  853.642921]  ret_from_fork+0x10/0x20
[  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
[  916.644339]  (t=225792 jiffies g=505 q=1390)
[  916.644715] Task dump for CPU 3:
[  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  916.645868] Call trace:
[  916.646083]  dump_backtrace+0x0/0x198
[  916.646414]  show_stack+0x14/0x60
[  916.646708]  sched_show_task+0x148/0x168
[  916.647055]  dump_cpu_task+0x40/0x4c
[  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
[  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
[  916.648119]  update_process_times+0x94/0xd8
[  916.648488]  tick_sched_handle.isra.0+0x30/0x50
[  916.648887]  tick_sched_timer+0x48/0x98
[  916.649225]  __hrtimer_run_queues+0x110/0x1b0
[  916.649608]  hrtimer_interrupt+0xe4/0x238
[  916.649962]  arch_timer_handler_phys+0x28/0x40
[  916.650355]  handle_percpu_devid_irq+0x80/0x130
[  916.650756]  generic_handle_domain_irq+0x38/0x58
[  916.651162]  gic_handle_irq+0x4c/0x110
[  916.651495]  call_on_irq_stack+0x28/0x3c
[  916.651842]  do_interrupt_handler+0x78/0x80
[  916.652210]  el1_interrupt+0x34/0x80
[  916.652527]  el1h_64_irq_handler+0x14/0x20
[  916.652889]  el1h_64_irq+0x74/0x78
[  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
[  916.653614]  _raw_spin_lock+0x5c/0x68
[  916.653937]  panfrost_job_run+0x24c/0x3f8
[  916.654293]  drm_sched_main+0x130/0x390
[  916.654632]  kthread+0x174/0x180
[  916.654920]  ret_from_fork+0x10/0x20

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
@ 2021-11-15 14:03     ` Sascha Hauer
  0 siblings, 0 replies; 115+ messages in thread
From: Sascha Hauer @ 2021-11-15 14:03 UTC (permalink / raw)
  To: Christian König; +Cc: linaro-mm-sig, intel-gfx, dri-devel, linux-media

Hi,

On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> Simplifying the code a bit.
> 
> v2: use dma_resv_for_each_fence
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>  1 file changed, 6 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 042c16b5d54a..5bc5f775abe1 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>  					    struct drm_gem_object *obj,
>  					    bool write)
>  {
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *fence;
>  	int ret;
> -	struct dma_fence **fences;
> -	unsigned int i, fence_count;
> -
> -	if (!write) {
> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> -
> -		return drm_sched_job_add_dependency(job, fence);
> -	}
> -
> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> -	if (ret || !fence_count)
> -		return ret;
>  
> -	for (i = 0; i < fence_count; i++) {
> -		ret = drm_sched_job_add_dependency(job, fences[i]);
> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> +		ret = drm_sched_job_add_dependency(job, fence);
>  		if (ret)
> -			break;
> +			return ret;
>  	}
> -
> -	for (; i < fence_count; i++)
> -		dma_fence_put(fences[i]);
> -	kfree(fences);
> -	return ret;
> +	return 0;
>  }
>  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>  

This patch makes the panfrost driver explode on v5.16-rc1 with the
log below. I didn't bisect it, but the problem goes away when I revert
this patch. I only started weston, nothing more.

Any idea what goes wrong here?

Sascha
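
For orientation when reading the log below, here is a minimal sketch of
where the reference counting could go wrong. It is only an assumption at
this point in the thread, not an established conclusion: it assumes that
dma_resv_for_each_fence() keeps ownership of the reference on the fence it
hands out (dropping it again when the cursor advances), while
drm_sched_job_add_dependency() consumes the reference passed to it. Under
those assumptions the new loop would need to take an extra reference of
its own, roughly like this:

int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/* Take a reference for the scheduler; the iterator drops
		 * its own reference on the next step or at the end of the
		 * walk (assumption, see above).
		 */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			/* Assuming the helper does not drop the reference
			 * when it fails, release the extra one here.
			 */
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}

If those assumptions hold, a missing extra reference would also line up
with the refcount underflows and the "released with pending signals"
warnings in the log below, which fire in dma_fence_release() while the
scheduler still has callbacks registered on the fence.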

[   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
[   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   12.514056] Modules linked in:
[   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
[   12.514621] ------------[ cut here ]------------
[   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.515049] pc : dma_fence_release+0xac/0xe8
[   12.515056] lr : dma_fence_release+0xac/0xe8
[   12.515061] sp : ffff8000123ebb20
[   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
[   12.515518] refcount_t: addition on 0; use-after-free.
[   12.516015]  x27: 0000000000000000
[   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
[   12.516992] x26: 0000000000000001
[   12.517366] Modules linked in:
[   12.517654]  x25: ffff000004b051c0
[   12.518108]
[   12.518555]  x24: 0000000000000000
[   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
[   12.519576]
[   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.520133] x23: 0000000000000000
[   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.520559]  x22: ffff800010d41b78
[   12.520856] pc : refcount_warn_saturate+0x98/0x140
[   12.521625]  x21: ffff000004b05050
[   12.521755] lr : refcount_warn_saturate+0x98/0x140
[   12.522299]
[   12.522588] sp : ffff8000122b3bc0
[   12.523192] x20: ffff000004b05040
[   12.523489] x29: ffff8000122b3bc0
[   12.523906]  x19: ffff000004b05078
[   12.524203]  x28: 0000000000000000
[   12.524620]  x18: 0000000000000010
[   12.524751]  x27: ffff000003791880
[   12.525040]
[   12.525329]
[   12.525618] x17: 0000000000000000
[   12.525915] x26: ffff8000122b3d30
[   12.526212]  x16: 0000000000000000
[   12.526509]  x25: 0000000000000001
[   12.526806]  x15: ffff0000050e2dc0
[   12.526937]  x24: ffff000003791a10
[   12.527067]
[   12.527357]
[   12.527646] x14: 00000000000001b5
[   12.527942] x23: 0000000000000000
[   12.528240]  x13: ffff0000050e2dc0
[   12.528536]  x22: ffff000003505280
[   12.528833]  x12: 00000000ffffffea
[   12.528964]  x21: ffff000003a2a220
[   12.529095]
[   12.529384]
[   12.529673] x11: ffff800011761ec8
[   12.529970] x20: ffff000004b05078
[   12.530267]  x10: ffff8000115e1e88
[   12.530564]  x19: ffff000004b05000
[   12.530861]  x9 : ffff8000115e1ee0
[   12.530992]  x18: 0000000000000010
[   12.531123]
[   12.531412]
[   12.531701] x8 : 000000000017ffe8
[   12.531998] x17: 0000000000500600
[   12.532294]  x7 : c0000000fffeffff
[   12.532591]  x16: 0000000000000000
[   12.532888]  x6 : 0000000000000001
[   12.533019]  x15: ffff000003505700
[   12.533150]
[   12.533439]
[   12.533728] x5 : ffff00007fb8c9a0
[   12.534025] x14: 0000000000000000
[   12.534322]  x4 : 0000000000000000
[   12.534619]  x13: 292d2d3d45505954
[   12.534914]  x3 : 0000000000000001
[   12.535045]  x12: 4220534253532d20
[   12.535176]
[   12.535465]
[   12.535754] x2 : ffff00007fb8c9a8
[   12.536051] x11: 5449442d204f4354
[   12.536347]  x1 : ea6e0584a53f2200
[   12.536643]  x10: 2d204f41552d204e
[   12.536941]  x0 : 0000000000000000
[   12.537073]  x9 : 4e41502b20666961
[   12.537203]
[   12.537492]
[   12.537782] Call trace:
[   12.538078] x8 : 642076635a6e2820
[   12.538377]  dma_fence_release+0xac/0xe8
[   12.538671]  x7 : 205d343430353135
[   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.539098]  x6 : 352e32312020205b
[   12.539230]  panfrost_job_push+0x1bc/0x200
[   12.539442]
[   12.539732]  panfrost_ioctl_submit+0x358/0x438
[   12.540073] x5 : ffff00007fb539a0
[   12.540370]  drm_ioctl_kernel+0xb8/0x170
[   12.540771]  x4 : 0000000000000000
[   12.541069]  drm_ioctl+0x214/0x450
[   12.541424]  x3 : 0000000000000001
[   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
[   12.541943]
[   12.542233]  invoke_syscall+0x40/0xf8
[   12.542573] x2 : ffff00007fb539a8
[   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.543167]  x1 : 0ac4fb7a0680bb00
[   12.543465]  do_el0_svc+0x20/0x80
[   12.543805]  x0 : 0000000000000000
[   12.543936]  el0_svc+0x1c/0x50
[   12.544255]
[   12.544544]  el0t_64_sync_handler+0xa8/0xb0
[   12.544955] Call trace:
[   12.545250]  el0t_64_sync+0x16c/0x170
[   12.545540]  refcount_warn_saturate+0x98/0x140
[   12.545837] ---[ end trace ba74542f51246288 ]---
[   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
[   12.546285] ------------[ cut here ]------------
[   12.546598]  drm_sched_job_done_cb+0x10/0x18
[   12.546813] refcount_t: underflow; use-after-free.
[   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
[   12.547920]  dma_fence_signal_locked+0x20/0x30
[   12.548336] Modules linked in:
[   12.548737]  panfrost_job_handle_done+0x34/0x50
[   12.549110]
[   12.549525]  panfrost_job_handle_irqs+0x358/0x570
[   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
[   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.551373]  irq_thread_fn+0x28/0x98
[   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.551899]  irq_thread+0x12c/0x230
[   12.552309] pc : refcount_warn_saturate+0xec/0x140
[   12.553131]  kthread+0x174/0x180
[   12.553578] lr : refcount_warn_saturate+0xec/0x140
[   12.554121]  ret_from_fork+0x10/0x20
[   12.554432] sp : ffff8000123ebaa0
[   12.555038] ---[ end trace ba74542f51246289 ]---
[   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
[   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
[   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
[   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
[   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
[   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
[   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.563733] Call trace:
[   12.563950]  refcount_warn_saturate+0xec/0x140
[   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
[   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   12.565216]  dma_fence_release+0xd4/0xe8
[   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.565970]  panfrost_job_push+0x1bc/0x200
[   12.566333]  panfrost_ioctl_submit+0x358/0x438
[   12.566726]  drm_ioctl_kernel+0xb8/0x170
[   12.567072]  drm_ioctl+0x214/0x450
[   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
[   12.567721]  invoke_syscall+0x40/0xf8
[   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.568463]  do_el0_svc+0x20/0x80
[   12.568755]  el0_svc+0x1c/0x50
[   12.569030]  el0t_64_sync_handler+0xa8/0xb0
[   12.569399]  el0t_64_sync+0x16c/0x170
[   12.569724] ---[ end trace ba74542f5124628a ]---
[   12.595086] ------------[ cut here ]------------
[   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
[   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   12.596934] Modules linked in:
[   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.599202] pc : dma_fence_release+0xac/0xe8
[   12.599584] lr : dma_fence_release+0xac/0xe8
[   12.599960] sp : ffff8000123ebb20
[   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
[   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.606542] Call trace:
[   12.606760]  dma_fence_release+0xac/0xe8
[   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   12.607517]  panfrost_job_push+0x1bc/0x200
[   12.607882]  panfrost_ioctl_submit+0x358/0x438
[   12.608274]  drm_ioctl_kernel+0xb8/0x170
[   12.608622]  drm_ioctl+0x214/0x450
[   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
[   12.609269]  invoke_syscall+0x40/0xf8
[   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.610011]  do_el0_svc+0x20/0x80
[   12.610304]  el0_svc+0x1c/0x50
[   12.610577]  el0t_64_sync_handler+0xa8/0xb0
[   12.610946]  el0t_64_sync+0x16c/0x170
[   12.611276] ---[ end trace ba74542f5124628b ]---
[   12.612869] ------------[ cut here ]------------
[   12.613288] refcount_t: saturated; leaking memory.
[   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
[   12.614476] Modules linked in:
[   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   12.616773] pc : refcount_warn_saturate+0xc0/0x140
[   12.617200] lr : refcount_warn_saturate+0xc0/0x140
[   12.617622] sp : ffff8000123eba60
[   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
[   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
[   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
[   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
[   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
[   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   12.624256] Call trace:
[   12.624474]  refcount_warn_saturate+0xc0/0x140
[   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
[   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
[   12.625811]  drm_atomic_helper_commit+0x80/0x360
[   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
[   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
[   12.627050]  drm_ioctl_kernel+0xb8/0x170
[   12.627397]  drm_ioctl+0x214/0x450
[   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
[   12.628046]  invoke_syscall+0x40/0xf8
[   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
[   12.628787]  do_el0_svc+0x20/0x80
[   12.629079]  el0_svc+0x1c/0x50
[   12.629354]  el0t_64_sync_handler+0xa8/0xb0
[   12.629723]  el0t_64_sync+0x16c/0x170
[   12.630048] ---[ end trace ba74542f5124628c ]---
[   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
[   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
[   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
[   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
[   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
[   13.296693] ------------[ cut here ]------------
[   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
[   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.298560] Modules linked in:
[   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.300830] pc : dma_fence_release+0xac/0xe8
[   13.301208] lr : dma_fence_release+0xac/0xe8
[   13.301585] sp : ffff8000123ebb20
[   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.308149] Call trace:
[   13.308367]  dma_fence_release+0xac/0xe8
[   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.309119]  panfrost_job_push+0x1bc/0x200
[   13.309483]  panfrost_ioctl_submit+0x358/0x438
[   13.309875]  drm_ioctl_kernel+0xb8/0x170
[   13.310221]  drm_ioctl+0x214/0x450
[   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
[   13.310868]  invoke_syscall+0x40/0xf8
[   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.311609]  do_el0_svc+0x20/0x80
[   13.311903]  el0_svc+0x1c/0x50
[   13.312177]  el0t_64_sync_handler+0xa8/0xb0
[   13.312545]  el0t_64_sync+0x16c/0x170
[   13.312869] ---[ end trace ba74542f5124628d ]---
[   13.340454] ------------[ cut here ]------------
[   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
[   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.342318] Modules linked in:
[   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.344584] pc : dma_fence_release+0xac/0xe8
[   13.344961] lr : dma_fence_release+0xac/0xe8
[   13.345338] sp : ffff8000123ebb20
[   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
[   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.351900] Call trace:
[   13.352116]  dma_fence_release+0xac/0xe8
[   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.352869]  panfrost_job_push+0x1bc/0x200
[   13.353232]  panfrost_ioctl_submit+0x358/0x438
[   13.353624]  drm_ioctl_kernel+0xb8/0x170
[   13.353971]  drm_ioctl+0x214/0x450
[   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
[   13.354616]  invoke_syscall+0x40/0xf8
[   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.355356]  do_el0_svc+0x20/0x80
[   13.355650]  el0_svc+0x1c/0x50
[   13.355925]  el0t_64_sync_handler+0xa8/0xb0
[   13.356293]  el0t_64_sync+0x16c/0x170
[   13.356618] ---[ end trace ba74542f5124628e ]---
[   13.379841] ------------[ cut here ]------------
[   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
[   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.381680] Modules linked in:
[   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.383937] pc : dma_fence_release+0xac/0xe8
[   13.384314] lr : dma_fence_release+0xac/0xe8
[   13.384690] sp : ffff8000123ebb20
[   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
[   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
[   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
[   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
[   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
[   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
[   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
[   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.391247] Call trace:
[   13.391464]  dma_fence_release+0xac/0xe8
[   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.392217]  panfrost_job_push+0x1bc/0x200
[   13.392581]  panfrost_ioctl_submit+0x358/0x438
[   13.392972]  drm_ioctl_kernel+0xb8/0x170
[   13.393319]  drm_ioctl+0x214/0x450
[   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
[   13.393967]  invoke_syscall+0x40/0xf8
[   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.394708]  do_el0_svc+0x20/0x80
[   13.395002]  el0_svc+0x1c/0x50
[   13.395275]  el0t_64_sync_handler+0xa8/0xb0
[   13.395643]  el0t_64_sync+0x16c/0x170
[   13.395968] ---[ end trace ba74542f5124628f ]---
[   13.398130] ------------[ cut here ]------------
[   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
[   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
[   13.400011] Modules linked in:
[   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.402269] pc : dma_fence_release+0xac/0xe8
[   13.402646] lr : dma_fence_release+0xac/0xe8
[   13.403024] sp : ffff8000123ebb20
[   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
[   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
[   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
[   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
[   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
[   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
[   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
[   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
[   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
[   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
[   13.409583] Call trace:
[   13.409800]  dma_fence_release+0xac/0xe8
[   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
[   13.410553]  panfrost_job_push+0x1bc/0x200
[   13.410917]  panfrost_ioctl_submit+0x358/0x438
[   13.411309]  drm_ioctl_kernel+0xb8/0x170
[   13.411656]  drm_ioctl+0x214/0x450
[   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
[   13.412303]  invoke_syscall+0x40/0xf8
[   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.413042]  do_el0_svc+0x20/0x80
[   13.413335]  el0_svc+0x1c/0x50
[   13.413607]  el0t_64_sync_handler+0xa8/0xb0
[   13.413976]  el0t_64_sync+0x16c/0x170
[   13.414298] ---[ end trace ba74542f51246290 ]---
[   13.430129] ------------[ cut here ]------------
[   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
[   13.430557] refcount_t: saturated; leaking memory.
[   13.431321] Mem abort info:
[   13.431324]   ESR = 0x96000044
[   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
[   13.431330]   SET = 0, FnV = 0
[   13.431333]   EA = 0, S1PTW = 0
[   13.431335]   FSC = 0x04: level 0 translation fault
[   13.431337] Data abort info:
[   13.431339]   ISV = 0, ISS = 0x00000044
[   13.431340]   CM = 0, WnR = 1
[   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
[   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
[   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
[   13.431359] Modules linked in:
[   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
[   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
[   13.432059] lr : dma_fence_signal+0x30/0x60
[   13.432327] Modules linked in:
[   13.432789] sp : ffff8000122b3b50
[   13.433057]
[   13.433331] x29: ffff8000122b3b50
[   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
[   13.434008]  x28: 0000000000000000
[   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
[   13.434601]  x27: ffff000003791880
[   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[   13.435751]
[   13.435753] x26: ffff8000122b3d30
[   13.436237] pc : refcount_warn_saturate+0x6c/0x140
[   13.436504]  x25: 0000000000000001
[   13.437393] lr : refcount_warn_saturate+0x6c/0x140
[   13.437938]  x24: ffff000003791a10
[   13.438542] sp : ffff8000123ebb40
[   13.439042]
[   13.439767] x29: ffff8000123ebb40
[   13.440130] x23: 0000000000000000
[   13.440398]  x28: ffff8000123ebd58
[   13.440687]  x22: ffff000003505280
[   13.440819]  x27: 0000000000000000
[   13.441108]  x21: ffff8000122b3b88
[   13.441931]
[   13.442228]
[   13.442773] x26: 0000000000000001
[   13.443070] x20: ffff000004b051c0
[   13.443674]  x25: ffff000004b051c0
[   13.443806]  x19: ffff000004b051c0
[   13.444095]  x24: 0000000000000000
[   13.444513]  x18: 0000000000000000
[   13.444811]
[   13.445227]
[   13.445524] x23: 0000000000000000
[   13.445814] x17: 3837783028203032
[   13.445945]  x22: ffff000004b051c0
[   13.446236]  x16: 3139323835323120
[   13.446525]  x21: ffff000004d73100
[   13.446822]  x15: 00000205aa24947a
[   13.447120]
[   13.447417]
[   13.447715] x20: ffff000004b05400
[   13.447846] x14: 0000000000000326
[   13.447977]  x19: 00000000ffffffff
[   13.448266]  x13: 0000000000000000
[   13.448555]  x18: 0000000000000010
[   13.448851]  x12: 0000000000000000
[   13.449148]
[   13.449446]
[   13.449743] x17: 0000000000000000
[   13.449874] x11: 0000000000000001
[   13.450006]  x16: 0000000000000000
[   13.450296]  x10: ffff8000122b3d48
[   13.450585]  x15: 000060978994e822
[   13.450882]  x9 : 00000000000019e0
[   13.451179]
[   13.451477]
[   13.451774] x14: 00000000000000b6
[   13.451905] x8 : ffff8000122b3d78
[   13.452037]  x13: 00000000000000b6
[   13.452326]  x7 : 0000000000000000
[   13.452614]  x12: 0000000000000000
[   13.452912]  x6 : 000000001fcf847e
[   13.453209]
[   13.453506]
[   13.453803] x11: 0000000000000001
[   13.453934] x5 : 00ffffffffffffff
[   13.454066]  x10: 00000000000009a0
[   13.454356]  x4 : 0015ef3c03fd7c00
[   13.454643]  x9 : ffff8000123eb8c0
[   13.454941]  x3 : 0000000000000018
[   13.455238]
[   13.455536]
[   13.455833] x8 : ffff0000050e3340
[   13.455965] x2 : ffff000004b051f0
[   13.456096]  x7 : ffff00007fb92a80
[   13.456386]  x1 : 000000032053be4d
[   13.456676]  x6 : 0000000000000115
[   13.456973]  x0 : 0000000000000000
[   13.457271]
[   13.457568]
[   13.457866] x5 : 0000000000000000
[   13.457998] Call trace:
[   13.458128]  x4 : ffff00007fb8c9a0
[   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
[   13.458707]  x3 : ffff00007fb8f950
[   13.459005]  dma_fence_signal+0x30/0x60
[   13.459302]
[   13.459600]  drm_sched_fence_finished+0x10/0x18
[   13.459897] x2 : ffff00007fb8c9a0
[   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
[   13.460159]  x1 : ea6e0584a53f2200
[   13.460449]  drm_sched_job_done_cb+0x10/0x18
[   13.460738]  x0 : 0000000000000000
[   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
[   13.461333]
[   13.461631]  dma_fence_signal_locked+0x20/0x30
[   13.461929] Call trace:
[   13.462060]  panfrost_job_handle_done+0x34/0x50
[   13.462192]  refcount_warn_saturate+0x6c/0x140
[   13.462481]  panfrost_job_handle_irqs+0x358/0x570
[   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
[   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
[   13.463462]  panfrost_job_push+0x1bc/0x200
[   13.463760]  irq_thread_fn+0x28/0x98
[   13.464094]  panfrost_ioctl_submit+0x358/0x438
[   13.464225]  irq_thread+0x12c/0x230
[   13.464620]  drm_ioctl_kernel+0xb8/0x170
[   13.464909]  kthread+0x174/0x180
[   13.465319]  drm_ioctl+0x214/0x450
[   13.465617]  ret_from_fork+0x10/0x20
[   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
[   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
[   13.466756]  invoke_syscall+0x40/0xf8
[   13.466891] ---[ end trace ba74542f51246291 ]---
[   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
[   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
[   13.467883]  do_el0_svc+0x20/0x80
[   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
[   13.468678]  el0_svc+0x1c/0x50
[   13.475908]  el0t_64_sync_handler+0xa8/0xb0
[   13.476277]  el0t_64_sync+0x16c/0x170
[   13.476601] ---[ end trace ba74542f51246292 ]---
[   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
[   14.462959] sched: RT throttling activated
[   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
[   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
[   34.476312]  (t=5250 jiffies g=505 q=301)
[   34.476667] Task dump for CPU 3:
[   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[   34.477820] Call trace:
[   34.478035]  dump_backtrace+0x0/0x198
[   34.478365]  show_stack+0x14/0x60
[   34.478659]  sched_show_task+0x148/0x168
[   34.479008]  dump_cpu_task+0x40/0x4c
[   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
[   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
[   34.480072]  update_process_times+0x94/0xd8
[   34.480440]  tick_sched_handle.isra.0+0x30/0x50
[   34.480840]  tick_sched_timer+0x48/0x98
[   34.481178]  __hrtimer_run_queues+0x110/0x1b0
[   34.481562]  hrtimer_interrupt+0xe4/0x238
[   34.481917]  arch_timer_handler_phys+0x28/0x40
[   34.482310]  handle_percpu_devid_irq+0x80/0x130
[   34.482710]  generic_handle_domain_irq+0x38/0x58
[   34.483116]  gic_handle_irq+0x4c/0x110
[   34.483450]  call_on_irq_stack+0x28/0x3c
[   34.483798]  do_interrupt_handler+0x78/0x80
[   34.484166]  el1_interrupt+0x34/0x80
[   34.484484]  el1h_64_irq_handler+0x14/0x20
[   34.484846]  el1h_64_irq+0x74/0x78
[   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
[   34.485568]  _raw_spin_lock+0x5c/0x68
[   34.485895]  panfrost_job_run+0x24c/0x3f8
[   34.486250]  drm_sched_main+0x130/0x390
[   34.486591]  kthread+0x174/0x180
[   34.486878]  ret_from_fork+0x10/0x20
[   35.810989] vcc3v3_lcd1_n: disabling
[   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
[   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
[   97.488326]  (t=21003 jiffies g=505 q=379)
[   97.488687] Task dump for CPU 3:
[   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[   97.489842] Call trace:
[   97.490056]  dump_backtrace+0x0/0x198
[   97.490388]  show_stack+0x14/0x60
[   97.490682]  sched_show_task+0x148/0x168
[   97.491030]  dump_cpu_task+0x40/0x4c
[   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
[   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
[   97.492095]  update_process_times+0x94/0xd8
[   97.492463]  tick_sched_handle.isra.0+0x30/0x50
[   97.492862]  tick_sched_timer+0x48/0x98
[   97.493200]  __hrtimer_run_queues+0x110/0x1b0
[   97.493582]  hrtimer_interrupt+0xe4/0x238
[   97.493937]  arch_timer_handler_phys+0x28/0x40
[   97.494330]  handle_percpu_devid_irq+0x80/0x130
[   97.494730]  generic_handle_domain_irq+0x38/0x58
[   97.495136]  gic_handle_irq+0x4c/0x110
[   97.495473]  call_on_irq_stack+0x28/0x3c
[   97.495818]  do_interrupt_handler+0x78/0x80
[   97.496186]  el1_interrupt+0x34/0x80
[   97.496503]  el1h_64_irq_handler+0x14/0x20
[   97.496865]  el1h_64_irq+0x74/0x78
[   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
[   97.497588]  _raw_spin_lock+0x5c/0x68
[   97.497912]  panfrost_job_run+0x24c/0x3f8
[   97.498268]  drm_sched_main+0x130/0x390
[   97.498607]  kthread+0x174/0x180
[   97.498895]  ret_from_fork+0x10/0x20
[  140.108141] random: crng init done
[  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
[  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
[  160.500322]  (t=36756 jiffies g=505 q=482)
[  160.500684] Task dump for CPU 3:
[  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  160.501837] Call trace:
[  160.502054]  dump_backtrace+0x0/0x198
[  160.502384]  show_stack+0x14/0x60
[  160.502679]  sched_show_task+0x148/0x168
[  160.503027]  dump_cpu_task+0x40/0x4c
[  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
[  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
[  160.504091]  update_process_times+0x94/0xd8
[  160.504458]  tick_sched_handle.isra.0+0x30/0x50
[  160.504858]  tick_sched_timer+0x48/0x98
[  160.505195]  __hrtimer_run_queues+0x110/0x1b0
[  160.505580]  hrtimer_interrupt+0xe4/0x238
[  160.505934]  arch_timer_handler_phys+0x28/0x40
[  160.506327]  handle_percpu_devid_irq+0x80/0x130
[  160.506727]  generic_handle_domain_irq+0x38/0x58
[  160.507133]  gic_handle_irq+0x4c/0x110
[  160.507467]  call_on_irq_stack+0x28/0x3c
[  160.507813]  do_interrupt_handler+0x78/0x80
[  160.508181]  el1_interrupt+0x34/0x80
[  160.508497]  el1h_64_irq_handler+0x14/0x20
[  160.508858]  el1h_64_irq+0x74/0x78
[  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
[  160.509579]  _raw_spin_lock+0x5c/0x68
[  160.509903]  panfrost_job_run+0x24c/0x3f8
[  160.510259]  drm_sched_main+0x130/0x390
[  160.510599]  kthread+0x174/0x180
[  160.510886]  ret_from_fork+0x10/0x20
[  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
[  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
[  223.512325]  (t=52509 jiffies g=505 q=536)
[  223.512688] Task dump for CPU 3:
[  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  223.513842] Call trace:
[  223.514056]  dump_backtrace+0x0/0x198
[  223.514387]  show_stack+0x14/0x60
[  223.514681]  sched_show_task+0x148/0x168
[  223.515029]  dump_cpu_task+0x40/0x4c
[  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
[  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
[  223.516094]  update_process_times+0x94/0xd8
[  223.516462]  tick_sched_handle.isra.0+0x30/0x50
[  223.516860]  tick_sched_timer+0x48/0x98
[  223.517198]  __hrtimer_run_queues+0x110/0x1b0
[  223.517582]  hrtimer_interrupt+0xe4/0x238
[  223.517935]  arch_timer_handler_phys+0x28/0x40
[  223.518327]  handle_percpu_devid_irq+0x80/0x130
[  223.518727]  generic_handle_domain_irq+0x38/0x58
[  223.519133]  gic_handle_irq+0x4c/0x110
[  223.519466]  call_on_irq_stack+0x28/0x3c
[  223.519812]  do_interrupt_handler+0x78/0x80
[  223.520181]  el1_interrupt+0x34/0x80
[  223.520498]  el1h_64_irq_handler+0x14/0x20
[  223.520860]  el1h_64_irq+0x74/0x78
[  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
[  223.521584]  _raw_spin_lock+0x5c/0x68
[  223.521908]  panfrost_job_run+0x24c/0x3f8
[  223.522264]  drm_sched_main+0x130/0x390
[  223.522605]  kthread+0x174/0x180
[  223.522892]  ret_from_fork+0x10/0x20
[  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
[  286.524325]  (t=68262 jiffies g=505 q=612)
[  286.524687] Task dump for CPU 3:
[  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  286.525840] Call trace:
[  286.526057]  dump_backtrace+0x0/0x198
[  286.526387]  show_stack+0x14/0x60
[  286.526681]  sched_show_task+0x148/0x168
[  286.527029]  dump_cpu_task+0x40/0x4c
[  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
[  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
[  286.528092]  update_process_times+0x94/0xd8
[  286.528459]  tick_sched_handle.isra.0+0x30/0x50
[  286.528859]  tick_sched_timer+0x48/0x98
[  286.529197]  __hrtimer_run_queues+0x110/0x1b0
[  286.529579]  hrtimer_interrupt+0xe4/0x238
[  286.529933]  arch_timer_handler_phys+0x28/0x40
[  286.530326]  handle_percpu_devid_irq+0x80/0x130
[  286.530726]  generic_handle_domain_irq+0x38/0x58
[  286.531132]  gic_handle_irq+0x4c/0x110
[  286.531466]  call_on_irq_stack+0x28/0x3c
[  286.531812]  do_interrupt_handler+0x78/0x80
[  286.532180]  el1_interrupt+0x34/0x80
[  286.532496]  el1h_64_irq_handler+0x14/0x20
[  286.532857]  el1h_64_irq+0x74/0x78
[  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
[  286.533580]  _raw_spin_lock+0x5c/0x68
[  286.533904]  panfrost_job_run+0x24c/0x3f8
[  286.534259]  drm_sched_main+0x130/0x390
[  286.534600]  kthread+0x174/0x180
[  286.534887]  ret_from_fork+0x10/0x20
[  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
[  349.536324]  (t=84015 jiffies g=505 q=716)
[  349.536687] Task dump for CPU 3:
[  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  349.537839] Call trace:
[  349.538055]  dump_backtrace+0x0/0x198
[  349.538387]  show_stack+0x14/0x60
[  349.538681]  sched_show_task+0x148/0x168
[  349.539029]  dump_cpu_task+0x40/0x4c
[  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
[  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
[  349.540094]  update_process_times+0x94/0xd8
[  349.540462]  tick_sched_handle.isra.0+0x30/0x50
[  349.540862]  tick_sched_timer+0x48/0x98
[  349.541201]  __hrtimer_run_queues+0x110/0x1b0
[  349.541585]  hrtimer_interrupt+0xe4/0x238
[  349.541937]  arch_timer_handler_phys+0x28/0x40
[  349.542330]  handle_percpu_devid_irq+0x80/0x130
[  349.542730]  generic_handle_domain_irq+0x38/0x58
[  349.543136]  gic_handle_irq+0x4c/0x110
[  349.543469]  call_on_irq_stack+0x28/0x3c
[  349.543815]  do_interrupt_handler+0x78/0x80
[  349.544183]  el1_interrupt+0x34/0x80
[  349.544500]  el1h_64_irq_handler+0x14/0x20
[  349.544862]  el1h_64_irq+0x74/0x78
[  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
[  349.545586]  _raw_spin_lock+0x5c/0x68
[  349.545910]  panfrost_job_run+0x24c/0x3f8
[  349.546265]  drm_sched_main+0x130/0x390
[  349.546604]  kthread+0x174/0x180
[  349.546891]  ret_from_fork+0x10/0x20
[  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
[  412.548325]  (t=99768 jiffies g=505 q=784)
[  412.548686] Task dump for CPU 3:
[  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  412.549841] Call trace:
[  412.550058]  dump_backtrace+0x0/0x198
[  412.550389]  show_stack+0x14/0x60
[  412.550684]  sched_show_task+0x148/0x168
[  412.551031]  dump_cpu_task+0x40/0x4c
[  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
[  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
[  412.552095]  update_process_times+0x94/0xd8
[  412.552463]  tick_sched_handle.isra.0+0x30/0x50
[  412.552863]  tick_sched_timer+0x48/0x98
[  412.553201]  __hrtimer_run_queues+0x110/0x1b0
[  412.553583]  hrtimer_interrupt+0xe4/0x238
[  412.553936]  arch_timer_handler_phys+0x28/0x40
[  412.554331]  handle_percpu_devid_irq+0x80/0x130
[  412.554732]  generic_handle_domain_irq+0x38/0x58
[  412.555139]  gic_handle_irq+0x4c/0x110
[  412.555471]  call_on_irq_stack+0x28/0x3c
[  412.555817]  do_interrupt_handler+0x78/0x80
[  412.556186]  el1_interrupt+0x34/0x80
[  412.556502]  el1h_64_irq_handler+0x14/0x20
[  412.556864]  el1h_64_irq+0x74/0x78
[  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
[  412.557587]  _raw_spin_lock+0x5c/0x68
[  412.557912]  panfrost_job_run+0x24c/0x3f8
[  412.558267]  drm_sched_main+0x130/0x390
[  412.558607]  kthread+0x174/0x180
[  412.558894]  ret_from_fork+0x10/0x20
[  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
[  475.560329]  (t=115521 jiffies g=505 q=857)
[  475.560697] Task dump for CPU 3:
[  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  475.561850] Call trace:
[  475.562067]  dump_backtrace+0x0/0x198
[  475.562398]  show_stack+0x14/0x60
[  475.562693]  sched_show_task+0x148/0x168
[  475.563041]  dump_cpu_task+0x40/0x4c
[  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
[  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
[  475.564104]  update_process_times+0x94/0xd8
[  475.564472]  tick_sched_handle.isra.0+0x30/0x50
[  475.564871]  tick_sched_timer+0x48/0x98
[  475.565209]  __hrtimer_run_queues+0x110/0x1b0
[  475.565592]  hrtimer_interrupt+0xe4/0x238
[  475.565946]  arch_timer_handler_phys+0x28/0x40
[  475.566339]  handle_percpu_devid_irq+0x80/0x130
[  475.566739]  generic_handle_domain_irq+0x38/0x58
[  475.567145]  gic_handle_irq+0x4c/0x110
[  475.567477]  call_on_irq_stack+0x28/0x3c
[  475.567822]  do_interrupt_handler+0x78/0x80
[  475.568190]  el1_interrupt+0x34/0x80
[  475.568507]  el1h_64_irq_handler+0x14/0x20
[  475.568869]  el1h_64_irq+0x74/0x78
[  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
[  475.569593]  _raw_spin_lock+0x5c/0x68
[  475.569915]  panfrost_job_run+0x24c/0x3f8
[  475.570270]  drm_sched_main+0x130/0x390
[  475.570610]  kthread+0x174/0x180
[  475.570897]  ret_from_fork+0x10/0x20
[  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
[  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
[  538.572333]  (t=131274 jiffies g=505 q=947)
[  538.572701] Task dump for CPU 3:
[  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  538.573854] Call trace:
[  538.574070]  dump_backtrace+0x0/0x198
[  538.574402]  show_stack+0x14/0x60
[  538.574696]  sched_show_task+0x148/0x168
[  538.575044]  dump_cpu_task+0x40/0x4c
[  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
[  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
[  538.576109]  update_process_times+0x94/0xd8
[  538.576477]  tick_sched_handle.isra.0+0x30/0x50
[  538.576878]  tick_sched_timer+0x48/0x98
[  538.577216]  __hrtimer_run_queues+0x110/0x1b0
[  538.577599]  hrtimer_interrupt+0xe4/0x238
[  538.577953]  arch_timer_handler_phys+0x28/0x40
[  538.578346]  handle_percpu_devid_irq+0x80/0x130
[  538.578745]  generic_handle_domain_irq+0x38/0x58
[  538.579151]  gic_handle_irq+0x4c/0x110
[  538.579487]  call_on_irq_stack+0x28/0x3c
[  538.579833]  do_interrupt_handler+0x78/0x80
[  538.580201]  el1_interrupt+0x34/0x80
[  538.580518]  el1h_64_irq_handler+0x14/0x20
[  538.580880]  el1h_64_irq+0x74/0x78
[  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
[  538.581603]  _raw_spin_lock+0x5c/0x68
[  538.581927]  panfrost_job_run+0x24c/0x3f8
[  538.582283]  drm_sched_main+0x130/0x390
[  538.582623]  kthread+0x174/0x180
[  538.582910]  ret_from_fork+0x10/0x20
[  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
[  601.584330]  (t=147027 jiffies g=505 q=1018)
[  601.584706] Task dump for CPU 3:
[  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  601.585859] Call trace:
[  601.586075]  dump_backtrace+0x0/0x198
[  601.586406]  show_stack+0x14/0x60
[  601.586701]  sched_show_task+0x148/0x168
[  601.587048]  dump_cpu_task+0x40/0x4c
[  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
[  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
[  601.588112]  update_process_times+0x94/0xd8
[  601.588480]  tick_sched_handle.isra.0+0x30/0x50
[  601.588880]  tick_sched_timer+0x48/0x98
[  601.589218]  __hrtimer_run_queues+0x110/0x1b0
[  601.589602]  hrtimer_interrupt+0xe4/0x238
[  601.589956]  arch_timer_handler_phys+0x28/0x40
[  601.590348]  handle_percpu_devid_irq+0x80/0x130
[  601.590747]  generic_handle_domain_irq+0x38/0x58
[  601.591153]  gic_handle_irq+0x4c/0x110
[  601.591486]  call_on_irq_stack+0x28/0x3c
[  601.591832]  do_interrupt_handler+0x78/0x80
[  601.592201]  el1_interrupt+0x34/0x80
[  601.592517]  el1h_64_irq_handler+0x14/0x20
[  601.592879]  el1h_64_irq+0x74/0x78
[  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
[  601.593603]  _raw_spin_lock+0x5c/0x68
[  601.593927]  panfrost_job_run+0x24c/0x3f8
[  601.594283]  drm_sched_main+0x130/0x390
[  601.594623]  kthread+0x174/0x180
[  601.594910]  ret_from_fork+0x10/0x20
[  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
[  664.596333]  (t=162780 jiffies g=505 q=1086)
[  664.596709] Task dump for CPU 3:
[  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  664.597862] Call trace:
[  664.598078]  dump_backtrace+0x0/0x198
[  664.598409]  show_stack+0x14/0x60
[  664.598704]  sched_show_task+0x148/0x168
[  664.599052]  dump_cpu_task+0x40/0x4c
[  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
[  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
[  664.600114]  update_process_times+0x94/0xd8
[  664.600482]  tick_sched_handle.isra.0+0x30/0x50
[  664.600882]  tick_sched_timer+0x48/0x98
[  664.601220]  __hrtimer_run_queues+0x110/0x1b0
[  664.601604]  hrtimer_interrupt+0xe4/0x238
[  664.601958]  arch_timer_handler_phys+0x28/0x40
[  664.602352]  handle_percpu_devid_irq+0x80/0x130
[  664.602751]  generic_handle_domain_irq+0x38/0x58
[  664.603158]  gic_handle_irq+0x4c/0x110
[  664.603491]  call_on_irq_stack+0x28/0x3c
[  664.603838]  do_interrupt_handler+0x78/0x80
[  664.604206]  el1_interrupt+0x34/0x80
[  664.604522]  el1h_64_irq_handler+0x14/0x20
[  664.604883]  el1h_64_irq+0x74/0x78
[  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
[  664.605609]  _raw_spin_lock+0x5c/0x68
[  664.605934]  panfrost_job_run+0x24c/0x3f8
[  664.606290]  drm_sched_main+0x130/0x390
[  664.606631]  kthread+0x174/0x180
[  664.606918]  ret_from_fork+0x10/0x20
[  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
[  727.608331]  (t=178533 jiffies g=505 q=1152)
[  727.608706] Task dump for CPU 3:
[  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  727.609858] Call trace:
[  727.610074]  dump_backtrace+0x0/0x198
[  727.610403]  show_stack+0x14/0x60
[  727.610698]  sched_show_task+0x148/0x168
[  727.611047]  dump_cpu_task+0x40/0x4c
[  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
[  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
[  727.612112]  update_process_times+0x94/0xd8
[  727.612479]  tick_sched_handle.isra.0+0x30/0x50
[  727.612879]  tick_sched_timer+0x48/0x98
[  727.613216]  __hrtimer_run_queues+0x110/0x1b0
[  727.613601]  hrtimer_interrupt+0xe4/0x238
[  727.613955]  arch_timer_handler_phys+0x28/0x40
[  727.614348]  handle_percpu_devid_irq+0x80/0x130
[  727.614748]  generic_handle_domain_irq+0x38/0x58
[  727.615154]  gic_handle_irq+0x4c/0x110
[  727.615485]  call_on_irq_stack+0x28/0x3c
[  727.615832]  do_interrupt_handler+0x78/0x80
[  727.616200]  el1_interrupt+0x34/0x80
[  727.616517]  el1h_64_irq_handler+0x14/0x20
[  727.616879]  el1h_64_irq+0x74/0x78
[  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
[  727.617602]  _raw_spin_lock+0x5c/0x68
[  727.617926]  panfrost_job_run+0x24c/0x3f8
[  727.618282]  drm_sched_main+0x130/0x390
[  727.618621]  kthread+0x174/0x180
[  727.618908]  ret_from_fork+0x10/0x20
[  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
[  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
[  790.620331]  (t=194286 jiffies g=505 q=1219)
[  790.620708] Task dump for CPU 3:
[  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  790.621860] Call trace:
[  790.622075]  dump_backtrace+0x0/0x198
[  790.622405]  show_stack+0x14/0x60
[  790.622699]  sched_show_task+0x148/0x168
[  790.623049]  dump_cpu_task+0x40/0x4c
[  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
[  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
[  790.624113]  update_process_times+0x94/0xd8
[  790.624481]  tick_sched_handle.isra.0+0x30/0x50
[  790.624880]  tick_sched_timer+0x48/0x98
[  790.625218]  __hrtimer_run_queues+0x110/0x1b0
[  790.625603]  hrtimer_interrupt+0xe4/0x238
[  790.625957]  arch_timer_handler_phys+0x28/0x40
[  790.626350]  handle_percpu_devid_irq+0x80/0x130
[  790.626752]  generic_handle_domain_irq+0x38/0x58
[  790.627158]  gic_handle_irq+0x4c/0x110
[  790.627493]  call_on_irq_stack+0x28/0x3c
[  790.627839]  do_interrupt_handler+0x78/0x80
[  790.628208]  el1_interrupt+0x34/0x80
[  790.628526]  el1h_64_irq_handler+0x14/0x20
[  790.628888]  el1h_64_irq+0x74/0x78
[  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
[  790.629613]  _raw_spin_lock+0x5c/0x68
[  790.629937]  panfrost_job_run+0x24c/0x3f8
[  790.630292]  drm_sched_main+0x130/0x390
[  790.630632]  kthread+0x174/0x180
[  790.630919]  ret_from_fork+0x10/0x20
[  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
[  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
[  853.632340]  (t=210039 jiffies g=505 q=1318)
[  853.632716] Task dump for CPU 3:
[  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  853.633869] Call trace:
[  853.634084]  dump_backtrace+0x0/0x198
[  853.634418]  show_stack+0x14/0x60
[  853.634712]  sched_show_task+0x148/0x168
[  853.635061]  dump_cpu_task+0x40/0x4c
[  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
[  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
[  853.636124]  update_process_times+0x94/0xd8
[  853.636492]  tick_sched_handle.isra.0+0x30/0x50
[  853.636892]  tick_sched_timer+0x48/0x98
[  853.637230]  __hrtimer_run_queues+0x110/0x1b0
[  853.637613]  hrtimer_interrupt+0xe4/0x238
[  853.637965]  arch_timer_handler_phys+0x28/0x40
[  853.638358]  handle_percpu_devid_irq+0x80/0x130
[  853.638760]  generic_handle_domain_irq+0x38/0x58
[  853.639166]  gic_handle_irq+0x4c/0x110
[  853.639499]  call_on_irq_stack+0x28/0x3c
[  853.639845]  do_interrupt_handler+0x78/0x80
[  853.640213]  el1_interrupt+0x34/0x80
[  853.640530]  el1h_64_irq_handler+0x14/0x20
[  853.640892]  el1h_64_irq+0x74/0x78
[  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
[  853.641616]  _raw_spin_lock+0x5c/0x68
[  853.641940]  panfrost_job_run+0x24c/0x3f8
[  853.642295]  drm_sched_main+0x130/0x390
[  853.642634]  kthread+0x174/0x180
[  853.642921]  ret_from_fork+0x10/0x20
[  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
[  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
[  916.644339]  (t=225792 jiffies g=505 q=1390)
[  916.644715] Task dump for CPU 3:
[  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
[  916.645868] Call trace:
[  916.646083]  dump_backtrace+0x0/0x198
[  916.646414]  show_stack+0x14/0x60
[  916.646708]  sched_show_task+0x148/0x168
[  916.647055]  dump_cpu_task+0x40/0x4c
[  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
[  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
[  916.648119]  update_process_times+0x94/0xd8
[  916.648488]  tick_sched_handle.isra.0+0x30/0x50
[  916.648887]  tick_sched_timer+0x48/0x98
[  916.649225]  __hrtimer_run_queues+0x110/0x1b0
[  916.649608]  hrtimer_interrupt+0xe4/0x238
[  916.649962]  arch_timer_handler_phys+0x28/0x40
[  916.650355]  handle_percpu_devid_irq+0x80/0x130
[  916.650756]  generic_handle_domain_irq+0x38/0x58
[  916.651162]  gic_handle_irq+0x4c/0x110
[  916.651495]  call_on_irq_stack+0x28/0x3c
[  916.651842]  do_interrupt_handler+0x78/0x80
[  916.652210]  el1_interrupt+0x34/0x80
[  916.652527]  el1h_64_irq_handler+0x14/0x20
[  916.652889]  el1h_64_irq+0x74/0x78
[  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
[  916.653614]  _raw_spin_lock+0x5c/0x68
[  916.653937]  panfrost_job_run+0x24c/0x3f8
[  916.654293]  drm_sched_main+0x130/0x390
[  916.654632]  kthread+0x174/0x180
[  916.654920]  ret_from_fork+0x10/0x20

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
  2021-11-15 14:03     ` [Intel-gfx] " Sascha Hauer
@ 2021-11-15 14:08       ` Daniel Vetter
  -1 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-11-15 14:08 UTC (permalink / raw)
  To: Sascha Hauer
  Cc: Christian König, linaro-mm-sig, dri-devel, linux-media,
	intel-gfx, daniel

On Mon, Nov 15, 2021 at 03:03:53PM +0100, Sascha Hauer wrote:
> Hi,
> 
> On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> > Simplifying the code a bit.
> > 
> > v2: use dma_resv_for_each_fence
> > 
> > Signed-off-by: Christian König <christian.koenig@amd.com>
> > ---
> >  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
> >  1 file changed, 6 insertions(+), 20 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > index 042c16b5d54a..5bc5f775abe1 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
> >  					    struct drm_gem_object *obj,
> >  					    bool write)
> >  {
> > +	struct dma_resv_iter cursor;
> > +	struct dma_fence *fence;
> >  	int ret;
> > -	struct dma_fence **fences;
> > -	unsigned int i, fence_count;
> > -
> > -	if (!write) {
> > -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> > -
> > -		return drm_sched_job_add_dependency(job, fence);
> > -	}
> > -
> > -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> > -	if (ret || !fence_count)
> > -		return ret;
> >  
> > -	for (i = 0; i < fence_count; i++) {
> > -		ret = drm_sched_job_add_dependency(job, fences[i]);
> > +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> > +		ret = drm_sched_job_add_dependency(job, fence);
> >  		if (ret)
> > -			break;
> > +			return ret;
> >  	}
> > -
> > -	for (; i < fence_count; i++)
> > -		dma_fence_put(fences[i]);
> > -	kfree(fences);
> > -	return ret;
> > +	return 0;
> >  }
> >  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
> >  
> 
> This patch lets the panfrost driver explode on v5.16-rc1 with the
> following. I didn't bisect it, but it goes away when I revert this
> patch. I only started weston, nothing more.
> 
> Any idea what goes wrong here?

Should be fixed in 13e9e30cafea1, but Christian pushed it to the wrong
branch so it missed -rc1.

Christian, this needs to go into drm-misc-fixes, pls cherry-pick it over.
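
In the meantime, for anyone who wants to patch around this locally: the
underlying problem seems to be that drm_sched_job_add_dependency()
consumes a fence reference (the old code handed it the references it got
from dma_resv_get_fences()), while the new iterator only borrows the
fences for the duration of the loop, so the converted code ends up
dropping references it never took. A minimal sketch of the kind of change
needed (hand-written here, not the literal commit, reusing the names from
the patch above):

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/*
		 * add_dependency() takes over a fence reference, but the
		 * iterator doesn't hand us one, so grab our own first.
		 */
		dma_fence_get(fence);

		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
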
-Daniel

> 
> Sascha
> 
> [   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
> [   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   12.514056] Modules linked in:
> [   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
> [   12.514621] ------------[ cut here ]------------
> [   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.515049] pc : dma_fence_release+0xac/0xe8
> [   12.515056] lr : dma_fence_release+0xac/0xe8
> [   12.515061] sp : ffff8000123ebb20
> [   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
> [   12.515518] refcount_t: addition on 0; use-after-free.
> [   12.516015]  x27: 0000000000000000
> [   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
> [   12.516992] x26: 0000000000000001
> [   12.517366] Modules linked in:
> [   12.517654]  x25: ffff000004b051c0
> [   12.518108]
> [   12.518555]  x24: 0000000000000000
> [   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
> [   12.519576]
> [   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.520133] x23: 0000000000000000
> [   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.520559]  x22: ffff800010d41b78
> [   12.520856] pc : refcount_warn_saturate+0x98/0x140
> [   12.521625]  x21: ffff000004b05050
> [   12.521755] lr : refcount_warn_saturate+0x98/0x140
> [   12.522299]
> [   12.522588] sp : ffff8000122b3bc0
> [   12.523192] x20: ffff000004b05040
> [   12.523489] x29: ffff8000122b3bc0
> [   12.523906]  x19: ffff000004b05078
> [   12.524203]  x28: 0000000000000000
> [   12.524620]  x18: 0000000000000010
> [   12.524751]  x27: ffff000003791880
> [   12.525040]
> [   12.525329]
> [   12.525618] x17: 0000000000000000
> [   12.525915] x26: ffff8000122b3d30
> [   12.526212]  x16: 0000000000000000
> [   12.526509]  x25: 0000000000000001
> [   12.526806]  x15: ffff0000050e2dc0
> [   12.526937]  x24: ffff000003791a10
> [   12.527067]
> [   12.527357]
> [   12.527646] x14: 00000000000001b5
> [   12.527942] x23: 0000000000000000
> [   12.528240]  x13: ffff0000050e2dc0
> [   12.528536]  x22: ffff000003505280
> [   12.528833]  x12: 00000000ffffffea
> [   12.528964]  x21: ffff000003a2a220
> [   12.529095]
> [   12.529384]
> [   12.529673] x11: ffff800011761ec8
> [   12.529970] x20: ffff000004b05078
> [   12.530267]  x10: ffff8000115e1e88
> [   12.530564]  x19: ffff000004b05000
> [   12.530861]  x9 : ffff8000115e1ee0
> [   12.530992]  x18: 0000000000000010
> [   12.531123]
> [   12.531412]
> [   12.531701] x8 : 000000000017ffe8
> [   12.531998] x17: 0000000000500600
> [   12.532294]  x7 : c0000000fffeffff
> [   12.532591]  x16: 0000000000000000
> [   12.532888]  x6 : 0000000000000001
> [   12.533019]  x15: ffff000003505700
> [   12.533150]
> [   12.533439]
> [   12.533728] x5 : ffff00007fb8c9a0
> [   12.534025] x14: 0000000000000000
> [   12.534322]  x4 : 0000000000000000
> [   12.534619]  x13: 292d2d3d45505954
> [   12.534914]  x3 : 0000000000000001
> [   12.535045]  x12: 4220534253532d20
> [   12.535176]
> [   12.535465]
> [   12.535754] x2 : ffff00007fb8c9a8
> [   12.536051] x11: 5449442d204f4354
> [   12.536347]  x1 : ea6e0584a53f2200
> [   12.536643]  x10: 2d204f41552d204e
> [   12.536941]  x0 : 0000000000000000
> [   12.537073]  x9 : 4e41502b20666961
> [   12.537203]
> [   12.537492]
> [   12.537782] Call trace:
> [   12.538078] x8 : 642076635a6e2820
> [   12.538377]  dma_fence_release+0xac/0xe8
> [   12.538671]  x7 : 205d343430353135
> [   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.539098]  x6 : 352e32312020205b
> [   12.539230]  panfrost_job_push+0x1bc/0x200
> [   12.539442]
> [   12.539732]  panfrost_ioctl_submit+0x358/0x438
> [   12.540073] x5 : ffff00007fb539a0
> [   12.540370]  drm_ioctl_kernel+0xb8/0x170
> [   12.540771]  x4 : 0000000000000000
> [   12.541069]  drm_ioctl+0x214/0x450
> [   12.541424]  x3 : 0000000000000001
> [   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.541943]
> [   12.542233]  invoke_syscall+0x40/0xf8
> [   12.542573] x2 : ffff00007fb539a8
> [   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.543167]  x1 : 0ac4fb7a0680bb00
> [   12.543465]  do_el0_svc+0x20/0x80
> [   12.543805]  x0 : 0000000000000000
> [   12.543936]  el0_svc+0x1c/0x50
> [   12.544255]
> [   12.544544]  el0t_64_sync_handler+0xa8/0xb0
> [   12.544955] Call trace:
> [   12.545250]  el0t_64_sync+0x16c/0x170
> [   12.545540]  refcount_warn_saturate+0x98/0x140
> [   12.545837] ---[ end trace ba74542f51246288 ]---
> [   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
> [   12.546285] ------------[ cut here ]------------
> [   12.546598]  drm_sched_job_done_cb+0x10/0x18
> [   12.546813] refcount_t: underflow; use-after-free.
> [   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
> [   12.547920]  dma_fence_signal_locked+0x20/0x30
> [   12.548336] Modules linked in:
> [   12.548737]  panfrost_job_handle_done+0x34/0x50
> [   12.549110]
> [   12.549525]  panfrost_job_handle_irqs+0x358/0x570
> [   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
> [   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.551373]  irq_thread_fn+0x28/0x98
> [   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.551899]  irq_thread+0x12c/0x230
> [   12.552309] pc : refcount_warn_saturate+0xec/0x140
> [   12.553131]  kthread+0x174/0x180
> [   12.553578] lr : refcount_warn_saturate+0xec/0x140
> [   12.554121]  ret_from_fork+0x10/0x20
> [   12.554432] sp : ffff8000123ebaa0
> [   12.555038] ---[ end trace ba74542f51246289 ]---
> [   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
> [   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
> [   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
> [   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
> [   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
> [   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
> [   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.563733] Call trace:
> [   12.563950]  refcount_warn_saturate+0xec/0x140
> [   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
> [   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   12.565216]  dma_fence_release+0xd4/0xe8
> [   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.565970]  panfrost_job_push+0x1bc/0x200
> [   12.566333]  panfrost_ioctl_submit+0x358/0x438
> [   12.566726]  drm_ioctl_kernel+0xb8/0x170
> [   12.567072]  drm_ioctl+0x214/0x450
> [   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.567721]  invoke_syscall+0x40/0xf8
> [   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.568463]  do_el0_svc+0x20/0x80
> [   12.568755]  el0_svc+0x1c/0x50
> [   12.569030]  el0t_64_sync_handler+0xa8/0xb0
> [   12.569399]  el0t_64_sync+0x16c/0x170
> [   12.569724] ---[ end trace ba74542f5124628a ]---
> [   12.595086] ------------[ cut here ]------------
> [   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
> [   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   12.596934] Modules linked in:
> [   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.599202] pc : dma_fence_release+0xac/0xe8
> [   12.599584] lr : dma_fence_release+0xac/0xe8
> [   12.599960] sp : ffff8000123ebb20
> [   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
> [   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.606542] Call trace:
> [   12.606760]  dma_fence_release+0xac/0xe8
> [   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.607517]  panfrost_job_push+0x1bc/0x200
> [   12.607882]  panfrost_ioctl_submit+0x358/0x438
> [   12.608274]  drm_ioctl_kernel+0xb8/0x170
> [   12.608622]  drm_ioctl+0x214/0x450
> [   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.609269]  invoke_syscall+0x40/0xf8
> [   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.610011]  do_el0_svc+0x20/0x80
> [   12.610304]  el0_svc+0x1c/0x50
> [   12.610577]  el0t_64_sync_handler+0xa8/0xb0
> [   12.610946]  el0t_64_sync+0x16c/0x170
> [   12.611276] ---[ end trace ba74542f5124628b ]---
> [   12.612869] ------------[ cut here ]------------
> [   12.613288] refcount_t: saturated; leaking memory.
> [   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
> [   12.614476] Modules linked in:
> [   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.616773] pc : refcount_warn_saturate+0xc0/0x140
> [   12.617200] lr : refcount_warn_saturate+0xc0/0x140
> [   12.617622] sp : ffff8000123eba60
> [   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
> [   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
> [   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
> [   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
> [   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
> [   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.624256] Call trace:
> [   12.624474]  refcount_warn_saturate+0xc0/0x140
> [   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
> [   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
> [   12.625811]  drm_atomic_helper_commit+0x80/0x360
> [   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
> [   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
> [   12.627050]  drm_ioctl_kernel+0xb8/0x170
> [   12.627397]  drm_ioctl+0x214/0x450
> [   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.628046]  invoke_syscall+0x40/0xf8
> [   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.628787]  do_el0_svc+0x20/0x80
> [   12.629079]  el0_svc+0x1c/0x50
> [   12.629354]  el0t_64_sync_handler+0xa8/0xb0
> [   12.629723]  el0t_64_sync+0x16c/0x170
> [   12.630048] ---[ end trace ba74542f5124628c ]---
> [   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
> [   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
> [   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
> [   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
> [   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
> [   13.296693] ------------[ cut here ]------------
> [   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
> [   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.298560] Modules linked in:
> [   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.300830] pc : dma_fence_release+0xac/0xe8
> [   13.301208] lr : dma_fence_release+0xac/0xe8
> [   13.301585] sp : ffff8000123ebb20
> [   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.308149] Call trace:
> [   13.308367]  dma_fence_release+0xac/0xe8
> [   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.309119]  panfrost_job_push+0x1bc/0x200
> [   13.309483]  panfrost_ioctl_submit+0x358/0x438
> [   13.309875]  drm_ioctl_kernel+0xb8/0x170
> [   13.310221]  drm_ioctl+0x214/0x450
> [   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.310868]  invoke_syscall+0x40/0xf8
> [   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.311609]  do_el0_svc+0x20/0x80
> [   13.311903]  el0_svc+0x1c/0x50
> [   13.312177]  el0t_64_sync_handler+0xa8/0xb0
> [   13.312545]  el0t_64_sync+0x16c/0x170
> [   13.312869] ---[ end trace ba74542f5124628d ]---
> [   13.340454] ------------[ cut here ]------------
> [   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
> [   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.342318] Modules linked in:
> [   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.344584] pc : dma_fence_release+0xac/0xe8
> [   13.344961] lr : dma_fence_release+0xac/0xe8
> [   13.345338] sp : ffff8000123ebb20
> [   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
> [   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.351900] Call trace:
> [   13.352116]  dma_fence_release+0xac/0xe8
> [   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.352869]  panfrost_job_push+0x1bc/0x200
> [   13.353232]  panfrost_ioctl_submit+0x358/0x438
> [   13.353624]  drm_ioctl_kernel+0xb8/0x170
> [   13.353971]  drm_ioctl+0x214/0x450
> [   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.354616]  invoke_syscall+0x40/0xf8
> [   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.355356]  do_el0_svc+0x20/0x80
> [   13.355650]  el0_svc+0x1c/0x50
> [   13.355925]  el0t_64_sync_handler+0xa8/0xb0
> [   13.356293]  el0t_64_sync+0x16c/0x170
> [   13.356618] ---[ end trace ba74542f5124628e ]---
> [   13.379841] ------------[ cut here ]------------
> [   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
> [   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.381680] Modules linked in:
> [   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.383937] pc : dma_fence_release+0xac/0xe8
> [   13.384314] lr : dma_fence_release+0xac/0xe8
> [   13.384690] sp : ffff8000123ebb20
> [   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.391247] Call trace:
> [   13.391464]  dma_fence_release+0xac/0xe8
> [   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.392217]  panfrost_job_push+0x1bc/0x200
> [   13.392581]  panfrost_ioctl_submit+0x358/0x438
> [   13.392972]  drm_ioctl_kernel+0xb8/0x170
> [   13.393319]  drm_ioctl+0x214/0x450
> [   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.393967]  invoke_syscall+0x40/0xf8
> [   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.394708]  do_el0_svc+0x20/0x80
> [   13.395002]  el0_svc+0x1c/0x50
> [   13.395275]  el0t_64_sync_handler+0xa8/0xb0
> [   13.395643]  el0t_64_sync+0x16c/0x170
> [   13.395968] ---[ end trace ba74542f5124628f ]---
> [   13.398130] ------------[ cut here ]------------
> [   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
> [   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.400011] Modules linked in:
> [   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.402269] pc : dma_fence_release+0xac/0xe8
> [   13.402646] lr : dma_fence_release+0xac/0xe8
> [   13.403024] sp : ffff8000123ebb20
> [   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
> [   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
> [   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
> [   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
> [   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
> [   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
> [   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
> [   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.409583] Call trace:
> [   13.409800]  dma_fence_release+0xac/0xe8
> [   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.410553]  panfrost_job_push+0x1bc/0x200
> [   13.410917]  panfrost_ioctl_submit+0x358/0x438
> [   13.411309]  drm_ioctl_kernel+0xb8/0x170
> [   13.411656]  drm_ioctl+0x214/0x450
> [   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.412303]  invoke_syscall+0x40/0xf8
> [   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.413042]  do_el0_svc+0x20/0x80
> [   13.413335]  el0_svc+0x1c/0x50
> [   13.413607]  el0t_64_sync_handler+0xa8/0xb0
> [   13.413976]  el0t_64_sync+0x16c/0x170
> [   13.414298] ---[ end trace ba74542f51246290 ]---
> [   13.430129] ------------[ cut here ]------------
> [   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
> [   13.430557] refcount_t: saturated; leaking memory.
> [   13.431321] Mem abort info:
> [   13.431324]   ESR = 0x96000044
> [   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
> [   13.431330]   SET = 0, FnV = 0
> [   13.431333]   EA = 0, S1PTW = 0
> [   13.431335]   FSC = 0x04: level 0 translation fault
> [   13.431337] Data abort info:
> [   13.431339]   ISV = 0, ISS = 0x00000044
> [   13.431340]   CM = 0, WnR = 1
> [   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
> [   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
> [   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
> [   13.431359] Modules linked in:
> [   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
> [   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
> [   13.432059] lr : dma_fence_signal+0x30/0x60
> [   13.432327] Modules linked in:
> [   13.432789] sp : ffff8000122b3b50
> [   13.433057]
> [   13.433331] x29: ffff8000122b3b50
> [   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.434008]  x28: 0000000000000000
> [   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.434601]  x27: ffff000003791880
> [   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.435751]
> [   13.435753] x26: ffff8000122b3d30
> [   13.436237] pc : refcount_warn_saturate+0x6c/0x140
> [   13.436504]  x25: 0000000000000001
> [   13.437393] lr : refcount_warn_saturate+0x6c/0x140
> [   13.437938]  x24: ffff000003791a10
> [   13.438542] sp : ffff8000123ebb40
> [   13.439042]
> [   13.439767] x29: ffff8000123ebb40
> [   13.440130] x23: 0000000000000000
> [   13.440398]  x28: ffff8000123ebd58
> [   13.440687]  x22: ffff000003505280
> [   13.440819]  x27: 0000000000000000
> [   13.441108]  x21: ffff8000122b3b88
> [   13.441931]
> [   13.442228]
> [   13.442773] x26: 0000000000000001
> [   13.443070] x20: ffff000004b051c0
> [   13.443674]  x25: ffff000004b051c0
> [   13.443806]  x19: ffff000004b051c0
> [   13.444095]  x24: 0000000000000000
> [   13.444513]  x18: 0000000000000000
> [   13.444811]
> [   13.445227]
> [   13.445524] x23: 0000000000000000
> [   13.445814] x17: 3837783028203032
> [   13.445945]  x22: ffff000004b051c0
> [   13.446236]  x16: 3139323835323120
> [   13.446525]  x21: ffff000004d73100
> [   13.446822]  x15: 00000205aa24947a
> [   13.447120]
> [   13.447417]
> [   13.447715] x20: ffff000004b05400
> [   13.447846] x14: 0000000000000326
> [   13.447977]  x19: 00000000ffffffff
> [   13.448266]  x13: 0000000000000000
> [   13.448555]  x18: 0000000000000010
> [   13.448851]  x12: 0000000000000000
> [   13.449148]
> [   13.449446]
> [   13.449743] x17: 0000000000000000
> [   13.449874] x11: 0000000000000001
> [   13.450006]  x16: 0000000000000000
> [   13.450296]  x10: ffff8000122b3d48
> [   13.450585]  x15: 000060978994e822
> [   13.450882]  x9 : 00000000000019e0
> [   13.451179]
> [   13.451477]
> [   13.451774] x14: 00000000000000b6
> [   13.451905] x8 : ffff8000122b3d78
> [   13.452037]  x13: 00000000000000b6
> [   13.452326]  x7 : 0000000000000000
> [   13.452614]  x12: 0000000000000000
> [   13.452912]  x6 : 000000001fcf847e
> [   13.453209]
> [   13.453506]
> [   13.453803] x11: 0000000000000001
> [   13.453934] x5 : 00ffffffffffffff
> [   13.454066]  x10: 00000000000009a0
> [   13.454356]  x4 : 0015ef3c03fd7c00
> [   13.454643]  x9 : ffff8000123eb8c0
> [   13.454941]  x3 : 0000000000000018
> [   13.455238]
> [   13.455536]
> [   13.455833] x8 : ffff0000050e3340
> [   13.455965] x2 : ffff000004b051f0
> [   13.456096]  x7 : ffff00007fb92a80
> [   13.456386]  x1 : 000000032053be4d
> [   13.456676]  x6 : 0000000000000115
> [   13.456973]  x0 : 0000000000000000
> [   13.457271]
> [   13.457568]
> [   13.457866] x5 : 0000000000000000
> [   13.457998] Call trace:
> [   13.458128]  x4 : ffff00007fb8c9a0
> [   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
> [   13.458707]  x3 : ffff00007fb8f950
> [   13.459005]  dma_fence_signal+0x30/0x60
> [   13.459302]
> [   13.459600]  drm_sched_fence_finished+0x10/0x18
> [   13.459897] x2 : ffff00007fb8c9a0
> [   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
> [   13.460159]  x1 : ea6e0584a53f2200
> [   13.460449]  drm_sched_job_done_cb+0x10/0x18
> [   13.460738]  x0 : 0000000000000000
> [   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   13.461333]
> [   13.461631]  dma_fence_signal_locked+0x20/0x30
> [   13.461929] Call trace:
> [   13.462060]  panfrost_job_handle_done+0x34/0x50
> [   13.462192]  refcount_warn_saturate+0x6c/0x140
> [   13.462481]  panfrost_job_handle_irqs+0x358/0x570
> [   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
> [   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
> [   13.463462]  panfrost_job_push+0x1bc/0x200
> [   13.463760]  irq_thread_fn+0x28/0x98
> [   13.464094]  panfrost_ioctl_submit+0x358/0x438
> [   13.464225]  irq_thread+0x12c/0x230
> [   13.464620]  drm_ioctl_kernel+0xb8/0x170
> [   13.464909]  kthread+0x174/0x180
> [   13.465319]  drm_ioctl+0x214/0x450
> [   13.465617]  ret_from_fork+0x10/0x20
> [   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
> [   13.466756]  invoke_syscall+0x40/0xf8
> [   13.466891] ---[ end trace ba74542f51246291 ]---
> [   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
> [   13.467883]  do_el0_svc+0x20/0x80
> [   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
> [   13.468678]  el0_svc+0x1c/0x50
> [   13.475908]  el0t_64_sync_handler+0xa8/0xb0
> [   13.476277]  el0t_64_sync+0x16c/0x170
> [   13.476601] ---[ end trace ba74542f51246292 ]---
> [   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
> [   14.462959] sched: RT throttling activated
> [   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
> [   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
> [   34.476312]  (t=5250 jiffies g=505 q=301)
> [   34.476667] Task dump for CPU 3:
> [   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [   34.477820] Call trace:
> [   34.478035]  dump_backtrace+0x0/0x198
> [   34.478365]  show_stack+0x14/0x60
> [   34.478659]  sched_show_task+0x148/0x168
> [   34.479008]  dump_cpu_task+0x40/0x4c
> [   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
> [   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
> [   34.480072]  update_process_times+0x94/0xd8
> [   34.480440]  tick_sched_handle.isra.0+0x30/0x50
> [   34.480840]  tick_sched_timer+0x48/0x98
> [   34.481178]  __hrtimer_run_queues+0x110/0x1b0
> [   34.481562]  hrtimer_interrupt+0xe4/0x238
> [   34.481917]  arch_timer_handler_phys+0x28/0x40
> [   34.482310]  handle_percpu_devid_irq+0x80/0x130
> [   34.482710]  generic_handle_domain_irq+0x38/0x58
> [   34.483116]  gic_handle_irq+0x4c/0x110
> [   34.483450]  call_on_irq_stack+0x28/0x3c
> [   34.483798]  do_interrupt_handler+0x78/0x80
> [   34.484166]  el1_interrupt+0x34/0x80
> [   34.484484]  el1h_64_irq_handler+0x14/0x20
> [   34.484846]  el1h_64_irq+0x74/0x78
> [   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
> [   34.485568]  _raw_spin_lock+0x5c/0x68
> [   34.485895]  panfrost_job_run+0x24c/0x3f8
> [   34.486250]  drm_sched_main+0x130/0x390
> [   34.486591]  kthread+0x174/0x180
> [   34.486878]  ret_from_fork+0x10/0x20
> [   35.810989] vcc3v3_lcd1_n: disabling
> [   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
> [   97.488326]  (t=21003 jiffies g=505 q=379)
> [   97.488687] Task dump for CPU 3:
> [   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [   97.489842] Call trace:
> [   97.490056]  dump_backtrace+0x0/0x198
> [   97.490388]  show_stack+0x14/0x60
> [   97.490682]  sched_show_task+0x148/0x168
> [   97.491030]  dump_cpu_task+0x40/0x4c
> [   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
> [   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
> [   97.492095]  update_process_times+0x94/0xd8
> [   97.492463]  tick_sched_handle.isra.0+0x30/0x50
> [   97.492862]  tick_sched_timer+0x48/0x98
> [   97.493200]  __hrtimer_run_queues+0x110/0x1b0
> [   97.493582]  hrtimer_interrupt+0xe4/0x238
> [   97.493937]  arch_timer_handler_phys+0x28/0x40
> [   97.494330]  handle_percpu_devid_irq+0x80/0x130
> [   97.494730]  generic_handle_domain_irq+0x38/0x58
> [   97.495136]  gic_handle_irq+0x4c/0x110
> [   97.495473]  call_on_irq_stack+0x28/0x3c
> [   97.495818]  do_interrupt_handler+0x78/0x80
> [   97.496186]  el1_interrupt+0x34/0x80
> [   97.496503]  el1h_64_irq_handler+0x14/0x20
> [   97.496865]  el1h_64_irq+0x74/0x78
> [   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
> [   97.497588]  _raw_spin_lock+0x5c/0x68
> [   97.497912]  panfrost_job_run+0x24c/0x3f8
> [   97.498268]  drm_sched_main+0x130/0x390
> [   97.498607]  kthread+0x174/0x180
> [   97.498895]  ret_from_fork+0x10/0x20
> [  140.108141] random: crng init done
> [  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
> [  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
> [  160.500322]  (t=36756 jiffies g=505 q=482)
> [  160.500684] Task dump for CPU 3:
> [  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  160.501837] Call trace:
> [  160.502054]  dump_backtrace+0x0/0x198
> [  160.502384]  show_stack+0x14/0x60
> [  160.502679]  sched_show_task+0x148/0x168
> [  160.503027]  dump_cpu_task+0x40/0x4c
> [  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
> [  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
> [  160.504091]  update_process_times+0x94/0xd8
> [  160.504458]  tick_sched_handle.isra.0+0x30/0x50
> [  160.504858]  tick_sched_timer+0x48/0x98
> [  160.505195]  __hrtimer_run_queues+0x110/0x1b0
> [  160.505580]  hrtimer_interrupt+0xe4/0x238
> [  160.505934]  arch_timer_handler_phys+0x28/0x40
> [  160.506327]  handle_percpu_devid_irq+0x80/0x130
> [  160.506727]  generic_handle_domain_irq+0x38/0x58
> [  160.507133]  gic_handle_irq+0x4c/0x110
> [  160.507467]  call_on_irq_stack+0x28/0x3c
> [  160.507813]  do_interrupt_handler+0x78/0x80
> [  160.508181]  el1_interrupt+0x34/0x80
> [  160.508497]  el1h_64_irq_handler+0x14/0x20
> [  160.508858]  el1h_64_irq+0x74/0x78
> [  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
> [  160.509579]  _raw_spin_lock+0x5c/0x68
> [  160.509903]  panfrost_job_run+0x24c/0x3f8
> [  160.510259]  drm_sched_main+0x130/0x390
> [  160.510599]  kthread+0x174/0x180
> [  160.510886]  ret_from_fork+0x10/0x20
> [  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
> [  223.512325]  (t=52509 jiffies g=505 q=536)
> [  223.512688] Task dump for CPU 3:
> [  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  223.513842] Call trace:
> [  223.514056]  dump_backtrace+0x0/0x198
> [  223.514387]  show_stack+0x14/0x60
> [  223.514681]  sched_show_task+0x148/0x168
> [  223.515029]  dump_cpu_task+0x40/0x4c
> [  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
> [  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
> [  223.516094]  update_process_times+0x94/0xd8
> [  223.516462]  tick_sched_handle.isra.0+0x30/0x50
> [  223.516860]  tick_sched_timer+0x48/0x98
> [  223.517198]  __hrtimer_run_queues+0x110/0x1b0
> [  223.517582]  hrtimer_interrupt+0xe4/0x238
> [  223.517935]  arch_timer_handler_phys+0x28/0x40
> [  223.518327]  handle_percpu_devid_irq+0x80/0x130
> [  223.518727]  generic_handle_domain_irq+0x38/0x58
> [  223.519133]  gic_handle_irq+0x4c/0x110
> [  223.519466]  call_on_irq_stack+0x28/0x3c
> [  223.519812]  do_interrupt_handler+0x78/0x80
> [  223.520181]  el1_interrupt+0x34/0x80
> [  223.520498]  el1h_64_irq_handler+0x14/0x20
> [  223.520860]  el1h_64_irq+0x74/0x78
> [  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
> [  223.521584]  _raw_spin_lock+0x5c/0x68
> [  223.521908]  panfrost_job_run+0x24c/0x3f8
> [  223.522264]  drm_sched_main+0x130/0x390
> [  223.522605]  kthread+0x174/0x180
> [  223.522892]  ret_from_fork+0x10/0x20
> [  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
> [  286.524325]  (t=68262 jiffies g=505 q=612)
> [  286.524687] Task dump for CPU 3:
> [  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  286.525840] Call trace:
> [  286.526057]  dump_backtrace+0x0/0x198
> [  286.526387]  show_stack+0x14/0x60
> [  286.526681]  sched_show_task+0x148/0x168
> [  286.527029]  dump_cpu_task+0x40/0x4c
> [  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
> [  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
> [  286.528092]  update_process_times+0x94/0xd8
> [  286.528459]  tick_sched_handle.isra.0+0x30/0x50
> [  286.528859]  tick_sched_timer+0x48/0x98
> [  286.529197]  __hrtimer_run_queues+0x110/0x1b0
> [  286.529579]  hrtimer_interrupt+0xe4/0x238
> [  286.529933]  arch_timer_handler_phys+0x28/0x40
> [  286.530326]  handle_percpu_devid_irq+0x80/0x130
> [  286.530726]  generic_handle_domain_irq+0x38/0x58
> [  286.531132]  gic_handle_irq+0x4c/0x110
> [  286.531466]  call_on_irq_stack+0x28/0x3c
> [  286.531812]  do_interrupt_handler+0x78/0x80
> [  286.532180]  el1_interrupt+0x34/0x80
> [  286.532496]  el1h_64_irq_handler+0x14/0x20
> [  286.532857]  el1h_64_irq+0x74/0x78
> [  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
> [  286.533580]  _raw_spin_lock+0x5c/0x68
> [  286.533904]  panfrost_job_run+0x24c/0x3f8
> [  286.534259]  drm_sched_main+0x130/0x390
> [  286.534600]  kthread+0x174/0x180
> [  286.534887]  ret_from_fork+0x10/0x20
> [  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
> [  349.536324]  (t=84015 jiffies g=505 q=716)
> [  349.536687] Task dump for CPU 3:
> [  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  349.537839] Call trace:
> [  349.538055]  dump_backtrace+0x0/0x198
> [  349.538387]  show_stack+0x14/0x60
> [  349.538681]  sched_show_task+0x148/0x168
> [  349.539029]  dump_cpu_task+0x40/0x4c
> [  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
> [  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
> [  349.540094]  update_process_times+0x94/0xd8
> [  349.540462]  tick_sched_handle.isra.0+0x30/0x50
> [  349.540862]  tick_sched_timer+0x48/0x98
> [  349.541201]  __hrtimer_run_queues+0x110/0x1b0
> [  349.541585]  hrtimer_interrupt+0xe4/0x238
> [  349.541937]  arch_timer_handler_phys+0x28/0x40
> [  349.542330]  handle_percpu_devid_irq+0x80/0x130
> [  349.542730]  generic_handle_domain_irq+0x38/0x58
> [  349.543136]  gic_handle_irq+0x4c/0x110
> [  349.543469]  call_on_irq_stack+0x28/0x3c
> [  349.543815]  do_interrupt_handler+0x78/0x80
> [  349.544183]  el1_interrupt+0x34/0x80
> [  349.544500]  el1h_64_irq_handler+0x14/0x20
> [  349.544862]  el1h_64_irq+0x74/0x78
> [  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
> [  349.545586]  _raw_spin_lock+0x5c/0x68
> [  349.545910]  panfrost_job_run+0x24c/0x3f8
> [  349.546265]  drm_sched_main+0x130/0x390
> [  349.546604]  kthread+0x174/0x180
> [  349.546891]  ret_from_fork+0x10/0x20
> [  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
> [  412.548325]  (t=99768 jiffies g=505 q=784)
> [  412.548686] Task dump for CPU 3:
> [  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  412.549841] Call trace:
> [  412.550058]  dump_backtrace+0x0/0x198
> [  412.550389]  show_stack+0x14/0x60
> [  412.550684]  sched_show_task+0x148/0x168
> [  412.551031]  dump_cpu_task+0x40/0x4c
> [  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
> [  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
> [  412.552095]  update_process_times+0x94/0xd8
> [  412.552463]  tick_sched_handle.isra.0+0x30/0x50
> [  412.552863]  tick_sched_timer+0x48/0x98
> [  412.553201]  __hrtimer_run_queues+0x110/0x1b0
> [  412.553583]  hrtimer_interrupt+0xe4/0x238
> [  412.553936]  arch_timer_handler_phys+0x28/0x40
> [  412.554331]  handle_percpu_devid_irq+0x80/0x130
> [  412.554732]  generic_handle_domain_irq+0x38/0x58
> [  412.555139]  gic_handle_irq+0x4c/0x110
> [  412.555471]  call_on_irq_stack+0x28/0x3c
> [  412.555817]  do_interrupt_handler+0x78/0x80
> [  412.556186]  el1_interrupt+0x34/0x80
> [  412.556502]  el1h_64_irq_handler+0x14/0x20
> [  412.556864]  el1h_64_irq+0x74/0x78
> [  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
> [  412.557587]  _raw_spin_lock+0x5c/0x68
> [  412.557912]  panfrost_job_run+0x24c/0x3f8
> [  412.558267]  drm_sched_main+0x130/0x390
> [  412.558607]  kthread+0x174/0x180
> [  412.558894]  ret_from_fork+0x10/0x20
> [  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
> [  475.560329]  (t=115521 jiffies g=505 q=857)
> [  475.560697] Task dump for CPU 3:
> [  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  475.561850] Call trace:
> [  475.562067]  dump_backtrace+0x0/0x198
> [  475.562398]  show_stack+0x14/0x60
> [  475.562693]  sched_show_task+0x148/0x168
> [  475.563041]  dump_cpu_task+0x40/0x4c
> [  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
> [  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
> [  475.564104]  update_process_times+0x94/0xd8
> [  475.564472]  tick_sched_handle.isra.0+0x30/0x50
> [  475.564871]  tick_sched_timer+0x48/0x98
> [  475.565209]  __hrtimer_run_queues+0x110/0x1b0
> [  475.565592]  hrtimer_interrupt+0xe4/0x238
> [  475.565946]  arch_timer_handler_phys+0x28/0x40
> [  475.566339]  handle_percpu_devid_irq+0x80/0x130
> [  475.566739]  generic_handle_domain_irq+0x38/0x58
> [  475.567145]  gic_handle_irq+0x4c/0x110
> [  475.567477]  call_on_irq_stack+0x28/0x3c
> [  475.567822]  do_interrupt_handler+0x78/0x80
> [  475.568190]  el1_interrupt+0x34/0x80
> [  475.568507]  el1h_64_irq_handler+0x14/0x20
> [  475.568869]  el1h_64_irq+0x74/0x78
> [  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
> [  475.569593]  _raw_spin_lock+0x5c/0x68
> [  475.569915]  panfrost_job_run+0x24c/0x3f8
> [  475.570270]  drm_sched_main+0x130/0x390
> [  475.570610]  kthread+0x174/0x180
> [  475.570897]  ret_from_fork+0x10/0x20
> [  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
> [  538.572333]  (t=131274 jiffies g=505 q=947)
> [  538.572701] Task dump for CPU 3:
> [  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  538.573854] Call trace:
> [  538.574070]  dump_backtrace+0x0/0x198
> [  538.574402]  show_stack+0x14/0x60
> [  538.574696]  sched_show_task+0x148/0x168
> [  538.575044]  dump_cpu_task+0x40/0x4c
> [  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
> [  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
> [  538.576109]  update_process_times+0x94/0xd8
> [  538.576477]  tick_sched_handle.isra.0+0x30/0x50
> [  538.576878]  tick_sched_timer+0x48/0x98
> [  538.577216]  __hrtimer_run_queues+0x110/0x1b0
> [  538.577599]  hrtimer_interrupt+0xe4/0x238
> [  538.577953]  arch_timer_handler_phys+0x28/0x40
> [  538.578346]  handle_percpu_devid_irq+0x80/0x130
> [  538.578745]  generic_handle_domain_irq+0x38/0x58
> [  538.579151]  gic_handle_irq+0x4c/0x110
> [  538.579487]  call_on_irq_stack+0x28/0x3c
> [  538.579833]  do_interrupt_handler+0x78/0x80
> [  538.580201]  el1_interrupt+0x34/0x80
> [  538.580518]  el1h_64_irq_handler+0x14/0x20
> [  538.580880]  el1h_64_irq+0x74/0x78
> [  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
> [  538.581603]  _raw_spin_lock+0x5c/0x68
> [  538.581927]  panfrost_job_run+0x24c/0x3f8
> [  538.582283]  drm_sched_main+0x130/0x390
> [  538.582623]  kthread+0x174/0x180
> [  538.582910]  ret_from_fork+0x10/0x20
> [  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
> [  601.584330]  (t=147027 jiffies g=505 q=1018)
> [  601.584706] Task dump for CPU 3:
> [  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  601.585859] Call trace:
> [  601.586075]  dump_backtrace+0x0/0x198
> [  601.586406]  show_stack+0x14/0x60
> [  601.586701]  sched_show_task+0x148/0x168
> [  601.587048]  dump_cpu_task+0x40/0x4c
> [  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
> [  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
> [  601.588112]  update_process_times+0x94/0xd8
> [  601.588480]  tick_sched_handle.isra.0+0x30/0x50
> [  601.588880]  tick_sched_timer+0x48/0x98
> [  601.589218]  __hrtimer_run_queues+0x110/0x1b0
> [  601.589602]  hrtimer_interrupt+0xe4/0x238
> [  601.589956]  arch_timer_handler_phys+0x28/0x40
> [  601.590348]  handle_percpu_devid_irq+0x80/0x130
> [  601.590747]  generic_handle_domain_irq+0x38/0x58
> [  601.591153]  gic_handle_irq+0x4c/0x110
> [  601.591486]  call_on_irq_stack+0x28/0x3c
> [  601.591832]  do_interrupt_handler+0x78/0x80
> [  601.592201]  el1_interrupt+0x34/0x80
> [  601.592517]  el1h_64_irq_handler+0x14/0x20
> [  601.592879]  el1h_64_irq+0x74/0x78
> [  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
> [  601.593603]  _raw_spin_lock+0x5c/0x68
> [  601.593927]  panfrost_job_run+0x24c/0x3f8
> [  601.594283]  drm_sched_main+0x130/0x390
> [  601.594623]  kthread+0x174/0x180
> [  601.594910]  ret_from_fork+0x10/0x20
> [  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
> [  664.596333]  (t=162780 jiffies g=505 q=1086)
> [  664.596709] Task dump for CPU 3:
> [  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  664.597862] Call trace:
> [  664.598078]  dump_backtrace+0x0/0x198
> [  664.598409]  show_stack+0x14/0x60
> [  664.598704]  sched_show_task+0x148/0x168
> [  664.599052]  dump_cpu_task+0x40/0x4c
> [  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
> [  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
> [  664.600114]  update_process_times+0x94/0xd8
> [  664.600482]  tick_sched_handle.isra.0+0x30/0x50
> [  664.600882]  tick_sched_timer+0x48/0x98
> [  664.601220]  __hrtimer_run_queues+0x110/0x1b0
> [  664.601604]  hrtimer_interrupt+0xe4/0x238
> [  664.601958]  arch_timer_handler_phys+0x28/0x40
> [  664.602352]  handle_percpu_devid_irq+0x80/0x130
> [  664.602751]  generic_handle_domain_irq+0x38/0x58
> [  664.603158]  gic_handle_irq+0x4c/0x110
> [  664.603491]  call_on_irq_stack+0x28/0x3c
> [  664.603838]  do_interrupt_handler+0x78/0x80
> [  664.604206]  el1_interrupt+0x34/0x80
> [  664.604522]  el1h_64_irq_handler+0x14/0x20
> [  664.604883]  el1h_64_irq+0x74/0x78
> [  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
> [  664.605609]  _raw_spin_lock+0x5c/0x68
> [  664.605934]  panfrost_job_run+0x24c/0x3f8
> [  664.606290]  drm_sched_main+0x130/0x390
> [  664.606631]  kthread+0x174/0x180
> [  664.606918]  ret_from_fork+0x10/0x20
> [  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
> [  727.608331]  (t=178533 jiffies g=505 q=1152)
> [  727.608706] Task dump for CPU 3:
> [  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  727.609858] Call trace:
> [  727.610074]  dump_backtrace+0x0/0x198
> [  727.610403]  show_stack+0x14/0x60
> [  727.610698]  sched_show_task+0x148/0x168
> [  727.611047]  dump_cpu_task+0x40/0x4c
> [  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
> [  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
> [  727.612112]  update_process_times+0x94/0xd8
> [  727.612479]  tick_sched_handle.isra.0+0x30/0x50
> [  727.612879]  tick_sched_timer+0x48/0x98
> [  727.613216]  __hrtimer_run_queues+0x110/0x1b0
> [  727.613601]  hrtimer_interrupt+0xe4/0x238
> [  727.613955]  arch_timer_handler_phys+0x28/0x40
> [  727.614348]  handle_percpu_devid_irq+0x80/0x130
> [  727.614748]  generic_handle_domain_irq+0x38/0x58
> [  727.615154]  gic_handle_irq+0x4c/0x110
> [  727.615485]  call_on_irq_stack+0x28/0x3c
> [  727.615832]  do_interrupt_handler+0x78/0x80
> [  727.616200]  el1_interrupt+0x34/0x80
> [  727.616517]  el1h_64_irq_handler+0x14/0x20
> [  727.616879]  el1h_64_irq+0x74/0x78
> [  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
> [  727.617602]  _raw_spin_lock+0x5c/0x68
> [  727.617926]  panfrost_job_run+0x24c/0x3f8
> [  727.618282]  drm_sched_main+0x130/0x390
> [  727.618621]  kthread+0x174/0x180
> [  727.618908]  ret_from_fork+0x10/0x20
> [  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
> [  790.620331]  (t=194286 jiffies g=505 q=1219)
> [  790.620708] Task dump for CPU 3:
> [  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  790.621860] Call trace:
> [  790.622075]  dump_backtrace+0x0/0x198
> [  790.622405]  show_stack+0x14/0x60
> [  790.622699]  sched_show_task+0x148/0x168
> [  790.623049]  dump_cpu_task+0x40/0x4c
> [  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
> [  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
> [  790.624113]  update_process_times+0x94/0xd8
> [  790.624481]  tick_sched_handle.isra.0+0x30/0x50
> [  790.624880]  tick_sched_timer+0x48/0x98
> [  790.625218]  __hrtimer_run_queues+0x110/0x1b0
> [  790.625603]  hrtimer_interrupt+0xe4/0x238
> [  790.625957]  arch_timer_handler_phys+0x28/0x40
> [  790.626350]  handle_percpu_devid_irq+0x80/0x130
> [  790.626752]  generic_handle_domain_irq+0x38/0x58
> [  790.627158]  gic_handle_irq+0x4c/0x110
> [  790.627493]  call_on_irq_stack+0x28/0x3c
> [  790.627839]  do_interrupt_handler+0x78/0x80
> [  790.628208]  el1_interrupt+0x34/0x80
> [  790.628526]  el1h_64_irq_handler+0x14/0x20
> [  790.628888]  el1h_64_irq+0x74/0x78
> [  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
> [  790.629613]  _raw_spin_lock+0x5c/0x68
> [  790.629937]  panfrost_job_run+0x24c/0x3f8
> [  790.630292]  drm_sched_main+0x130/0x390
> [  790.630632]  kthread+0x174/0x180
> [  790.630919]  ret_from_fork+0x10/0x20
> [  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
> [  853.632340]  (t=210039 jiffies g=505 q=1318)
> [  853.632716] Task dump for CPU 3:
> [  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  853.633869] Call trace:
> [  853.634084]  dump_backtrace+0x0/0x198
> [  853.634418]  show_stack+0x14/0x60
> [  853.634712]  sched_show_task+0x148/0x168
> [  853.635061]  dump_cpu_task+0x40/0x4c
> [  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
> [  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
> [  853.636124]  update_process_times+0x94/0xd8
> [  853.636492]  tick_sched_handle.isra.0+0x30/0x50
> [  853.636892]  tick_sched_timer+0x48/0x98
> [  853.637230]  __hrtimer_run_queues+0x110/0x1b0
> [  853.637613]  hrtimer_interrupt+0xe4/0x238
> [  853.637965]  arch_timer_handler_phys+0x28/0x40
> [  853.638358]  handle_percpu_devid_irq+0x80/0x130
> [  853.638760]  generic_handle_domain_irq+0x38/0x58
> [  853.639166]  gic_handle_irq+0x4c/0x110
> [  853.639499]  call_on_irq_stack+0x28/0x3c
> [  853.639845]  do_interrupt_handler+0x78/0x80
> [  853.640213]  el1_interrupt+0x34/0x80
> [  853.640530]  el1h_64_irq_handler+0x14/0x20
> [  853.640892]  el1h_64_irq+0x74/0x78
> [  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
> [  853.641616]  _raw_spin_lock+0x5c/0x68
> [  853.641940]  panfrost_job_run+0x24c/0x3f8
> [  853.642295]  drm_sched_main+0x130/0x390
> [  853.642634]  kthread+0x174/0x180
> [  853.642921]  ret_from_fork+0x10/0x20
> [  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
> [  916.644339]  (t=225792 jiffies g=505 q=1390)
> [  916.644715] Task dump for CPU 3:
> [  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  916.645868] Call trace:
> [  916.646083]  dump_backtrace+0x0/0x198
> [  916.646414]  show_stack+0x14/0x60
> [  916.646708]  sched_show_task+0x148/0x168
> [  916.647055]  dump_cpu_task+0x40/0x4c
> [  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
> [  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
> [  916.648119]  update_process_times+0x94/0xd8
> [  916.648488]  tick_sched_handle.isra.0+0x30/0x50
> [  916.648887]  tick_sched_timer+0x48/0x98
> [  916.649225]  __hrtimer_run_queues+0x110/0x1b0
> [  916.649608]  hrtimer_interrupt+0xe4/0x238
> [  916.649962]  arch_timer_handler_phys+0x28/0x40
> [  916.650355]  handle_percpu_devid_irq+0x80/0x130
> [  916.650756]  generic_handle_domain_irq+0x38/0x58
> [  916.651162]  gic_handle_irq+0x4c/0x110
> [  916.651495]  call_on_irq_stack+0x28/0x3c
> [  916.651842]  do_interrupt_handler+0x78/0x80
> [  916.652210]  el1_interrupt+0x34/0x80
> [  916.652527]  el1h_64_irq_handler+0x14/0x20
> [  916.652889]  el1h_64_irq+0x74/0x78
> [  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
> [  916.653614]  _raw_spin_lock+0x5c/0x68
> [  916.653937]  panfrost_job_run+0x24c/0x3f8
> [  916.654293]  drm_sched_main+0x130/0x390
> [  916.654632]  kthread+0x174/0x180
> [  916.654920]  ret_from_fork+0x10/0x20
> 
> -- 
> Pengutronix e.K.                           |                             |
> Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
> 31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
> Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
@ 2021-11-15 14:08       ` Daniel Vetter
  0 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-11-15 14:08 UTC (permalink / raw)
  To: Sascha Hauer
  Cc: Christian König, intel-gfx, dri-devel, linaro-mm-sig, linux-media

On Mon, Nov 15, 2021 at 03:03:53PM +0100, Sascha Hauer wrote:
> Hi,
> 
> On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> > Simplifying the code a bit.
> > 
> > v2: use dma_resv_for_each_fence
> > 
> > Signed-off-by: Christian König <christian.koenig@amd.com>
> > ---
> >  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
> >  1 file changed, 6 insertions(+), 20 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > index 042c16b5d54a..5bc5f775abe1 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
> >  					    struct drm_gem_object *obj,
> >  					    bool write)
> >  {
> > +	struct dma_resv_iter cursor;
> > +	struct dma_fence *fence;
> >  	int ret;
> > -	struct dma_fence **fences;
> > -	unsigned int i, fence_count;
> > -
> > -	if (!write) {
> > -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> > -
> > -		return drm_sched_job_add_dependency(job, fence);
> > -	}
> > -
> > -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> > -	if (ret || !fence_count)
> > -		return ret;
> >  
> > -	for (i = 0; i < fence_count; i++) {
> > -		ret = drm_sched_job_add_dependency(job, fences[i]);
> > +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> > +		ret = drm_sched_job_add_dependency(job, fence);
> >  		if (ret)
> > -			break;
> > +			return ret;
> >  	}
> > -
> > -	for (; i < fence_count; i++)
> > -		dma_fence_put(fences[i]);
> > -	kfree(fences);
> > -	return ret;
> > +	return 0;
> >  }
> >  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
> >  
> 
> This patch makes the panfrost driver explode on v5.16-rc1 with the
> splat below. I didn't bisect it, but it goes away when I revert this
> patch. I only started weston, nothing more.
> 
> Any idea what goes wrong here?

Should be fixed in 13e9e30cafea1, but Christian pushed it to the wrong
branch, so it missed -rc1.

Christian, this needs to go into drm-misc-fixes, please cherry-pick it over.
-Daniel
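
For context, the refcount warnings in the log above point at the semantic
difference between the two code paths: dma_resv_get_fences() handed out
fences carrying their own references, which drm_sched_job_add_dependency()
then consumed, while the dma_resv_for_each_fence() iterator only lends a
reference that it drops again when it advances. Assuming that is what
13e9e30cafea1 addresses (the commit itself is not quoted here), a minimal
sketch of the fix inside the converted loop would look roughly like this:

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/* Take an extra reference: drm_sched_job_add_dependency()
		 * consumes one on success, while the iterator drops its own
		 * reference when it moves on to the next fence.
		 */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			/* On failure the reference is still ours to drop. */
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;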

> 
> Sascha
> 
> [   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
> [   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   12.514056] Modules linked in:
> [   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
> [   12.514621] ------------[ cut here ]------------
> [   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.515049] pc : dma_fence_release+0xac/0xe8
> [   12.515056] lr : dma_fence_release+0xac/0xe8
> [   12.515061] sp : ffff8000123ebb20
> [   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
> [   12.515518] refcount_t: addition on 0; use-after-free.
> [   12.516015]  x27: 0000000000000000
> [   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
> [   12.516992] x26: 0000000000000001
> [   12.517366] Modules linked in:
> [   12.517654]  x25: ffff000004b051c0
> [   12.518108]
> [   12.518555]  x24: 0000000000000000
> [   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
> [   12.519576]
> [   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.520133] x23: 0000000000000000
> [   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.520559]  x22: ffff800010d41b78
> [   12.520856] pc : refcount_warn_saturate+0x98/0x140
> [   12.521625]  x21: ffff000004b05050
> [   12.521755] lr : refcount_warn_saturate+0x98/0x140
> [   12.522299]
> [   12.522588] sp : ffff8000122b3bc0
> [   12.523192] x20: ffff000004b05040
> [   12.523489] x29: ffff8000122b3bc0
> [   12.523906]  x19: ffff000004b05078
> [   12.524203]  x28: 0000000000000000
> [   12.524620]  x18: 0000000000000010
> [   12.524751]  x27: ffff000003791880
> [   12.525040]
> [   12.525329]
> [   12.525618] x17: 0000000000000000
> [   12.525915] x26: ffff8000122b3d30
> [   12.526212]  x16: 0000000000000000
> [   12.526509]  x25: 0000000000000001
> [   12.526806]  x15: ffff0000050e2dc0
> [   12.526937]  x24: ffff000003791a10
> [   12.527067]
> [   12.527357]
> [   12.527646] x14: 00000000000001b5
> [   12.527942] x23: 0000000000000000
> [   12.528240]  x13: ffff0000050e2dc0
> [   12.528536]  x22: ffff000003505280
> [   12.528833]  x12: 00000000ffffffea
> [   12.528964]  x21: ffff000003a2a220
> [   12.529095]
> [   12.529384]
> [   12.529673] x11: ffff800011761ec8
> [   12.529970] x20: ffff000004b05078
> [   12.530267]  x10: ffff8000115e1e88
> [   12.530564]  x19: ffff000004b05000
> [   12.530861]  x9 : ffff8000115e1ee0
> [   12.530992]  x18: 0000000000000010
> [   12.531123]
> [   12.531412]
> [   12.531701] x8 : 000000000017ffe8
> [   12.531998] x17: 0000000000500600
> [   12.532294]  x7 : c0000000fffeffff
> [   12.532591]  x16: 0000000000000000
> [   12.532888]  x6 : 0000000000000001
> [   12.533019]  x15: ffff000003505700
> [   12.533150]
> [   12.533439]
> [   12.533728] x5 : ffff00007fb8c9a0
> [   12.534025] x14: 0000000000000000
> [   12.534322]  x4 : 0000000000000000
> [   12.534619]  x13: 292d2d3d45505954
> [   12.534914]  x3 : 0000000000000001
> [   12.535045]  x12: 4220534253532d20
> [   12.535176]
> [   12.535465]
> [   12.535754] x2 : ffff00007fb8c9a8
> [   12.536051] x11: 5449442d204f4354
> [   12.536347]  x1 : ea6e0584a53f2200
> [   12.536643]  x10: 2d204f41552d204e
> [   12.536941]  x0 : 0000000000000000
> [   12.537073]  x9 : 4e41502b20666961
> [   12.537203]
> [   12.537492]
> [   12.537782] Call trace:
> [   12.538078] x8 : 642076635a6e2820
> [   12.538377]  dma_fence_release+0xac/0xe8
> [   12.538671]  x7 : 205d343430353135
> [   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.539098]  x6 : 352e32312020205b
> [   12.539230]  panfrost_job_push+0x1bc/0x200
> [   12.539442]
> [   12.539732]  panfrost_ioctl_submit+0x358/0x438
> [   12.540073] x5 : ffff00007fb539a0
> [   12.540370]  drm_ioctl_kernel+0xb8/0x170
> [   12.540771]  x4 : 0000000000000000
> [   12.541069]  drm_ioctl+0x214/0x450
> [   12.541424]  x3 : 0000000000000001
> [   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.541943]
> [   12.542233]  invoke_syscall+0x40/0xf8
> [   12.542573] x2 : ffff00007fb539a8
> [   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.543167]  x1 : 0ac4fb7a0680bb00
> [   12.543465]  do_el0_svc+0x20/0x80
> [   12.543805]  x0 : 0000000000000000
> [   12.543936]  el0_svc+0x1c/0x50
> [   12.544255]
> [   12.544544]  el0t_64_sync_handler+0xa8/0xb0
> [   12.544955] Call trace:
> [   12.545250]  el0t_64_sync+0x16c/0x170
> [   12.545540]  refcount_warn_saturate+0x98/0x140
> [   12.545837] ---[ end trace ba74542f51246288 ]---
> [   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
> [   12.546285] ------------[ cut here ]------------
> [   12.546598]  drm_sched_job_done_cb+0x10/0x18
> [   12.546813] refcount_t: underflow; use-after-free.
> [   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
> [   12.547920]  dma_fence_signal_locked+0x20/0x30
> [   12.548336] Modules linked in:
> [   12.548737]  panfrost_job_handle_done+0x34/0x50
> [   12.549110]
> [   12.549525]  panfrost_job_handle_irqs+0x358/0x570
> [   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
> [   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.551373]  irq_thread_fn+0x28/0x98
> [   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.551899]  irq_thread+0x12c/0x230
> [   12.552309] pc : refcount_warn_saturate+0xec/0x140
> [   12.553131]  kthread+0x174/0x180
> [   12.553578] lr : refcount_warn_saturate+0xec/0x140
> [   12.554121]  ret_from_fork+0x10/0x20
> [   12.554432] sp : ffff8000123ebaa0
> [   12.555038] ---[ end trace ba74542f51246289 ]---
> [   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
> [   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
> [   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
> [   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
> [   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
> [   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
> [   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.563733] Call trace:
> [   12.563950]  refcount_warn_saturate+0xec/0x140
> [   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
> [   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   12.565216]  dma_fence_release+0xd4/0xe8
> [   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.565970]  panfrost_job_push+0x1bc/0x200
> [   12.566333]  panfrost_ioctl_submit+0x358/0x438
> [   12.566726]  drm_ioctl_kernel+0xb8/0x170
> [   12.567072]  drm_ioctl+0x214/0x450
> [   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.567721]  invoke_syscall+0x40/0xf8
> [   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.568463]  do_el0_svc+0x20/0x80
> [   12.568755]  el0_svc+0x1c/0x50
> [   12.569030]  el0t_64_sync_handler+0xa8/0xb0
> [   12.569399]  el0t_64_sync+0x16c/0x170
> [   12.569724] ---[ end trace ba74542f5124628a ]---
> [   12.595086] ------------[ cut here ]------------
> [   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
> [   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   12.596934] Modules linked in:
> [   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.599202] pc : dma_fence_release+0xac/0xe8
> [   12.599584] lr : dma_fence_release+0xac/0xe8
> [   12.599960] sp : ffff8000123ebb20
> [   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
> [   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.606542] Call trace:
> [   12.606760]  dma_fence_release+0xac/0xe8
> [   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.607517]  panfrost_job_push+0x1bc/0x200
> [   12.607882]  panfrost_ioctl_submit+0x358/0x438
> [   12.608274]  drm_ioctl_kernel+0xb8/0x170
> [   12.608622]  drm_ioctl+0x214/0x450
> [   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.609269]  invoke_syscall+0x40/0xf8
> [   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.610011]  do_el0_svc+0x20/0x80
> [   12.610304]  el0_svc+0x1c/0x50
> [   12.610577]  el0t_64_sync_handler+0xa8/0xb0
> [   12.610946]  el0t_64_sync+0x16c/0x170
> [   12.611276] ---[ end trace ba74542f5124628b ]---
> [   12.612869] ------------[ cut here ]------------
> [   12.613288] refcount_t: saturated; leaking memory.
> [   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
> [   12.614476] Modules linked in:
> [   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.616773] pc : refcount_warn_saturate+0xc0/0x140
> [   12.617200] lr : refcount_warn_saturate+0xc0/0x140
> [   12.617622] sp : ffff8000123eba60
> [   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
> [   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
> [   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
> [   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
> [   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
> [   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.624256] Call trace:
> [   12.624474]  refcount_warn_saturate+0xc0/0x140
> [   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
> [   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
> [   12.625811]  drm_atomic_helper_commit+0x80/0x360
> [   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
> [   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
> [   12.627050]  drm_ioctl_kernel+0xb8/0x170
> [   12.627397]  drm_ioctl+0x214/0x450
> [   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.628046]  invoke_syscall+0x40/0xf8
> [   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.628787]  do_el0_svc+0x20/0x80
> [   12.629079]  el0_svc+0x1c/0x50
> [   12.629354]  el0t_64_sync_handler+0xa8/0xb0
> [   12.629723]  el0t_64_sync+0x16c/0x170
> [   12.630048] ---[ end trace ba74542f5124628c ]---
> [   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
> [   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
> [   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
> [   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
> [   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
> [   13.296693] ------------[ cut here ]------------
> [   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
> [   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.298560] Modules linked in:
> [   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.300830] pc : dma_fence_release+0xac/0xe8
> [   13.301208] lr : dma_fence_release+0xac/0xe8
> [   13.301585] sp : ffff8000123ebb20
> [   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.308149] Call trace:
> [   13.308367]  dma_fence_release+0xac/0xe8
> [   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.309119]  panfrost_job_push+0x1bc/0x200
> [   13.309483]  panfrost_ioctl_submit+0x358/0x438
> [   13.309875]  drm_ioctl_kernel+0xb8/0x170
> [   13.310221]  drm_ioctl+0x214/0x450
> [   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.310868]  invoke_syscall+0x40/0xf8
> [   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.311609]  do_el0_svc+0x20/0x80
> [   13.311903]  el0_svc+0x1c/0x50
> [   13.312177]  el0t_64_sync_handler+0xa8/0xb0
> [   13.312545]  el0t_64_sync+0x16c/0x170
> [   13.312869] ---[ end trace ba74542f5124628d ]---
> [   13.340454] ------------[ cut here ]------------
> [   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
> [   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.342318] Modules linked in:
> [   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.344584] pc : dma_fence_release+0xac/0xe8
> [   13.344961] lr : dma_fence_release+0xac/0xe8
> [   13.345338] sp : ffff8000123ebb20
> [   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
> [   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.351900] Call trace:
> [   13.352116]  dma_fence_release+0xac/0xe8
> [   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.352869]  panfrost_job_push+0x1bc/0x200
> [   13.353232]  panfrost_ioctl_submit+0x358/0x438
> [   13.353624]  drm_ioctl_kernel+0xb8/0x170
> [   13.353971]  drm_ioctl+0x214/0x450
> [   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.354616]  invoke_syscall+0x40/0xf8
> [   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.355356]  do_el0_svc+0x20/0x80
> [   13.355650]  el0_svc+0x1c/0x50
> [   13.355925]  el0t_64_sync_handler+0xa8/0xb0
> [   13.356293]  el0t_64_sync+0x16c/0x170
> [   13.356618] ---[ end trace ba74542f5124628e ]---
> [   13.379841] ------------[ cut here ]------------
> [   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
> [   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.381680] Modules linked in:
> [   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.383937] pc : dma_fence_release+0xac/0xe8
> [   13.384314] lr : dma_fence_release+0xac/0xe8
> [   13.384690] sp : ffff8000123ebb20
> [   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.391247] Call trace:
> [   13.391464]  dma_fence_release+0xac/0xe8
> [   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.392217]  panfrost_job_push+0x1bc/0x200
> [   13.392581]  panfrost_ioctl_submit+0x358/0x438
> [   13.392972]  drm_ioctl_kernel+0xb8/0x170
> [   13.393319]  drm_ioctl+0x214/0x450
> [   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.393967]  invoke_syscall+0x40/0xf8
> [   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.394708]  do_el0_svc+0x20/0x80
> [   13.395002]  el0_svc+0x1c/0x50
> [   13.395275]  el0t_64_sync_handler+0xa8/0xb0
> [   13.395643]  el0t_64_sync+0x16c/0x170
> [   13.395968] ---[ end trace ba74542f5124628f ]---
> [   13.398130] ------------[ cut here ]------------
> [   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
> [   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.400011] Modules linked in:
> [   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.402269] pc : dma_fence_release+0xac/0xe8
> [   13.402646] lr : dma_fence_release+0xac/0xe8
> [   13.403024] sp : ffff8000123ebb20
> [   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
> [   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
> [   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
> [   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
> [   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
> [   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
> [   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
> [   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.409583] Call trace:
> [   13.409800]  dma_fence_release+0xac/0xe8
> [   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.410553]  panfrost_job_push+0x1bc/0x200
> [   13.410917]  panfrost_ioctl_submit+0x358/0x438
> [   13.411309]  drm_ioctl_kernel+0xb8/0x170
> [   13.411656]  drm_ioctl+0x214/0x450
> [   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.412303]  invoke_syscall+0x40/0xf8
> [   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.413042]  do_el0_svc+0x20/0x80
> [   13.413335]  el0_svc+0x1c/0x50
> [   13.413607]  el0t_64_sync_handler+0xa8/0xb0
> [   13.413976]  el0t_64_sync+0x16c/0x170
> [   13.414298] ---[ end trace ba74542f51246290 ]---
> [   13.430129] ------------[ cut here ]------------
> [   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
> [   13.430557] refcount_t: saturated; leaking memory.
> [   13.431321] Mem abort info:
> [   13.431324]   ESR = 0x96000044
> [   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
> [   13.431330]   SET = 0, FnV = 0
> [   13.431333]   EA = 0, S1PTW = 0
> [   13.431335]   FSC = 0x04: level 0 translation fault
> [   13.431337] Data abort info:
> [   13.431339]   ISV = 0, ISS = 0x00000044
> [   13.431340]   CM = 0, WnR = 1
> [   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
> [   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
> [   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
> [   13.431359] Modules linked in:
> [   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
> [   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
> [   13.432059] lr : dma_fence_signal+0x30/0x60
> [   13.432327] Modules linked in:
> [   13.432789] sp : ffff8000122b3b50
> [   13.433057]
> [   13.433331] x29: ffff8000122b3b50
> [   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.434008]  x28: 0000000000000000
> [   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.434601]  x27: ffff000003791880
> [   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.435751]
> [   13.435753] x26: ffff8000122b3d30
> [   13.436237] pc : refcount_warn_saturate+0x6c/0x140
> [   13.436504]  x25: 0000000000000001
> [   13.437393] lr : refcount_warn_saturate+0x6c/0x140
> [   13.437938]  x24: ffff000003791a10
> [   13.438542] sp : ffff8000123ebb40
> [   13.439042]
> [   13.439767] x29: ffff8000123ebb40
> [   13.440130] x23: 0000000000000000
> [   13.440398]  x28: ffff8000123ebd58
> [   13.440687]  x22: ffff000003505280
> [   13.440819]  x27: 0000000000000000
> [   13.441108]  x21: ffff8000122b3b88
> [   13.441931]
> [   13.442228]
> [   13.442773] x26: 0000000000000001
> [   13.443070] x20: ffff000004b051c0
> [   13.443674]  x25: ffff000004b051c0
> [   13.443806]  x19: ffff000004b051c0
> [   13.444095]  x24: 0000000000000000
> [   13.444513]  x18: 0000000000000000
> [   13.444811]
> [   13.445227]
> [   13.445524] x23: 0000000000000000
> [   13.445814] x17: 3837783028203032
> [   13.445945]  x22: ffff000004b051c0
> [   13.446236]  x16: 3139323835323120
> [   13.446525]  x21: ffff000004d73100
> [   13.446822]  x15: 00000205aa24947a
> [   13.447120]
> [   13.447417]
> [   13.447715] x20: ffff000004b05400
> [   13.447846] x14: 0000000000000326
> [   13.447977]  x19: 00000000ffffffff
> [   13.448266]  x13: 0000000000000000
> [   13.448555]  x18: 0000000000000010
> [   13.448851]  x12: 0000000000000000
> [   13.449148]
> [   13.449446]
> [   13.449743] x17: 0000000000000000
> [   13.449874] x11: 0000000000000001
> [   13.450006]  x16: 0000000000000000
> [   13.450296]  x10: ffff8000122b3d48
> [   13.450585]  x15: 000060978994e822
> [   13.450882]  x9 : 00000000000019e0
> [   13.451179]
> [   13.451477]
> [   13.451774] x14: 00000000000000b6
> [   13.451905] x8 : ffff8000122b3d78
> [   13.452037]  x13: 00000000000000b6
> [   13.452326]  x7 : 0000000000000000
> [   13.452614]  x12: 0000000000000000
> [   13.452912]  x6 : 000000001fcf847e
> [   13.453209]
> [   13.453506]
> [   13.453803] x11: 0000000000000001
> [   13.453934] x5 : 00ffffffffffffff
> [   13.454066]  x10: 00000000000009a0
> [   13.454356]  x4 : 0015ef3c03fd7c00
> [   13.454643]  x9 : ffff8000123eb8c0
> [   13.454941]  x3 : 0000000000000018
> [   13.455238]
> [   13.455536]
> [   13.455833] x8 : ffff0000050e3340
> [   13.455965] x2 : ffff000004b051f0
> [   13.456096]  x7 : ffff00007fb92a80
> [   13.456386]  x1 : 000000032053be4d
> [   13.456676]  x6 : 0000000000000115
> [   13.456973]  x0 : 0000000000000000
> [   13.457271]
> [   13.457568]
> [   13.457866] x5 : 0000000000000000
> [   13.457998] Call trace:
> [   13.458128]  x4 : ffff00007fb8c9a0
> [   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
> [   13.458707]  x3 : ffff00007fb8f950
> [   13.459005]  dma_fence_signal+0x30/0x60
> [   13.459302]
> [   13.459600]  drm_sched_fence_finished+0x10/0x18
> [   13.459897] x2 : ffff00007fb8c9a0
> [   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
> [   13.460159]  x1 : ea6e0584a53f2200
> [   13.460449]  drm_sched_job_done_cb+0x10/0x18
> [   13.460738]  x0 : 0000000000000000
> [   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   13.461333]
> [   13.461631]  dma_fence_signal_locked+0x20/0x30
> [   13.461929] Call trace:
> [   13.462060]  panfrost_job_handle_done+0x34/0x50
> [   13.462192]  refcount_warn_saturate+0x6c/0x140
> [   13.462481]  panfrost_job_handle_irqs+0x358/0x570
> [   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
> [   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
> [   13.463462]  panfrost_job_push+0x1bc/0x200
> [   13.463760]  irq_thread_fn+0x28/0x98
> [   13.464094]  panfrost_ioctl_submit+0x358/0x438
> [   13.464225]  irq_thread+0x12c/0x230
> [   13.464620]  drm_ioctl_kernel+0xb8/0x170
> [   13.464909]  kthread+0x174/0x180
> [   13.465319]  drm_ioctl+0x214/0x450
> [   13.465617]  ret_from_fork+0x10/0x20
> [   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
> [   13.466756]  invoke_syscall+0x40/0xf8
> [   13.466891] ---[ end trace ba74542f51246291 ]---
> [   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
> [   13.467883]  do_el0_svc+0x20/0x80
> [   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
> [   13.468678]  el0_svc+0x1c/0x50
> [   13.475908]  el0t_64_sync_handler+0xa8/0xb0
> [   13.476277]  el0t_64_sync+0x16c/0x170
> [   13.476601] ---[ end trace ba74542f51246292 ]---
> [   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
> [   14.462959] sched: RT throttling activated
> [   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
> [   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
> [   34.476312]  (t=5250 jiffies g=505 q=301)
> [   34.476667] Task dump for CPU 3:
> [   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [   34.477820] Call trace:
> [   34.478035]  dump_backtrace+0x0/0x198
> [   34.478365]  show_stack+0x14/0x60
> [   34.478659]  sched_show_task+0x148/0x168
> [   34.479008]  dump_cpu_task+0x40/0x4c
> [   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
> [   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
> [   34.480072]  update_process_times+0x94/0xd8
> [   34.480440]  tick_sched_handle.isra.0+0x30/0x50
> [   34.480840]  tick_sched_timer+0x48/0x98
> [   34.481178]  __hrtimer_run_queues+0x110/0x1b0
> [   34.481562]  hrtimer_interrupt+0xe4/0x238
> [   34.481917]  arch_timer_handler_phys+0x28/0x40
> [   34.482310]  handle_percpu_devid_irq+0x80/0x130
> [   34.482710]  generic_handle_domain_irq+0x38/0x58
> [   34.483116]  gic_handle_irq+0x4c/0x110
> [   34.483450]  call_on_irq_stack+0x28/0x3c
> [   34.483798]  do_interrupt_handler+0x78/0x80
> [   34.484166]  el1_interrupt+0x34/0x80
> [   34.484484]  el1h_64_irq_handler+0x14/0x20
> [   34.484846]  el1h_64_irq+0x74/0x78
> [   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
> [   34.485568]  _raw_spin_lock+0x5c/0x68
> [   34.485895]  panfrost_job_run+0x24c/0x3f8
> [   34.486250]  drm_sched_main+0x130/0x390
> [   34.486591]  kthread+0x174/0x180
> [   34.486878]  ret_from_fork+0x10/0x20
> [   35.810989] vcc3v3_lcd1_n: disabling
> [   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
> [   97.488326]  (t=21003 jiffies g=505 q=379)
> [   97.488687] Task dump for CPU 3:
> [   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [   97.489842] Call trace:
> [   97.490056]  dump_backtrace+0x0/0x198
> [   97.490388]  show_stack+0x14/0x60
> [   97.490682]  sched_show_task+0x148/0x168
> [   97.491030]  dump_cpu_task+0x40/0x4c
> [   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
> [   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
> [   97.492095]  update_process_times+0x94/0xd8
> [   97.492463]  tick_sched_handle.isra.0+0x30/0x50
> [   97.492862]  tick_sched_timer+0x48/0x98
> [   97.493200]  __hrtimer_run_queues+0x110/0x1b0
> [   97.493582]  hrtimer_interrupt+0xe4/0x238
> [   97.493937]  arch_timer_handler_phys+0x28/0x40
> [   97.494330]  handle_percpu_devid_irq+0x80/0x130
> [   97.494730]  generic_handle_domain_irq+0x38/0x58
> [   97.495136]  gic_handle_irq+0x4c/0x110
> [   97.495473]  call_on_irq_stack+0x28/0x3c
> [   97.495818]  do_interrupt_handler+0x78/0x80
> [   97.496186]  el1_interrupt+0x34/0x80
> [   97.496503]  el1h_64_irq_handler+0x14/0x20
> [   97.496865]  el1h_64_irq+0x74/0x78
> [   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
> [   97.497588]  _raw_spin_lock+0x5c/0x68
> [   97.497912]  panfrost_job_run+0x24c/0x3f8
> [   97.498268]  drm_sched_main+0x130/0x390
> [   97.498607]  kthread+0x174/0x180
> [   97.498895]  ret_from_fork+0x10/0x20
> [  140.108141] random: crng init done
> [  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
> [  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
> [  160.500322]  (t=36756 jiffies g=505 q=482)
> [  160.500684] Task dump for CPU 3:
> [  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  160.501837] Call trace:
> [  160.502054]  dump_backtrace+0x0/0x198
> [  160.502384]  show_stack+0x14/0x60
> [  160.502679]  sched_show_task+0x148/0x168
> [  160.503027]  dump_cpu_task+0x40/0x4c
> [  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
> [  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
> [  160.504091]  update_process_times+0x94/0xd8
> [  160.504458]  tick_sched_handle.isra.0+0x30/0x50
> [  160.504858]  tick_sched_timer+0x48/0x98
> [  160.505195]  __hrtimer_run_queues+0x110/0x1b0
> [  160.505580]  hrtimer_interrupt+0xe4/0x238
> [  160.505934]  arch_timer_handler_phys+0x28/0x40
> [  160.506327]  handle_percpu_devid_irq+0x80/0x130
> [  160.506727]  generic_handle_domain_irq+0x38/0x58
> [  160.507133]  gic_handle_irq+0x4c/0x110
> [  160.507467]  call_on_irq_stack+0x28/0x3c
> [  160.507813]  do_interrupt_handler+0x78/0x80
> [  160.508181]  el1_interrupt+0x34/0x80
> [  160.508497]  el1h_64_irq_handler+0x14/0x20
> [  160.508858]  el1h_64_irq+0x74/0x78
> [  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
> [  160.509579]  _raw_spin_lock+0x5c/0x68
> [  160.509903]  panfrost_job_run+0x24c/0x3f8
> [  160.510259]  drm_sched_main+0x130/0x390
> [  160.510599]  kthread+0x174/0x180
> [  160.510886]  ret_from_fork+0x10/0x20
> [  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
> [  223.512325]  (t=52509 jiffies g=505 q=536)
> [  223.512688] Task dump for CPU 3:
> [  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  223.513842] Call trace:
> [  223.514056]  dump_backtrace+0x0/0x198
> [  223.514387]  show_stack+0x14/0x60
> [  223.514681]  sched_show_task+0x148/0x168
> [  223.515029]  dump_cpu_task+0x40/0x4c
> [  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
> [  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
> [  223.516094]  update_process_times+0x94/0xd8
> [  223.516462]  tick_sched_handle.isra.0+0x30/0x50
> [  223.516860]  tick_sched_timer+0x48/0x98
> [  223.517198]  __hrtimer_run_queues+0x110/0x1b0
> [  223.517582]  hrtimer_interrupt+0xe4/0x238
> [  223.517935]  arch_timer_handler_phys+0x28/0x40
> [  223.518327]  handle_percpu_devid_irq+0x80/0x130
> [  223.518727]  generic_handle_domain_irq+0x38/0x58
> [  223.519133]  gic_handle_irq+0x4c/0x110
> [  223.519466]  call_on_irq_stack+0x28/0x3c
> [  223.519812]  do_interrupt_handler+0x78/0x80
> [  223.520181]  el1_interrupt+0x34/0x80
> [  223.520498]  el1h_64_irq_handler+0x14/0x20
> [  223.520860]  el1h_64_irq+0x74/0x78
> [  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
> [  223.521584]  _raw_spin_lock+0x5c/0x68
> [  223.521908]  panfrost_job_run+0x24c/0x3f8
> [  223.522264]  drm_sched_main+0x130/0x390
> [  223.522605]  kthread+0x174/0x180
> [  223.522892]  ret_from_fork+0x10/0x20
> [  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
> [  286.524325]  (t=68262 jiffies g=505 q=612)
> [  286.524687] Task dump for CPU 3:
> [  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  286.525840] Call trace:
> [  286.526057]  dump_backtrace+0x0/0x198
> [  286.526387]  show_stack+0x14/0x60
> [  286.526681]  sched_show_task+0x148/0x168
> [  286.527029]  dump_cpu_task+0x40/0x4c
> [  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
> [  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
> [  286.528092]  update_process_times+0x94/0xd8
> [  286.528459]  tick_sched_handle.isra.0+0x30/0x50
> [  286.528859]  tick_sched_timer+0x48/0x98
> [  286.529197]  __hrtimer_run_queues+0x110/0x1b0
> [  286.529579]  hrtimer_interrupt+0xe4/0x238
> [  286.529933]  arch_timer_handler_phys+0x28/0x40
> [  286.530326]  handle_percpu_devid_irq+0x80/0x130
> [  286.530726]  generic_handle_domain_irq+0x38/0x58
> [  286.531132]  gic_handle_irq+0x4c/0x110
> [  286.531466]  call_on_irq_stack+0x28/0x3c
> [  286.531812]  do_interrupt_handler+0x78/0x80
> [  286.532180]  el1_interrupt+0x34/0x80
> [  286.532496]  el1h_64_irq_handler+0x14/0x20
> [  286.532857]  el1h_64_irq+0x74/0x78
> [  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
> [  286.533580]  _raw_spin_lock+0x5c/0x68
> [  286.533904]  panfrost_job_run+0x24c/0x3f8
> [  286.534259]  drm_sched_main+0x130/0x390
> [  286.534600]  kthread+0x174/0x180
> [  286.534887]  ret_from_fork+0x10/0x20
> [  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
> [  349.536324]  (t=84015 jiffies g=505 q=716)
> [  349.536687] Task dump for CPU 3:
> [  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  349.537839] Call trace:
> [  349.538055]  dump_backtrace+0x0/0x198
> [  349.538387]  show_stack+0x14/0x60
> [  349.538681]  sched_show_task+0x148/0x168
> [  349.539029]  dump_cpu_task+0x40/0x4c
> [  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
> [  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
> [  349.540094]  update_process_times+0x94/0xd8
> [  349.540462]  tick_sched_handle.isra.0+0x30/0x50
> [  349.540862]  tick_sched_timer+0x48/0x98
> [  349.541201]  __hrtimer_run_queues+0x110/0x1b0
> [  349.541585]  hrtimer_interrupt+0xe4/0x238
> [  349.541937]  arch_timer_handler_phys+0x28/0x40
> [  349.542330]  handle_percpu_devid_irq+0x80/0x130
> [  349.542730]  generic_handle_domain_irq+0x38/0x58
> [  349.543136]  gic_handle_irq+0x4c/0x110
> [  349.543469]  call_on_irq_stack+0x28/0x3c
> [  349.543815]  do_interrupt_handler+0x78/0x80
> [  349.544183]  el1_interrupt+0x34/0x80
> [  349.544500]  el1h_64_irq_handler+0x14/0x20
> [  349.544862]  el1h_64_irq+0x74/0x78
> [  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
> [  349.545586]  _raw_spin_lock+0x5c/0x68
> [  349.545910]  panfrost_job_run+0x24c/0x3f8
> [  349.546265]  drm_sched_main+0x130/0x390
> [  349.546604]  kthread+0x174/0x180
> [  349.546891]  ret_from_fork+0x10/0x20
> [  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
> [  412.548325]  (t=99768 jiffies g=505 q=784)
> [  412.548686] Task dump for CPU 3:
> [  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  412.549841] Call trace:
> [  412.550058]  dump_backtrace+0x0/0x198
> [  412.550389]  show_stack+0x14/0x60
> [  412.550684]  sched_show_task+0x148/0x168
> [  412.551031]  dump_cpu_task+0x40/0x4c
> [  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
> [  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
> [  412.552095]  update_process_times+0x94/0xd8
> [  412.552463]  tick_sched_handle.isra.0+0x30/0x50
> [  412.552863]  tick_sched_timer+0x48/0x98
> [  412.553201]  __hrtimer_run_queues+0x110/0x1b0
> [  412.553583]  hrtimer_interrupt+0xe4/0x238
> [  412.553936]  arch_timer_handler_phys+0x28/0x40
> [  412.554331]  handle_percpu_devid_irq+0x80/0x130
> [  412.554732]  generic_handle_domain_irq+0x38/0x58
> [  412.555139]  gic_handle_irq+0x4c/0x110
> [  412.555471]  call_on_irq_stack+0x28/0x3c
> [  412.555817]  do_interrupt_handler+0x78/0x80
> [  412.556186]  el1_interrupt+0x34/0x80
> [  412.556502]  el1h_64_irq_handler+0x14/0x20
> [  412.556864]  el1h_64_irq+0x74/0x78
> [  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
> [  412.557587]  _raw_spin_lock+0x5c/0x68
> [  412.557912]  panfrost_job_run+0x24c/0x3f8
> [  412.558267]  drm_sched_main+0x130/0x390
> [  412.558607]  kthread+0x174/0x180
> [  412.558894]  ret_from_fork+0x10/0x20
> [  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
> [  475.560329]  (t=115521 jiffies g=505 q=857)
> [  475.560697] Task dump for CPU 3:
> [  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  475.561850] Call trace:
> [  475.562067]  dump_backtrace+0x0/0x198
> [  475.562398]  show_stack+0x14/0x60
> [  475.562693]  sched_show_task+0x148/0x168
> [  475.563041]  dump_cpu_task+0x40/0x4c
> [  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
> [  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
> [  475.564104]  update_process_times+0x94/0xd8
> [  475.564472]  tick_sched_handle.isra.0+0x30/0x50
> [  475.564871]  tick_sched_timer+0x48/0x98
> [  475.565209]  __hrtimer_run_queues+0x110/0x1b0
> [  475.565592]  hrtimer_interrupt+0xe4/0x238
> [  475.565946]  arch_timer_handler_phys+0x28/0x40
> [  475.566339]  handle_percpu_devid_irq+0x80/0x130
> [  475.566739]  generic_handle_domain_irq+0x38/0x58
> [  475.567145]  gic_handle_irq+0x4c/0x110
> [  475.567477]  call_on_irq_stack+0x28/0x3c
> [  475.567822]  do_interrupt_handler+0x78/0x80
> [  475.568190]  el1_interrupt+0x34/0x80
> [  475.568507]  el1h_64_irq_handler+0x14/0x20
> [  475.568869]  el1h_64_irq+0x74/0x78
> [  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
> [  475.569593]  _raw_spin_lock+0x5c/0x68
> [  475.569915]  panfrost_job_run+0x24c/0x3f8
> [  475.570270]  drm_sched_main+0x130/0x390
> [  475.570610]  kthread+0x174/0x180
> [  475.570897]  ret_from_fork+0x10/0x20
> [  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
> [  538.572333]  (t=131274 jiffies g=505 q=947)
> [  538.572701] Task dump for CPU 3:
> [  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  538.573854] Call trace:
> [  538.574070]  dump_backtrace+0x0/0x198
> [  538.574402]  show_stack+0x14/0x60
> [  538.574696]  sched_show_task+0x148/0x168
> [  538.575044]  dump_cpu_task+0x40/0x4c
> [  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
> [  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
> [  538.576109]  update_process_times+0x94/0xd8
> [  538.576477]  tick_sched_handle.isra.0+0x30/0x50
> [  538.576878]  tick_sched_timer+0x48/0x98
> [  538.577216]  __hrtimer_run_queues+0x110/0x1b0
> [  538.577599]  hrtimer_interrupt+0xe4/0x238
> [  538.577953]  arch_timer_handler_phys+0x28/0x40
> [  538.578346]  handle_percpu_devid_irq+0x80/0x130
> [  538.578745]  generic_handle_domain_irq+0x38/0x58
> [  538.579151]  gic_handle_irq+0x4c/0x110
> [  538.579487]  call_on_irq_stack+0x28/0x3c
> [  538.579833]  do_interrupt_handler+0x78/0x80
> [  538.580201]  el1_interrupt+0x34/0x80
> [  538.580518]  el1h_64_irq_handler+0x14/0x20
> [  538.580880]  el1h_64_irq+0x74/0x78
> [  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
> [  538.581603]  _raw_spin_lock+0x5c/0x68
> [  538.581927]  panfrost_job_run+0x24c/0x3f8
> [  538.582283]  drm_sched_main+0x130/0x390
> [  538.582623]  kthread+0x174/0x180
> [  538.582910]  ret_from_fork+0x10/0x20
> [  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
> [  601.584330]  (t=147027 jiffies g=505 q=1018)
> [  601.584706] Task dump for CPU 3:
> [  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  601.585859] Call trace:
> [  601.586075]  dump_backtrace+0x0/0x198
> [  601.586406]  show_stack+0x14/0x60
> [  601.586701]  sched_show_task+0x148/0x168
> [  601.587048]  dump_cpu_task+0x40/0x4c
> [  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
> [  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
> [  601.588112]  update_process_times+0x94/0xd8
> [  601.588480]  tick_sched_handle.isra.0+0x30/0x50
> [  601.588880]  tick_sched_timer+0x48/0x98
> [  601.589218]  __hrtimer_run_queues+0x110/0x1b0
> [  601.589602]  hrtimer_interrupt+0xe4/0x238
> [  601.589956]  arch_timer_handler_phys+0x28/0x40
> [  601.590348]  handle_percpu_devid_irq+0x80/0x130
> [  601.590747]  generic_handle_domain_irq+0x38/0x58
> [  601.591153]  gic_handle_irq+0x4c/0x110
> [  601.591486]  call_on_irq_stack+0x28/0x3c
> [  601.591832]  do_interrupt_handler+0x78/0x80
> [  601.592201]  el1_interrupt+0x34/0x80
> [  601.592517]  el1h_64_irq_handler+0x14/0x20
> [  601.592879]  el1h_64_irq+0x74/0x78
> [  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
> [  601.593603]  _raw_spin_lock+0x5c/0x68
> [  601.593927]  panfrost_job_run+0x24c/0x3f8
> [  601.594283]  drm_sched_main+0x130/0x390
> [  601.594623]  kthread+0x174/0x180
> [  601.594910]  ret_from_fork+0x10/0x20
> [  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
> [  664.596333]  (t=162780 jiffies g=505 q=1086)
> [  664.596709] Task dump for CPU 3:
> [  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  664.597862] Call trace:
> [  664.598078]  dump_backtrace+0x0/0x198
> [  664.598409]  show_stack+0x14/0x60
> [  664.598704]  sched_show_task+0x148/0x168
> [  664.599052]  dump_cpu_task+0x40/0x4c
> [  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
> [  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
> [  664.600114]  update_process_times+0x94/0xd8
> [  664.600482]  tick_sched_handle.isra.0+0x30/0x50
> [  664.600882]  tick_sched_timer+0x48/0x98
> [  664.601220]  __hrtimer_run_queues+0x110/0x1b0
> [  664.601604]  hrtimer_interrupt+0xe4/0x238
> [  664.601958]  arch_timer_handler_phys+0x28/0x40
> [  664.602352]  handle_percpu_devid_irq+0x80/0x130
> [  664.602751]  generic_handle_domain_irq+0x38/0x58
> [  664.603158]  gic_handle_irq+0x4c/0x110
> [  664.603491]  call_on_irq_stack+0x28/0x3c
> [  664.603838]  do_interrupt_handler+0x78/0x80
> [  664.604206]  el1_interrupt+0x34/0x80
> [  664.604522]  el1h_64_irq_handler+0x14/0x20
> [  664.604883]  el1h_64_irq+0x74/0x78
> [  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
> [  664.605609]  _raw_spin_lock+0x5c/0x68
> [  664.605934]  panfrost_job_run+0x24c/0x3f8
> [  664.606290]  drm_sched_main+0x130/0x390
> [  664.606631]  kthread+0x174/0x180
> [  664.606918]  ret_from_fork+0x10/0x20
> [  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
> [  727.608331]  (t=178533 jiffies g=505 q=1152)
> [  727.608706] Task dump for CPU 3:
> [  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  727.609858] Call trace:
> [  727.610074]  dump_backtrace+0x0/0x198
> [  727.610403]  show_stack+0x14/0x60
> [  727.610698]  sched_show_task+0x148/0x168
> [  727.611047]  dump_cpu_task+0x40/0x4c
> [  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
> [  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
> [  727.612112]  update_process_times+0x94/0xd8
> [  727.612479]  tick_sched_handle.isra.0+0x30/0x50
> [  727.612879]  tick_sched_timer+0x48/0x98
> [  727.613216]  __hrtimer_run_queues+0x110/0x1b0
> [  727.613601]  hrtimer_interrupt+0xe4/0x238
> [  727.613955]  arch_timer_handler_phys+0x28/0x40
> [  727.614348]  handle_percpu_devid_irq+0x80/0x130
> [  727.614748]  generic_handle_domain_irq+0x38/0x58
> [  727.615154]  gic_handle_irq+0x4c/0x110
> [  727.615485]  call_on_irq_stack+0x28/0x3c
> [  727.615832]  do_interrupt_handler+0x78/0x80
> [  727.616200]  el1_interrupt+0x34/0x80
> [  727.616517]  el1h_64_irq_handler+0x14/0x20
> [  727.616879]  el1h_64_irq+0x74/0x78
> [  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
> [  727.617602]  _raw_spin_lock+0x5c/0x68
> [  727.617926]  panfrost_job_run+0x24c/0x3f8
> [  727.618282]  drm_sched_main+0x130/0x390
> [  727.618621]  kthread+0x174/0x180
> [  727.618908]  ret_from_fork+0x10/0x20
> [  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
> [  790.620331]  (t=194286 jiffies g=505 q=1219)
> [  790.620708] Task dump for CPU 3:
> [  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  790.621860] Call trace:
> [  790.622075]  dump_backtrace+0x0/0x198
> [  790.622405]  show_stack+0x14/0x60
> [  790.622699]  sched_show_task+0x148/0x168
> [  790.623049]  dump_cpu_task+0x40/0x4c
> [  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
> [  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
> [  790.624113]  update_process_times+0x94/0xd8
> [  790.624481]  tick_sched_handle.isra.0+0x30/0x50
> [  790.624880]  tick_sched_timer+0x48/0x98
> [  790.625218]  __hrtimer_run_queues+0x110/0x1b0
> [  790.625603]  hrtimer_interrupt+0xe4/0x238
> [  790.625957]  arch_timer_handler_phys+0x28/0x40
> [  790.626350]  handle_percpu_devid_irq+0x80/0x130
> [  790.626752]  generic_handle_domain_irq+0x38/0x58
> [  790.627158]  gic_handle_irq+0x4c/0x110
> [  790.627493]  call_on_irq_stack+0x28/0x3c
> [  790.627839]  do_interrupt_handler+0x78/0x80
> [  790.628208]  el1_interrupt+0x34/0x80
> [  790.628526]  el1h_64_irq_handler+0x14/0x20
> [  790.628888]  el1h_64_irq+0x74/0x78
> [  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
> [  790.629613]  _raw_spin_lock+0x5c/0x68
> [  790.629937]  panfrost_job_run+0x24c/0x3f8
> [  790.630292]  drm_sched_main+0x130/0x390
> [  790.630632]  kthread+0x174/0x180
> [  790.630919]  ret_from_fork+0x10/0x20
> [  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
> [  853.632340]  (t=210039 jiffies g=505 q=1318)
> [  853.632716] Task dump for CPU 3:
> [  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  853.633869] Call trace:
> [  853.634084]  dump_backtrace+0x0/0x198
> [  853.634418]  show_stack+0x14/0x60
> [  853.634712]  sched_show_task+0x148/0x168
> [  853.635061]  dump_cpu_task+0x40/0x4c
> [  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
> [  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
> [  853.636124]  update_process_times+0x94/0xd8
> [  853.636492]  tick_sched_handle.isra.0+0x30/0x50
> [  853.636892]  tick_sched_timer+0x48/0x98
> [  853.637230]  __hrtimer_run_queues+0x110/0x1b0
> [  853.637613]  hrtimer_interrupt+0xe4/0x238
> [  853.637965]  arch_timer_handler_phys+0x28/0x40
> [  853.638358]  handle_percpu_devid_irq+0x80/0x130
> [  853.638760]  generic_handle_domain_irq+0x38/0x58
> [  853.639166]  gic_handle_irq+0x4c/0x110
> [  853.639499]  call_on_irq_stack+0x28/0x3c
> [  853.639845]  do_interrupt_handler+0x78/0x80
> [  853.640213]  el1_interrupt+0x34/0x80
> [  853.640530]  el1h_64_irq_handler+0x14/0x20
> [  853.640892]  el1h_64_irq+0x74/0x78
> [  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
> [  853.641616]  _raw_spin_lock+0x5c/0x68
> [  853.641940]  panfrost_job_run+0x24c/0x3f8
> [  853.642295]  drm_sched_main+0x130/0x390
> [  853.642634]  kthread+0x174/0x180
> [  853.642921]  ret_from_fork+0x10/0x20
> [  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
> [  916.644339]  (t=225792 jiffies g=505 q=1390)
> [  916.644715] Task dump for CPU 3:
> [  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  916.645868] Call trace:
> [  916.646083]  dump_backtrace+0x0/0x198
> [  916.646414]  show_stack+0x14/0x60
> [  916.646708]  sched_show_task+0x148/0x168
> [  916.647055]  dump_cpu_task+0x40/0x4c
> [  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
> [  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
> [  916.648119]  update_process_times+0x94/0xd8
> [  916.648488]  tick_sched_handle.isra.0+0x30/0x50
> [  916.648887]  tick_sched_timer+0x48/0x98
> [  916.649225]  __hrtimer_run_queues+0x110/0x1b0
> [  916.649608]  hrtimer_interrupt+0xe4/0x238
> [  916.649962]  arch_timer_handler_phys+0x28/0x40
> [  916.650355]  handle_percpu_devid_irq+0x80/0x130
> [  916.650756]  generic_handle_domain_irq+0x38/0x58
> [  916.651162]  gic_handle_irq+0x4c/0x110
> [  916.651495]  call_on_irq_stack+0x28/0x3c
> [  916.651842]  do_interrupt_handler+0x78/0x80
> [  916.652210]  el1_interrupt+0x34/0x80
> [  916.652527]  el1h_64_irq_handler+0x14/0x20
> [  916.652889]  el1h_64_irq+0x74/0x78
> [  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
> [  916.653614]  _raw_spin_lock+0x5c/0x68
> [  916.653937]  panfrost_job_run+0x24c/0x3f8
> [  916.654293]  drm_sched_main+0x130/0x390
> [  916.654632]  kthread+0x174/0x180
> [  916.654920]  ret_from_fork+0x10/0x20
> 
> -- 
> Pengutronix e.K.                           |                             |
> Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
> 31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
> Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
@ 2021-11-15 14:08       ` Daniel Vetter
  0 siblings, 0 replies; 115+ messages in thread
From: Daniel Vetter @ 2021-11-15 14:08 UTC (permalink / raw)
  To: Sascha Hauer
  Cc: Christian König, intel-gfx, dri-devel, linaro-mm-sig, linux-media

On Mon, Nov 15, 2021 at 03:03:53PM +0100, Sascha Hauer wrote:
> Hi,
> 
> On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> > Simplifying the code a bit.
> > 
> > v2: use dma_resv_for_each_fence
> > 
> > Signed-off-by: Christian König <christian.koenig@amd.com>
> > ---
> >  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
> >  1 file changed, 6 insertions(+), 20 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > index 042c16b5d54a..5bc5f775abe1 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
> >  					    struct drm_gem_object *obj,
> >  					    bool write)
> >  {
> > +	struct dma_resv_iter cursor;
> > +	struct dma_fence *fence;
> >  	int ret;
> > -	struct dma_fence **fences;
> > -	unsigned int i, fence_count;
> > -
> > -	if (!write) {
> > -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> > -
> > -		return drm_sched_job_add_dependency(job, fence);
> > -	}
> > -
> > -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> > -	if (ret || !fence_count)
> > -		return ret;
> >  
> > -	for (i = 0; i < fence_count; i++) {
> > -		ret = drm_sched_job_add_dependency(job, fences[i]);
> > +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> > +		ret = drm_sched_job_add_dependency(job, fence);
> >  		if (ret)
> > -			break;
> > +			return ret;
> >  	}
> > -
> > -	for (; i < fence_count; i++)
> > -		dma_fence_put(fences[i]);
> > -	kfree(fences);
> > -	return ret;
> > +	return 0;
> >  }
> >  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
> >  
> 
> This patch makes the panfrost driver explode on v5.16-rc1 with the
> splat below. I didn't bisect it, but it goes away when I revert this
> patch. I only started weston, nothing more.
> 
> Any idea what goes wrong here?

Should be fixed in 13e9e30cafea1, but Christian pushed it to the wrong
branch so it missed -rc1.

Christian, this needs to go into drm-misc-fixes, pls cherry-pick it over.
-Daniel
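
If the refcount underflow and the "released with pending signals" warnings in
Sascha's log come from a reference-ownership mismatch (drm_sched_job_add_dependency()
taking over one fence reference on success, while the dma_resv_for_each_fence()
cursor keeps, and later drops, its own), then the loop in the patch above needs
an extra dma_fence_get() before handing each fence to the scheduler. Whether
that is what 13e9e30cafea1 actually does is not visible from this thread, so
the following is only a sketch under that assumption:

#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/*
 * Sketch only, not the quoted fix: assumes drm_sched_job_add_dependency()
 * consumes one fence reference on success, while the dma_resv iterator
 * cursor drops its own reference when it advances or finishes.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/* Extra reference for the scheduler; the cursor keeps its own. */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			/* Drop the extra reference again if adding the dependency failed. */
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}

The error path drops the extra reference itself because, under the assumption
above, the scheduler only takes ownership of it when
drm_sched_job_add_dependency() succeeds.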

> 
> Sascha
> 
> [   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
> [   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   12.514056] Modules linked in:
> [   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
> [   12.514621] ------------[ cut here ]------------
> [   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.515049] pc : dma_fence_release+0xac/0xe8
> [   12.515056] lr : dma_fence_release+0xac/0xe8
> [   12.515061] sp : ffff8000123ebb20
> [   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
> [   12.515518] refcount_t: addition on 0; use-after-free.
> [   12.516015]  x27: 0000000000000000
> [   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
> [   12.516992] x26: 0000000000000001
> [   12.517366] Modules linked in:
> [   12.517654]  x25: ffff000004b051c0
> [   12.518108]
> [   12.518555]  x24: 0000000000000000
> [   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
> [   12.519576]
> [   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.520133] x23: 0000000000000000
> [   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.520559]  x22: ffff800010d41b78
> [   12.520856] pc : refcount_warn_saturate+0x98/0x140
> [   12.521625]  x21: ffff000004b05050
> [   12.521755] lr : refcount_warn_saturate+0x98/0x140
> [   12.522299]
> [   12.522588] sp : ffff8000122b3bc0
> [   12.523192] x20: ffff000004b05040
> [   12.523489] x29: ffff8000122b3bc0
> [   12.523906]  x19: ffff000004b05078
> [   12.524203]  x28: 0000000000000000
> [   12.524620]  x18: 0000000000000010
> [   12.524751]  x27: ffff000003791880
> [   12.525040]
> [   12.525329]
> [   12.525618] x17: 0000000000000000
> [   12.525915] x26: ffff8000122b3d30
> [   12.526212]  x16: 0000000000000000
> [   12.526509]  x25: 0000000000000001
> [   12.526806]  x15: ffff0000050e2dc0
> [   12.526937]  x24: ffff000003791a10
> [   12.527067]
> [   12.527357]
> [   12.527646] x14: 00000000000001b5
> [   12.527942] x23: 0000000000000000
> [   12.528240]  x13: ffff0000050e2dc0
> [   12.528536]  x22: ffff000003505280
> [   12.528833]  x12: 00000000ffffffea
> [   12.528964]  x21: ffff000003a2a220
> [   12.529095]
> [   12.529384]
> [   12.529673] x11: ffff800011761ec8
> [   12.529970] x20: ffff000004b05078
> [   12.530267]  x10: ffff8000115e1e88
> [   12.530564]  x19: ffff000004b05000
> [   12.530861]  x9 : ffff8000115e1ee0
> [   12.530992]  x18: 0000000000000010
> [   12.531123]
> [   12.531412]
> [   12.531701] x8 : 000000000017ffe8
> [   12.531998] x17: 0000000000500600
> [   12.532294]  x7 : c0000000fffeffff
> [   12.532591]  x16: 0000000000000000
> [   12.532888]  x6 : 0000000000000001
> [   12.533019]  x15: ffff000003505700
> [   12.533150]
> [   12.533439]
> [   12.533728] x5 : ffff00007fb8c9a0
> [   12.534025] x14: 0000000000000000
> [   12.534322]  x4 : 0000000000000000
> [   12.534619]  x13: 292d2d3d45505954
> [   12.534914]  x3 : 0000000000000001
> [   12.535045]  x12: 4220534253532d20
> [   12.535176]
> [   12.535465]
> [   12.535754] x2 : ffff00007fb8c9a8
> [   12.536051] x11: 5449442d204f4354
> [   12.536347]  x1 : ea6e0584a53f2200
> [   12.536643]  x10: 2d204f41552d204e
> [   12.536941]  x0 : 0000000000000000
> [   12.537073]  x9 : 4e41502b20666961
> [   12.537203]
> [   12.537492]
> [   12.537782] Call trace:
> [   12.538078] x8 : 642076635a6e2820
> [   12.538377]  dma_fence_release+0xac/0xe8
> [   12.538671]  x7 : 205d343430353135
> [   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.539098]  x6 : 352e32312020205b
> [   12.539230]  panfrost_job_push+0x1bc/0x200
> [   12.539442]
> [   12.539732]  panfrost_ioctl_submit+0x358/0x438
> [   12.540073] x5 : ffff00007fb539a0
> [   12.540370]  drm_ioctl_kernel+0xb8/0x170
> [   12.540771]  x4 : 0000000000000000
> [   12.541069]  drm_ioctl+0x214/0x450
> [   12.541424]  x3 : 0000000000000001
> [   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.541943]
> [   12.542233]  invoke_syscall+0x40/0xf8
> [   12.542573] x2 : ffff00007fb539a8
> [   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.543167]  x1 : 0ac4fb7a0680bb00
> [   12.543465]  do_el0_svc+0x20/0x80
> [   12.543805]  x0 : 0000000000000000
> [   12.543936]  el0_svc+0x1c/0x50
> [   12.544255]
> [   12.544544]  el0t_64_sync_handler+0xa8/0xb0
> [   12.544955] Call trace:
> [   12.545250]  el0t_64_sync+0x16c/0x170
> [   12.545540]  refcount_warn_saturate+0x98/0x140
> [   12.545837] ---[ end trace ba74542f51246288 ]---
> [   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
> [   12.546285] ------------[ cut here ]------------
> [   12.546598]  drm_sched_job_done_cb+0x10/0x18
> [   12.546813] refcount_t: underflow; use-after-free.
> [   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
> [   12.547920]  dma_fence_signal_locked+0x20/0x30
> [   12.548336] Modules linked in:
> [   12.548737]  panfrost_job_handle_done+0x34/0x50
> [   12.549110]
> [   12.549525]  panfrost_job_handle_irqs+0x358/0x570
> [   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
> [   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.551373]  irq_thread_fn+0x28/0x98
> [   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.551899]  irq_thread+0x12c/0x230
> [   12.552309] pc : refcount_warn_saturate+0xec/0x140
> [   12.553131]  kthread+0x174/0x180
> [   12.553578] lr : refcount_warn_saturate+0xec/0x140
> [   12.554121]  ret_from_fork+0x10/0x20
> [   12.554432] sp : ffff8000123ebaa0
> [   12.555038] ---[ end trace ba74542f51246289 ]---
> [   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
> [   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
> [   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
> [   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
> [   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
> [   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
> [   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.563733] Call trace:
> [   12.563950]  refcount_warn_saturate+0xec/0x140
> [   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
> [   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   12.565216]  dma_fence_release+0xd4/0xe8
> [   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.565970]  panfrost_job_push+0x1bc/0x200
> [   12.566333]  panfrost_ioctl_submit+0x358/0x438
> [   12.566726]  drm_ioctl_kernel+0xb8/0x170
> [   12.567072]  drm_ioctl+0x214/0x450
> [   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.567721]  invoke_syscall+0x40/0xf8
> [   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.568463]  do_el0_svc+0x20/0x80
> [   12.568755]  el0_svc+0x1c/0x50
> [   12.569030]  el0t_64_sync_handler+0xa8/0xb0
> [   12.569399]  el0t_64_sync+0x16c/0x170
> [   12.569724] ---[ end trace ba74542f5124628a ]---
> [   12.595086] ------------[ cut here ]------------
> [   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
> [   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   12.596934] Modules linked in:
> [   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.599202] pc : dma_fence_release+0xac/0xe8
> [   12.599584] lr : dma_fence_release+0xac/0xe8
> [   12.599960] sp : ffff8000123ebb20
> [   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
> [   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.606542] Call trace:
> [   12.606760]  dma_fence_release+0xac/0xe8
> [   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   12.607517]  panfrost_job_push+0x1bc/0x200
> [   12.607882]  panfrost_ioctl_submit+0x358/0x438
> [   12.608274]  drm_ioctl_kernel+0xb8/0x170
> [   12.608622]  drm_ioctl+0x214/0x450
> [   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.609269]  invoke_syscall+0x40/0xf8
> [   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.610011]  do_el0_svc+0x20/0x80
> [   12.610304]  el0_svc+0x1c/0x50
> [   12.610577]  el0t_64_sync_handler+0xa8/0xb0
> [   12.610946]  el0t_64_sync+0x16c/0x170
> [   12.611276] ---[ end trace ba74542f5124628b ]---
> [   12.612869] ------------[ cut here ]------------
> [   12.613288] refcount_t: saturated; leaking memory.
> [   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
> [   12.614476] Modules linked in:
> [   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   12.616773] pc : refcount_warn_saturate+0xc0/0x140
> [   12.617200] lr : refcount_warn_saturate+0xc0/0x140
> [   12.617622] sp : ffff8000123eba60
> [   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
> [   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
> [   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
> [   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
> [   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
> [   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   12.624256] Call trace:
> [   12.624474]  refcount_warn_saturate+0xc0/0x140
> [   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
> [   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
> [   12.625811]  drm_atomic_helper_commit+0x80/0x360
> [   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
> [   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
> [   12.627050]  drm_ioctl_kernel+0xb8/0x170
> [   12.627397]  drm_ioctl+0x214/0x450
> [   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
> [   12.628046]  invoke_syscall+0x40/0xf8
> [   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
> [   12.628787]  do_el0_svc+0x20/0x80
> [   12.629079]  el0_svc+0x1c/0x50
> [   12.629354]  el0t_64_sync_handler+0xa8/0xb0
> [   12.629723]  el0t_64_sync+0x16c/0x170
> [   12.630048] ---[ end trace ba74542f5124628c ]---
> [   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
> [   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
> [   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
> [   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
> [   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
> [   13.296693] ------------[ cut here ]------------
> [   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
> [   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.298560] Modules linked in:
> [   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.300830] pc : dma_fence_release+0xac/0xe8
> [   13.301208] lr : dma_fence_release+0xac/0xe8
> [   13.301585] sp : ffff8000123ebb20
> [   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.308149] Call trace:
> [   13.308367]  dma_fence_release+0xac/0xe8
> [   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.309119]  panfrost_job_push+0x1bc/0x200
> [   13.309483]  panfrost_ioctl_submit+0x358/0x438
> [   13.309875]  drm_ioctl_kernel+0xb8/0x170
> [   13.310221]  drm_ioctl+0x214/0x450
> [   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.310868]  invoke_syscall+0x40/0xf8
> [   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.311609]  do_el0_svc+0x20/0x80
> [   13.311903]  el0_svc+0x1c/0x50
> [   13.312177]  el0t_64_sync_handler+0xa8/0xb0
> [   13.312545]  el0t_64_sync+0x16c/0x170
> [   13.312869] ---[ end trace ba74542f5124628d ]---
> [   13.340454] ------------[ cut here ]------------
> [   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
> [   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.342318] Modules linked in:
> [   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.344584] pc : dma_fence_release+0xac/0xe8
> [   13.344961] lr : dma_fence_release+0xac/0xe8
> [   13.345338] sp : ffff8000123ebb20
> [   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
> [   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.351900] Call trace:
> [   13.352116]  dma_fence_release+0xac/0xe8
> [   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.352869]  panfrost_job_push+0x1bc/0x200
> [   13.353232]  panfrost_ioctl_submit+0x358/0x438
> [   13.353624]  drm_ioctl_kernel+0xb8/0x170
> [   13.353971]  drm_ioctl+0x214/0x450
> [   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.354616]  invoke_syscall+0x40/0xf8
> [   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.355356]  do_el0_svc+0x20/0x80
> [   13.355650]  el0_svc+0x1c/0x50
> [   13.355925]  el0t_64_sync_handler+0xa8/0xb0
> [   13.356293]  el0t_64_sync+0x16c/0x170
> [   13.356618] ---[ end trace ba74542f5124628e ]---
> [   13.379841] ------------[ cut here ]------------
> [   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
> [   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.381680] Modules linked in:
> [   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.383937] pc : dma_fence_release+0xac/0xe8
> [   13.384314] lr : dma_fence_release+0xac/0xe8
> [   13.384690] sp : ffff8000123ebb20
> [   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
> [   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
> [   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
> [   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
> [   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
> [   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
> [   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
> [   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.391247] Call trace:
> [   13.391464]  dma_fence_release+0xac/0xe8
> [   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.392217]  panfrost_job_push+0x1bc/0x200
> [   13.392581]  panfrost_ioctl_submit+0x358/0x438
> [   13.392972]  drm_ioctl_kernel+0xb8/0x170
> [   13.393319]  drm_ioctl+0x214/0x450
> [   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.393967]  invoke_syscall+0x40/0xf8
> [   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.394708]  do_el0_svc+0x20/0x80
> [   13.395002]  el0_svc+0x1c/0x50
> [   13.395275]  el0t_64_sync_handler+0xa8/0xb0
> [   13.395643]  el0t_64_sync+0x16c/0x170
> [   13.395968] ---[ end trace ba74542f5124628f ]---
> [   13.398130] ------------[ cut here ]------------
> [   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
> [   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
> [   13.400011] Modules linked in:
> [   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.402269] pc : dma_fence_release+0xac/0xe8
> [   13.402646] lr : dma_fence_release+0xac/0xe8
> [   13.403024] sp : ffff8000123ebb20
> [   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
> [   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
> [   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
> [   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
> [   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
> [   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
> [   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
> [   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
> [   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
> [   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
> [   13.409583] Call trace:
> [   13.409800]  dma_fence_release+0xac/0xe8
> [   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
> [   13.410553]  panfrost_job_push+0x1bc/0x200
> [   13.410917]  panfrost_ioctl_submit+0x358/0x438
> [   13.411309]  drm_ioctl_kernel+0xb8/0x170
> [   13.411656]  drm_ioctl+0x214/0x450
> [   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.412303]  invoke_syscall+0x40/0xf8
> [   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.413042]  do_el0_svc+0x20/0x80
> [   13.413335]  el0_svc+0x1c/0x50
> [   13.413607]  el0t_64_sync_handler+0xa8/0xb0
> [   13.413976]  el0t_64_sync+0x16c/0x170
> [   13.414298] ---[ end trace ba74542f51246290 ]---
> [   13.430129] ------------[ cut here ]------------
> [   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
> [   13.430557] refcount_t: saturated; leaking memory.
> [   13.431321] Mem abort info:
> [   13.431324]   ESR = 0x96000044
> [   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
> [   13.431330]   SET = 0, FnV = 0
> [   13.431333]   EA = 0, S1PTW = 0
> [   13.431335]   FSC = 0x04: level 0 translation fault
> [   13.431337] Data abort info:
> [   13.431339]   ISV = 0, ISS = 0x00000044
> [   13.431340]   CM = 0, WnR = 1
> [   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
> [   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
> [   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
> [   13.431359] Modules linked in:
> [   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
> [   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
> [   13.432059] lr : dma_fence_signal+0x30/0x60
> [   13.432327] Modules linked in:
> [   13.432789] sp : ffff8000122b3b50
> [   13.433057]
> [   13.433331] x29: ffff8000122b3b50
> [   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
> [   13.434008]  x28: 0000000000000000
> [   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
> [   13.434601]  x27: ffff000003791880
> [   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [   13.435751]
> [   13.435753] x26: ffff8000122b3d30
> [   13.436237] pc : refcount_warn_saturate+0x6c/0x140
> [   13.436504]  x25: 0000000000000001
> [   13.437393] lr : refcount_warn_saturate+0x6c/0x140
> [   13.437938]  x24: ffff000003791a10
> [   13.438542] sp : ffff8000123ebb40
> [   13.439042]
> [   13.439767] x29: ffff8000123ebb40
> [   13.440130] x23: 0000000000000000
> [   13.440398]  x28: ffff8000123ebd58
> [   13.440687]  x22: ffff000003505280
> [   13.440819]  x27: 0000000000000000
> [   13.441108]  x21: ffff8000122b3b88
> [   13.441931]
> [   13.442228]
> [   13.442773] x26: 0000000000000001
> [   13.443070] x20: ffff000004b051c0
> [   13.443674]  x25: ffff000004b051c0
> [   13.443806]  x19: ffff000004b051c0
> [   13.444095]  x24: 0000000000000000
> [   13.444513]  x18: 0000000000000000
> [   13.444811]
> [   13.445227]
> [   13.445524] x23: 0000000000000000
> [   13.445814] x17: 3837783028203032
> [   13.445945]  x22: ffff000004b051c0
> [   13.446236]  x16: 3139323835323120
> [   13.446525]  x21: ffff000004d73100
> [   13.446822]  x15: 00000205aa24947a
> [   13.447120]
> [   13.447417]
> [   13.447715] x20: ffff000004b05400
> [   13.447846] x14: 0000000000000326
> [   13.447977]  x19: 00000000ffffffff
> [   13.448266]  x13: 0000000000000000
> [   13.448555]  x18: 0000000000000010
> [   13.448851]  x12: 0000000000000000
> [   13.449148]
> [   13.449446]
> [   13.449743] x17: 0000000000000000
> [   13.449874] x11: 0000000000000001
> [   13.450006]  x16: 0000000000000000
> [   13.450296]  x10: ffff8000122b3d48
> [   13.450585]  x15: 000060978994e822
> [   13.450882]  x9 : 00000000000019e0
> [   13.451179]
> [   13.451477]
> [   13.451774] x14: 00000000000000b6
> [   13.451905] x8 : ffff8000122b3d78
> [   13.452037]  x13: 00000000000000b6
> [   13.452326]  x7 : 0000000000000000
> [   13.452614]  x12: 0000000000000000
> [   13.452912]  x6 : 000000001fcf847e
> [   13.453209]
> [   13.453506]
> [   13.453803] x11: 0000000000000001
> [   13.453934] x5 : 00ffffffffffffff
> [   13.454066]  x10: 00000000000009a0
> [   13.454356]  x4 : 0015ef3c03fd7c00
> [   13.454643]  x9 : ffff8000123eb8c0
> [   13.454941]  x3 : 0000000000000018
> [   13.455238]
> [   13.455536]
> [   13.455833] x8 : ffff0000050e3340
> [   13.455965] x2 : ffff000004b051f0
> [   13.456096]  x7 : ffff00007fb92a80
> [   13.456386]  x1 : 000000032053be4d
> [   13.456676]  x6 : 0000000000000115
> [   13.456973]  x0 : 0000000000000000
> [   13.457271]
> [   13.457568]
> [   13.457866] x5 : 0000000000000000
> [   13.457998] Call trace:
> [   13.458128]  x4 : ffff00007fb8c9a0
> [   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
> [   13.458707]  x3 : ffff00007fb8f950
> [   13.459005]  dma_fence_signal+0x30/0x60
> [   13.459302]
> [   13.459600]  drm_sched_fence_finished+0x10/0x18
> [   13.459897] x2 : ffff00007fb8c9a0
> [   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
> [   13.460159]  x1 : ea6e0584a53f2200
> [   13.460449]  drm_sched_job_done_cb+0x10/0x18
> [   13.460738]  x0 : 0000000000000000
> [   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
> [   13.461333]
> [   13.461631]  dma_fence_signal_locked+0x20/0x30
> [   13.461929] Call trace:
> [   13.462060]  panfrost_job_handle_done+0x34/0x50
> [   13.462192]  refcount_warn_saturate+0x6c/0x140
> [   13.462481]  panfrost_job_handle_irqs+0x358/0x570
> [   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
> [   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
> [   13.463462]  panfrost_job_push+0x1bc/0x200
> [   13.463760]  irq_thread_fn+0x28/0x98
> [   13.464094]  panfrost_ioctl_submit+0x358/0x438
> [   13.464225]  irq_thread+0x12c/0x230
> [   13.464620]  drm_ioctl_kernel+0xb8/0x170
> [   13.464909]  kthread+0x174/0x180
> [   13.465319]  drm_ioctl+0x214/0x450
> [   13.465617]  ret_from_fork+0x10/0x20
> [   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
> [   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
> [   13.466756]  invoke_syscall+0x40/0xf8
> [   13.466891] ---[ end trace ba74542f51246291 ]---
> [   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
> [   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
> [   13.467883]  do_el0_svc+0x20/0x80
> [   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
> [   13.468678]  el0_svc+0x1c/0x50
> [   13.475908]  el0t_64_sync_handler+0xa8/0xb0
> [   13.476277]  el0t_64_sync+0x16c/0x170
> [   13.476601] ---[ end trace ba74542f51246292 ]---
> [   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
> [   14.462959] sched: RT throttling activated
> [   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
> [   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
> [   34.476312]  (t=5250 jiffies g=505 q=301)
> [   34.476667] Task dump for CPU 3:
> [   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [   34.477820] Call trace:
> [   34.478035]  dump_backtrace+0x0/0x198
> [   34.478365]  show_stack+0x14/0x60
> [   34.478659]  sched_show_task+0x148/0x168
> [   34.479008]  dump_cpu_task+0x40/0x4c
> [   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
> [   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
> [   34.480072]  update_process_times+0x94/0xd8
> [   34.480440]  tick_sched_handle.isra.0+0x30/0x50
> [   34.480840]  tick_sched_timer+0x48/0x98
> [   34.481178]  __hrtimer_run_queues+0x110/0x1b0
> [   34.481562]  hrtimer_interrupt+0xe4/0x238
> [   34.481917]  arch_timer_handler_phys+0x28/0x40
> [   34.482310]  handle_percpu_devid_irq+0x80/0x130
> [   34.482710]  generic_handle_domain_irq+0x38/0x58
> [   34.483116]  gic_handle_irq+0x4c/0x110
> [   34.483450]  call_on_irq_stack+0x28/0x3c
> [   34.483798]  do_interrupt_handler+0x78/0x80
> [   34.484166]  el1_interrupt+0x34/0x80
> [   34.484484]  el1h_64_irq_handler+0x14/0x20
> [   34.484846]  el1h_64_irq+0x74/0x78
> [   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
> [   34.485568]  _raw_spin_lock+0x5c/0x68
> [   34.485895]  panfrost_job_run+0x24c/0x3f8
> [   34.486250]  drm_sched_main+0x130/0x390
> [   34.486591]  kthread+0x174/0x180
> [   34.486878]  ret_from_fork+0x10/0x20
> [   35.810989] vcc3v3_lcd1_n: disabling
> [   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
> [   97.488326]  (t=21003 jiffies g=505 q=379)
> [   97.488687] Task dump for CPU 3:
> [   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [   97.489842] Call trace:
> [   97.490056]  dump_backtrace+0x0/0x198
> [   97.490388]  show_stack+0x14/0x60
> [   97.490682]  sched_show_task+0x148/0x168
> [   97.491030]  dump_cpu_task+0x40/0x4c
> [   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
> [   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
> [   97.492095]  update_process_times+0x94/0xd8
> [   97.492463]  tick_sched_handle.isra.0+0x30/0x50
> [   97.492862]  tick_sched_timer+0x48/0x98
> [   97.493200]  __hrtimer_run_queues+0x110/0x1b0
> [   97.493582]  hrtimer_interrupt+0xe4/0x238
> [   97.493937]  arch_timer_handler_phys+0x28/0x40
> [   97.494330]  handle_percpu_devid_irq+0x80/0x130
> [   97.494730]  generic_handle_domain_irq+0x38/0x58
> [   97.495136]  gic_handle_irq+0x4c/0x110
> [   97.495473]  call_on_irq_stack+0x28/0x3c
> [   97.495818]  do_interrupt_handler+0x78/0x80
> [   97.496186]  el1_interrupt+0x34/0x80
> [   97.496503]  el1h_64_irq_handler+0x14/0x20
> [   97.496865]  el1h_64_irq+0x74/0x78
> [   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
> [   97.497588]  _raw_spin_lock+0x5c/0x68
> [   97.497912]  panfrost_job_run+0x24c/0x3f8
> [   97.498268]  drm_sched_main+0x130/0x390
> [   97.498607]  kthread+0x174/0x180
> [   97.498895]  ret_from_fork+0x10/0x20
> [  140.108141] random: crng init done
> [  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
> [  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
> [  160.500322]  (t=36756 jiffies g=505 q=482)
> [  160.500684] Task dump for CPU 3:
> [  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  160.501837] Call trace:
> [  160.502054]  dump_backtrace+0x0/0x198
> [  160.502384]  show_stack+0x14/0x60
> [  160.502679]  sched_show_task+0x148/0x168
> [  160.503027]  dump_cpu_task+0x40/0x4c
> [  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
> [  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
> [  160.504091]  update_process_times+0x94/0xd8
> [  160.504458]  tick_sched_handle.isra.0+0x30/0x50
> [  160.504858]  tick_sched_timer+0x48/0x98
> [  160.505195]  __hrtimer_run_queues+0x110/0x1b0
> [  160.505580]  hrtimer_interrupt+0xe4/0x238
> [  160.505934]  arch_timer_handler_phys+0x28/0x40
> [  160.506327]  handle_percpu_devid_irq+0x80/0x130
> [  160.506727]  generic_handle_domain_irq+0x38/0x58
> [  160.507133]  gic_handle_irq+0x4c/0x110
> [  160.507467]  call_on_irq_stack+0x28/0x3c
> [  160.507813]  do_interrupt_handler+0x78/0x80
> [  160.508181]  el1_interrupt+0x34/0x80
> [  160.508497]  el1h_64_irq_handler+0x14/0x20
> [  160.508858]  el1h_64_irq+0x74/0x78
> [  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
> [  160.509579]  _raw_spin_lock+0x5c/0x68
> [  160.509903]  panfrost_job_run+0x24c/0x3f8
> [  160.510259]  drm_sched_main+0x130/0x390
> [  160.510599]  kthread+0x174/0x180
> [  160.510886]  ret_from_fork+0x10/0x20
> [  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
> [  223.512325]  (t=52509 jiffies g=505 q=536)
> [  223.512688] Task dump for CPU 3:
> [  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  223.513842] Call trace:
> [  223.514056]  dump_backtrace+0x0/0x198
> [  223.514387]  show_stack+0x14/0x60
> [  223.514681]  sched_show_task+0x148/0x168
> [  223.515029]  dump_cpu_task+0x40/0x4c
> [  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
> [  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
> [  223.516094]  update_process_times+0x94/0xd8
> [  223.516462]  tick_sched_handle.isra.0+0x30/0x50
> [  223.516860]  tick_sched_timer+0x48/0x98
> [  223.517198]  __hrtimer_run_queues+0x110/0x1b0
> [  223.517582]  hrtimer_interrupt+0xe4/0x238
> [  223.517935]  arch_timer_handler_phys+0x28/0x40
> [  223.518327]  handle_percpu_devid_irq+0x80/0x130
> [  223.518727]  generic_handle_domain_irq+0x38/0x58
> [  223.519133]  gic_handle_irq+0x4c/0x110
> [  223.519466]  call_on_irq_stack+0x28/0x3c
> [  223.519812]  do_interrupt_handler+0x78/0x80
> [  223.520181]  el1_interrupt+0x34/0x80
> [  223.520498]  el1h_64_irq_handler+0x14/0x20
> [  223.520860]  el1h_64_irq+0x74/0x78
> [  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
> [  223.521584]  _raw_spin_lock+0x5c/0x68
> [  223.521908]  panfrost_job_run+0x24c/0x3f8
> [  223.522264]  drm_sched_main+0x130/0x390
> [  223.522605]  kthread+0x174/0x180
> [  223.522892]  ret_from_fork+0x10/0x20
> [  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
> [  286.524325]  (t=68262 jiffies g=505 q=612)
> [  286.524687] Task dump for CPU 3:
> [  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  286.525840] Call trace:
> [  286.526057]  dump_backtrace+0x0/0x198
> [  286.526387]  show_stack+0x14/0x60
> [  286.526681]  sched_show_task+0x148/0x168
> [  286.527029]  dump_cpu_task+0x40/0x4c
> [  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
> [  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
> [  286.528092]  update_process_times+0x94/0xd8
> [  286.528459]  tick_sched_handle.isra.0+0x30/0x50
> [  286.528859]  tick_sched_timer+0x48/0x98
> [  286.529197]  __hrtimer_run_queues+0x110/0x1b0
> [  286.529579]  hrtimer_interrupt+0xe4/0x238
> [  286.529933]  arch_timer_handler_phys+0x28/0x40
> [  286.530326]  handle_percpu_devid_irq+0x80/0x130
> [  286.530726]  generic_handle_domain_irq+0x38/0x58
> [  286.531132]  gic_handle_irq+0x4c/0x110
> [  286.531466]  call_on_irq_stack+0x28/0x3c
> [  286.531812]  do_interrupt_handler+0x78/0x80
> [  286.532180]  el1_interrupt+0x34/0x80
> [  286.532496]  el1h_64_irq_handler+0x14/0x20
> [  286.532857]  el1h_64_irq+0x74/0x78
> [  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
> [  286.533580]  _raw_spin_lock+0x5c/0x68
> [  286.533904]  panfrost_job_run+0x24c/0x3f8
> [  286.534259]  drm_sched_main+0x130/0x390
> [  286.534600]  kthread+0x174/0x180
> [  286.534887]  ret_from_fork+0x10/0x20
> [  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
> [  349.536324]  (t=84015 jiffies g=505 q=716)
> [  349.536687] Task dump for CPU 3:
> [  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  349.537839] Call trace:
> [  349.538055]  dump_backtrace+0x0/0x198
> [  349.538387]  show_stack+0x14/0x60
> [  349.538681]  sched_show_task+0x148/0x168
> [  349.539029]  dump_cpu_task+0x40/0x4c
> [  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
> [  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
> [  349.540094]  update_process_times+0x94/0xd8
> [  349.540462]  tick_sched_handle.isra.0+0x30/0x50
> [  349.540862]  tick_sched_timer+0x48/0x98
> [  349.541201]  __hrtimer_run_queues+0x110/0x1b0
> [  349.541585]  hrtimer_interrupt+0xe4/0x238
> [  349.541937]  arch_timer_handler_phys+0x28/0x40
> [  349.542330]  handle_percpu_devid_irq+0x80/0x130
> [  349.542730]  generic_handle_domain_irq+0x38/0x58
> [  349.543136]  gic_handle_irq+0x4c/0x110
> [  349.543469]  call_on_irq_stack+0x28/0x3c
> [  349.543815]  do_interrupt_handler+0x78/0x80
> [  349.544183]  el1_interrupt+0x34/0x80
> [  349.544500]  el1h_64_irq_handler+0x14/0x20
> [  349.544862]  el1h_64_irq+0x74/0x78
> [  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
> [  349.545586]  _raw_spin_lock+0x5c/0x68
> [  349.545910]  panfrost_job_run+0x24c/0x3f8
> [  349.546265]  drm_sched_main+0x130/0x390
> [  349.546604]  kthread+0x174/0x180
> [  349.546891]  ret_from_fork+0x10/0x20
> [  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
> [  412.548325]  (t=99768 jiffies g=505 q=784)
> [  412.548686] Task dump for CPU 3:
> [  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  412.549841] Call trace:
> [  412.550058]  dump_backtrace+0x0/0x198
> [  412.550389]  show_stack+0x14/0x60
> [  412.550684]  sched_show_task+0x148/0x168
> [  412.551031]  dump_cpu_task+0x40/0x4c
> [  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
> [  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
> [  412.552095]  update_process_times+0x94/0xd8
> [  412.552463]  tick_sched_handle.isra.0+0x30/0x50
> [  412.552863]  tick_sched_timer+0x48/0x98
> [  412.553201]  __hrtimer_run_queues+0x110/0x1b0
> [  412.553583]  hrtimer_interrupt+0xe4/0x238
> [  412.553936]  arch_timer_handler_phys+0x28/0x40
> [  412.554331]  handle_percpu_devid_irq+0x80/0x130
> [  412.554732]  generic_handle_domain_irq+0x38/0x58
> [  412.555139]  gic_handle_irq+0x4c/0x110
> [  412.555471]  call_on_irq_stack+0x28/0x3c
> [  412.555817]  do_interrupt_handler+0x78/0x80
> [  412.556186]  el1_interrupt+0x34/0x80
> [  412.556502]  el1h_64_irq_handler+0x14/0x20
> [  412.556864]  el1h_64_irq+0x74/0x78
> [  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
> [  412.557587]  _raw_spin_lock+0x5c/0x68
> [  412.557912]  panfrost_job_run+0x24c/0x3f8
> [  412.558267]  drm_sched_main+0x130/0x390
> [  412.558607]  kthread+0x174/0x180
> [  412.558894]  ret_from_fork+0x10/0x20
> [  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
> [  475.560329]  (t=115521 jiffies g=505 q=857)
> [  475.560697] Task dump for CPU 3:
> [  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  475.561850] Call trace:
> [  475.562067]  dump_backtrace+0x0/0x198
> [  475.562398]  show_stack+0x14/0x60
> [  475.562693]  sched_show_task+0x148/0x168
> [  475.563041]  dump_cpu_task+0x40/0x4c
> [  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
> [  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
> [  475.564104]  update_process_times+0x94/0xd8
> [  475.564472]  tick_sched_handle.isra.0+0x30/0x50
> [  475.564871]  tick_sched_timer+0x48/0x98
> [  475.565209]  __hrtimer_run_queues+0x110/0x1b0
> [  475.565592]  hrtimer_interrupt+0xe4/0x238
> [  475.565946]  arch_timer_handler_phys+0x28/0x40
> [  475.566339]  handle_percpu_devid_irq+0x80/0x130
> [  475.566739]  generic_handle_domain_irq+0x38/0x58
> [  475.567145]  gic_handle_irq+0x4c/0x110
> [  475.567477]  call_on_irq_stack+0x28/0x3c
> [  475.567822]  do_interrupt_handler+0x78/0x80
> [  475.568190]  el1_interrupt+0x34/0x80
> [  475.568507]  el1h_64_irq_handler+0x14/0x20
> [  475.568869]  el1h_64_irq+0x74/0x78
> [  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
> [  475.569593]  _raw_spin_lock+0x5c/0x68
> [  475.569915]  panfrost_job_run+0x24c/0x3f8
> [  475.570270]  drm_sched_main+0x130/0x390
> [  475.570610]  kthread+0x174/0x180
> [  475.570897]  ret_from_fork+0x10/0x20
> [  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
> [  538.572333]  (t=131274 jiffies g=505 q=947)
> [  538.572701] Task dump for CPU 3:
> [  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  538.573854] Call trace:
> [  538.574070]  dump_backtrace+0x0/0x198
> [  538.574402]  show_stack+0x14/0x60
> [  538.574696]  sched_show_task+0x148/0x168
> [  538.575044]  dump_cpu_task+0x40/0x4c
> [  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
> [  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
> [  538.576109]  update_process_times+0x94/0xd8
> [  538.576477]  tick_sched_handle.isra.0+0x30/0x50
> [  538.576878]  tick_sched_timer+0x48/0x98
> [  538.577216]  __hrtimer_run_queues+0x110/0x1b0
> [  538.577599]  hrtimer_interrupt+0xe4/0x238
> [  538.577953]  arch_timer_handler_phys+0x28/0x40
> [  538.578346]  handle_percpu_devid_irq+0x80/0x130
> [  538.578745]  generic_handle_domain_irq+0x38/0x58
> [  538.579151]  gic_handle_irq+0x4c/0x110
> [  538.579487]  call_on_irq_stack+0x28/0x3c
> [  538.579833]  do_interrupt_handler+0x78/0x80
> [  538.580201]  el1_interrupt+0x34/0x80
> [  538.580518]  el1h_64_irq_handler+0x14/0x20
> [  538.580880]  el1h_64_irq+0x74/0x78
> [  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
> [  538.581603]  _raw_spin_lock+0x5c/0x68
> [  538.581927]  panfrost_job_run+0x24c/0x3f8
> [  538.582283]  drm_sched_main+0x130/0x390
> [  538.582623]  kthread+0x174/0x180
> [  538.582910]  ret_from_fork+0x10/0x20
> [  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
> [  601.584330]  (t=147027 jiffies g=505 q=1018)
> [  601.584706] Task dump for CPU 3:
> [  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  601.585859] Call trace:
> [  601.586075]  dump_backtrace+0x0/0x198
> [  601.586406]  show_stack+0x14/0x60
> [  601.586701]  sched_show_task+0x148/0x168
> [  601.587048]  dump_cpu_task+0x40/0x4c
> [  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
> [  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
> [  601.588112]  update_process_times+0x94/0xd8
> [  601.588480]  tick_sched_handle.isra.0+0x30/0x50
> [  601.588880]  tick_sched_timer+0x48/0x98
> [  601.589218]  __hrtimer_run_queues+0x110/0x1b0
> [  601.589602]  hrtimer_interrupt+0xe4/0x238
> [  601.589956]  arch_timer_handler_phys+0x28/0x40
> [  601.590348]  handle_percpu_devid_irq+0x80/0x130
> [  601.590747]  generic_handle_domain_irq+0x38/0x58
> [  601.591153]  gic_handle_irq+0x4c/0x110
> [  601.591486]  call_on_irq_stack+0x28/0x3c
> [  601.591832]  do_interrupt_handler+0x78/0x80
> [  601.592201]  el1_interrupt+0x34/0x80
> [  601.592517]  el1h_64_irq_handler+0x14/0x20
> [  601.592879]  el1h_64_irq+0x74/0x78
> [  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
> [  601.593603]  _raw_spin_lock+0x5c/0x68
> [  601.593927]  panfrost_job_run+0x24c/0x3f8
> [  601.594283]  drm_sched_main+0x130/0x390
> [  601.594623]  kthread+0x174/0x180
> [  601.594910]  ret_from_fork+0x10/0x20
> [  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
> [  664.596333]  (t=162780 jiffies g=505 q=1086)
> [  664.596709] Task dump for CPU 3:
> [  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  664.597862] Call trace:
> [  664.598078]  dump_backtrace+0x0/0x198
> [  664.598409]  show_stack+0x14/0x60
> [  664.598704]  sched_show_task+0x148/0x168
> [  664.599052]  dump_cpu_task+0x40/0x4c
> [  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
> [  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
> [  664.600114]  update_process_times+0x94/0xd8
> [  664.600482]  tick_sched_handle.isra.0+0x30/0x50
> [  664.600882]  tick_sched_timer+0x48/0x98
> [  664.601220]  __hrtimer_run_queues+0x110/0x1b0
> [  664.601604]  hrtimer_interrupt+0xe4/0x238
> [  664.601958]  arch_timer_handler_phys+0x28/0x40
> [  664.602352]  handle_percpu_devid_irq+0x80/0x130
> [  664.602751]  generic_handle_domain_irq+0x38/0x58
> [  664.603158]  gic_handle_irq+0x4c/0x110
> [  664.603491]  call_on_irq_stack+0x28/0x3c
> [  664.603838]  do_interrupt_handler+0x78/0x80
> [  664.604206]  el1_interrupt+0x34/0x80
> [  664.604522]  el1h_64_irq_handler+0x14/0x20
> [  664.604883]  el1h_64_irq+0x74/0x78
> [  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
> [  664.605609]  _raw_spin_lock+0x5c/0x68
> [  664.605934]  panfrost_job_run+0x24c/0x3f8
> [  664.606290]  drm_sched_main+0x130/0x390
> [  664.606631]  kthread+0x174/0x180
> [  664.606918]  ret_from_fork+0x10/0x20
> [  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
> [  727.608331]  (t=178533 jiffies g=505 q=1152)
> [  727.608706] Task dump for CPU 3:
> [  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  727.609858] Call trace:
> [  727.610074]  dump_backtrace+0x0/0x198
> [  727.610403]  show_stack+0x14/0x60
> [  727.610698]  sched_show_task+0x148/0x168
> [  727.611047]  dump_cpu_task+0x40/0x4c
> [  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
> [  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
> [  727.612112]  update_process_times+0x94/0xd8
> [  727.612479]  tick_sched_handle.isra.0+0x30/0x50
> [  727.612879]  tick_sched_timer+0x48/0x98
> [  727.613216]  __hrtimer_run_queues+0x110/0x1b0
> [  727.613601]  hrtimer_interrupt+0xe4/0x238
> [  727.613955]  arch_timer_handler_phys+0x28/0x40
> [  727.614348]  handle_percpu_devid_irq+0x80/0x130
> [  727.614748]  generic_handle_domain_irq+0x38/0x58
> [  727.615154]  gic_handle_irq+0x4c/0x110
> [  727.615485]  call_on_irq_stack+0x28/0x3c
> [  727.615832]  do_interrupt_handler+0x78/0x80
> [  727.616200]  el1_interrupt+0x34/0x80
> [  727.616517]  el1h_64_irq_handler+0x14/0x20
> [  727.616879]  el1h_64_irq+0x74/0x78
> [  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
> [  727.617602]  _raw_spin_lock+0x5c/0x68
> [  727.617926]  panfrost_job_run+0x24c/0x3f8
> [  727.618282]  drm_sched_main+0x130/0x390
> [  727.618621]  kthread+0x174/0x180
> [  727.618908]  ret_from_fork+0x10/0x20
> [  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
> [  790.620331]  (t=194286 jiffies g=505 q=1219)
> [  790.620708] Task dump for CPU 3:
> [  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  790.621860] Call trace:
> [  790.622075]  dump_backtrace+0x0/0x198
> [  790.622405]  show_stack+0x14/0x60
> [  790.622699]  sched_show_task+0x148/0x168
> [  790.623049]  dump_cpu_task+0x40/0x4c
> [  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
> [  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
> [  790.624113]  update_process_times+0x94/0xd8
> [  790.624481]  tick_sched_handle.isra.0+0x30/0x50
> [  790.624880]  tick_sched_timer+0x48/0x98
> [  790.625218]  __hrtimer_run_queues+0x110/0x1b0
> [  790.625603]  hrtimer_interrupt+0xe4/0x238
> [  790.625957]  arch_timer_handler_phys+0x28/0x40
> [  790.626350]  handle_percpu_devid_irq+0x80/0x130
> [  790.626752]  generic_handle_domain_irq+0x38/0x58
> [  790.627158]  gic_handle_irq+0x4c/0x110
> [  790.627493]  call_on_irq_stack+0x28/0x3c
> [  790.627839]  do_interrupt_handler+0x78/0x80
> [  790.628208]  el1_interrupt+0x34/0x80
> [  790.628526]  el1h_64_irq_handler+0x14/0x20
> [  790.628888]  el1h_64_irq+0x74/0x78
> [  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
> [  790.629613]  _raw_spin_lock+0x5c/0x68
> [  790.629937]  panfrost_job_run+0x24c/0x3f8
> [  790.630292]  drm_sched_main+0x130/0x390
> [  790.630632]  kthread+0x174/0x180
> [  790.630919]  ret_from_fork+0x10/0x20
> [  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
> [  853.632340]  (t=210039 jiffies g=505 q=1318)
> [  853.632716] Task dump for CPU 3:
> [  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  853.633869] Call trace:
> [  853.634084]  dump_backtrace+0x0/0x198
> [  853.634418]  show_stack+0x14/0x60
> [  853.634712]  sched_show_task+0x148/0x168
> [  853.635061]  dump_cpu_task+0x40/0x4c
> [  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
> [  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
> [  853.636124]  update_process_times+0x94/0xd8
> [  853.636492]  tick_sched_handle.isra.0+0x30/0x50
> [  853.636892]  tick_sched_timer+0x48/0x98
> [  853.637230]  __hrtimer_run_queues+0x110/0x1b0
> [  853.637613]  hrtimer_interrupt+0xe4/0x238
> [  853.637965]  arch_timer_handler_phys+0x28/0x40
> [  853.638358]  handle_percpu_devid_irq+0x80/0x130
> [  853.638760]  generic_handle_domain_irq+0x38/0x58
> [  853.639166]  gic_handle_irq+0x4c/0x110
> [  853.639499]  call_on_irq_stack+0x28/0x3c
> [  853.639845]  do_interrupt_handler+0x78/0x80
> [  853.640213]  el1_interrupt+0x34/0x80
> [  853.640530]  el1h_64_irq_handler+0x14/0x20
> [  853.640892]  el1h_64_irq+0x74/0x78
> [  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
> [  853.641616]  _raw_spin_lock+0x5c/0x68
> [  853.641940]  panfrost_job_run+0x24c/0x3f8
> [  853.642295]  drm_sched_main+0x130/0x390
> [  853.642634]  kthread+0x174/0x180
> [  853.642921]  ret_from_fork+0x10/0x20
> [  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
> [  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
> [  916.644339]  (t=225792 jiffies g=505 q=1390)
> [  916.644715] Task dump for CPU 3:
> [  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
> [  916.645868] Call trace:
> [  916.646083]  dump_backtrace+0x0/0x198
> [  916.646414]  show_stack+0x14/0x60
> [  916.646708]  sched_show_task+0x148/0x168
> [  916.647055]  dump_cpu_task+0x40/0x4c
> [  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
> [  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
> [  916.648119]  update_process_times+0x94/0xd8
> [  916.648488]  tick_sched_handle.isra.0+0x30/0x50
> [  916.648887]  tick_sched_timer+0x48/0x98
> [  916.649225]  __hrtimer_run_queues+0x110/0x1b0
> [  916.649608]  hrtimer_interrupt+0xe4/0x238
> [  916.649962]  arch_timer_handler_phys+0x28/0x40
> [  916.650355]  handle_percpu_devid_irq+0x80/0x130
> [  916.650756]  generic_handle_domain_irq+0x38/0x58
> [  916.651162]  gic_handle_irq+0x4c/0x110
> [  916.651495]  call_on_irq_stack+0x28/0x3c
> [  916.651842]  do_interrupt_handler+0x78/0x80
> [  916.652210]  el1_interrupt+0x34/0x80
> [  916.652527]  el1h_64_irq_handler+0x14/0x20
> [  916.652889]  el1h_64_irq+0x74/0x78
> [  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
> [  916.653614]  _raw_spin_lock+0x5c/0x68
> [  916.653937]  panfrost_job_run+0x24c/0x3f8
> [  916.654293]  drm_sched_main+0x130/0x390
> [  916.654632]  kthread+0x174/0x180
> [  916.654920]  ret_from_fork+0x10/0x20
> 
> -- 
> Pengutronix e.K.                           |                             |
> Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
> 31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
> Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
  2021-11-15 14:08       ` Daniel Vetter
  (?)
@ 2021-11-15 20:32         ` Christian König
  -1 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-11-15 20:32 UTC (permalink / raw)
  To: Daniel Vetter, Sascha Hauer
  Cc: linaro-mm-sig, intel-gfx, dri-devel, linux-media

On 15.11.21 15:08, Daniel Vetter wrote:
> On Mon, Nov 15, 2021 at 03:03:53PM +0100, Sascha Hauer wrote:
>> Hi,
>>
>> On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
>>> Simplifying the code a bit.
>>>
>>> v2: use dma_resv_for_each_fence
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>>>   1 file changed, 6 insertions(+), 20 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>>> index 042c16b5d54a..5bc5f775abe1 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>>>   					    struct drm_gem_object *obj,
>>>   					    bool write)
>>>   {
>>> +	struct dma_resv_iter cursor;
>>> +	struct dma_fence *fence;
>>>   	int ret;
>>> -	struct dma_fence **fences;
>>> -	unsigned int i, fence_count;
>>> -
>>> -	if (!write) {
>>> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
>>> -
>>> -		return drm_sched_job_add_dependency(job, fence);
>>> -	}
>>> -
>>> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
>>> -	if (ret || !fence_count)
>>> -		return ret;
>>>   
>>> -	for (i = 0; i < fence_count; i++) {
>>> -		ret = drm_sched_job_add_dependency(job, fences[i]);
>>> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
>>> +		ret = drm_sched_job_add_dependency(job, fence);
>>>   		if (ret)
>>> -			break;
>>> +			return ret;
>>>   	}
>>> -
>>> -	for (; i < fence_count; i++)
>>> -		dma_fence_put(fences[i]);
>>> -	kfree(fences);
>>> -	return ret;
>>> +	return 0;
>>>   }
>>>   EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>>>   
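As additional context for the hunk above: dma_resv_for_each_fence() is the
locked iterator variant, so the reservation object is expected to be held
while the fences are walked, and passing "write" as the all_fences argument
keeps the old semantics (readers depend only on the exclusive fence, writers
on the shared fences as well). Below is a minimal sketch of the same
pattern; the function name and the counting use case are invented purely
for illustration and are not taken from the patch:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/*
 * Illustration only: count the not-yet-signaled fences of a reservation
 * object. With all_fences == false only the exclusive fence is visited,
 * with all_fences == true the shared fences are included as well.
 */
static unsigned int count_unsignaled_fences(struct dma_resv *resv,
					    bool all_fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_assert_held(resv);	/* locked variant: resv must be held */

	dma_resv_for_each_fence(&cursor, resv, all_fences, fence) {
		if (!dma_fence_is_signaled(fence))
			count++;
	}

	return count;
}

The walk-and-bail-out-on-error structure in the patch follows the same
shape, just with drm_sched_job_add_dependency() in the loop body.
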
>> This patch makes the panfrost driver explode on v5.16-rc1 with the
>> output below. I didn't bisect it, but it goes away when I revert this
>> patch. I only started weston, nothing more.
>>
>> Any idea what goes wrong here?
> Should be fixed in 13e9e30cafea1, but Christian pushed it to the wrong
> branch so it missed -rc1.
>
> Christian, this needs to go into drm-misc-fixes, pls cherry-pick it over.

The problem is that it doesn't apply to drm-misc-fixes. It looks like the
branch wasn't updated.

What's going on here?

Christian.

> -Daniel
>
>> Sascha
>>
>> [   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
>> [   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   12.514056] Modules linked in:
>> [   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
>> [   12.514621] ------------[ cut here ]------------
>> [   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.515049] pc : dma_fence_release+0xac/0xe8
>> [   12.515056] lr : dma_fence_release+0xac/0xe8
>> [   12.515061] sp : ffff8000123ebb20
>> [   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
>> [   12.515518] refcount_t: addition on 0; use-after-free.
>> [   12.516015]  x27: 0000000000000000
>> [   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
>> [   12.516992] x26: 0000000000000001
>> [   12.517366] Modules linked in:
>> [   12.517654]  x25: ffff000004b051c0
>> [   12.518108]
>> [   12.518555]  x24: 0000000000000000
>> [   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
>> [   12.519576]
>> [   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.520133] x23: 0000000000000000
>> [   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.520559]  x22: ffff800010d41b78
>> [   12.520856] pc : refcount_warn_saturate+0x98/0x140
>> [   12.521625]  x21: ffff000004b05050
>> [   12.521755] lr : refcount_warn_saturate+0x98/0x140
>> [   12.522299]
>> [   12.522588] sp : ffff8000122b3bc0
>> [   12.523192] x20: ffff000004b05040
>> [   12.523489] x29: ffff8000122b3bc0
>> [   12.523906]  x19: ffff000004b05078
>> [   12.524203]  x28: 0000000000000000
>> [   12.524620]  x18: 0000000000000010
>> [   12.524751]  x27: ffff000003791880
>> [   12.525040]
>> [   12.525329]
>> [   12.525618] x17: 0000000000000000
>> [   12.525915] x26: ffff8000122b3d30
>> [   12.526212]  x16: 0000000000000000
>> [   12.526509]  x25: 0000000000000001
>> [   12.526806]  x15: ffff0000050e2dc0
>> [   12.526937]  x24: ffff000003791a10
>> [   12.527067]
>> [   12.527357]
>> [   12.527646] x14: 00000000000001b5
>> [   12.527942] x23: 0000000000000000
>> [   12.528240]  x13: ffff0000050e2dc0
>> [   12.528536]  x22: ffff000003505280
>> [   12.528833]  x12: 00000000ffffffea
>> [   12.528964]  x21: ffff000003a2a220
>> [   12.529095]
>> [   12.529384]
>> [   12.529673] x11: ffff800011761ec8
>> [   12.529970] x20: ffff000004b05078
>> [   12.530267]  x10: ffff8000115e1e88
>> [   12.530564]  x19: ffff000004b05000
>> [   12.530861]  x9 : ffff8000115e1ee0
>> [   12.530992]  x18: 0000000000000010
>> [   12.531123]
>> [   12.531412]
>> [   12.531701] x8 : 000000000017ffe8
>> [   12.531998] x17: 0000000000500600
>> [   12.532294]  x7 : c0000000fffeffff
>> [   12.532591]  x16: 0000000000000000
>> [   12.532888]  x6 : 0000000000000001
>> [   12.533019]  x15: ffff000003505700
>> [   12.533150]
>> [   12.533439]
>> [   12.533728] x5 : ffff00007fb8c9a0
>> [   12.534025] x14: 0000000000000000
>> [   12.534322]  x4 : 0000000000000000
>> [   12.534619]  x13: 292d2d3d45505954
>> [   12.534914]  x3 : 0000000000000001
>> [   12.535045]  x12: 4220534253532d20
>> [   12.535176]
>> [   12.535465]
>> [   12.535754] x2 : ffff00007fb8c9a8
>> [   12.536051] x11: 5449442d204f4354
>> [   12.536347]  x1 : ea6e0584a53f2200
>> [   12.536643]  x10: 2d204f41552d204e
>> [   12.536941]  x0 : 0000000000000000
>> [   12.537073]  x9 : 4e41502b20666961
>> [   12.537203]
>> [   12.537492]
>> [   12.537782] Call trace:
>> [   12.538078] x8 : 642076635a6e2820
>> [   12.538377]  dma_fence_release+0xac/0xe8
>> [   12.538671]  x7 : 205d343430353135
>> [   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.539098]  x6 : 352e32312020205b
>> [   12.539230]  panfrost_job_push+0x1bc/0x200
>> [   12.539442]
>> [   12.539732]  panfrost_ioctl_submit+0x358/0x438
>> [   12.540073] x5 : ffff00007fb539a0
>> [   12.540370]  drm_ioctl_kernel+0xb8/0x170
>> [   12.540771]  x4 : 0000000000000000
>> [   12.541069]  drm_ioctl+0x214/0x450
>> [   12.541424]  x3 : 0000000000000001
>> [   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.541943]
>> [   12.542233]  invoke_syscall+0x40/0xf8
>> [   12.542573] x2 : ffff00007fb539a8
>> [   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.543167]  x1 : 0ac4fb7a0680bb00
>> [   12.543465]  do_el0_svc+0x20/0x80
>> [   12.543805]  x0 : 0000000000000000
>> [   12.543936]  el0_svc+0x1c/0x50
>> [   12.544255]
>> [   12.544544]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.544955] Call trace:
>> [   12.545250]  el0t_64_sync+0x16c/0x170
>> [   12.545540]  refcount_warn_saturate+0x98/0x140
>> [   12.545837] ---[ end trace ba74542f51246288 ]---
>> [   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
>> [   12.546285] ------------[ cut here ]------------
>> [   12.546598]  drm_sched_job_done_cb+0x10/0x18
>> [   12.546813] refcount_t: underflow; use-after-free.
>> [   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
>> [   12.547920]  dma_fence_signal_locked+0x20/0x30
>> [   12.548336] Modules linked in:
>> [   12.548737]  panfrost_job_handle_done+0x34/0x50
>> [   12.549110]
>> [   12.549525]  panfrost_job_handle_irqs+0x358/0x570
>> [   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
>> [   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.551373]  irq_thread_fn+0x28/0x98
>> [   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.551899]  irq_thread+0x12c/0x230
>> [   12.552309] pc : refcount_warn_saturate+0xec/0x140
>> [   12.553131]  kthread+0x174/0x180
>> [   12.553578] lr : refcount_warn_saturate+0xec/0x140
>> [   12.554121]  ret_from_fork+0x10/0x20
>> [   12.554432] sp : ffff8000123ebaa0
>> [   12.555038] ---[ end trace ba74542f51246289 ]---
>> [   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
>> [   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
>> [   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
>> [   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
>> [   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
>> [   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.563733] Call trace:
>> [   12.563950]  refcount_warn_saturate+0xec/0x140
>> [   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
>> [   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   12.565216]  dma_fence_release+0xd4/0xe8
>> [   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.565970]  panfrost_job_push+0x1bc/0x200
>> [   12.566333]  panfrost_ioctl_submit+0x358/0x438
>> [   12.566726]  drm_ioctl_kernel+0xb8/0x170
>> [   12.567072]  drm_ioctl+0x214/0x450
>> [   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.567721]  invoke_syscall+0x40/0xf8
>> [   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.568463]  do_el0_svc+0x20/0x80
>> [   12.568755]  el0_svc+0x1c/0x50
>> [   12.569030]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.569399]  el0t_64_sync+0x16c/0x170
>> [   12.569724] ---[ end trace ba74542f5124628a ]---
>> [   12.595086] ------------[ cut here ]------------
>> [   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
>> [   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   12.596934] Modules linked in:
>> [   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.599202] pc : dma_fence_release+0xac/0xe8
>> [   12.599584] lr : dma_fence_release+0xac/0xe8
>> [   12.599960] sp : ffff8000123ebb20
>> [   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
>> [   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.606542] Call trace:
>> [   12.606760]  dma_fence_release+0xac/0xe8
>> [   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.607517]  panfrost_job_push+0x1bc/0x200
>> [   12.607882]  panfrost_ioctl_submit+0x358/0x438
>> [   12.608274]  drm_ioctl_kernel+0xb8/0x170
>> [   12.608622]  drm_ioctl+0x214/0x450
>> [   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.609269]  invoke_syscall+0x40/0xf8
>> [   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.610011]  do_el0_svc+0x20/0x80
>> [   12.610304]  el0_svc+0x1c/0x50
>> [   12.610577]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.610946]  el0t_64_sync+0x16c/0x170
>> [   12.611276] ---[ end trace ba74542f5124628b ]---
>> [   12.612869] ------------[ cut here ]------------
>> [   12.613288] refcount_t: saturated; leaking memory.
>> [   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
>> [   12.614476] Modules linked in:
>> [   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.616773] pc : refcount_warn_saturate+0xc0/0x140
>> [   12.617200] lr : refcount_warn_saturate+0xc0/0x140
>> [   12.617622] sp : ffff8000123eba60
>> [   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
>> [   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
>> [   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
>> [   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
>> [   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
>> [   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.624256] Call trace:
>> [   12.624474]  refcount_warn_saturate+0xc0/0x140
>> [   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
>> [   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
>> [   12.625811]  drm_atomic_helper_commit+0x80/0x360
>> [   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
>> [   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
>> [   12.627050]  drm_ioctl_kernel+0xb8/0x170
>> [   12.627397]  drm_ioctl+0x214/0x450
>> [   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.628046]  invoke_syscall+0x40/0xf8
>> [   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.628787]  do_el0_svc+0x20/0x80
>> [   12.629079]  el0_svc+0x1c/0x50
>> [   12.629354]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.629723]  el0t_64_sync+0x16c/0x170
>> [   12.630048] ---[ end trace ba74542f5124628c ]---
>> [   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
>> [   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
>> [   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
>> [   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
>> [   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
>> [   13.296693] ------------[ cut here ]------------
>> [   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
>> [   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.298560] Modules linked in:
>> [   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.300830] pc : dma_fence_release+0xac/0xe8
>> [   13.301208] lr : dma_fence_release+0xac/0xe8
>> [   13.301585] sp : ffff8000123ebb20
>> [   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.308149] Call trace:
>> [   13.308367]  dma_fence_release+0xac/0xe8
>> [   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.309119]  panfrost_job_push+0x1bc/0x200
>> [   13.309483]  panfrost_ioctl_submit+0x358/0x438
>> [   13.309875]  drm_ioctl_kernel+0xb8/0x170
>> [   13.310221]  drm_ioctl+0x214/0x450
>> [   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.310868]  invoke_syscall+0x40/0xf8
>> [   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.311609]  do_el0_svc+0x20/0x80
>> [   13.311903]  el0_svc+0x1c/0x50
>> [   13.312177]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.312545]  el0t_64_sync+0x16c/0x170
>> [   13.312869] ---[ end trace ba74542f5124628d ]---
>> [   13.340454] ------------[ cut here ]------------
>> [   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
>> [   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.342318] Modules linked in:
>> [   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.344584] pc : dma_fence_release+0xac/0xe8
>> [   13.344961] lr : dma_fence_release+0xac/0xe8
>> [   13.345338] sp : ffff8000123ebb20
>> [   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
>> [   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.351900] Call trace:
>> [   13.352116]  dma_fence_release+0xac/0xe8
>> [   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.352869]  panfrost_job_push+0x1bc/0x200
>> [   13.353232]  panfrost_ioctl_submit+0x358/0x438
>> [   13.353624]  drm_ioctl_kernel+0xb8/0x170
>> [   13.353971]  drm_ioctl+0x214/0x450
>> [   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.354616]  invoke_syscall+0x40/0xf8
>> [   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.355356]  do_el0_svc+0x20/0x80
>> [   13.355650]  el0_svc+0x1c/0x50
>> [   13.355925]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.356293]  el0t_64_sync+0x16c/0x170
>> [   13.356618] ---[ end trace ba74542f5124628e ]---
>> [   13.379841] ------------[ cut here ]------------
>> [   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
>> [   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.381680] Modules linked in:
>> [   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.383937] pc : dma_fence_release+0xac/0xe8
>> [   13.384314] lr : dma_fence_release+0xac/0xe8
>> [   13.384690] sp : ffff8000123ebb20
>> [   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.391247] Call trace:
>> [   13.391464]  dma_fence_release+0xac/0xe8
>> [   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.392217]  panfrost_job_push+0x1bc/0x200
>> [   13.392581]  panfrost_ioctl_submit+0x358/0x438
>> [   13.392972]  drm_ioctl_kernel+0xb8/0x170
>> [   13.393319]  drm_ioctl+0x214/0x450
>> [   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.393967]  invoke_syscall+0x40/0xf8
>> [   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.394708]  do_el0_svc+0x20/0x80
>> [   13.395002]  el0_svc+0x1c/0x50
>> [   13.395275]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.395643]  el0t_64_sync+0x16c/0x170
>> [   13.395968] ---[ end trace ba74542f5124628f ]---
>> [   13.398130] ------------[ cut here ]------------
>> [   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
>> [   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.400011] Modules linked in:
>> [   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.402269] pc : dma_fence_release+0xac/0xe8
>> [   13.402646] lr : dma_fence_release+0xac/0xe8
>> [   13.403024] sp : ffff8000123ebb20
>> [   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
>> [   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
>> [   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
>> [   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
>> [   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
>> [   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
>> [   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
>> [   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.409583] Call trace:
>> [   13.409800]  dma_fence_release+0xac/0xe8
>> [   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.410553]  panfrost_job_push+0x1bc/0x200
>> [   13.410917]  panfrost_ioctl_submit+0x358/0x438
>> [   13.411309]  drm_ioctl_kernel+0xb8/0x170
>> [   13.411656]  drm_ioctl+0x214/0x450
>> [   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.412303]  invoke_syscall+0x40/0xf8
>> [   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.413042]  do_el0_svc+0x20/0x80
>> [   13.413335]  el0_svc+0x1c/0x50
>> [   13.413607]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.413976]  el0t_64_sync+0x16c/0x170
>> [   13.414298] ---[ end trace ba74542f51246290 ]---
>> [   13.430129] ------------[ cut here ]------------
>> [   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
>> [   13.430557] refcount_t: saturated; leaking memory.
>> [   13.431321] Mem abort info:
>> [   13.431324]   ESR = 0x96000044
>> [   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
>> [   13.431330]   SET = 0, FnV = 0
>> [   13.431333]   EA = 0, S1PTW = 0
>> [   13.431335]   FSC = 0x04: level 0 translation fault
>> [   13.431337] Data abort info:
>> [   13.431339]   ISV = 0, ISS = 0x00000044
>> [   13.431340]   CM = 0, WnR = 1
>> [   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
>> [   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
>> [   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
>> [   13.431359] Modules linked in:
>> [   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
>> [   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
>> [   13.432059] lr : dma_fence_signal+0x30/0x60
>> [   13.432327] Modules linked in:
>> [   13.432789] sp : ffff8000122b3b50
>> [   13.433057]
>> [   13.433331] x29: ffff8000122b3b50
>> [   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.434008]  x28: 0000000000000000
>> [   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.434601]  x27: ffff000003791880
>> [   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.435751]
>> [   13.435753] x26: ffff8000122b3d30
>> [   13.436237] pc : refcount_warn_saturate+0x6c/0x140
>> [   13.436504]  x25: 0000000000000001
>> [   13.437393] lr : refcount_warn_saturate+0x6c/0x140
>> [   13.437938]  x24: ffff000003791a10
>> [   13.438542] sp : ffff8000123ebb40
>> [   13.439042]
>> [   13.439767] x29: ffff8000123ebb40
>> [   13.440130] x23: 0000000000000000
>> [   13.440398]  x28: ffff8000123ebd58
>> [   13.440687]  x22: ffff000003505280
>> [   13.440819]  x27: 0000000000000000
>> [   13.441108]  x21: ffff8000122b3b88
>> [   13.441931]
>> [   13.442228]
>> [   13.442773] x26: 0000000000000001
>> [   13.443070] x20: ffff000004b051c0
>> [   13.443674]  x25: ffff000004b051c0
>> [   13.443806]  x19: ffff000004b051c0
>> [   13.444095]  x24: 0000000000000000
>> [   13.444513]  x18: 0000000000000000
>> [   13.444811]
>> [   13.445227]
>> [   13.445524] x23: 0000000000000000
>> [   13.445814] x17: 3837783028203032
>> [   13.445945]  x22: ffff000004b051c0
>> [   13.446236]  x16: 3139323835323120
>> [   13.446525]  x21: ffff000004d73100
>> [   13.446822]  x15: 00000205aa24947a
>> [   13.447120]
>> [   13.447417]
>> [   13.447715] x20: ffff000004b05400
>> [   13.447846] x14: 0000000000000326
>> [   13.447977]  x19: 00000000ffffffff
>> [   13.448266]  x13: 0000000000000000
>> [   13.448555]  x18: 0000000000000010
>> [   13.448851]  x12: 0000000000000000
>> [   13.449148]
>> [   13.449446]
>> [   13.449743] x17: 0000000000000000
>> [   13.449874] x11: 0000000000000001
>> [   13.450006]  x16: 0000000000000000
>> [   13.450296]  x10: ffff8000122b3d48
>> [   13.450585]  x15: 000060978994e822
>> [   13.450882]  x9 : 00000000000019e0
>> [   13.451179]
>> [   13.451477]
>> [   13.451774] x14: 00000000000000b6
>> [   13.451905] x8 : ffff8000122b3d78
>> [   13.452037]  x13: 00000000000000b6
>> [   13.452326]  x7 : 0000000000000000
>> [   13.452614]  x12: 0000000000000000
>> [   13.452912]  x6 : 000000001fcf847e
>> [   13.453209]
>> [   13.453506]
>> [   13.453803] x11: 0000000000000001
>> [   13.453934] x5 : 00ffffffffffffff
>> [   13.454066]  x10: 00000000000009a0
>> [   13.454356]  x4 : 0015ef3c03fd7c00
>> [   13.454643]  x9 : ffff8000123eb8c0
>> [   13.454941]  x3 : 0000000000000018
>> [   13.455238]
>> [   13.455536]
>> [   13.455833] x8 : ffff0000050e3340
>> [   13.455965] x2 : ffff000004b051f0
>> [   13.456096]  x7 : ffff00007fb92a80
>> [   13.456386]  x1 : 000000032053be4d
>> [   13.456676]  x6 : 0000000000000115
>> [   13.456973]  x0 : 0000000000000000
>> [   13.457271]
>> [   13.457568]
>> [   13.457866] x5 : 0000000000000000
>> [   13.457998] Call trace:
>> [   13.458128]  x4 : ffff00007fb8c9a0
>> [   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
>> [   13.458707]  x3 : ffff00007fb8f950
>> [   13.459005]  dma_fence_signal+0x30/0x60
>> [   13.459302]
>> [   13.459600]  drm_sched_fence_finished+0x10/0x18
>> [   13.459897] x2 : ffff00007fb8c9a0
>> [   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
>> [   13.460159]  x1 : ea6e0584a53f2200
>> [   13.460449]  drm_sched_job_done_cb+0x10/0x18
>> [   13.460738]  x0 : 0000000000000000
>> [   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   13.461333]
>> [   13.461631]  dma_fence_signal_locked+0x20/0x30
>> [   13.461929] Call trace:
>> [   13.462060]  panfrost_job_handle_done+0x34/0x50
>> [   13.462192]  refcount_warn_saturate+0x6c/0x140
>> [   13.462481]  panfrost_job_handle_irqs+0x358/0x570
>> [   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
>> [   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
>> [   13.463462]  panfrost_job_push+0x1bc/0x200
>> [   13.463760]  irq_thread_fn+0x28/0x98
>> [   13.464094]  panfrost_ioctl_submit+0x358/0x438
>> [   13.464225]  irq_thread+0x12c/0x230
>> [   13.464620]  drm_ioctl_kernel+0xb8/0x170
>> [   13.464909]  kthread+0x174/0x180
>> [   13.465319]  drm_ioctl+0x214/0x450
>> [   13.465617]  ret_from_fork+0x10/0x20
>> [   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
>> [   13.466756]  invoke_syscall+0x40/0xf8
>> [   13.466891] ---[ end trace ba74542f51246291 ]---
>> [   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
>> [   13.467883]  do_el0_svc+0x20/0x80
>> [   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
>> [   13.468678]  el0_svc+0x1c/0x50
>> [   13.475908]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.476277]  el0t_64_sync+0x16c/0x170
>> [   13.476601] ---[ end trace ba74542f51246292 ]---
>> [   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
>> [   14.462959] sched: RT throttling activated
>> [   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
>> [   34.476312]  (t=5250 jiffies g=505 q=301)
>> [   34.476667] Task dump for CPU 3:
>> [   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [   34.477820] Call trace:
>> [   34.478035]  dump_backtrace+0x0/0x198
>> [   34.478365]  show_stack+0x14/0x60
>> [   34.478659]  sched_show_task+0x148/0x168
>> [   34.479008]  dump_cpu_task+0x40/0x4c
>> [   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
>> [   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
>> [   34.480072]  update_process_times+0x94/0xd8
>> [   34.480440]  tick_sched_handle.isra.0+0x30/0x50
>> [   34.480840]  tick_sched_timer+0x48/0x98
>> [   34.481178]  __hrtimer_run_queues+0x110/0x1b0
>> [   34.481562]  hrtimer_interrupt+0xe4/0x238
>> [   34.481917]  arch_timer_handler_phys+0x28/0x40
>> [   34.482310]  handle_percpu_devid_irq+0x80/0x130
>> [   34.482710]  generic_handle_domain_irq+0x38/0x58
>> [   34.483116]  gic_handle_irq+0x4c/0x110
>> [   34.483450]  call_on_irq_stack+0x28/0x3c
>> [   34.483798]  do_interrupt_handler+0x78/0x80
>> [   34.484166]  el1_interrupt+0x34/0x80
>> [   34.484484]  el1h_64_irq_handler+0x14/0x20
>> [   34.484846]  el1h_64_irq+0x74/0x78
>> [   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
>> [   34.485568]  _raw_spin_lock+0x5c/0x68
>> [   34.485895]  panfrost_job_run+0x24c/0x3f8
>> [   34.486250]  drm_sched_main+0x130/0x390
>> [   34.486591]  kthread+0x174/0x180
>> [   34.486878]  ret_from_fork+0x10/0x20
>> [   35.810989] vcc3v3_lcd1_n: disabling
>> [   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
>> [   97.488326]  (t=21003 jiffies g=505 q=379)
>> [   97.488687] Task dump for CPU 3:
>> [   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [   97.489842] Call trace:
>> [   97.490056]  dump_backtrace+0x0/0x198
>> [   97.490388]  show_stack+0x14/0x60
>> [   97.490682]  sched_show_task+0x148/0x168
>> [   97.491030]  dump_cpu_task+0x40/0x4c
>> [   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
>> [   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
>> [   97.492095]  update_process_times+0x94/0xd8
>> [   97.492463]  tick_sched_handle.isra.0+0x30/0x50
>> [   97.492862]  tick_sched_timer+0x48/0x98
>> [   97.493200]  __hrtimer_run_queues+0x110/0x1b0
>> [   97.493582]  hrtimer_interrupt+0xe4/0x238
>> [   97.493937]  arch_timer_handler_phys+0x28/0x40
>> [   97.494330]  handle_percpu_devid_irq+0x80/0x130
>> [   97.494730]  generic_handle_domain_irq+0x38/0x58
>> [   97.495136]  gic_handle_irq+0x4c/0x110
>> [   97.495473]  call_on_irq_stack+0x28/0x3c
>> [   97.495818]  do_interrupt_handler+0x78/0x80
>> [   97.496186]  el1_interrupt+0x34/0x80
>> [   97.496503]  el1h_64_irq_handler+0x14/0x20
>> [   97.496865]  el1h_64_irq+0x74/0x78
>> [   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
>> [   97.497588]  _raw_spin_lock+0x5c/0x68
>> [   97.497912]  panfrost_job_run+0x24c/0x3f8
>> [   97.498268]  drm_sched_main+0x130/0x390
>> [   97.498607]  kthread+0x174/0x180
>> [   97.498895]  ret_from_fork+0x10/0x20
>> [  140.108141] random: crng init done
>> [  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
>> [  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
>> [  160.500322]  (t=36756 jiffies g=505 q=482)
>> [  160.500684] Task dump for CPU 3:
>> [  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  160.501837] Call trace:
>> [  160.502054]  dump_backtrace+0x0/0x198
>> [  160.502384]  show_stack+0x14/0x60
>> [  160.502679]  sched_show_task+0x148/0x168
>> [  160.503027]  dump_cpu_task+0x40/0x4c
>> [  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  160.504091]  update_process_times+0x94/0xd8
>> [  160.504458]  tick_sched_handle.isra.0+0x30/0x50
>> [  160.504858]  tick_sched_timer+0x48/0x98
>> [  160.505195]  __hrtimer_run_queues+0x110/0x1b0
>> [  160.505580]  hrtimer_interrupt+0xe4/0x238
>> [  160.505934]  arch_timer_handler_phys+0x28/0x40
>> [  160.506327]  handle_percpu_devid_irq+0x80/0x130
>> [  160.506727]  generic_handle_domain_irq+0x38/0x58
>> [  160.507133]  gic_handle_irq+0x4c/0x110
>> [  160.507467]  call_on_irq_stack+0x28/0x3c
>> [  160.507813]  do_interrupt_handler+0x78/0x80
>> [  160.508181]  el1_interrupt+0x34/0x80
>> [  160.508497]  el1h_64_irq_handler+0x14/0x20
>> [  160.508858]  el1h_64_irq+0x74/0x78
>> [  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  160.509579]  _raw_spin_lock+0x5c/0x68
>> [  160.509903]  panfrost_job_run+0x24c/0x3f8
>> [  160.510259]  drm_sched_main+0x130/0x390
>> [  160.510599]  kthread+0x174/0x180
>> [  160.510886]  ret_from_fork+0x10/0x20
>> [  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
>> [  223.512325]  (t=52509 jiffies g=505 q=536)
>> [  223.512688] Task dump for CPU 3:
>> [  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  223.513842] Call trace:
>> [  223.514056]  dump_backtrace+0x0/0x198
>> [  223.514387]  show_stack+0x14/0x60
>> [  223.514681]  sched_show_task+0x148/0x168
>> [  223.515029]  dump_cpu_task+0x40/0x4c
>> [  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  223.516094]  update_process_times+0x94/0xd8
>> [  223.516462]  tick_sched_handle.isra.0+0x30/0x50
>> [  223.516860]  tick_sched_timer+0x48/0x98
>> [  223.517198]  __hrtimer_run_queues+0x110/0x1b0
>> [  223.517582]  hrtimer_interrupt+0xe4/0x238
>> [  223.517935]  arch_timer_handler_phys+0x28/0x40
>> [  223.518327]  handle_percpu_devid_irq+0x80/0x130
>> [  223.518727]  generic_handle_domain_irq+0x38/0x58
>> [  223.519133]  gic_handle_irq+0x4c/0x110
>> [  223.519466]  call_on_irq_stack+0x28/0x3c
>> [  223.519812]  do_interrupt_handler+0x78/0x80
>> [  223.520181]  el1_interrupt+0x34/0x80
>> [  223.520498]  el1h_64_irq_handler+0x14/0x20
>> [  223.520860]  el1h_64_irq+0x74/0x78
>> [  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  223.521584]  _raw_spin_lock+0x5c/0x68
>> [  223.521908]  panfrost_job_run+0x24c/0x3f8
>> [  223.522264]  drm_sched_main+0x130/0x390
>> [  223.522605]  kthread+0x174/0x180
>> [  223.522892]  ret_from_fork+0x10/0x20
>> [  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
>> [  286.524325]  (t=68262 jiffies g=505 q=612)
>> [  286.524687] Task dump for CPU 3:
>> [  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  286.525840] Call trace:
>> [  286.526057]  dump_backtrace+0x0/0x198
>> [  286.526387]  show_stack+0x14/0x60
>> [  286.526681]  sched_show_task+0x148/0x168
>> [  286.527029]  dump_cpu_task+0x40/0x4c
>> [  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  286.528092]  update_process_times+0x94/0xd8
>> [  286.528459]  tick_sched_handle.isra.0+0x30/0x50
>> [  286.528859]  tick_sched_timer+0x48/0x98
>> [  286.529197]  __hrtimer_run_queues+0x110/0x1b0
>> [  286.529579]  hrtimer_interrupt+0xe4/0x238
>> [  286.529933]  arch_timer_handler_phys+0x28/0x40
>> [  286.530326]  handle_percpu_devid_irq+0x80/0x130
>> [  286.530726]  generic_handle_domain_irq+0x38/0x58
>> [  286.531132]  gic_handle_irq+0x4c/0x110
>> [  286.531466]  call_on_irq_stack+0x28/0x3c
>> [  286.531812]  do_interrupt_handler+0x78/0x80
>> [  286.532180]  el1_interrupt+0x34/0x80
>> [  286.532496]  el1h_64_irq_handler+0x14/0x20
>> [  286.532857]  el1h_64_irq+0x74/0x78
>> [  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  286.533580]  _raw_spin_lock+0x5c/0x68
>> [  286.533904]  panfrost_job_run+0x24c/0x3f8
>> [  286.534259]  drm_sched_main+0x130/0x390
>> [  286.534600]  kthread+0x174/0x180
>> [  286.534887]  ret_from_fork+0x10/0x20
>> [  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
>> [  349.536324]  (t=84015 jiffies g=505 q=716)
>> [  349.536687] Task dump for CPU 3:
>> [  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  349.537839] Call trace:
>> [  349.538055]  dump_backtrace+0x0/0x198
>> [  349.538387]  show_stack+0x14/0x60
>> [  349.538681]  sched_show_task+0x148/0x168
>> [  349.539029]  dump_cpu_task+0x40/0x4c
>> [  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  349.540094]  update_process_times+0x94/0xd8
>> [  349.540462]  tick_sched_handle.isra.0+0x30/0x50
>> [  349.540862]  tick_sched_timer+0x48/0x98
>> [  349.541201]  __hrtimer_run_queues+0x110/0x1b0
>> [  349.541585]  hrtimer_interrupt+0xe4/0x238
>> [  349.541937]  arch_timer_handler_phys+0x28/0x40
>> [  349.542330]  handle_percpu_devid_irq+0x80/0x130
>> [  349.542730]  generic_handle_domain_irq+0x38/0x58
>> [  349.543136]  gic_handle_irq+0x4c/0x110
>> [  349.543469]  call_on_irq_stack+0x28/0x3c
>> [  349.543815]  do_interrupt_handler+0x78/0x80
>> [  349.544183]  el1_interrupt+0x34/0x80
>> [  349.544500]  el1h_64_irq_handler+0x14/0x20
>> [  349.544862]  el1h_64_irq+0x74/0x78
>> [  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  349.545586]  _raw_spin_lock+0x5c/0x68
>> [  349.545910]  panfrost_job_run+0x24c/0x3f8
>> [  349.546265]  drm_sched_main+0x130/0x390
>> [  349.546604]  kthread+0x174/0x180
>> [  349.546891]  ret_from_fork+0x10/0x20
>> [  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
>> [  412.548325]  (t=99768 jiffies g=505 q=784)
>> [  412.548686] Task dump for CPU 3:
>> [  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  412.549841] Call trace:
>> [  412.550058]  dump_backtrace+0x0/0x198
>> [  412.550389]  show_stack+0x14/0x60
>> [  412.550684]  sched_show_task+0x148/0x168
>> [  412.551031]  dump_cpu_task+0x40/0x4c
>> [  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  412.552095]  update_process_times+0x94/0xd8
>> [  412.552463]  tick_sched_handle.isra.0+0x30/0x50
>> [  412.552863]  tick_sched_timer+0x48/0x98
>> [  412.553201]  __hrtimer_run_queues+0x110/0x1b0
>> [  412.553583]  hrtimer_interrupt+0xe4/0x238
>> [  412.553936]  arch_timer_handler_phys+0x28/0x40
>> [  412.554331]  handle_percpu_devid_irq+0x80/0x130
>> [  412.554732]  generic_handle_domain_irq+0x38/0x58
>> [  412.555139]  gic_handle_irq+0x4c/0x110
>> [  412.555471]  call_on_irq_stack+0x28/0x3c
>> [  412.555817]  do_interrupt_handler+0x78/0x80
>> [  412.556186]  el1_interrupt+0x34/0x80
>> [  412.556502]  el1h_64_irq_handler+0x14/0x20
>> [  412.556864]  el1h_64_irq+0x74/0x78
>> [  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  412.557587]  _raw_spin_lock+0x5c/0x68
>> [  412.557912]  panfrost_job_run+0x24c/0x3f8
>> [  412.558267]  drm_sched_main+0x130/0x390
>> [  412.558607]  kthread+0x174/0x180
>> [  412.558894]  ret_from_fork+0x10/0x20
>> [  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
>> [  475.560329]  (t=115521 jiffies g=505 q=857)
>> [  475.560697] Task dump for CPU 3:
>> [  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  475.561850] Call trace:
>> [  475.562067]  dump_backtrace+0x0/0x198
>> [  475.562398]  show_stack+0x14/0x60
>> [  475.562693]  sched_show_task+0x148/0x168
>> [  475.563041]  dump_cpu_task+0x40/0x4c
>> [  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  475.564104]  update_process_times+0x94/0xd8
>> [  475.564472]  tick_sched_handle.isra.0+0x30/0x50
>> [  475.564871]  tick_sched_timer+0x48/0x98
>> [  475.565209]  __hrtimer_run_queues+0x110/0x1b0
>> [  475.565592]  hrtimer_interrupt+0xe4/0x238
>> [  475.565946]  arch_timer_handler_phys+0x28/0x40
>> [  475.566339]  handle_percpu_devid_irq+0x80/0x130
>> [  475.566739]  generic_handle_domain_irq+0x38/0x58
>> [  475.567145]  gic_handle_irq+0x4c/0x110
>> [  475.567477]  call_on_irq_stack+0x28/0x3c
>> [  475.567822]  do_interrupt_handler+0x78/0x80
>> [  475.568190]  el1_interrupt+0x34/0x80
>> [  475.568507]  el1h_64_irq_handler+0x14/0x20
>> [  475.568869]  el1h_64_irq+0x74/0x78
>> [  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  475.569593]  _raw_spin_lock+0x5c/0x68
>> [  475.569915]  panfrost_job_run+0x24c/0x3f8
>> [  475.570270]  drm_sched_main+0x130/0x390
>> [  475.570610]  kthread+0x174/0x180
>> [  475.570897]  ret_from_fork+0x10/0x20
>> [  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
>> [  538.572333]  (t=131274 jiffies g=505 q=947)
>> [  538.572701] Task dump for CPU 3:
>> [  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  538.573854] Call trace:
>> [  538.574070]  dump_backtrace+0x0/0x198
>> [  538.574402]  show_stack+0x14/0x60
>> [  538.574696]  sched_show_task+0x148/0x168
>> [  538.575044]  dump_cpu_task+0x40/0x4c
>> [  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  538.576109]  update_process_times+0x94/0xd8
>> [  538.576477]  tick_sched_handle.isra.0+0x30/0x50
>> [  538.576878]  tick_sched_timer+0x48/0x98
>> [  538.577216]  __hrtimer_run_queues+0x110/0x1b0
>> [  538.577599]  hrtimer_interrupt+0xe4/0x238
>> [  538.577953]  arch_timer_handler_phys+0x28/0x40
>> [  538.578346]  handle_percpu_devid_irq+0x80/0x130
>> [  538.578745]  generic_handle_domain_irq+0x38/0x58
>> [  538.579151]  gic_handle_irq+0x4c/0x110
>> [  538.579487]  call_on_irq_stack+0x28/0x3c
>> [  538.579833]  do_interrupt_handler+0x78/0x80
>> [  538.580201]  el1_interrupt+0x34/0x80
>> [  538.580518]  el1h_64_irq_handler+0x14/0x20
>> [  538.580880]  el1h_64_irq+0x74/0x78
>> [  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  538.581603]  _raw_spin_lock+0x5c/0x68
>> [  538.581927]  panfrost_job_run+0x24c/0x3f8
>> [  538.582283]  drm_sched_main+0x130/0x390
>> [  538.582623]  kthread+0x174/0x180
>> [  538.582910]  ret_from_fork+0x10/0x20
>> [  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
>> [  601.584330]  (t=147027 jiffies g=505 q=1018)
>> [  601.584706] Task dump for CPU 3:
>> [  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  601.585859] Call trace:
>> [  601.586075]  dump_backtrace+0x0/0x198
>> [  601.586406]  show_stack+0x14/0x60
>> [  601.586701]  sched_show_task+0x148/0x168
>> [  601.587048]  dump_cpu_task+0x40/0x4c
>> [  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  601.588112]  update_process_times+0x94/0xd8
>> [  601.588480]  tick_sched_handle.isra.0+0x30/0x50
>> [  601.588880]  tick_sched_timer+0x48/0x98
>> [  601.589218]  __hrtimer_run_queues+0x110/0x1b0
>> [  601.589602]  hrtimer_interrupt+0xe4/0x238
>> [  601.589956]  arch_timer_handler_phys+0x28/0x40
>> [  601.590348]  handle_percpu_devid_irq+0x80/0x130
>> [  601.590747]  generic_handle_domain_irq+0x38/0x58
>> [  601.591153]  gic_handle_irq+0x4c/0x110
>> [  601.591486]  call_on_irq_stack+0x28/0x3c
>> [  601.591832]  do_interrupt_handler+0x78/0x80
>> [  601.592201]  el1_interrupt+0x34/0x80
>> [  601.592517]  el1h_64_irq_handler+0x14/0x20
>> [  601.592879]  el1h_64_irq+0x74/0x78
>> [  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  601.593603]  _raw_spin_lock+0x5c/0x68
>> [  601.593927]  panfrost_job_run+0x24c/0x3f8
>> [  601.594283]  drm_sched_main+0x130/0x390
>> [  601.594623]  kthread+0x174/0x180
>> [  601.594910]  ret_from_fork+0x10/0x20
>> [  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
>> [  664.596333]  (t=162780 jiffies g=505 q=1086)
>> [  664.596709] Task dump for CPU 3:
>> [  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  664.597862] Call trace:
>> [  664.598078]  dump_backtrace+0x0/0x198
>> [  664.598409]  show_stack+0x14/0x60
>> [  664.598704]  sched_show_task+0x148/0x168
>> [  664.599052]  dump_cpu_task+0x40/0x4c
>> [  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  664.600114]  update_process_times+0x94/0xd8
>> [  664.600482]  tick_sched_handle.isra.0+0x30/0x50
>> [  664.600882]  tick_sched_timer+0x48/0x98
>> [  664.601220]  __hrtimer_run_queues+0x110/0x1b0
>> [  664.601604]  hrtimer_interrupt+0xe4/0x238
>> [  664.601958]  arch_timer_handler_phys+0x28/0x40
>> [  664.602352]  handle_percpu_devid_irq+0x80/0x130
>> [  664.602751]  generic_handle_domain_irq+0x38/0x58
>> [  664.603158]  gic_handle_irq+0x4c/0x110
>> [  664.603491]  call_on_irq_stack+0x28/0x3c
>> [  664.603838]  do_interrupt_handler+0x78/0x80
>> [  664.604206]  el1_interrupt+0x34/0x80
>> [  664.604522]  el1h_64_irq_handler+0x14/0x20
>> [  664.604883]  el1h_64_irq+0x74/0x78
>> [  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  664.605609]  _raw_spin_lock+0x5c/0x68
>> [  664.605934]  panfrost_job_run+0x24c/0x3f8
>> [  664.606290]  drm_sched_main+0x130/0x390
>> [  664.606631]  kthread+0x174/0x180
>> [  664.606918]  ret_from_fork+0x10/0x20
>> [  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
>> [  727.608331]  (t=178533 jiffies g=505 q=1152)
>> [  727.608706] Task dump for CPU 3:
>> [  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  727.609858] Call trace:
>> [  727.610074]  dump_backtrace+0x0/0x198
>> [  727.610403]  show_stack+0x14/0x60
>> [  727.610698]  sched_show_task+0x148/0x168
>> [  727.611047]  dump_cpu_task+0x40/0x4c
>> [  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  727.612112]  update_process_times+0x94/0xd8
>> [  727.612479]  tick_sched_handle.isra.0+0x30/0x50
>> [  727.612879]  tick_sched_timer+0x48/0x98
>> [  727.613216]  __hrtimer_run_queues+0x110/0x1b0
>> [  727.613601]  hrtimer_interrupt+0xe4/0x238
>> [  727.613955]  arch_timer_handler_phys+0x28/0x40
>> [  727.614348]  handle_percpu_devid_irq+0x80/0x130
>> [  727.614748]  generic_handle_domain_irq+0x38/0x58
>> [  727.615154]  gic_handle_irq+0x4c/0x110
>> [  727.615485]  call_on_irq_stack+0x28/0x3c
>> [  727.615832]  do_interrupt_handler+0x78/0x80
>> [  727.616200]  el1_interrupt+0x34/0x80
>> [  727.616517]  el1h_64_irq_handler+0x14/0x20
>> [  727.616879]  el1h_64_irq+0x74/0x78
>> [  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  727.617602]  _raw_spin_lock+0x5c/0x68
>> [  727.617926]  panfrost_job_run+0x24c/0x3f8
>> [  727.618282]  drm_sched_main+0x130/0x390
>> [  727.618621]  kthread+0x174/0x180
>> [  727.618908]  ret_from_fork+0x10/0x20
>> [  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
>> [  790.620331]  (t=194286 jiffies g=505 q=1219)
>> [  790.620708] Task dump for CPU 3:
>> [  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  790.621860] Call trace:
>> [  790.622075]  dump_backtrace+0x0/0x198
>> [  790.622405]  show_stack+0x14/0x60
>> [  790.622699]  sched_show_task+0x148/0x168
>> [  790.623049]  dump_cpu_task+0x40/0x4c
>> [  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  790.624113]  update_process_times+0x94/0xd8
>> [  790.624481]  tick_sched_handle.isra.0+0x30/0x50
>> [  790.624880]  tick_sched_timer+0x48/0x98
>> [  790.625218]  __hrtimer_run_queues+0x110/0x1b0
>> [  790.625603]  hrtimer_interrupt+0xe4/0x238
>> [  790.625957]  arch_timer_handler_phys+0x28/0x40
>> [  790.626350]  handle_percpu_devid_irq+0x80/0x130
>> [  790.626752]  generic_handle_domain_irq+0x38/0x58
>> [  790.627158]  gic_handle_irq+0x4c/0x110
>> [  790.627493]  call_on_irq_stack+0x28/0x3c
>> [  790.627839]  do_interrupt_handler+0x78/0x80
>> [  790.628208]  el1_interrupt+0x34/0x80
>> [  790.628526]  el1h_64_irq_handler+0x14/0x20
>> [  790.628888]  el1h_64_irq+0x74/0x78
>> [  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  790.629613]  _raw_spin_lock+0x5c/0x68
>> [  790.629937]  panfrost_job_run+0x24c/0x3f8
>> [  790.630292]  drm_sched_main+0x130/0x390
>> [  790.630632]  kthread+0x174/0x180
>> [  790.630919]  ret_from_fork+0x10/0x20
>> [  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
>> [  853.632340]  (t=210039 jiffies g=505 q=1318)
>> [  853.632716] Task dump for CPU 3:
>> [  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  853.633869] Call trace:
>> [  853.634084]  dump_backtrace+0x0/0x198
>> [  853.634418]  show_stack+0x14/0x60
>> [  853.634712]  sched_show_task+0x148/0x168
>> [  853.635061]  dump_cpu_task+0x40/0x4c
>> [  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  853.636124]  update_process_times+0x94/0xd8
>> [  853.636492]  tick_sched_handle.isra.0+0x30/0x50
>> [  853.636892]  tick_sched_timer+0x48/0x98
>> [  853.637230]  __hrtimer_run_queues+0x110/0x1b0
>> [  853.637613]  hrtimer_interrupt+0xe4/0x238
>> [  853.637965]  arch_timer_handler_phys+0x28/0x40
>> [  853.638358]  handle_percpu_devid_irq+0x80/0x130
>> [  853.638760]  generic_handle_domain_irq+0x38/0x58
>> [  853.639166]  gic_handle_irq+0x4c/0x110
>> [  853.639499]  call_on_irq_stack+0x28/0x3c
>> [  853.639845]  do_interrupt_handler+0x78/0x80
>> [  853.640213]  el1_interrupt+0x34/0x80
>> [  853.640530]  el1h_64_irq_handler+0x14/0x20
>> [  853.640892]  el1h_64_irq+0x74/0x78
>> [  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  853.641616]  _raw_spin_lock+0x5c/0x68
>> [  853.641940]  panfrost_job_run+0x24c/0x3f8
>> [  853.642295]  drm_sched_main+0x130/0x390
>> [  853.642634]  kthread+0x174/0x180
>> [  853.642921]  ret_from_fork+0x10/0x20
>> [  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
>> [  916.644339]  (t=225792 jiffies g=505 q=1390)
>> [  916.644715] Task dump for CPU 3:
>> [  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  916.645868] Call trace:
>> [  916.646083]  dump_backtrace+0x0/0x198
>> [  916.646414]  show_stack+0x14/0x60
>> [  916.646708]  sched_show_task+0x148/0x168
>> [  916.647055]  dump_cpu_task+0x40/0x4c
>> [  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  916.648119]  update_process_times+0x94/0xd8
>> [  916.648488]  tick_sched_handle.isra.0+0x30/0x50
>> [  916.648887]  tick_sched_timer+0x48/0x98
>> [  916.649225]  __hrtimer_run_queues+0x110/0x1b0
>> [  916.649608]  hrtimer_interrupt+0xe4/0x238
>> [  916.649962]  arch_timer_handler_phys+0x28/0x40
>> [  916.650355]  handle_percpu_devid_irq+0x80/0x130
>> [  916.650756]  generic_handle_domain_irq+0x38/0x58
>> [  916.651162]  gic_handle_irq+0x4c/0x110
>> [  916.651495]  call_on_irq_stack+0x28/0x3c
>> [  916.651842]  do_interrupt_handler+0x78/0x80
>> [  916.652210]  el1_interrupt+0x34/0x80
>> [  916.652527]  el1h_64_irq_handler+0x14/0x20
>> [  916.652889]  el1h_64_irq+0x74/0x78
>> [  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  916.653614]  _raw_spin_lock+0x5c/0x68
>> [  916.653937]  panfrost_job_run+0x24c/0x3f8
>> [  916.654293]  drm_sched_main+0x130/0x390
>> [  916.654632]  kthread+0x174/0x180
>> [  916.654920]  ret_from_fork+0x10/0x20
>>
>> -- 
>> Pengutronix e.K.                           |                             |
>> Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
>> 31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
>> Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [Intel-gfx] [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
@ 2021-11-15 20:32         ` Christian König
  0 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-11-15 20:32 UTC (permalink / raw)
  To: Daniel Vetter, Sascha Hauer
  Cc: linaro-mm-sig, intel-gfx, dri-devel, linux-media

Am 15.11.21 um 15:08 schrieb Daniel Vetter:
> On Mon, Nov 15, 2021 at 03:03:53PM +0100, Sascha Hauer wrote:
>> Hi,
>>
>> On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
>>> Simplifying the code a bit.
>>>
>>> v2: use dma_resv_for_each_fence
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>>>   1 file changed, 6 insertions(+), 20 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>>> index 042c16b5d54a..5bc5f775abe1 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>>>   					    struct drm_gem_object *obj,
>>>   					    bool write)
>>>   {
>>> +	struct dma_resv_iter cursor;
>>> +	struct dma_fence *fence;
>>>   	int ret;
>>> -	struct dma_fence **fences;
>>> -	unsigned int i, fence_count;
>>> -
>>> -	if (!write) {
>>> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
>>> -
>>> -		return drm_sched_job_add_dependency(job, fence);
>>> -	}
>>> -
>>> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
>>> -	if (ret || !fence_count)
>>> -		return ret;
>>>   
>>> -	for (i = 0; i < fence_count; i++) {
>>> -		ret = drm_sched_job_add_dependency(job, fences[i]);
>>> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
>>> +		ret = drm_sched_job_add_dependency(job, fence);
>>>   		if (ret)
>>> -			break;
>>> +			return ret;
>>>   	}
>>> -
>>> -	for (; i < fence_count; i++)
>>> -		dma_fence_put(fences[i]);
>>> -	kfree(fences);
>>> -	return ret;
>>> +	return 0;
>>>   }
>>>   EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>>>   
>> This patch lets the panfrost driver explode on v5.16-rc1 with the
>> following. I didn't bisect it, but it goes away when I revert this
>> patch. I only started weston, nothing more.
>>
>> Any idea what goes wrong here?
> Should be fixed in 13e9e30cafea1, but Christian pushed it to the wrong
> branch so it missed -rc1.
>
> Christian, this needs to go into drm-misc-fixes, pls cherry-pick it over.

The problem is it doesn't apply to drm-misc-fixes. Looks like the branch 
wasn't updated.

What's going on here?

Christian.
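
For anyone following along, here is a minimal sketch of feeding the fences
returned by the new iterator into the scheduler. This is illustrative only,
not a quote of the fix referenced above: the function name is made up, and
the extra dma_fence_get()/dma_fence_put() pair is an assumption based on
drm_sched_job_add_dependency() consuming one reference while the iterator
drops its own reference when it advances.

static int sketch_add_implicit_deps(struct drm_sched_job *job,
				    struct drm_gem_object *obj,
				    bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	/* Assumes the caller already holds obj->resv, as the locked
	 * iterator variant expects. */
	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/* Take an extra reference before handing the fence over:
		 * add_dependency() keeps one, and the iterator puts its
		 * own reference on the next step. */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}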

> -Daniel
>
>> Sascha
>>
>> [   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
>> [   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   12.514056] Modules linked in:
>> [   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
>> [   12.514621] ------------[ cut here ]------------
>> [   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.515049] pc : dma_fence_release+0xac/0xe8
>> [   12.515056] lr : dma_fence_release+0xac/0xe8
>> [   12.515061] sp : ffff8000123ebb20
>> [   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
>> [   12.515518] refcount_t: addition on 0; use-after-free.
>> [   12.516015]  x27: 0000000000000000
>> [   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
>> [   12.516992] x26: 0000000000000001
>> [   12.517366] Modules linked in:
>> [   12.517654]  x25: ffff000004b051c0
>> [   12.518108]
>> [   12.518555]  x24: 0000000000000000
>> [   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
>> [   12.519576]
>> [   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.520133] x23: 0000000000000000
>> [   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.520559]  x22: ffff800010d41b78
>> [   12.520856] pc : refcount_warn_saturate+0x98/0x140
>> [   12.521625]  x21: ffff000004b05050
>> [   12.521755] lr : refcount_warn_saturate+0x98/0x140
>> [   12.522299]
>> [   12.522588] sp : ffff8000122b3bc0
>> [   12.523192] x20: ffff000004b05040
>> [   12.523489] x29: ffff8000122b3bc0
>> [   12.523906]  x19: ffff000004b05078
>> [   12.524203]  x28: 0000000000000000
>> [   12.524620]  x18: 0000000000000010
>> [   12.524751]  x27: ffff000003791880
>> [   12.525040]
>> [   12.525329]
>> [   12.525618] x17: 0000000000000000
>> [   12.525915] x26: ffff8000122b3d30
>> [   12.526212]  x16: 0000000000000000
>> [   12.526509]  x25: 0000000000000001
>> [   12.526806]  x15: ffff0000050e2dc0
>> [   12.526937]  x24: ffff000003791a10
>> [   12.527067]
>> [   12.527357]
>> [   12.527646] x14: 00000000000001b5
>> [   12.527942] x23: 0000000000000000
>> [   12.528240]  x13: ffff0000050e2dc0
>> [   12.528536]  x22: ffff000003505280
>> [   12.528833]  x12: 00000000ffffffea
>> [   12.528964]  x21: ffff000003a2a220
>> [   12.529095]
>> [   12.529384]
>> [   12.529673] x11: ffff800011761ec8
>> [   12.529970] x20: ffff000004b05078
>> [   12.530267]  x10: ffff8000115e1e88
>> [   12.530564]  x19: ffff000004b05000
>> [   12.530861]  x9 : ffff8000115e1ee0
>> [   12.530992]  x18: 0000000000000010
>> [   12.531123]
>> [   12.531412]
>> [   12.531701] x8 : 000000000017ffe8
>> [   12.531998] x17: 0000000000500600
>> [   12.532294]  x7 : c0000000fffeffff
>> [   12.532591]  x16: 0000000000000000
>> [   12.532888]  x6 : 0000000000000001
>> [   12.533019]  x15: ffff000003505700
>> [   12.533150]
>> [   12.533439]
>> [   12.533728] x5 : ffff00007fb8c9a0
>> [   12.534025] x14: 0000000000000000
>> [   12.534322]  x4 : 0000000000000000
>> [   12.534619]  x13: 292d2d3d45505954
>> [   12.534914]  x3 : 0000000000000001
>> [   12.535045]  x12: 4220534253532d20
>> [   12.535176]
>> [   12.535465]
>> [   12.535754] x2 : ffff00007fb8c9a8
>> [   12.536051] x11: 5449442d204f4354
>> [   12.536347]  x1 : ea6e0584a53f2200
>> [   12.536643]  x10: 2d204f41552d204e
>> [   12.536941]  x0 : 0000000000000000
>> [   12.537073]  x9 : 4e41502b20666961
>> [   12.537203]
>> [   12.537492]
>> [   12.537782] Call trace:
>> [   12.538078] x8 : 642076635a6e2820
>> [   12.538377]  dma_fence_release+0xac/0xe8
>> [   12.538671]  x7 : 205d343430353135
>> [   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.539098]  x6 : 352e32312020205b
>> [   12.539230]  panfrost_job_push+0x1bc/0x200
>> [   12.539442]
>> [   12.539732]  panfrost_ioctl_submit+0x358/0x438
>> [   12.540073] x5 : ffff00007fb539a0
>> [   12.540370]  drm_ioctl_kernel+0xb8/0x170
>> [   12.540771]  x4 : 0000000000000000
>> [   12.541069]  drm_ioctl+0x214/0x450
>> [   12.541424]  x3 : 0000000000000001
>> [   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.541943]
>> [   12.542233]  invoke_syscall+0x40/0xf8
>> [   12.542573] x2 : ffff00007fb539a8
>> [   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.543167]  x1 : 0ac4fb7a0680bb00
>> [   12.543465]  do_el0_svc+0x20/0x80
>> [   12.543805]  x0 : 0000000000000000
>> [   12.543936]  el0_svc+0x1c/0x50
>> [   12.544255]
>> [   12.544544]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.544955] Call trace:
>> [   12.545250]  el0t_64_sync+0x16c/0x170
>> [   12.545540]  refcount_warn_saturate+0x98/0x140
>> [   12.545837] ---[ end trace ba74542f51246288 ]---
>> [   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
>> [   12.546285] ------------[ cut here ]------------
>> [   12.546598]  drm_sched_job_done_cb+0x10/0x18
>> [   12.546813] refcount_t: underflow; use-after-free.
>> [   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
>> [   12.547920]  dma_fence_signal_locked+0x20/0x30
>> [   12.548336] Modules linked in:
>> [   12.548737]  panfrost_job_handle_done+0x34/0x50
>> [   12.549110]
>> [   12.549525]  panfrost_job_handle_irqs+0x358/0x570
>> [   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
>> [   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.551373]  irq_thread_fn+0x28/0x98
>> [   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.551899]  irq_thread+0x12c/0x230
>> [   12.552309] pc : refcount_warn_saturate+0xec/0x140
>> [   12.553131]  kthread+0x174/0x180
>> [   12.553578] lr : refcount_warn_saturate+0xec/0x140
>> [   12.554121]  ret_from_fork+0x10/0x20
>> [   12.554432] sp : ffff8000123ebaa0
>> [   12.555038] ---[ end trace ba74542f51246289 ]---
>> [   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
>> [   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
>> [   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
>> [   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
>> [   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
>> [   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.563733] Call trace:
>> [   12.563950]  refcount_warn_saturate+0xec/0x140
>> [   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
>> [   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   12.565216]  dma_fence_release+0xd4/0xe8
>> [   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.565970]  panfrost_job_push+0x1bc/0x200
>> [   12.566333]  panfrost_ioctl_submit+0x358/0x438
>> [   12.566726]  drm_ioctl_kernel+0xb8/0x170
>> [   12.567072]  drm_ioctl+0x214/0x450
>> [   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.567721]  invoke_syscall+0x40/0xf8
>> [   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.568463]  do_el0_svc+0x20/0x80
>> [   12.568755]  el0_svc+0x1c/0x50
>> [   12.569030]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.569399]  el0t_64_sync+0x16c/0x170
>> [   12.569724] ---[ end trace ba74542f5124628a ]---
>> [   12.595086] ------------[ cut here ]------------
>> [   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
>> [   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   12.596934] Modules linked in:
>> [   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.599202] pc : dma_fence_release+0xac/0xe8
>> [   12.599584] lr : dma_fence_release+0xac/0xe8
>> [   12.599960] sp : ffff8000123ebb20
>> [   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
>> [   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.606542] Call trace:
>> [   12.606760]  dma_fence_release+0xac/0xe8
>> [   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.607517]  panfrost_job_push+0x1bc/0x200
>> [   12.607882]  panfrost_ioctl_submit+0x358/0x438
>> [   12.608274]  drm_ioctl_kernel+0xb8/0x170
>> [   12.608622]  drm_ioctl+0x214/0x450
>> [   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.609269]  invoke_syscall+0x40/0xf8
>> [   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.610011]  do_el0_svc+0x20/0x80
>> [   12.610304]  el0_svc+0x1c/0x50
>> [   12.610577]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.610946]  el0t_64_sync+0x16c/0x170
>> [   12.611276] ---[ end trace ba74542f5124628b ]---
>> [   12.612869] ------------[ cut here ]------------
>> [   12.613288] refcount_t: saturated; leaking memory.
>> [   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
>> [   12.614476] Modules linked in:
>> [   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.616773] pc : refcount_warn_saturate+0xc0/0x140
>> [   12.617200] lr : refcount_warn_saturate+0xc0/0x140
>> [   12.617622] sp : ffff8000123eba60
>> [   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
>> [   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
>> [   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
>> [   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
>> [   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
>> [   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.624256] Call trace:
>> [   12.624474]  refcount_warn_saturate+0xc0/0x140
>> [   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
>> [   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
>> [   12.625811]  drm_atomic_helper_commit+0x80/0x360
>> [   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
>> [   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
>> [   12.627050]  drm_ioctl_kernel+0xb8/0x170
>> [   12.627397]  drm_ioctl+0x214/0x450
>> [   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.628046]  invoke_syscall+0x40/0xf8
>> [   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.628787]  do_el0_svc+0x20/0x80
>> [   12.629079]  el0_svc+0x1c/0x50
>> [   12.629354]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.629723]  el0t_64_sync+0x16c/0x170
>> [   12.630048] ---[ end trace ba74542f5124628c ]---
>> [   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
>> [   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
>> [   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
>> [   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
>> [   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
>> [   13.296693] ------------[ cut here ]------------
>> [   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
>> [   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.298560] Modules linked in:
>> [   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.300830] pc : dma_fence_release+0xac/0xe8
>> [   13.301208] lr : dma_fence_release+0xac/0xe8
>> [   13.301585] sp : ffff8000123ebb20
>> [   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.308149] Call trace:
>> [   13.308367]  dma_fence_release+0xac/0xe8
>> [   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.309119]  panfrost_job_push+0x1bc/0x200
>> [   13.309483]  panfrost_ioctl_submit+0x358/0x438
>> [   13.309875]  drm_ioctl_kernel+0xb8/0x170
>> [   13.310221]  drm_ioctl+0x214/0x450
>> [   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.310868]  invoke_syscall+0x40/0xf8
>> [   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.311609]  do_el0_svc+0x20/0x80
>> [   13.311903]  el0_svc+0x1c/0x50
>> [   13.312177]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.312545]  el0t_64_sync+0x16c/0x170
>> [   13.312869] ---[ end trace ba74542f5124628d ]---
>> [   13.340454] ------------[ cut here ]------------
>> [   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
>> [   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.342318] Modules linked in:
>> [   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.344584] pc : dma_fence_release+0xac/0xe8
>> [   13.344961] lr : dma_fence_release+0xac/0xe8
>> [   13.345338] sp : ffff8000123ebb20
>> [   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
>> [   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.351900] Call trace:
>> [   13.352116]  dma_fence_release+0xac/0xe8
>> [   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.352869]  panfrost_job_push+0x1bc/0x200
>> [   13.353232]  panfrost_ioctl_submit+0x358/0x438
>> [   13.353624]  drm_ioctl_kernel+0xb8/0x170
>> [   13.353971]  drm_ioctl+0x214/0x450
>> [   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.354616]  invoke_syscall+0x40/0xf8
>> [   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.355356]  do_el0_svc+0x20/0x80
>> [   13.355650]  el0_svc+0x1c/0x50
>> [   13.355925]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.356293]  el0t_64_sync+0x16c/0x170
>> [   13.356618] ---[ end trace ba74542f5124628e ]---
>> [   13.379841] ------------[ cut here ]------------
>> [   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
>> [   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.381680] Modules linked in:
>> [   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.383937] pc : dma_fence_release+0xac/0xe8
>> [   13.384314] lr : dma_fence_release+0xac/0xe8
>> [   13.384690] sp : ffff8000123ebb20
>> [   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.391247] Call trace:
>> [   13.391464]  dma_fence_release+0xac/0xe8
>> [   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.392217]  panfrost_job_push+0x1bc/0x200
>> [   13.392581]  panfrost_ioctl_submit+0x358/0x438
>> [   13.392972]  drm_ioctl_kernel+0xb8/0x170
>> [   13.393319]  drm_ioctl+0x214/0x450
>> [   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.393967]  invoke_syscall+0x40/0xf8
>> [   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.394708]  do_el0_svc+0x20/0x80
>> [   13.395002]  el0_svc+0x1c/0x50
>> [   13.395275]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.395643]  el0t_64_sync+0x16c/0x170
>> [   13.395968] ---[ end trace ba74542f5124628f ]---
>> [   13.398130] ------------[ cut here ]------------
>> [   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
>> [   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.400011] Modules linked in:
>> [   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.402269] pc : dma_fence_release+0xac/0xe8
>> [   13.402646] lr : dma_fence_release+0xac/0xe8
>> [   13.403024] sp : ffff8000123ebb20
>> [   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
>> [   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
>> [   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
>> [   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
>> [   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
>> [   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
>> [   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
>> [   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.409583] Call trace:
>> [   13.409800]  dma_fence_release+0xac/0xe8
>> [   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.410553]  panfrost_job_push+0x1bc/0x200
>> [   13.410917]  panfrost_ioctl_submit+0x358/0x438
>> [   13.411309]  drm_ioctl_kernel+0xb8/0x170
>> [   13.411656]  drm_ioctl+0x214/0x450
>> [   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.412303]  invoke_syscall+0x40/0xf8
>> [   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.413042]  do_el0_svc+0x20/0x80
>> [   13.413335]  el0_svc+0x1c/0x50
>> [   13.413607]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.413976]  el0t_64_sync+0x16c/0x170
>> [   13.414298] ---[ end trace ba74542f51246290 ]---
>> [   13.430129] ------------[ cut here ]------------
>> [   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
>> [   13.430557] refcount_t: saturated; leaking memory.
>> [   13.431321] Mem abort info:
>> [   13.431324]   ESR = 0x96000044
>> [   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
>> [   13.431330]   SET = 0, FnV = 0
>> [   13.431333]   EA = 0, S1PTW = 0
>> [   13.431335]   FSC = 0x04: level 0 translation fault
>> [   13.431337] Data abort info:
>> [   13.431339]   ISV = 0, ISS = 0x00000044
>> [   13.431340]   CM = 0, WnR = 1
>> [   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
>> [   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
>> [   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
>> [   13.431359] Modules linked in:
>> [   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
>> [   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
>> [   13.432059] lr : dma_fence_signal+0x30/0x60
>> [   13.432327] Modules linked in:
>> [   13.432789] sp : ffff8000122b3b50
>> [   13.433057]
>> [   13.433331] x29: ffff8000122b3b50
>> [   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.434008]  x28: 0000000000000000
>> [   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.434601]  x27: ffff000003791880
>> [   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.435751]
>> [   13.435753] x26: ffff8000122b3d30
>> [   13.436237] pc : refcount_warn_saturate+0x6c/0x140
>> [   13.436504]  x25: 0000000000000001
>> [   13.437393] lr : refcount_warn_saturate+0x6c/0x140
>> [   13.437938]  x24: ffff000003791a10
>> [   13.438542] sp : ffff8000123ebb40
>> [   13.439042]
>> [   13.439767] x29: ffff8000123ebb40
>> [   13.440130] x23: 0000000000000000
>> [   13.440398]  x28: ffff8000123ebd58
>> [   13.440687]  x22: ffff000003505280
>> [   13.440819]  x27: 0000000000000000
>> [   13.441108]  x21: ffff8000122b3b88
>> [   13.441931]
>> [   13.442228]
>> [   13.442773] x26: 0000000000000001
>> [   13.443070] x20: ffff000004b051c0
>> [   13.443674]  x25: ffff000004b051c0
>> [   13.443806]  x19: ffff000004b051c0
>> [   13.444095]  x24: 0000000000000000
>> [   13.444513]  x18: 0000000000000000
>> [   13.444811]
>> [   13.445227]
>> [   13.445524] x23: 0000000000000000
>> [   13.445814] x17: 3837783028203032
>> [   13.445945]  x22: ffff000004b051c0
>> [   13.446236]  x16: 3139323835323120
>> [   13.446525]  x21: ffff000004d73100
>> [   13.446822]  x15: 00000205aa24947a
>> [   13.447120]
>> [   13.447417]
>> [   13.447715] x20: ffff000004b05400
>> [   13.447846] x14: 0000000000000326
>> [   13.447977]  x19: 00000000ffffffff
>> [   13.448266]  x13: 0000000000000000
>> [   13.448555]  x18: 0000000000000010
>> [   13.448851]  x12: 0000000000000000
>> [   13.449148]
>> [   13.449446]
>> [   13.449743] x17: 0000000000000000
>> [   13.449874] x11: 0000000000000001
>> [   13.450006]  x16: 0000000000000000
>> [   13.450296]  x10: ffff8000122b3d48
>> [   13.450585]  x15: 000060978994e822
>> [   13.450882]  x9 : 00000000000019e0
>> [   13.451179]
>> [   13.451477]
>> [   13.451774] x14: 00000000000000b6
>> [   13.451905] x8 : ffff8000122b3d78
>> [   13.452037]  x13: 00000000000000b6
>> [   13.452326]  x7 : 0000000000000000
>> [   13.452614]  x12: 0000000000000000
>> [   13.452912]  x6 : 000000001fcf847e
>> [   13.453209]
>> [   13.453506]
>> [   13.453803] x11: 0000000000000001
>> [   13.453934] x5 : 00ffffffffffffff
>> [   13.454066]  x10: 00000000000009a0
>> [   13.454356]  x4 : 0015ef3c03fd7c00
>> [   13.454643]  x9 : ffff8000123eb8c0
>> [   13.454941]  x3 : 0000000000000018
>> [   13.455238]
>> [   13.455536]
>> [   13.455833] x8 : ffff0000050e3340
>> [   13.455965] x2 : ffff000004b051f0
>> [   13.456096]  x7 : ffff00007fb92a80
>> [   13.456386]  x1 : 000000032053be4d
>> [   13.456676]  x6 : 0000000000000115
>> [   13.456973]  x0 : 0000000000000000
>> [   13.457271]
>> [   13.457568]
>> [   13.457866] x5 : 0000000000000000
>> [   13.457998] Call trace:
>> [   13.458128]  x4 : ffff00007fb8c9a0
>> [   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
>> [   13.458707]  x3 : ffff00007fb8f950
>> [   13.459005]  dma_fence_signal+0x30/0x60
>> [   13.459302]
>> [   13.459600]  drm_sched_fence_finished+0x10/0x18
>> [   13.459897] x2 : ffff00007fb8c9a0
>> [   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
>> [   13.460159]  x1 : ea6e0584a53f2200
>> [   13.460449]  drm_sched_job_done_cb+0x10/0x18
>> [   13.460738]  x0 : 0000000000000000
>> [   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   13.461333]
>> [   13.461631]  dma_fence_signal_locked+0x20/0x30
>> [   13.461929] Call trace:
>> [   13.462060]  panfrost_job_handle_done+0x34/0x50
>> [   13.462192]  refcount_warn_saturate+0x6c/0x140
>> [   13.462481]  panfrost_job_handle_irqs+0x358/0x570
>> [   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
>> [   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
>> [   13.463462]  panfrost_job_push+0x1bc/0x200
>> [   13.463760]  irq_thread_fn+0x28/0x98
>> [   13.464094]  panfrost_ioctl_submit+0x358/0x438
>> [   13.464225]  irq_thread+0x12c/0x230
>> [   13.464620]  drm_ioctl_kernel+0xb8/0x170
>> [   13.464909]  kthread+0x174/0x180
>> [   13.465319]  drm_ioctl+0x214/0x450
>> [   13.465617]  ret_from_fork+0x10/0x20
>> [   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
>> [   13.466756]  invoke_syscall+0x40/0xf8
>> [   13.466891] ---[ end trace ba74542f51246291 ]---
>> [   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
>> [   13.467883]  do_el0_svc+0x20/0x80
>> [   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
>> [   13.468678]  el0_svc+0x1c/0x50
>> [   13.475908]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.476277]  el0t_64_sync+0x16c/0x170
>> [   13.476601] ---[ end trace ba74542f51246292 ]---
>> [   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
>> [   14.462959] sched: RT throttling activated
>> [   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
>> [   34.476312]  (t=5250 jiffies g=505 q=301)
>> [   34.476667] Task dump for CPU 3:
>> [   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [   34.477820] Call trace:
>> [   34.478035]  dump_backtrace+0x0/0x198
>> [   34.478365]  show_stack+0x14/0x60
>> [   34.478659]  sched_show_task+0x148/0x168
>> [   34.479008]  dump_cpu_task+0x40/0x4c
>> [   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
>> [   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
>> [   34.480072]  update_process_times+0x94/0xd8
>> [   34.480440]  tick_sched_handle.isra.0+0x30/0x50
>> [   34.480840]  tick_sched_timer+0x48/0x98
>> [   34.481178]  __hrtimer_run_queues+0x110/0x1b0
>> [   34.481562]  hrtimer_interrupt+0xe4/0x238
>> [   34.481917]  arch_timer_handler_phys+0x28/0x40
>> [   34.482310]  handle_percpu_devid_irq+0x80/0x130
>> [   34.482710]  generic_handle_domain_irq+0x38/0x58
>> [   34.483116]  gic_handle_irq+0x4c/0x110
>> [   34.483450]  call_on_irq_stack+0x28/0x3c
>> [   34.483798]  do_interrupt_handler+0x78/0x80
>> [   34.484166]  el1_interrupt+0x34/0x80
>> [   34.484484]  el1h_64_irq_handler+0x14/0x20
>> [   34.484846]  el1h_64_irq+0x74/0x78
>> [   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
>> [   34.485568]  _raw_spin_lock+0x5c/0x68
>> [   34.485895]  panfrost_job_run+0x24c/0x3f8
>> [   34.486250]  drm_sched_main+0x130/0x390
>> [   34.486591]  kthread+0x174/0x180
>> [   34.486878]  ret_from_fork+0x10/0x20
>> [   35.810989] vcc3v3_lcd1_n: disabling
>> [   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
>> [   97.488326]  (t=21003 jiffies g=505 q=379)
>> [   97.488687] Task dump for CPU 3:
>> [   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [   97.489842] Call trace:
>> [   97.490056]  dump_backtrace+0x0/0x198
>> [   97.490388]  show_stack+0x14/0x60
>> [   97.490682]  sched_show_task+0x148/0x168
>> [   97.491030]  dump_cpu_task+0x40/0x4c
>> [   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
>> [   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
>> [   97.492095]  update_process_times+0x94/0xd8
>> [   97.492463]  tick_sched_handle.isra.0+0x30/0x50
>> [   97.492862]  tick_sched_timer+0x48/0x98
>> [   97.493200]  __hrtimer_run_queues+0x110/0x1b0
>> [   97.493582]  hrtimer_interrupt+0xe4/0x238
>> [   97.493937]  arch_timer_handler_phys+0x28/0x40
>> [   97.494330]  handle_percpu_devid_irq+0x80/0x130
>> [   97.494730]  generic_handle_domain_irq+0x38/0x58
>> [   97.495136]  gic_handle_irq+0x4c/0x110
>> [   97.495473]  call_on_irq_stack+0x28/0x3c
>> [   97.495818]  do_interrupt_handler+0x78/0x80
>> [   97.496186]  el1_interrupt+0x34/0x80
>> [   97.496503]  el1h_64_irq_handler+0x14/0x20
>> [   97.496865]  el1h_64_irq+0x74/0x78
>> [   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
>> [   97.497588]  _raw_spin_lock+0x5c/0x68
>> [   97.497912]  panfrost_job_run+0x24c/0x3f8
>> [   97.498268]  drm_sched_main+0x130/0x390
>> [   97.498607]  kthread+0x174/0x180
>> [   97.498895]  ret_from_fork+0x10/0x20
>> [  140.108141] random: crng init done
>> [  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
>> [  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
>> [  160.500322]  (t=36756 jiffies g=505 q=482)
>> [  160.500684] Task dump for CPU 3:
>> [  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  160.501837] Call trace:
>> [  160.502054]  dump_backtrace+0x0/0x198
>> [  160.502384]  show_stack+0x14/0x60
>> [  160.502679]  sched_show_task+0x148/0x168
>> [  160.503027]  dump_cpu_task+0x40/0x4c
>> [  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  160.504091]  update_process_times+0x94/0xd8
>> [  160.504458]  tick_sched_handle.isra.0+0x30/0x50
>> [  160.504858]  tick_sched_timer+0x48/0x98
>> [  160.505195]  __hrtimer_run_queues+0x110/0x1b0
>> [  160.505580]  hrtimer_interrupt+0xe4/0x238
>> [  160.505934]  arch_timer_handler_phys+0x28/0x40
>> [  160.506327]  handle_percpu_devid_irq+0x80/0x130
>> [  160.506727]  generic_handle_domain_irq+0x38/0x58
>> [  160.507133]  gic_handle_irq+0x4c/0x110
>> [  160.507467]  call_on_irq_stack+0x28/0x3c
>> [  160.507813]  do_interrupt_handler+0x78/0x80
>> [  160.508181]  el1_interrupt+0x34/0x80
>> [  160.508497]  el1h_64_irq_handler+0x14/0x20
>> [  160.508858]  el1h_64_irq+0x74/0x78
>> [  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  160.509579]  _raw_spin_lock+0x5c/0x68
>> [  160.509903]  panfrost_job_run+0x24c/0x3f8
>> [  160.510259]  drm_sched_main+0x130/0x390
>> [  160.510599]  kthread+0x174/0x180
>> [  160.510886]  ret_from_fork+0x10/0x20
>> [  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
>> [  223.512325]  (t=52509 jiffies g=505 q=536)
>> [  223.512688] Task dump for CPU 3:
>> [  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  223.513842] Call trace:
>> [  223.514056]  dump_backtrace+0x0/0x198
>> [  223.514387]  show_stack+0x14/0x60
>> [  223.514681]  sched_show_task+0x148/0x168
>> [  223.515029]  dump_cpu_task+0x40/0x4c
>> [  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  223.516094]  update_process_times+0x94/0xd8
>> [  223.516462]  tick_sched_handle.isra.0+0x30/0x50
>> [  223.516860]  tick_sched_timer+0x48/0x98
>> [  223.517198]  __hrtimer_run_queues+0x110/0x1b0
>> [  223.517582]  hrtimer_interrupt+0xe4/0x238
>> [  223.517935]  arch_timer_handler_phys+0x28/0x40
>> [  223.518327]  handle_percpu_devid_irq+0x80/0x130
>> [  223.518727]  generic_handle_domain_irq+0x38/0x58
>> [  223.519133]  gic_handle_irq+0x4c/0x110
>> [  223.519466]  call_on_irq_stack+0x28/0x3c
>> [  223.519812]  do_interrupt_handler+0x78/0x80
>> [  223.520181]  el1_interrupt+0x34/0x80
>> [  223.520498]  el1h_64_irq_handler+0x14/0x20
>> [  223.520860]  el1h_64_irq+0x74/0x78
>> [  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  223.521584]  _raw_spin_lock+0x5c/0x68
>> [  223.521908]  panfrost_job_run+0x24c/0x3f8
>> [  223.522264]  drm_sched_main+0x130/0x390
>> [  223.522605]  kthread+0x174/0x180
>> [  223.522892]  ret_from_fork+0x10/0x20
>> [  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
>> [  286.524325]  (t=68262 jiffies g=505 q=612)
>> [  286.524687] Task dump for CPU 3:
>> [  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  286.525840] Call trace:
>> [  286.526057]  dump_backtrace+0x0/0x198
>> [  286.526387]  show_stack+0x14/0x60
>> [  286.526681]  sched_show_task+0x148/0x168
>> [  286.527029]  dump_cpu_task+0x40/0x4c
>> [  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  286.528092]  update_process_times+0x94/0xd8
>> [  286.528459]  tick_sched_handle.isra.0+0x30/0x50
>> [  286.528859]  tick_sched_timer+0x48/0x98
>> [  286.529197]  __hrtimer_run_queues+0x110/0x1b0
>> [  286.529579]  hrtimer_interrupt+0xe4/0x238
>> [  286.529933]  arch_timer_handler_phys+0x28/0x40
>> [  286.530326]  handle_percpu_devid_irq+0x80/0x130
>> [  286.530726]  generic_handle_domain_irq+0x38/0x58
>> [  286.531132]  gic_handle_irq+0x4c/0x110
>> [  286.531466]  call_on_irq_stack+0x28/0x3c
>> [  286.531812]  do_interrupt_handler+0x78/0x80
>> [  286.532180]  el1_interrupt+0x34/0x80
>> [  286.532496]  el1h_64_irq_handler+0x14/0x20
>> [  286.532857]  el1h_64_irq+0x74/0x78
>> [  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  286.533580]  _raw_spin_lock+0x5c/0x68
>> [  286.533904]  panfrost_job_run+0x24c/0x3f8
>> [  286.534259]  drm_sched_main+0x130/0x390
>> [  286.534600]  kthread+0x174/0x180
>> [  286.534887]  ret_from_fork+0x10/0x20
>> [  349.534957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  349.535478] rcu:     3-....: (84003 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=41602
>> [  349.536324]  (t=84015 jiffies g=505 q=716)
>> [  349.536687] Task dump for CPU 3:
>> [  349.536970] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  349.537839] Call trace:
>> [  349.538055]  dump_backtrace+0x0/0x198
>> [  349.538387]  show_stack+0x14/0x60
>> [  349.538681]  sched_show_task+0x148/0x168
>> [  349.539029]  dump_cpu_task+0x40/0x4c
>> [  349.539348]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  349.539717]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  349.540094]  update_process_times+0x94/0xd8
>> [  349.540462]  tick_sched_handle.isra.0+0x30/0x50
>> [  349.540862]  tick_sched_timer+0x48/0x98
>> [  349.541201]  __hrtimer_run_queues+0x110/0x1b0
>> [  349.541585]  hrtimer_interrupt+0xe4/0x238
>> [  349.541937]  arch_timer_handler_phys+0x28/0x40
>> [  349.542330]  handle_percpu_devid_irq+0x80/0x130
>> [  349.542730]  generic_handle_domain_irq+0x38/0x58
>> [  349.543136]  gic_handle_irq+0x4c/0x110
>> [  349.543469]  call_on_irq_stack+0x28/0x3c
>> [  349.543815]  do_interrupt_handler+0x78/0x80
>> [  349.544183]  el1_interrupt+0x34/0x80
>> [  349.544500]  el1h_64_irq_handler+0x14/0x20
>> [  349.544862]  el1h_64_irq+0x74/0x78
>> [  349.545164]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  349.545586]  _raw_spin_lock+0x5c/0x68
>> [  349.545910]  panfrost_job_run+0x24c/0x3f8
>> [  349.546265]  drm_sched_main+0x130/0x390
>> [  349.546604]  kthread+0x174/0x180
>> [  349.546891]  ret_from_fork+0x10/0x20
>> [  412.546958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  412.547478] rcu:     3-....: (99754 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=49377
>> [  412.548325]  (t=99768 jiffies g=505 q=784)
>> [  412.548686] Task dump for CPU 3:
>> [  412.548971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  412.549841] Call trace:
>> [  412.550058]  dump_backtrace+0x0/0x198
>> [  412.550389]  show_stack+0x14/0x60
>> [  412.550684]  sched_show_task+0x148/0x168
>> [  412.551031]  dump_cpu_task+0x40/0x4c
>> [  412.551350]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  412.551719]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  412.552095]  update_process_times+0x94/0xd8
>> [  412.552463]  tick_sched_handle.isra.0+0x30/0x50
>> [  412.552863]  tick_sched_timer+0x48/0x98
>> [  412.553201]  __hrtimer_run_queues+0x110/0x1b0
>> [  412.553583]  hrtimer_interrupt+0xe4/0x238
>> [  412.553936]  arch_timer_handler_phys+0x28/0x40
>> [  412.554331]  handle_percpu_devid_irq+0x80/0x130
>> [  412.554732]  generic_handle_domain_irq+0x38/0x58
>> [  412.555139]  gic_handle_irq+0x4c/0x110
>> [  412.555471]  call_on_irq_stack+0x28/0x3c
>> [  412.555817]  do_interrupt_handler+0x78/0x80
>> [  412.556186]  el1_interrupt+0x34/0x80
>> [  412.556502]  el1h_64_irq_handler+0x14/0x20
>> [  412.556864]  el1h_64_irq+0x74/0x78
>> [  412.557164]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  412.557587]  _raw_spin_lock+0x5c/0x68
>> [  412.557912]  panfrost_job_run+0x24c/0x3f8
>> [  412.558267]  drm_sched_main+0x130/0x390
>> [  412.558607]  kthread+0x174/0x180
>> [  412.558894]  ret_from_fork+0x10/0x20
>> [  475.558957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  475.559476] rcu:     3-....: (115505 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=57191
>> [  475.560329]  (t=115521 jiffies g=505 q=857)
>> [  475.560697] Task dump for CPU 3:
>> [  475.560981] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  475.561850] Call trace:
>> [  475.562067]  dump_backtrace+0x0/0x198
>> [  475.562398]  show_stack+0x14/0x60
>> [  475.562693]  sched_show_task+0x148/0x168
>> [  475.563041]  dump_cpu_task+0x40/0x4c
>> [  475.563360]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  475.563728]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  475.564104]  update_process_times+0x94/0xd8
>> [  475.564472]  tick_sched_handle.isra.0+0x30/0x50
>> [  475.564871]  tick_sched_timer+0x48/0x98
>> [  475.565209]  __hrtimer_run_queues+0x110/0x1b0
>> [  475.565592]  hrtimer_interrupt+0xe4/0x238
>> [  475.565946]  arch_timer_handler_phys+0x28/0x40
>> [  475.566339]  handle_percpu_devid_irq+0x80/0x130
>> [  475.566739]  generic_handle_domain_irq+0x38/0x58
>> [  475.567145]  gic_handle_irq+0x4c/0x110
>> [  475.567477]  call_on_irq_stack+0x28/0x3c
>> [  475.567822]  do_interrupt_handler+0x78/0x80
>> [  475.568190]  el1_interrupt+0x34/0x80
>> [  475.568507]  el1h_64_irq_handler+0x14/0x20
>> [  475.568869]  el1h_64_irq+0x74/0x78
>> [  475.569170]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  475.569593]  _raw_spin_lock+0x5c/0x68
>> [  475.569915]  panfrost_job_run+0x24c/0x3f8
>> [  475.570270]  drm_sched_main+0x130/0x390
>> [  475.570610]  kthread+0x174/0x180
>> [  475.570897]  ret_from_fork+0x10/0x20
>> [  538.570958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  538.571478] rcu:     3-....: (131256 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=64992
>> [  538.572333]  (t=131274 jiffies g=505 q=947)
>> [  538.572701] Task dump for CPU 3:
>> [  538.572986] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  538.573854] Call trace:
>> [  538.574070]  dump_backtrace+0x0/0x198
>> [  538.574402]  show_stack+0x14/0x60
>> [  538.574696]  sched_show_task+0x148/0x168
>> [  538.575044]  dump_cpu_task+0x40/0x4c
>> [  538.575363]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  538.575732]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  538.576109]  update_process_times+0x94/0xd8
>> [  538.576477]  tick_sched_handle.isra.0+0x30/0x50
>> [  538.576878]  tick_sched_timer+0x48/0x98
>> [  538.577216]  __hrtimer_run_queues+0x110/0x1b0
>> [  538.577599]  hrtimer_interrupt+0xe4/0x238
>> [  538.577953]  arch_timer_handler_phys+0x28/0x40
>> [  538.578346]  handle_percpu_devid_irq+0x80/0x130
>> [  538.578745]  generic_handle_domain_irq+0x38/0x58
>> [  538.579151]  gic_handle_irq+0x4c/0x110
>> [  538.579487]  call_on_irq_stack+0x28/0x3c
>> [  538.579833]  do_interrupt_handler+0x78/0x80
>> [  538.580201]  el1_interrupt+0x34/0x80
>> [  538.580518]  el1h_64_irq_handler+0x14/0x20
>> [  538.580880]  el1h_64_irq+0x74/0x78
>> [  538.581181]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  538.581603]  _raw_spin_lock+0x5c/0x68
>> [  538.581927]  panfrost_job_run+0x24c/0x3f8
>> [  538.582283]  drm_sched_main+0x130/0x390
>> [  538.582623]  kthread+0x174/0x180
>> [  538.582910]  ret_from_fork+0x10/0x20
>> [  601.582956] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  601.583477] rcu:     3-....: (147007 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=72788
>> [  601.584330]  (t=147027 jiffies g=505 q=1018)
>> [  601.584706] Task dump for CPU 3:
>> [  601.584991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  601.585859] Call trace:
>> [  601.586075]  dump_backtrace+0x0/0x198
>> [  601.586406]  show_stack+0x14/0x60
>> [  601.586701]  sched_show_task+0x148/0x168
>> [  601.587048]  dump_cpu_task+0x40/0x4c
>> [  601.587368]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  601.587736]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  601.588112]  update_process_times+0x94/0xd8
>> [  601.588480]  tick_sched_handle.isra.0+0x30/0x50
>> [  601.588880]  tick_sched_timer+0x48/0x98
>> [  601.589218]  __hrtimer_run_queues+0x110/0x1b0
>> [  601.589602]  hrtimer_interrupt+0xe4/0x238
>> [  601.589956]  arch_timer_handler_phys+0x28/0x40
>> [  601.590348]  handle_percpu_devid_irq+0x80/0x130
>> [  601.590747]  generic_handle_domain_irq+0x38/0x58
>> [  601.591153]  gic_handle_irq+0x4c/0x110
>> [  601.591486]  call_on_irq_stack+0x28/0x3c
>> [  601.591832]  do_interrupt_handler+0x78/0x80
>> [  601.592201]  el1_interrupt+0x34/0x80
>> [  601.592517]  el1h_64_irq_handler+0x14/0x20
>> [  601.592879]  el1h_64_irq+0x74/0x78
>> [  601.593181]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  601.593603]  _raw_spin_lock+0x5c/0x68
>> [  601.593927]  panfrost_job_run+0x24c/0x3f8
>> [  601.594283]  drm_sched_main+0x130/0x390
>> [  601.594623]  kthread+0x174/0x180
>> [  601.594910]  ret_from_fork+0x10/0x20
>> [  664.594957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  664.595479] rcu:     3-....: (162758 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=80598
>> [  664.596333]  (t=162780 jiffies g=505 q=1086)
>> [  664.596709] Task dump for CPU 3:
>> [  664.596993] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  664.597862] Call trace:
>> [  664.598078]  dump_backtrace+0x0/0x198
>> [  664.598409]  show_stack+0x14/0x60
>> [  664.598704]  sched_show_task+0x148/0x168
>> [  664.599052]  dump_cpu_task+0x40/0x4c
>> [  664.599369]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  664.599738]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  664.600114]  update_process_times+0x94/0xd8
>> [  664.600482]  tick_sched_handle.isra.0+0x30/0x50
>> [  664.600882]  tick_sched_timer+0x48/0x98
>> [  664.601220]  __hrtimer_run_queues+0x110/0x1b0
>> [  664.601604]  hrtimer_interrupt+0xe4/0x238
>> [  664.601958]  arch_timer_handler_phys+0x28/0x40
>> [  664.602352]  handle_percpu_devid_irq+0x80/0x130
>> [  664.602751]  generic_handle_domain_irq+0x38/0x58
>> [  664.603158]  gic_handle_irq+0x4c/0x110
>> [  664.603491]  call_on_irq_stack+0x28/0x3c
>> [  664.603838]  do_interrupt_handler+0x78/0x80
>> [  664.604206]  el1_interrupt+0x34/0x80
>> [  664.604522]  el1h_64_irq_handler+0x14/0x20
>> [  664.604883]  el1h_64_irq+0x74/0x78
>> [  664.605187]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  664.605609]  _raw_spin_lock+0x5c/0x68
>> [  664.605934]  panfrost_job_run+0x24c/0x3f8
>> [  664.606290]  drm_sched_main+0x130/0x390
>> [  664.606631]  kthread+0x174/0x180
>> [  664.606918]  ret_from_fork+0x10/0x20
>> [  727.606956] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  727.607476] rcu:     3-....: (178509 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=88380
>> [  727.608331]  (t=178533 jiffies g=505 q=1152)
>> [  727.608706] Task dump for CPU 3:
>> [  727.608990] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  727.609858] Call trace:
>> [  727.610074]  dump_backtrace+0x0/0x198
>> [  727.610403]  show_stack+0x14/0x60
>> [  727.610698]  sched_show_task+0x148/0x168
>> [  727.611047]  dump_cpu_task+0x40/0x4c
>> [  727.611366]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  727.611735]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  727.612112]  update_process_times+0x94/0xd8
>> [  727.612479]  tick_sched_handle.isra.0+0x30/0x50
>> [  727.612879]  tick_sched_timer+0x48/0x98
>> [  727.613216]  __hrtimer_run_queues+0x110/0x1b0
>> [  727.613601]  hrtimer_interrupt+0xe4/0x238
>> [  727.613955]  arch_timer_handler_phys+0x28/0x40
>> [  727.614348]  handle_percpu_devid_irq+0x80/0x130
>> [  727.614748]  generic_handle_domain_irq+0x38/0x58
>> [  727.615154]  gic_handle_irq+0x4c/0x110
>> [  727.615485]  call_on_irq_stack+0x28/0x3c
>> [  727.615832]  do_interrupt_handler+0x78/0x80
>> [  727.616200]  el1_interrupt+0x34/0x80
>> [  727.616517]  el1h_64_irq_handler+0x14/0x20
>> [  727.616879]  el1h_64_irq+0x74/0x78
>> [  727.617180]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  727.617602]  _raw_spin_lock+0x5c/0x68
>> [  727.617926]  panfrost_job_run+0x24c/0x3f8
>> [  727.618282]  drm_sched_main+0x130/0x390
>> [  727.618621]  kthread+0x174/0x180
>> [  727.618908]  ret_from_fork+0x10/0x20
>> [  790.618957] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  790.619475] rcu:     3-....: (194260 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=96141
>> [  790.620331]  (t=194286 jiffies g=505 q=1219)
>> [  790.620708] Task dump for CPU 3:
>> [  790.620991] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  790.621860] Call trace:
>> [  790.622075]  dump_backtrace+0x0/0x198
>> [  790.622405]  show_stack+0x14/0x60
>> [  790.622699]  sched_show_task+0x148/0x168
>> [  790.623049]  dump_cpu_task+0x40/0x4c
>> [  790.623367]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  790.623737]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  790.624113]  update_process_times+0x94/0xd8
>> [  790.624481]  tick_sched_handle.isra.0+0x30/0x50
>> [  790.624880]  tick_sched_timer+0x48/0x98
>> [  790.625218]  __hrtimer_run_queues+0x110/0x1b0
>> [  790.625603]  hrtimer_interrupt+0xe4/0x238
>> [  790.625957]  arch_timer_handler_phys+0x28/0x40
>> [  790.626350]  handle_percpu_devid_irq+0x80/0x130
>> [  790.626752]  generic_handle_domain_irq+0x38/0x58
>> [  790.627158]  gic_handle_irq+0x4c/0x110
>> [  790.627493]  call_on_irq_stack+0x28/0x3c
>> [  790.627839]  do_interrupt_handler+0x78/0x80
>> [  790.628208]  el1_interrupt+0x34/0x80
>> [  790.628526]  el1h_64_irq_handler+0x14/0x20
>> [  790.628888]  el1h_64_irq+0x74/0x78
>> [  790.629188]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  790.629613]  _raw_spin_lock+0x5c/0x68
>> [  790.629937]  panfrost_job_run+0x24c/0x3f8
>> [  790.630292]  drm_sched_main+0x130/0x390
>> [  790.630632]  kthread+0x174/0x180
>> [  790.630919]  ret_from_fork+0x10/0x20
>> [  853.630955] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  853.631478] rcu:     3-....: (210011 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=103932
>> [  853.632340]  (t=210039 jiffies g=505 q=1318)
>> [  853.632716] Task dump for CPU 3:
>> [  853.633000] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  853.633869] Call trace:
>> [  853.634084]  dump_backtrace+0x0/0x198
>> [  853.634418]  show_stack+0x14/0x60
>> [  853.634712]  sched_show_task+0x148/0x168
>> [  853.635061]  dump_cpu_task+0x40/0x4c
>> [  853.635379]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  853.635748]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  853.636124]  update_process_times+0x94/0xd8
>> [  853.636492]  tick_sched_handle.isra.0+0x30/0x50
>> [  853.636892]  tick_sched_timer+0x48/0x98
>> [  853.637230]  __hrtimer_run_queues+0x110/0x1b0
>> [  853.637613]  hrtimer_interrupt+0xe4/0x238
>> [  853.637965]  arch_timer_handler_phys+0x28/0x40
>> [  853.638358]  handle_percpu_devid_irq+0x80/0x130
>> [  853.638760]  generic_handle_domain_irq+0x38/0x58
>> [  853.639166]  gic_handle_irq+0x4c/0x110
>> [  853.639499]  call_on_irq_stack+0x28/0x3c
>> [  853.639845]  do_interrupt_handler+0x78/0x80
>> [  853.640213]  el1_interrupt+0x34/0x80
>> [  853.640530]  el1h_64_irq_handler+0x14/0x20
>> [  853.640892]  el1h_64_irq+0x74/0x78
>> [  853.641193]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  853.641616]  _raw_spin_lock+0x5c/0x68
>> [  853.641940]  panfrost_job_run+0x24c/0x3f8
>> [  853.642295]  drm_sched_main+0x130/0x390
>> [  853.642634]  kthread+0x174/0x180
>> [  853.642921]  ret_from_fork+0x10/0x20
>> [  916.642956] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  916.643477] rcu:     3-....: (225762 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=111709
>> [  916.644339]  (t=225792 jiffies g=505 q=1390)
>> [  916.644715] Task dump for CPU 3:
>> [  916.644999] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  916.645868] Call trace:
>> [  916.646083]  dump_backtrace+0x0/0x198
>> [  916.646414]  show_stack+0x14/0x60
>> [  916.646708]  sched_show_task+0x148/0x168
>> [  916.647055]  dump_cpu_task+0x40/0x4c
>> [  916.647373]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  916.647743]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  916.648119]  update_process_times+0x94/0xd8
>> [  916.648488]  tick_sched_handle.isra.0+0x30/0x50
>> [  916.648887]  tick_sched_timer+0x48/0x98
>> [  916.649225]  __hrtimer_run_queues+0x110/0x1b0
>> [  916.649608]  hrtimer_interrupt+0xe4/0x238
>> [  916.649962]  arch_timer_handler_phys+0x28/0x40
>> [  916.650355]  handle_percpu_devid_irq+0x80/0x130
>> [  916.650756]  generic_handle_domain_irq+0x38/0x58
>> [  916.651162]  gic_handle_irq+0x4c/0x110
>> [  916.651495]  call_on_irq_stack+0x28/0x3c
>> [  916.651842]  do_interrupt_handler+0x78/0x80
>> [  916.652210]  el1_interrupt+0x34/0x80
>> [  916.652527]  el1h_64_irq_handler+0x14/0x20
>> [  916.652889]  el1h_64_irq+0x74/0x78
>> [  916.653190]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  916.653614]  _raw_spin_lock+0x5c/0x68
>> [  916.653937]  panfrost_job_run+0x24c/0x3f8
>> [  916.654293]  drm_sched_main+0x130/0x390
>> [  916.654632]  kthread+0x174/0x180
>> [  916.654920]  ret_from_fork+0x10/0x20
>>
>> -- 
>> Pengutronix e.K.                           |                             |
>> Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
>> 31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
>> Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
@ 2021-11-15 20:32         ` Christian König
  0 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-11-15 20:32 UTC (permalink / raw)
  To: Daniel Vetter, Sascha Hauer
  Cc: linaro-mm-sig, dri-devel, linux-media, intel-gfx

On 15.11.21 at 15:08, Daniel Vetter wrote:
> On Mon, Nov 15, 2021 at 03:03:53PM +0100, Sascha Hauer wrote:
>> Hi,
>>
>> On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
>>> Simplifying the code a bit.
>>>
>>> v2: use dma_resv_for_each_fence
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
>>>   1 file changed, 6 insertions(+), 20 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>>> index 042c16b5d54a..5bc5f775abe1 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
>>>   					    struct drm_gem_object *obj,
>>>   					    bool write)
>>>   {
>>> +	struct dma_resv_iter cursor;
>>> +	struct dma_fence *fence;
>>>   	int ret;
>>> -	struct dma_fence **fences;
>>> -	unsigned int i, fence_count;
>>> -
>>> -	if (!write) {
>>> -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
>>> -
>>> -		return drm_sched_job_add_dependency(job, fence);
>>> -	}
>>> -
>>> -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
>>> -	if (ret || !fence_count)
>>> -		return ret;
>>>   
>>> -	for (i = 0; i < fence_count; i++) {
>>> -		ret = drm_sched_job_add_dependency(job, fences[i]);
>>> +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
>>> +		ret = drm_sched_job_add_dependency(job, fence);
>>>   		if (ret)
>>> -			break;
>>> +			return ret;
>>>   	}
>>> -
>>> -	for (; i < fence_count; i++)
>>> -		dma_fence_put(fences[i]);
>>> -	kfree(fences);
>>> -	return ret;
>>> +	return 0;
>>>   }
>>>   EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
>>>   
>> This patch lets the panfrost driver explode on v5.16-rc1 with the
>> following. I didn't bisect it, but it goes away when I revert this
>> patch. I only started weston, nothing more.
>>
>> Any idea what goes wrong here?
> Should be fixed in 13e9e30cafea1, but Christian pushed it to the wrong
> branch so it missed -rc1.
>
> Christian, this needs to go into drm-misc-fixes, pls cherry-pick it over.

The problem is it doesn't apply to drm-misc-fixes. Looks like the branch 
wasn't updated.

What's going on here?

Christian.
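
The crash signature quoted below ("Fence drm_sched:pan_js:a:1 released with
pending signals!", followed by refcount underflow warnings under
dma_resv_add_excl_fence()) is consistent with a fence reference being dropped
once too often. The new iterators only lend the fence to the loop body: the
locked dma_resv_for_each_fence() used in the patch relies on the reservation
lock, the unlocked variant on a temporary reference held by the cursor. Any
consumer that keeps a fence beyond the walk, or hands it to something that
will drop a reference later, therefore has to take its own reference with
dma_fence_get() inside the loop. A minimal sketch of that rule follows; the
helper name is made up, it is not the actual fix commit, and it assumes the
caller holds the reservation lock and the bool-argument form of the macro as
used in the patch above:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/*
 * Illustrative only: copy the fences of a reservation object into an
 * array owned by the caller.  dma_resv_for_each_fence() only lends the
 * fence to the loop body, so an extra reference is taken with
 * dma_fence_get() before it is stored; the caller must later drop each
 * reference with dma_fence_put().
 */
static unsigned int copy_resv_fences(struct dma_resv *resv, bool all_fences,
				     struct dma_fence **out, unsigned int max)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_for_each_fence(&cursor, resv, all_fences, fence) {
		if (count == max)
			break;
		out[count++] = dma_fence_get(fence);
	}

	return count;
}

The actual fix referenced above (13e9e30cafea1) may differ in detail; the
point is only the dma_fence_get()/dma_fence_put() pairing around whatever
consumes the fence.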

> -Daniel
>
>> Sascha
>>
>> [   12.512606] Fence drm_sched:pan_js:a:1 released with pending signals!
>> [   12.513225] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   12.514056] Modules linked in:
>> [   12.514334] CPU: 3 PID: 257 Comm: weston Not tainted 5.16.0-rc1-00043-g794870164a37 #443
>> [   12.514621] ------------[ cut here ]------------
>> [   12.515040] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.515044] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.515049] pc : dma_fence_release+0xac/0xe8
>> [   12.515056] lr : dma_fence_release+0xac/0xe8
>> [   12.515061] sp : ffff8000123ebb20
>> [   12.515064] x29: ffff8000123ebb20 x28: ffff8000123ebd58
>> [   12.515518] refcount_t: addition on 0; use-after-free.
>> [   12.516015]  x27: 0000000000000000
>> [   12.516668] WARNING: CPU: 0 PID: 145 at lib/refcount.c:25 refcount_warn_saturate+0x98/0x140
>> [   12.516992] x26: 0000000000000001
>> [   12.517366] Modules linked in:
>> [   12.517654]  x25: ffff000004b051c0
>> [   12.518108]
>> [   12.518555]  x24: 0000000000000000
>> [   12.518854] CPU: 0 PID: 145 Comm: irq/25-panfrost Not tainted 5.16.0-rc1-00043-g794870164a37 #443
>> [   12.519576]
>> [   12.519866] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.520133] x23: 0000000000000000
>> [   12.520430] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.520559]  x22: ffff800010d41b78
>> [   12.520856] pc : refcount_warn_saturate+0x98/0x140
>> [   12.521625]  x21: ffff000004b05050
>> [   12.521755] lr : refcount_warn_saturate+0x98/0x140
>> [   12.522299]
>> [   12.522588] sp : ffff8000122b3bc0
>> [   12.523192] x20: ffff000004b05040
>> [   12.523489] x29: ffff8000122b3bc0
>> [   12.523906]  x19: ffff000004b05078
>> [   12.524203]  x28: 0000000000000000
>> [   12.524620]  x18: 0000000000000010
>> [   12.524751]  x27: ffff000003791880
>> [   12.525040]
>> [   12.525329]
>> [   12.525618] x17: 0000000000000000
>> [   12.525915] x26: ffff8000122b3d30
>> [   12.526212]  x16: 0000000000000000
>> [   12.526509]  x25: 0000000000000001
>> [   12.526806]  x15: ffff0000050e2dc0
>> [   12.526937]  x24: ffff000003791a10
>> [   12.527067]
>> [   12.527357]
>> [   12.527646] x14: 00000000000001b5
>> [   12.527942] x23: 0000000000000000
>> [   12.528240]  x13: ffff0000050e2dc0
>> [   12.528536]  x22: ffff000003505280
>> [   12.528833]  x12: 00000000ffffffea
>> [   12.528964]  x21: ffff000003a2a220
>> [   12.529095]
>> [   12.529384]
>> [   12.529673] x11: ffff800011761ec8
>> [   12.529970] x20: ffff000004b05078
>> [   12.530267]  x10: ffff8000115e1e88
>> [   12.530564]  x19: ffff000004b05000
>> [   12.530861]  x9 : ffff8000115e1ee0
>> [   12.530992]  x18: 0000000000000010
>> [   12.531123]
>> [   12.531412]
>> [   12.531701] x8 : 000000000017ffe8
>> [   12.531998] x17: 0000000000500600
>> [   12.532294]  x7 : c0000000fffeffff
>> [   12.532591]  x16: 0000000000000000
>> [   12.532888]  x6 : 0000000000000001
>> [   12.533019]  x15: ffff000003505700
>> [   12.533150]
>> [   12.533439]
>> [   12.533728] x5 : ffff00007fb8c9a0
>> [   12.534025] x14: 0000000000000000
>> [   12.534322]  x4 : 0000000000000000
>> [   12.534619]  x13: 292d2d3d45505954
>> [   12.534914]  x3 : 0000000000000001
>> [   12.535045]  x12: 4220534253532d20
>> [   12.535176]
>> [   12.535465]
>> [   12.535754] x2 : ffff00007fb8c9a8
>> [   12.536051] x11: 5449442d204f4354
>> [   12.536347]  x1 : ea6e0584a53f2200
>> [   12.536643]  x10: 2d204f41552d204e
>> [   12.536941]  x0 : 0000000000000000
>> [   12.537073]  x9 : 4e41502b20666961
>> [   12.537203]
>> [   12.537492]
>> [   12.537782] Call trace:
>> [   12.538078] x8 : 642076635a6e2820
>> [   12.538377]  dma_fence_release+0xac/0xe8
>> [   12.538671]  x7 : 205d343430353135
>> [   12.538967]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.539098]  x6 : 352e32312020205b
>> [   12.539230]  panfrost_job_push+0x1bc/0x200
>> [   12.539442]
>> [   12.539732]  panfrost_ioctl_submit+0x358/0x438
>> [   12.540073] x5 : ffff00007fb539a0
>> [   12.540370]  drm_ioctl_kernel+0xb8/0x170
>> [   12.540771]  x4 : 0000000000000000
>> [   12.541069]  drm_ioctl+0x214/0x450
>> [   12.541424]  x3 : 0000000000000001
>> [   12.541556]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.541943]
>> [   12.542233]  invoke_syscall+0x40/0xf8
>> [   12.542573] x2 : ffff00007fb539a8
>> [   12.542871]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.543167]  x1 : 0ac4fb7a0680bb00
>> [   12.543465]  do_el0_svc+0x20/0x80
>> [   12.543805]  x0 : 0000000000000000
>> [   12.543936]  el0_svc+0x1c/0x50
>> [   12.544255]
>> [   12.544544]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.544955] Call trace:
>> [   12.545250]  el0t_64_sync+0x16c/0x170
>> [   12.545540]  refcount_warn_saturate+0x98/0x140
>> [   12.545837] ---[ end trace ba74542f51246288 ]---
>> [   12.546103]  drm_sched_job_done.isra.0+0x154/0x158
>> [   12.546285] ------------[ cut here ]------------
>> [   12.546598]  drm_sched_job_done_cb+0x10/0x18
>> [   12.546813] refcount_t: underflow; use-after-free.
>> [   12.547133]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   12.547533] WARNING: CPU: 3 PID: 257 at lib/refcount.c:28 refcount_warn_saturate+0xec/0x140
>> [   12.547920]  dma_fence_signal_locked+0x20/0x30
>> [   12.548336] Modules linked in:
>> [   12.548737]  panfrost_job_handle_done+0x34/0x50
>> [   12.549110]
>> [   12.549525]  panfrost_job_handle_irqs+0x358/0x570
>> [   12.549997] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.550719]  panfrost_job_irq_handler_thread+0x18/0x40
>> [   12.551108] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.551373]  irq_thread_fn+0x28/0x98
>> [   12.551769] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.551899]  irq_thread+0x12c/0x230
>> [   12.552309] pc : refcount_warn_saturate+0xec/0x140
>> [   12.553131]  kthread+0x174/0x180
>> [   12.553578] lr : refcount_warn_saturate+0xec/0x140
>> [   12.554121]  ret_from_fork+0x10/0x20
>> [   12.554432] sp : ffff8000123ebaa0
>> [   12.555038] ---[ end trace ba74542f51246289 ]---
>> [   12.555340] x29: ffff8000123ebaa0 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   12.558083] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   12.558711] x23: 0000000000000000 x22: ffff0000050e2940 x21: ffff8000123ebb08
>> [   12.559337] x20: ffff000004b05040 x19: ffff000004d85468 x18: 0000000000000010
>> [   12.559965] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   12.560593] x14: 0000000000000000 x13: 30343178302f3839 x12: 78302b6574617275
>> [   12.561222] x11: 7461735f6e726177 x10: 5f746e756f636665 x9 : 3178302f38397830
>> [   12.561849] x8 : 2b65746172757461 x7 : 205d303435353435 x6 : 352e32312020205b
>> [   12.562477] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.563104] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.563733] Call trace:
>> [   12.563950]  refcount_warn_saturate+0xec/0x140
>> [   12.564344]  drm_sched_entity_wakeup+0x98/0xa0
>> [   12.564736]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   12.565216]  dma_fence_release+0xd4/0xe8
>> [   12.565564]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.565970]  panfrost_job_push+0x1bc/0x200
>> [   12.566333]  panfrost_ioctl_submit+0x358/0x438
>> [   12.566726]  drm_ioctl_kernel+0xb8/0x170
>> [   12.567072]  drm_ioctl+0x214/0x450
>> [   12.567373]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.567721]  invoke_syscall+0x40/0xf8
>> [   12.568047]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.568463]  do_el0_svc+0x20/0x80
>> [   12.568755]  el0_svc+0x1c/0x50
>> [   12.569030]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.569399]  el0t_64_sync+0x16c/0x170
>> [   12.569724] ---[ end trace ba74542f5124628a ]---
>> [   12.595086] ------------[ cut here ]------------
>> [   12.595530] Fence drm_sched:pan_js:a:2 released with pending signals!
>> [   12.596124] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   12.596934] Modules linked in:
>> [   12.597217] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.598045] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.598593] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.599202] pc : dma_fence_release+0xac/0xe8
>> [   12.599584] lr : dma_fence_release+0xac/0xe8
>> [   12.599960] sp : ffff8000123ebb20
>> [   12.600252] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   12.600878] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
>> [   12.601503] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   12.602138] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   12.602782] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   12.603409] x14: 000000000000025c x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   12.604035] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   12.604662] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   12.605288] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.605914] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.606542] Call trace:
>> [   12.606760]  dma_fence_release+0xac/0xe8
>> [   12.607111]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   12.607517]  panfrost_job_push+0x1bc/0x200
>> [   12.607882]  panfrost_ioctl_submit+0x358/0x438
>> [   12.608274]  drm_ioctl_kernel+0xb8/0x170
>> [   12.608622]  drm_ioctl+0x214/0x450
>> [   12.608921]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.609269]  invoke_syscall+0x40/0xf8
>> [   12.609597]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.610011]  do_el0_svc+0x20/0x80
>> [   12.610304]  el0_svc+0x1c/0x50
>> [   12.610577]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.610946]  el0t_64_sync+0x16c/0x170
>> [   12.611276] ---[ end trace ba74542f5124628b ]---
>> [   12.612869] ------------[ cut here ]------------
>> [   12.613288] refcount_t: saturated; leaking memory.
>> [   12.613730] WARNING: CPU: 3 PID: 257 at lib/refcount.c:19 refcount_warn_saturate+0xc0/0x140
>> [   12.614476] Modules linked in:
>> [   12.614753] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   12.615586] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   12.616154] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   12.616773] pc : refcount_warn_saturate+0xc0/0x140
>> [   12.617200] lr : refcount_warn_saturate+0xc0/0x140
>> [   12.617622] sp : ffff8000123eba60
>> [   12.617913] x29: ffff8000123eba60 x28: ffff8000123ebc00 x27: ffff000004cdbc00
>> [   12.618548] x26: 0000000000000002 x25: ffff000006f4c100 x24: 0000000000000000
>> [   12.619195] x23: ffff000004b051c0 x22: ffff000005b16100 x21: ffff000006487900
>> [   12.619840] x20: 0000000000000001 x19: ffff000004b051f8 x18: 0000000000000010
>> [   12.620486] x17: 00480000000007a0 x16: 0791078f07a00780 x15: ffff0000050e2dc0
>> [   12.621120] x14: 000000000000027f x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   12.621746] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   12.622372] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   12.623000] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   12.623626] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   12.624256] Call trace:
>> [   12.624474]  refcount_warn_saturate+0xc0/0x140
>> [   12.624867]  drm_gem_plane_helper_prepare_fb+0x118/0x140
>> [   12.625336]  drm_atomic_helper_prepare_planes+0x104/0x1a8
>> [   12.625811]  drm_atomic_helper_commit+0x80/0x360
>> [   12.626218]  drm_atomic_nonblocking_commit+0x48/0x58
>> [   12.626656]  drm_mode_atomic_ioctl+0x9ec/0xb88
>> [   12.627050]  drm_ioctl_kernel+0xb8/0x170
>> [   12.627397]  drm_ioctl+0x214/0x450
>> [   12.627698]  __arm64_sys_ioctl+0xa0/0xe0
>> [   12.628046]  invoke_syscall+0x40/0xf8
>> [   12.628372]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   12.628787]  do_el0_svc+0x20/0x80
>> [   12.629079]  el0_svc+0x1c/0x50
>> [   12.629354]  el0t_64_sync_handler+0xa8/0xb0
>> [   12.629723]  el0t_64_sync+0x16c/0x170
>> [   12.630048] ---[ end trace ba74542f5124628c ]---
>> [   12.683010] inno-video-combo-phy fe850000.video-phy: fin=24000000, rate=996000000, fout=996000000, prediv=1, fbdiv=83
>> [   12.684140] rockchip-drm display-subsystem: [drm] Update mode to 1920x1080p60, type: 11 for vp0, output 0x00000800  HDMI0
>> [   12.685576] rockchip-drm display-subsystem: [drm] Update mode to 1080x1920p60, type: 16 for vp1, output 0x00000020 MIPI0
>> [   12.910994] panel_simple_xfer_dsi_cmd_seq:-----------------> enter
>> [   13.103035] panel_simple_xfer_dsi_cmd_seq:<-----------------leaver
>> [   13.296693] ------------[ cut here ]------------
>> [   13.297140] Fence drm_sched:pan_js:a:3 released with pending signals!
>> [   13.297743] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.298560] Modules linked in:
>> [   13.298840] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.299670] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.300219] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.300830] pc : dma_fence_release+0xac/0xe8
>> [   13.301208] lr : dma_fence_release+0xac/0xe8
>> [   13.301585] sp : ffff8000123ebb20
>> [   13.301877] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.302507] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   13.303134] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.303761] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.304388] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.305014] x14: 00000000000002a9 x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.305641] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.306268] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.306894] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.307519] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.308149] Call trace:
>> [   13.308367]  dma_fence_release+0xac/0xe8
>> [   13.308713]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.309119]  panfrost_job_push+0x1bc/0x200
>> [   13.309483]  panfrost_ioctl_submit+0x358/0x438
>> [   13.309875]  drm_ioctl_kernel+0xb8/0x170
>> [   13.310221]  drm_ioctl+0x214/0x450
>> [   13.310521]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.310868]  invoke_syscall+0x40/0xf8
>> [   13.311195]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.311609]  do_el0_svc+0x20/0x80
>> [   13.311903]  el0_svc+0x1c/0x50
>> [   13.312177]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.312545]  el0t_64_sync+0x16c/0x170
>> [   13.312869] ---[ end trace ba74542f5124628d ]---
>> [   13.340454] ------------[ cut here ]------------
>> [   13.340897] Fence drm_sched:pan_js:a:4 released with pending signals!
>> [   13.341505] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.342318] Modules linked in:
>> [   13.342598] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.343426] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.343975] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.344584] pc : dma_fence_release+0xac/0xe8
>> [   13.344961] lr : dma_fence_release+0xac/0xe8
>> [   13.345338] sp : ffff8000123ebb20
>> [   13.345629] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.346257] x26: 0000000000000001 x25: ffff000004b05280 x24: 0000000000000000
>> [   13.346884] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.347511] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.348138] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.348764] x14: 00000000000002cc x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.349391] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.350019] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.350646] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.351272] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.351900] Call trace:
>> [   13.352116]  dma_fence_release+0xac/0xe8
>> [   13.352463]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.352869]  panfrost_job_push+0x1bc/0x200
>> [   13.353232]  panfrost_ioctl_submit+0x358/0x438
>> [   13.353624]  drm_ioctl_kernel+0xb8/0x170
>> [   13.353971]  drm_ioctl+0x214/0x450
>> [   13.354269]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.354616]  invoke_syscall+0x40/0xf8
>> [   13.354942]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.355356]  do_el0_svc+0x20/0x80
>> [   13.355650]  el0_svc+0x1c/0x50
>> [   13.355925]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.356293]  el0t_64_sync+0x16c/0x170
>> [   13.356618] ---[ end trace ba74542f5124628e ]---
>> [   13.379841] ------------[ cut here ]------------
>> [   13.380285] Fence drm_sched:pan_js:a:5 released with pending signals!
>> [   13.380877] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.381680] Modules linked in:
>> [   13.381953] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.382781] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.383328] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.383937] pc : dma_fence_release+0xac/0xe8
>> [   13.384314] lr : dma_fence_release+0xac/0xe8
>> [   13.384690] sp : ffff8000123ebb20
>> [   13.384980] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.385608] x26: 0000000000000001 x25: ffff000004b051c0 x24: 0000000000000000
>> [   13.386235] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05050
>> [   13.386860] x20: ffff000004b05040 x19: ffff000004b05078 x18: 0000000000000010
>> [   13.387487] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.388114] x14: 00000000000002ef x13: ffff0000050e2dc0 x12: 00000000ffffffea
>> [   13.388741] x11: ffff800011761ec8 x10: ffff8000115e1e88 x9 : ffff8000115e1ee0
>> [   13.389368] x8 : 000000000017ffe8 x7 : c0000000fffeffff x6 : 0000000000000001
>> [   13.389994] x5 : ffff00007fb8c9a0 x4 : 0000000000000000 x3 : 0000000000000001
>> [   13.390621] x2 : ffff00007fb8c9a8 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.391247] Call trace:
>> [   13.391464]  dma_fence_release+0xac/0xe8
>> [   13.391811]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.392217]  panfrost_job_push+0x1bc/0x200
>> [   13.392581]  panfrost_ioctl_submit+0x358/0x438
>> [   13.392972]  drm_ioctl_kernel+0xb8/0x170
>> [   13.393319]  drm_ioctl+0x214/0x450
>> [   13.393619]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.393967]  invoke_syscall+0x40/0xf8
>> [   13.394294]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.394708]  do_el0_svc+0x20/0x80
>> [   13.395002]  el0_svc+0x1c/0x50
>> [   13.395275]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.395643]  el0t_64_sync+0x16c/0x170
>> [   13.395968] ---[ end trace ba74542f5124628f ]---
>> [   13.398130] ------------[ cut here ]------------
>> [   13.398566] Fence drm_sched:pan_js:a:6 released with pending signals!
>> [   13.399206] WARNING: CPU: 3 PID: 257 at drivers/dma-buf/dma-fence.c:526 dma_fence_release+0xac/0xe8
>> [   13.400011] Modules linked in:
>> [   13.400286] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.401114] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.401660] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.402269] pc : dma_fence_release+0xac/0xe8
>> [   13.402646] lr : dma_fence_release+0xac/0xe8
>> [   13.403024] sp : ffff8000123ebb20
>> [   13.403316] x29: ffff8000123ebb20 x28: ffff8000123ebd58 x27: 0000000000000000
>> [   13.403943] x26: 0000000000000001 x25: ffff000004b05400 x24: 0000000000000000
>> [   13.404570] x23: 0000000000000000 x22: ffff800010d41b78 x21: ffff000004b05350
>> [   13.405197] x20: ffff000004b05340 x19: ffff000004b05378 x18: 0000000000000010
>> [   13.405825] x17: 0000000000000000 x16: 0000000000000000 x15: ffff0000050e2dc0
>> [   13.406451] x14: 0000000000000000 x13: 00000000000000f5 x12: 00000000000001d3
>> [   13.407076] x11: 000000000003f188 x10: 00000000000009a0 x9 : ffff8000123eb8a0
>> [   13.407703] x8 : ffff0000050e3340 x7 : ffff00007fb92a80 x6 : 0000000000000000
>> [   13.408329] x5 : 0000000000000000 x4 : ffff00007fb8c9a0 x3 : ffff00007fb8f950
>> [   13.408955] x2 : ffff00007fb8c9a0 x1 : ea6e0584a53f2200 x0 : 0000000000000000
>> [   13.409583] Call trace:
>> [   13.409800]  dma_fence_release+0xac/0xe8
>> [   13.410146]  dma_resv_add_excl_fence+0x1b8/0x1f0
>> [   13.410553]  panfrost_job_push+0x1bc/0x200
>> [   13.410917]  panfrost_ioctl_submit+0x358/0x438
>> [   13.411309]  drm_ioctl_kernel+0xb8/0x170
>> [   13.411656]  drm_ioctl+0x214/0x450
>> [   13.411956]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.412303]  invoke_syscall+0x40/0xf8
>> [   13.412628]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.413042]  do_el0_svc+0x20/0x80
>> [   13.413335]  el0_svc+0x1c/0x50
>> [   13.413607]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.413976]  el0t_64_sync+0x16c/0x170
>> [   13.414298] ---[ end trace ba74542f51246290 ]---
>> [   13.430129] ------------[ cut here ]------------
>> [   13.430226] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
>> [   13.430557] refcount_t: saturated; leaking memory.
>> [   13.431321] Mem abort info:
>> [   13.431324]   ESR = 0x96000044
>> [   13.431326]   EC = 0x25: DABT (current EL), IL = 32 bits
>> [   13.431330]   SET = 0, FnV = 0
>> [   13.431333]   EA = 0, S1PTW = 0
>> [   13.431335]   FSC = 0x04: level 0 translation fault
>> [   13.431337] Data abort info:
>> [   13.431339]   ISV = 0, ISS = 0x00000044
>> [   13.431340]   CM = 0, WnR = 1
>> [   13.431343] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000004978000
>> [   13.431346] [0000000000000008] pgd=0000000000000000, p4d=0000000000000000
>> [   13.431354] Internal error: Oops: 96000044 [#1] PREEMPT SMP
>> [   13.431359] Modules linked in:
>> [   13.431364] CPU: 0 PID: 145 Comm: irq/25-panfrost Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.431370] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.431374] pstate: 604000c9 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.431379] pc : dma_fence_signal_timestamp_locked+0x78/0x108
>> [   13.431854] WARNING: CPU: 3 PID: 257 at lib/refcount.c:22 refcount_warn_saturate+0x6c/0x140
>> [   13.432059] lr : dma_fence_signal+0x30/0x60
>> [   13.432327] Modules linked in:
>> [   13.432789] sp : ffff8000122b3b50
>> [   13.433057]
>> [   13.433331] x29: ffff8000122b3b50
>> [   13.433757] CPU: 3 PID: 257 Comm: weston Tainted: G        W         5.16.0-rc1-00043-g794870164a37 #443
>> [   13.434008]  x28: 0000000000000000
>> [   13.434342] Hardware name: Rockchip RK3568 EVB1 DDR4 V10 Board (DT)
>> [   13.434601]  x27: ffff000003791880
>> [   13.435163] pstate: 40400009 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
>> [   13.435751]
>> [   13.435753] x26: ffff8000122b3d30
>> [   13.436237] pc : refcount_warn_saturate+0x6c/0x140
>> [   13.436504]  x25: 0000000000000001
>> [   13.437393] lr : refcount_warn_saturate+0x6c/0x140
>> [   13.437938]  x24: ffff000003791a10
>> [   13.438542] sp : ffff8000123ebb40
>> [   13.439042]
>> [   13.439767] x29: ffff8000123ebb40
>> [   13.440130] x23: 0000000000000000
>> [   13.440398]  x28: ffff8000123ebd58
>> [   13.440687]  x22: ffff000003505280
>> [   13.440819]  x27: 0000000000000000
>> [   13.441108]  x21: ffff8000122b3b88
>> [   13.441931]
>> [   13.442228]
>> [   13.442773] x26: 0000000000000001
>> [   13.443070] x20: ffff000004b051c0
>> [   13.443674]  x25: ffff000004b051c0
>> [   13.443806]  x19: ffff000004b051c0
>> [   13.444095]  x24: 0000000000000000
>> [   13.444513]  x18: 0000000000000000
>> [   13.444811]
>> [   13.445227]
>> [   13.445524] x23: 0000000000000000
>> [   13.445814] x17: 3837783028203032
>> [   13.445945]  x22: ffff000004b051c0
>> [   13.446236]  x16: 3139323835323120
>> [   13.446525]  x21: ffff000004d73100
>> [   13.446822]  x15: 00000205aa24947a
>> [   13.447120]
>> [   13.447417]
>> [   13.447715] x20: ffff000004b05400
>> [   13.447846] x14: 0000000000000326
>> [   13.447977]  x19: 00000000ffffffff
>> [   13.448266]  x13: 0000000000000000
>> [   13.448555]  x18: 0000000000000010
>> [   13.448851]  x12: 0000000000000000
>> [   13.449148]
>> [   13.449446]
>> [   13.449743] x17: 0000000000000000
>> [   13.449874] x11: 0000000000000001
>> [   13.450006]  x16: 0000000000000000
>> [   13.450296]  x10: ffff8000122b3d48
>> [   13.450585]  x15: 000060978994e822
>> [   13.450882]  x9 : 00000000000019e0
>> [   13.451179]
>> [   13.451477]
>> [   13.451774] x14: 00000000000000b6
>> [   13.451905] x8 : ffff8000122b3d78
>> [   13.452037]  x13: 00000000000000b6
>> [   13.452326]  x7 : 0000000000000000
>> [   13.452614]  x12: 0000000000000000
>> [   13.452912]  x6 : 000000001fcf847e
>> [   13.453209]
>> [   13.453506]
>> [   13.453803] x11: 0000000000000001
>> [   13.453934] x5 : 00ffffffffffffff
>> [   13.454066]  x10: 00000000000009a0
>> [   13.454356]  x4 : 0015ef3c03fd7c00
>> [   13.454643]  x9 : ffff8000123eb8c0
>> [   13.454941]  x3 : 0000000000000018
>> [   13.455238]
>> [   13.455536]
>> [   13.455833] x8 : ffff0000050e3340
>> [   13.455965] x2 : ffff000004b051f0
>> [   13.456096]  x7 : ffff00007fb92a80
>> [   13.456386]  x1 : 000000032053be4d
>> [   13.456676]  x6 : 0000000000000115
>> [   13.456973]  x0 : 0000000000000000
>> [   13.457271]
>> [   13.457568]
>> [   13.457866] x5 : 0000000000000000
>> [   13.457998] Call trace:
>> [   13.458128]  x4 : ffff00007fb8c9a0
>> [   13.458419]  dma_fence_signal_timestamp_locked+0x78/0x108
>> [   13.458707]  x3 : ffff00007fb8f950
>> [   13.459005]  dma_fence_signal+0x30/0x60
>> [   13.459302]
>> [   13.459600]  drm_sched_fence_finished+0x10/0x18
>> [   13.459897] x2 : ffff00007fb8c9a0
>> [   13.460029]  drm_sched_job_done.isra.0+0xac/0x158
>> [   13.460159]  x1 : ea6e0584a53f2200
>> [   13.460449]  drm_sched_job_done_cb+0x10/0x18
>> [   13.460738]  x0 : 0000000000000000
>> [   13.461036]  dma_fence_signal_timestamp_locked+0xcc/0x108
>> [   13.461333]
>> [   13.461631]  dma_fence_signal_locked+0x20/0x30
>> [   13.461929] Call trace:
>> [   13.462060]  panfrost_job_handle_done+0x34/0x50
>> [   13.462192]  refcount_warn_saturate+0x6c/0x140
>> [   13.462481]  panfrost_job_handle_irqs+0x358/0x570
>> [   13.462695]  dma_resv_add_excl_fence+0x1d4/0x1f0
>> [   13.462992]  panfrost_job_irq_handler_thread+0x18/0x40
>> [   13.463462]  panfrost_job_push+0x1bc/0x200
>> [   13.463760]  irq_thread_fn+0x28/0x98
>> [   13.464094]  panfrost_ioctl_submit+0x358/0x438
>> [   13.464225]  irq_thread+0x12c/0x230
>> [   13.464620]  drm_ioctl_kernel+0xb8/0x170
>> [   13.464909]  kthread+0x174/0x180
>> [   13.465319]  drm_ioctl+0x214/0x450
>> [   13.465617]  ret_from_fork+0x10/0x20
>> [   13.465988]  __arm64_sys_ioctl+0xa0/0xe0
>> [   13.466290] Code: 3707fe20 f9400a80 9100e3f5 f9001fe0 (f9000415)
>> [   13.466756]  invoke_syscall+0x40/0xf8
>> [   13.466891] ---[ end trace ba74542f51246291 ]---
>> [   13.467275]  el0_svc_common.constprop.0+0xc0/0xe0
>> [   13.467491] note: irq/25-panfrost[145] exited with preempt_count 2
>> [   13.467883]  do_el0_svc+0x20/0x80
>> [   13.468375] genirq: exiting task "irq/25-panfrost" (145) is an active IRQ thread (irq 25)
>> [   13.468678]  el0_svc+0x1c/0x50
>> [   13.475908]  el0t_64_sync_handler+0xa8/0xb0
>> [   13.476277]  el0t_64_sync+0x16c/0x170
>> [   13.476601] ---[ end trace ba74542f51246292 ]---
>> [   13.986987] panfrost fde60000.gpu: gpu sched timeout, js=0, config=0x7300, status=0x0, head=0x8de9f40, tail=0x8de9f40, sched_job=(____ptrval____)
>> [   14.462959] sched: RT throttling activated
>> [   34.474959] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [   34.475481] rcu:     3-....: (5248 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=2602
>> [   34.476312]  (t=5250 jiffies g=505 q=301)
>> [   34.476667] Task dump for CPU 3:
>> [   34.476951] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [   34.477820] Call trace:
>> [   34.478035]  dump_backtrace+0x0/0x198
>> [   34.478365]  show_stack+0x14/0x60
>> [   34.478659]  sched_show_task+0x148/0x168
>> [   34.479008]  dump_cpu_task+0x40/0x4c
>> [   34.479326]  rcu_dump_cpu_stacks+0xe8/0x128
>> [   34.479696]  rcu_sched_clock_irq+0x9bc/0xd38
>> [   34.480072]  update_process_times+0x94/0xd8
>> [   34.480440]  tick_sched_handle.isra.0+0x30/0x50
>> [   34.480840]  tick_sched_timer+0x48/0x98
>> [   34.481178]  __hrtimer_run_queues+0x110/0x1b0
>> [   34.481562]  hrtimer_interrupt+0xe4/0x238
>> [   34.481917]  arch_timer_handler_phys+0x28/0x40
>> [   34.482310]  handle_percpu_devid_irq+0x80/0x130
>> [   34.482710]  generic_handle_domain_irq+0x38/0x58
>> [   34.483116]  gic_handle_irq+0x4c/0x110
>> [   34.483450]  call_on_irq_stack+0x28/0x3c
>> [   34.483798]  do_interrupt_handler+0x78/0x80
>> [   34.484166]  el1_interrupt+0x34/0x80
>> [   34.484484]  el1h_64_irq_handler+0x14/0x20
>> [   34.484846]  el1h_64_irq+0x74/0x78
>> [   34.485148]  queued_spin_lock_slowpath+0x118/0x3c0
>> [   34.485568]  _raw_spin_lock+0x5c/0x68
>> [   34.485895]  panfrost_job_run+0x24c/0x3f8
>> [   34.486250]  drm_sched_main+0x130/0x390
>> [   34.486591]  kthread+0x174/0x180
>> [   34.486878]  ret_from_fork+0x10/0x20
>> [   35.810989] vcc3v3_lcd1_n: disabling
>> [   97.486958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [   97.487479] rcu:     3-....: (20999 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=10402
>> [   97.488326]  (t=21003 jiffies g=505 q=379)
>> [   97.488687] Task dump for CPU 3:
>> [   97.488971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [   97.489842] Call trace:
>> [   97.490056]  dump_backtrace+0x0/0x198
>> [   97.490388]  show_stack+0x14/0x60
>> [   97.490682]  sched_show_task+0x148/0x168
>> [   97.491030]  dump_cpu_task+0x40/0x4c
>> [   97.491349]  rcu_dump_cpu_stacks+0xe8/0x128
>> [   97.491718]  rcu_sched_clock_irq+0x9bc/0xd38
>> [   97.492095]  update_process_times+0x94/0xd8
>> [   97.492463]  tick_sched_handle.isra.0+0x30/0x50
>> [   97.492862]  tick_sched_timer+0x48/0x98
>> [   97.493200]  __hrtimer_run_queues+0x110/0x1b0
>> [   97.493582]  hrtimer_interrupt+0xe4/0x238
>> [   97.493937]  arch_timer_handler_phys+0x28/0x40
>> [   97.494330]  handle_percpu_devid_irq+0x80/0x130
>> [   97.494730]  generic_handle_domain_irq+0x38/0x58
>> [   97.495136]  gic_handle_irq+0x4c/0x110
>> [   97.495473]  call_on_irq_stack+0x28/0x3c
>> [   97.495818]  do_interrupt_handler+0x78/0x80
>> [   97.496186]  el1_interrupt+0x34/0x80
>> [   97.496503]  el1h_64_irq_handler+0x14/0x20
>> [   97.496865]  el1h_64_irq+0x74/0x78
>> [   97.497166]  queued_spin_lock_slowpath+0x118/0x3c0
>> [   97.497588]  _raw_spin_lock+0x5c/0x68
>> [   97.497912]  panfrost_job_run+0x24c/0x3f8
>> [   97.498268]  drm_sched_main+0x130/0x390
>> [   97.498607]  kthread+0x174/0x180
>> [   97.498895]  ret_from_fork+0x10/0x20
>> [  140.108141] random: crng init done
>> [  140.108457] random: 7 urandom warning(s) missed due to ratelimiting
>> [  160.498958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  160.499475] rcu:     3-....: (36750 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=18205
>> [  160.500322]  (t=36756 jiffies g=505 q=482)
>> [  160.500684] Task dump for CPU 3:
>> [  160.500969] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  160.501837] Call trace:
>> [  160.502054]  dump_backtrace+0x0/0x198
>> [  160.502384]  show_stack+0x14/0x60
>> [  160.502679]  sched_show_task+0x148/0x168
>> [  160.503027]  dump_cpu_task+0x40/0x4c
>> [  160.503346]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  160.503714]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  160.504091]  update_process_times+0x94/0xd8
>> [  160.504458]  tick_sched_handle.isra.0+0x30/0x50
>> [  160.504858]  tick_sched_timer+0x48/0x98
>> [  160.505195]  __hrtimer_run_queues+0x110/0x1b0
>> [  160.505580]  hrtimer_interrupt+0xe4/0x238
>> [  160.505934]  arch_timer_handler_phys+0x28/0x40
>> [  160.506327]  handle_percpu_devid_irq+0x80/0x130
>> [  160.506727]  generic_handle_domain_irq+0x38/0x58
>> [  160.507133]  gic_handle_irq+0x4c/0x110
>> [  160.507467]  call_on_irq_stack+0x28/0x3c
>> [  160.507813]  do_interrupt_handler+0x78/0x80
>> [  160.508181]  el1_interrupt+0x34/0x80
>> [  160.508497]  el1h_64_irq_handler+0x14/0x20
>> [  160.508858]  el1h_64_irq+0x74/0x78
>> [  160.509158]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  160.509579]  _raw_spin_lock+0x5c/0x68
>> [  160.509903]  panfrost_job_run+0x24c/0x3f8
>> [  160.510259]  drm_sched_main+0x130/0x390
>> [  160.510599]  kthread+0x174/0x180
>> [  160.510886]  ret_from_fork+0x10/0x20
>> [  223.510959] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  223.511478] rcu:     3-....: (52501 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=26008
>> [  223.512325]  (t=52509 jiffies g=505 q=536)
>> [  223.512688] Task dump for CPU 3:
>> [  223.512971] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  223.513842] Call trace:
>> [  223.514056]  dump_backtrace+0x0/0x198
>> [  223.514387]  show_stack+0x14/0x60
>> [  223.514681]  sched_show_task+0x148/0x168
>> [  223.515029]  dump_cpu_task+0x40/0x4c
>> [  223.515348]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  223.515717]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  223.516094]  update_process_times+0x94/0xd8
>> [  223.516462]  tick_sched_handle.isra.0+0x30/0x50
>> [  223.516860]  tick_sched_timer+0x48/0x98
>> [  223.517198]  __hrtimer_run_queues+0x110/0x1b0
>> [  223.517582]  hrtimer_interrupt+0xe4/0x238
>> [  223.517935]  arch_timer_handler_phys+0x28/0x40
>> [  223.518327]  handle_percpu_devid_irq+0x80/0x130
>> [  223.518727]  generic_handle_domain_irq+0x38/0x58
>> [  223.519133]  gic_handle_irq+0x4c/0x110
>> [  223.519466]  call_on_irq_stack+0x28/0x3c
>> [  223.519812]  do_interrupt_handler+0x78/0x80
>> [  223.520181]  el1_interrupt+0x34/0x80
>> [  223.520498]  el1h_64_irq_handler+0x14/0x20
>> [  223.520860]  el1h_64_irq+0x74/0x78
>> [  223.521161]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  223.521584]  _raw_spin_lock+0x5c/0x68
>> [  223.521908]  panfrost_job_run+0x24c/0x3f8
>> [  223.522264]  drm_sched_main+0x130/0x390
>> [  223.522605]  kthread+0x174/0x180
>> [  223.522892]  ret_from_fork+0x10/0x20
>> [  286.522958] rcu: INFO: rcu_preempt self-detected stall on CPU
>> [  286.523478] rcu:     3-....: (68252 ticks this GP) idle=09f/1/0x4000000000000000 softirq=2517/2517 fqs=33807
>> [  286.524325]  (t=68262 jiffies g=505 q=612)
>> [  286.524687] Task dump for CPU 3:
>> [  286.524972] task:pan_js          state:R  running task     stack:    0 pid:  146 ppid:     2 flags:0x0000000a
>> [  286.525840] Call trace:
>> [  286.526057]  dump_backtrace+0x0/0x198
>> [  286.526387]  show_stack+0x14/0x60
>> [  286.526681]  sched_show_task+0x148/0x168
>> [  286.527029]  dump_cpu_task+0x40/0x4c
>> [  286.527347]  rcu_dump_cpu_stacks+0xe8/0x128
>> [  286.527715]  rcu_sched_clock_irq+0x9bc/0xd38
>> [  286.528092]  update_process_times+0x94/0xd8
>> [  286.528459]  tick_sched_handle.isra.0+0x30/0x50
>> [  286.528859]  tick_sched_timer+0x48/0x98
>> [  286.529197]  __hrtimer_run_queues+0x110/0x1b0
>> [  286.529579]  hrtimer_interrupt+0xe4/0x238
>> [  286.529933]  arch_timer_handler_phys+0x28/0x40
>> [  286.530326]  handle_percpu_devid_irq+0x80/0x130
>> [  286.530726]  generic_handle_domain_irq+0x38/0x58
>> [  286.531132]  gic_handle_irq+0x4c/0x110
>> [  286.531466]  call_on_irq_stack+0x28/0x3c
>> [  286.531812]  do_interrupt_handler+0x78/0x80
>> [  286.532180]  el1_interrupt+0x34/0x80
>> [  286.532496]  el1h_64_irq_handler+0x14/0x20
>> [  286.532857]  el1h_64_irq+0x74/0x78
>> [  286.533157]  queued_spin_lock_slowpath+0x118/0x3c0
>> [  286.533580]  _raw_spin_lock+0x5c/0x68
>> [  286.533904]  panfrost_job_run+0x24c/0x3f8
>> [  286.534259]  drm_sched_main+0x130/0x390
>> [  286.534600]  kthread+0x174/0x180
>> [  286.534887]  ret_from_fork+0x10/0x20
>> [ ... the same rcu_preempt self-detected stall on CPU 3, with an identical pan_js call trace (queued_spin_lock_slowpath via _raw_spin_lock from panfrost_job_run under drm_sched_main), repeats roughly every 63 seconds from t=349 s through t=916 s ... ]
>>
>> -- 
>> Pengutronix e.K.                           |                             |
>> Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
>> 31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
>> Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2
  2021-11-15 14:08       ` Daniel Vetter
  (?)
@ 2021-11-16  7:56         ` Sascha Hauer
  -1 siblings, 0 replies; 115+ messages in thread
From: Sascha Hauer @ 2021-11-16  7:56 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: Christian König, linaro-mm-sig, dri-devel, linux-media, intel-gfx

On Mon, Nov 15, 2021 at 03:08:49PM +0100, Daniel Vetter wrote:
> On Mon, Nov 15, 2021 at 03:03:53PM +0100, Sascha Hauer wrote:
> > Hi,
> > 
> > On Fri, Sep 17, 2021 at 02:34:59PM +0200, Christian König wrote:
> > > Simplifying the code a bit.
> > > 
> > > v2: use dma_resv_for_each_fence
> > > 
> > > Signed-off-by: Christian König <christian.koenig@amd.com>
> > > ---
> > >  drivers/gpu/drm/scheduler/sched_main.c | 26 ++++++--------------------
> > >  1 file changed, 6 insertions(+), 20 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > > index 042c16b5d54a..5bc5f775abe1 100644
> > > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > > @@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
> > >  					    struct drm_gem_object *obj,
> > >  					    bool write)
> > >  {
> > > +	struct dma_resv_iter cursor;
> > > +	struct dma_fence *fence;
> > >  	int ret;
> > > -	struct dma_fence **fences;
> > > -	unsigned int i, fence_count;
> > > -
> > > -	if (!write) {
> > > -		struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
> > > -
> > > -		return drm_sched_job_add_dependency(job, fence);
> > > -	}
> > > -
> > > -	ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
> > > -	if (ret || !fence_count)
> > > -		return ret;
> > >  
> > > -	for (i = 0; i < fence_count; i++) {
> > > -		ret = drm_sched_job_add_dependency(job, fences[i]);
> > > +	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
> > > +		ret = drm_sched_job_add_dependency(job, fence);
> > >  		if (ret)
> > > -			break;
> > > +			return ret;
> > >  	}
> > > -
> > > -	for (; i < fence_count; i++)
> > > -		dma_fence_put(fences[i]);
> > > -	kfree(fences);
> > > -	return ret;
> > > +	return 0;
> > >  }
> > >  EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
> > >  
> > 
> > This patch lets the panfrost driver explode on v5.16-rc1 with the
> > following. I didn't bisect it, but it goes away when I revert this
> > patch. I only started weston, nothing more.
> > 
> > Any idea what goes wrong here?
> 
> Should be fixed in 13e9e30cafea1, but Christian pushed it to the wrong
> patch so it missed -rc1.

I can confirm 13e9e30cafea1 fixes the issue, thanks

Sascha


-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

^ permalink raw reply	[flat|nested] 115+ messages in thread
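
To restate the end result of the conversion discussed above outside of diff context: after this patch, drm_sched_job_add_implicit_dependencies() is a single walk over the reservation object. The sketch below is illustrative only; the standalone function name and the #includes are not taken from the series, and the iterator signature follows the series as posted here rather than whatever finally landed upstream.

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

/* Illustrative restatement of the converted function, not upstream code. */
static int sketch_add_implicit_dependencies(struct drm_sched_job *job,
                                            struct drm_gem_object *obj,
                                            bool write)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        int ret;

        /*
         * With write == true every fence in obj->resv is visited; with
         * write == false only the exclusive fence is, mirroring the
         * open-coded branch the patch removes.
         */
        dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
                ret = drm_sched_job_add_dependency(job, fence);
                if (ret)
                        return ret;
        }
        return 0;
}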

* [PATCH 20/26] drm: use new iterator in drm_gem_fence_array_add_implicit v2
  2021-09-16 11:30 Deploying new iterator interface for dma-buf Christian König
@ 2021-09-16 11:30 ` Christian König
  0 siblings, 0 replies; 115+ messages in thread
From: Christian König @ 2021-09-16 11:30 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx; +Cc: daniel, tvrtko.ursulin

Simplifying the code a bit.

v2: add missing rcu_read_lock()/unlock()

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_gem.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 09c820045859..8c3ff098e49e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1340,31 +1340,21 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 				     struct drm_gem_object *obj,
 				     bool write)
 {
-	int ret;
-	struct dma_fence **fences;
-	unsigned int i, fence_count;
-
-	if (!write) {
-		struct dma_fence *fence =
-			dma_resv_get_excl_unlocked(obj->resv);
-
-		return drm_gem_fence_array_add(fence_array, fence);
-	}
-
-	ret = dma_resv_get_fences(obj->resv, NULL,
-						&fence_count, &fences);
-	if (ret || !fence_count)
-		return ret;
-
-	for (i = 0; i < fence_count; i++) {
-		ret = drm_gem_fence_array_add(fence_array, fences[i]);
-		if (ret)
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+	int ret = 0;
+
+	rcu_read_lock();
+	dma_resv_for_each_fence_unlocked(obj->resv, &cursor, write, fence) {
+		rcu_read_unlock();
+		ret = drm_gem_fence_array_add(fence_array, fence);
+		rcu_read_lock();
+		if (ret) {
+			dma_fence_put(fence);
 			break;
+		}
 	}
-
-	for (; i < fence_count; i++)
-		dma_fence_put(fences[i]);
-	kfree(fences);
+	rcu_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 115+ messages in thread
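
Pulled out of diff context, the pattern this v2 settles on (take the RCU read lock around the unlocked iterator, drop it across the call that can block, and put the extra fence reference on an early exit) looks roughly like the sketch below. The standalone function name and the #includes are illustrative, and the argument order of dma_resv_for_each_fence_unlocked() follows this series as posted, not necessarily the form that finally landed upstream.

#include <linux/dma-resv.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>
#include <drm/drm_gem.h>

/* Illustrative restatement of the converted helper, not upstream code. */
static int sketch_fence_array_add_implicit(struct xarray *fence_array,
                                           struct drm_gem_object *obj,
                                           bool write)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        int ret = 0;

        rcu_read_lock();
        dma_resv_for_each_fence_unlocked(obj->resv, &cursor, write, fence) {
                /* Leave the RCU read-side section across the call that can block. */
                rcu_read_unlock();
                ret = drm_gem_fence_array_add(fence_array, fence);
                rcu_read_lock();
                if (ret) {
                        /* The iterator returned the fence with an extra reference; drop it. */
                        dma_fence_put(fence);
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}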

end of thread, other threads:[~2021-11-16 14:05 UTC | newest]

Thread overview: 115+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-09-17 12:34 Deploying new iterator interface for dma-buf Christian König
2021-09-17 12:34 ` [Intel-gfx] " Christian König
2021-09-17 12:34 ` [PATCH 01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2 Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 13:23   ` Daniel Vetter
2021-09-17 13:23     ` [Intel-gfx] " Daniel Vetter
2021-09-20  8:43     ` Tvrtko Ursulin
2021-09-20 10:09       ` Christian König
2021-09-20 10:26         ` Tvrtko Ursulin
2021-09-17 12:34 ` [PATCH 02/26] dma-buf: add dma_resv_for_each_fence Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 13:27   ` Daniel Vetter
2021-09-17 13:27     ` [Intel-gfx] " Daniel Vetter
2021-09-17 14:30     ` Daniel Vetter
2021-09-17 14:30       ` Daniel Vetter
2021-09-17 12:34 ` [PATCH 03/26] dma-buf: use new iterator in dma_resv_copy_fences Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 14:35   ` Daniel Vetter
2021-09-17 14:35     ` [Intel-gfx] " Daniel Vetter
2021-09-20  7:23     ` Christian König
2021-09-20  7:23       ` [Intel-gfx] " Christian König
2021-09-17 12:34 ` [PATCH 04/26] dma-buf: use new iterator in dma_resv_get_fences v2 Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 14:39   ` Daniel Vetter
2021-09-17 14:39     ` [Intel-gfx] " Daniel Vetter
2021-09-17 12:34 ` [PATCH 05/26] dma-buf: use new iterator in dma_resv_wait_timeout Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 14:43   ` Daniel Vetter
2021-09-17 14:43     ` [Intel-gfx] " Daniel Vetter
2021-09-20  7:27     ` Christian König
2021-09-20  7:27       ` [Intel-gfx] " Christian König
2021-09-17 12:34 ` [PATCH 06/26] dma-buf: use new iterator in dma_resv_test_signaled Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 14:45   ` Daniel Vetter
2021-09-17 14:45     ` [Intel-gfx] " Daniel Vetter
2021-09-17 12:34 ` [PATCH 07/26] drm/ttm: use the new iterator in ttm_bo_flush_all_fences Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 14:50   ` Daniel Vetter
2021-09-17 14:50     ` [Intel-gfx] " Daniel Vetter
2021-09-17 12:34 ` [PATCH 08/26] drm/amdgpu: use the new iterator in amdgpu_sync_resv Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 12:34 ` [PATCH 09/26] drm/amdgpu: use new iterator in amdgpu_ttm_bo_eviction_valuable Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 12:34 ` [PATCH 10/26] drm/msm: use new iterator in msm_gem_describe Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 12:34 ` [PATCH 11/26] drm/radeon: use new iterator in radeon_sync_resv Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 12:34 ` [PATCH 12/26] drm/scheduler: use new iterator in drm_sched_job_add_implicit_dependencies v2 Christian König
2021-09-17 12:34   ` [Intel-gfx] " Christian König
2021-09-17 14:52   ` Daniel Vetter
2021-09-17 14:52     ` [Intel-gfx] " Daniel Vetter
2021-11-15 14:03   ` Sascha Hauer
2021-11-15 14:03     ` Sascha Hauer
2021-11-15 14:03     ` [Intel-gfx] " Sascha Hauer
2021-11-15 14:08     ` Daniel Vetter
2021-11-15 14:08       ` [Intel-gfx] " Daniel Vetter
2021-11-15 14:08       ` Daniel Vetter
2021-11-15 20:32       ` Christian König
2021-11-15 20:32         ` Christian König
2021-11-15 20:32         ` [Intel-gfx] " Christian König
2021-11-16  7:56       ` Sascha Hauer
2021-11-16  7:56         ` [Intel-gfx] " Sascha Hauer
2021-11-16  7:56         ` Sascha Hauer
2021-09-17 12:35 ` [PATCH 13/26] drm/i915: use the new iterator in i915_gem_busy_ioctl Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-20  8:45   ` Tvrtko Ursulin
2021-09-20 10:13     ` Christian König
2021-09-20 10:33       ` Tvrtko Ursulin
2021-09-21  9:41         ` Christian König
2021-09-21 13:10           ` Tvrtko Ursulin
2021-09-17 12:35 ` [PATCH 14/26] drm/i915: use the new iterator in i915_sw_fence_await_reservation v3 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-20  8:45   ` Tvrtko Ursulin
2021-09-20  8:47     ` Tvrtko Ursulin
2021-09-20 10:14       ` Christian König
2021-09-17 12:35 ` [PATCH 15/26] drm/i915: use the new iterator in i915_request_await_object v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 16/26] drm/i915: use new iterator in i915_gem_object_wait_reservation v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-20 10:00   ` Tvrtko Ursulin
2021-09-21 17:35     ` Christian König
2021-09-17 12:35 ` [PATCH 17/26] drm/i915: use new iterator in i915_gem_object_wait_priority v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 18/26] drm/i915: use new iterator in i915_gem_object_last_write_engine v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 19/26] drm/i915: use new cursor in intel_prepare_plane_fb v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 20/26] drm: use new iterator in drm_gem_fence_array_add_implicit v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 14:53   ` Daniel Vetter
2021-09-17 14:53     ` [Intel-gfx] " Daniel Vetter
2021-09-20  7:31     ` Christian König
2021-09-20  7:31       ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 21/26] drm: use new iterator in drm_gem_plane_helper_prepare_fb v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 14:55   ` Daniel Vetter
2021-09-17 14:55     ` [Intel-gfx] " Daniel Vetter
2021-09-20  7:35     ` Christian König
2021-09-20  7:35       ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 22/26] drm/nouveau: use the new iterator in nouveau_fence_sync Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 23/26] drm/nouveau: use the new iterator in nv50_wndw_prepare_fb v2 Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 24/26] drm/etnaviv: use new iterator in etnaviv_gem_describe Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 25/26] drm/etnaviv: replace dma_resv_get_excl_unlocked Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 12:35 ` [PATCH 26/26] dma-buf: nuke dma_resv_get_excl_unlocked Christian König
2021-09-17 12:35   ` [Intel-gfx] " Christian König
2021-09-17 14:56   ` Daniel Vetter
2021-09-17 14:56     ` [Intel-gfx] " Daniel Vetter
2021-09-17 14:01 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/26] dma-buf: add dma_resv_for_each_fence_unlocked v2 Patchwork
2021-09-17 14:29 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-09-17 15:43 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2021-09-16 11:30 Deploying new iterator interface for dma-buf Christian König
2021-09-16 11:30 ` [PATCH 20/26] drm: use new iterator in drm_gem_fence_array_add_implicit v2 Christian König
