All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/8] dma-buf: fix busy wait for new shared fences
@ 2019-08-06 15:01 Christian König
  2019-08-06 15:01 ` [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences Christian König
                   ` (8 more replies)
  0 siblings, 9 replies; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

When reservation_object_add_shared_fence is replacing an old fence with a new
one, we should not drop the old one before the new one is in place.

Otherwise, other cores can busy-wait for the new one to appear.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/reservation.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c71b85c8c159..d59207ca72d2 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -196,6 +196,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 					 struct dma_fence *fence)
 {
 	struct reservation_object_list *fobj;
+	struct dma_fence *old;
 	unsigned int i, count;
 
 	dma_fence_get(fence);
@@ -209,18 +210,16 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	write_seqcount_begin(&obj->seq);
 
 	for (i = 0; i < count; ++i) {
-		struct dma_fence *old_fence;
 
-		old_fence = rcu_dereference_protected(fobj->shared[i],
-						      reservation_object_held(obj));
-		if (old_fence->context == fence->context ||
-		    dma_fence_is_signaled(old_fence)) {
-			dma_fence_put(old_fence);
+		old = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(obj));
+		if (old->context == fence->context ||
+		    dma_fence_is_signaled(old))
 			goto replace;
-		}
 	}
 
 	BUG_ON(fobj->shared_count >= fobj->shared_max);
+	old = NULL;
 	count++;
 
 replace:
@@ -230,6 +229,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 
 	write_seqcount_end(&obj->seq);
 	preempt_enable();
+	dma_fence_put(old);
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
@ 2019-08-06 15:01 ` Christian König
  2019-08-06 19:06   ` Chris Wilson
  2019-08-06 15:01 ` [PATCH 3/8] drm/i915: stop using seqcount for fence pruning Christian König
                   ` (7 subsequent siblings)
  8 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

Add some helpers to correctly allocate/free reservation_object_lists.

Otherwise we might forget to drop dma_fence references on list destruction.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/reservation.c | 65 +++++++++++++++++++++++++----------
 1 file changed, 46 insertions(+), 19 deletions(-)

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index d59207ca72d2..c0ba05936ab6 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -55,6 +55,47 @@ EXPORT_SYMBOL(reservation_seqcount_class);
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
 
+/**
+ * reservation_object_list_alloc - allocate fence list
+ * @shared_max: number of fences we need space for
+ *
+ * Allocate a new reservation_object_list and make sure to correctly initialize
+ * shared_max.
+ */
+static struct reservation_object_list *
+reservation_object_list_alloc(unsigned int shared_max)
+{
+	struct reservation_object_list *list;
+
+	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+	if (!list)
+		return NULL;
+
+	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
+		sizeof(*list->shared);
+
+	return list;
+}
+
+/**
+ * reservation_object_list_free - free fence list
+ * @list: list to free
+ *
+ * Free a reservation_object_list and make sure to drop all references.
+ */
+static void reservation_object_list_free(struct reservation_object_list *list)
+{
+	unsigned int i;
+
+	if (!list)
+		return;
+
+	for (i = 0; i < list->shared_count; ++i)
+		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+
+	kfree_rcu(list, rcu);
+}
+
 /**
  * reservation_object_init - initialize a reservation object
  * @obj: the reservation object
@@ -76,7 +117,6 @@ EXPORT_SYMBOL(reservation_object_init);
  */
 void reservation_object_fini(struct reservation_object *obj)
 {
-	int i;
 	struct reservation_object_list *fobj;
 	struct dma_fence *excl;
 
@@ -89,13 +129,7 @@ void reservation_object_fini(struct reservation_object *obj)
 		dma_fence_put(excl);
 
 	fobj = rcu_dereference_protected(obj->fence, 1);
-	if (fobj) {
-		for (i = 0; i < fobj->shared_count; ++i)
-			dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
-
-		kfree(fobj);
-	}
-
+	reservation_object_list_free(fobj);
 	ww_mutex_destroy(&obj->lock);
 }
 EXPORT_SYMBOL(reservation_object_fini);
@@ -132,7 +166,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		max = 4;
 	}
 
-	new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
+	new = reservation_object_list_alloc(max);
 	if (!new)
 		return -ENOMEM;
 
@@ -153,9 +187,6 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 			RCU_INIT_POINTER(new->shared[j++], fence);
 	}
 	new->shared_count = j;
-	new->shared_max =
-		(ksize(new) - offsetof(typeof(*new), shared)) /
-		sizeof(*new->shared);
 
 	/*
 	 * We are not changing the effective set of fences here so can
@@ -286,7 +317,6 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 {
 	struct reservation_object_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	size_t size;
 	unsigned i;
 
 	reservation_object_assert_held(dst);
@@ -298,10 +328,9 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	if (src_list) {
 		unsigned shared_count = src_list->shared_count;
 
-		size = offsetof(typeof(*src_list), shared[shared_count]);
 		rcu_read_unlock();
 
-		dst_list = kmalloc(size, GFP_KERNEL);
+		dst_list = reservation_object_list_alloc(shared_count);
 		if (!dst_list)
 			return -ENOMEM;
 
@@ -313,7 +342,6 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 		}
 
 		dst_list->shared_count = 0;
-		dst_list->shared_max = shared_count;
 		for (i = 0; i < src_list->shared_count; ++i) {
 			struct dma_fence *fence;
 
@@ -323,7 +351,7 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 				continue;
 
 			if (!dma_fence_get_rcu(fence)) {
-				kfree(dst_list);
+				reservation_object_list_free(dst_list);
 				src_list = rcu_dereference(src->fence);
 				goto retry;
 			}
@@ -353,8 +381,7 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	write_seqcount_end(&dst->seq);
 	preempt_enable();
 
-	if (src_list)
-		kfree_rcu(src_list, rcu);
+	reservation_object_list_free(src_list);
 	dma_fence_put(old);
 
 	return 0;
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 3/8] drm/i915: stop using seqcount for fence pruning
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
  2019-08-06 15:01 ` [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences Christian König
@ 2019-08-06 15:01 ` Christian König
  2019-08-06 19:07   ` Chris Wilson
  2019-08-06 15:01 ` [PATCH 4/8] drm/i915: use new reservation_object_fences helper Christian König
                   ` (6 subsequent siblings)
  8 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

After waiting for a reservation object use reservation_object_test_signaled_rcu
to opportunistically prune the fences on the object.

This allows removal of the seqcount handling in the reservation object.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_wait.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 26ec6579b7cd..fa46a54bcbe7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -35,7 +35,6 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 				 unsigned int flags,
 				 long timeout)
 {
-	unsigned int seq = __read_seqcount_begin(&resv->seq);
 	struct dma_fence *excl;
 	bool prune_fences = false;
 
@@ -83,15 +82,12 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 
 	/*
 	 * Opportunistically prune the fences iff we know they have *all* been
-	 * signaled and that the reservation object has not been changed (i.e.
-	 * no new fences have been added).
+	 * signaled.
 	 */
-	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
-		if (reservation_object_trylock(resv)) {
-			if (!__read_seqcount_retry(&resv->seq, seq))
-				reservation_object_add_excl_fence(resv, NULL);
-			reservation_object_unlock(resv);
-		}
+	if (prune_fences && reservation_object_trylock(resv)) {
+		if (reservation_object_test_signaled_rcu(resv, true))
+			reservation_object_add_excl_fence(resv, NULL);
+		reservation_object_unlock(resv);
 	}
 
 	return timeout;
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 4/8] drm/i915: use new reservation_object_fences helper
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
  2019-08-06 15:01 ` [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences Christian König
  2019-08-06 15:01 ` [PATCH 3/8] drm/i915: stop using seqcount for fence pruning Christian König
@ 2019-08-06 15:01 ` Christian König
  2019-08-06 19:09   ` Chris Wilson
  2019-08-06 15:01 ` [PATCH 5/8] dma-buf: further relax reservation_object_add_shared_fence Christian König
                   ` (5 subsequent siblings)
  8 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

Instead of open coding the sequence loop use the new helper.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_busy.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6ad93a09968c..8da0594eda88 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -83,7 +83,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
 	struct reservation_object_list *list;
-	unsigned int seq;
+	struct dma_fence *excl;
 	int err;
 
 	err = -ENOENT;
@@ -109,15 +109,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-retry:
-	seq = raw_read_seqcount(&obj->base.resv->seq);
+	reservation_object_fences(obj->base.resv, &excl, &list);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy =
-		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+	args->busy = busy_check_writer(excl);
 
 	/* Translate shared fences to READ set of engines */
-	list = rcu_dereference(obj->base.resv->fence);
 	if (list) {
 		unsigned int shared_count = list->shared_count, i;
 
@@ -129,9 +126,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		}
 	}
 
-	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
-		goto retry;
-
 	err = 0;
 out:
 	rcu_read_unlock();
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 5/8] dma-buf: further relax reservation_object_add_shared_fence
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
                   ` (2 preceding siblings ...)
  2019-08-06 15:01 ` [PATCH 4/8] drm/i915: use new reservation_object_fences helper Christian König
@ 2019-08-06 15:01 ` Christian König
  2019-08-06 19:25   ` Chris Wilson
  2019-08-06 15:01 ` [PATCH 6/8] dma-buf: simplify reservation_object_get_fences_rcu a bit Christian König
                   ` (4 subsequent siblings)
  8 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

Other cores don't busy-wait any more and we removed the last user of checking
the seqno for changes. Drop updating the number for shared fences altogether.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/reservation.c                    | 6 ------
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 +------
 2 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0ba05936ab6..944d962ddddf 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -237,9 +237,6 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	fobj = reservation_object_get_list(obj);
 	count = fobj->shared_count;
 
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
-
 	for (i = 0; i < count; ++i) {
 
 		old = rcu_dereference_protected(fobj->shared[i],
@@ -257,9 +254,6 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	RCU_INIT_POINTER(fobj->shared[i], fence);
 	/* pointer update must be visible before we extend the shared_count */
 	smp_store_mb(fobj->shared_count, count);
-
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
 	dma_fence_put(old);
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fe062b76ec91..a4640ddc24d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -251,12 +251,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	new->shared_max = old->shared_max;
 	new->shared_count = k;
 
-	/* Install the new fence list, seqcount provides the barriers */
-	preempt_disable();
-	write_seqcount_begin(&resv->seq);
-	RCU_INIT_POINTER(resv->fence, new);
-	write_seqcount_end(&resv->seq);
-	preempt_enable();
+	rcu_assign_pointer(resv->fence, new);
 
 	/* Drop the references to the removed fences or move them to ef_list */
 	for (i = j, k = 0; i < old->shared_count; ++i) {
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 6/8] dma-buf: simplify reservation_object_get_fences_rcu a bit
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
                   ` (3 preceding siblings ...)
  2019-08-06 15:01 ` [PATCH 5/8] dma-buf: further relax reservation_object_add_shared_fence Christian König
@ 2019-08-06 15:01 ` Christian König
  2019-08-06 19:11   ` Chris Wilson
  2019-08-06 15:01 ` [PATCH 7/8] dma-buf: add reservation_object_fences helper Christian König
                   ` (3 subsequent siblings)
  8 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

We can add the exclusive fence to the list after making sure we got
a consistent state.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/reservation.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 944d962ddddf..7505eb289023 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -453,13 +453,6 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				if (!dma_fence_get_rcu(shared[i]))
 					break;
 			}
-
-			if (!pfence_excl && fence_excl) {
-				shared[i] = fence_excl;
-				fence_excl = NULL;
-				++i;
-				++shared_count;
-			}
 		}
 
 		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
@@ -474,6 +467,11 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 		rcu_read_unlock();
 	} while (ret);
 
+	if (pfence_excl)
+		*pfence_excl = fence_excl;
+	else if (fence_excl)
+		shared[++shared_count] = fence_excl;
+
 	if (!shared_count) {
 		kfree(shared);
 		shared = NULL;
@@ -481,9 +479,6 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 
 	*pshared_count = shared_count;
 	*pshared = shared;
-	if (pfence_excl)
-		*pfence_excl = fence_excl;
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 7/8] dma-buf: add reservation_object_fences helper
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
                   ` (4 preceding siblings ...)
  2019-08-06 15:01 ` [PATCH 6/8] dma-buf: simplify reservation_object_get_fences_rcu a bit Christian König
@ 2019-08-06 15:01 ` Christian König
  2019-08-06 19:24   ` Chris Wilson
  2019-08-06 15:01 ` [PATCH 8/8] dma-buf: nuke reservation_object seq number Christian König
                   ` (2 subsequent siblings)
  8 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

Add a new helper to get a consistent set of pointers from the reservation
object. While at it group all access helpers together in the header file.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c     |  27 ++------
 drivers/dma-buf/reservation.c |  60 ++++++------------
 include/linux/reservation.h   | 113 ++++++++++++++++++++--------------
 3 files changed, 94 insertions(+), 106 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f45bfb29ef96..0d11b3cc961e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
-	unsigned shared_count, seq;
+	unsigned shared_count;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -213,20 +213,12 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-retry:
-	seq = read_seqcount_begin(&resv->seq);
 	rcu_read_lock();
-
-	fobj = rcu_dereference(resv->fence);
+	reservation_object_fences(resv, &fence_excl, &fobj);
 	if (fobj)
 		shared_count = fobj->shared_count;
 	else
 		shared_count = 0;
-	fence_excl = rcu_dereference(resv->fence_excl);
-	if (read_seqcount_retry(&resv->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
 
 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
@@ -1157,7 +1149,6 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	struct reservation_object *robj;
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
-	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
 
@@ -1188,16 +1179,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
-		while (true) {
-			seq = read_seqcount_begin(&robj->seq);
-			rcu_read_lock();
-			fobj = rcu_dereference(robj->fence);
-			shared_count = fobj ? fobj->shared_count : 0;
-			fence = rcu_dereference(robj->fence_excl);
-			if (!read_seqcount_retry(&robj->seq, seq))
-				break;
-			rcu_read_unlock();
-		}
+		rcu_read_lock();
+		reservation_object_fences(robj, &fence, &fobj);
+		shared_count = fobj ? fobj->shared_count : 0;
+		rcu_read_unlock();
 
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 7505eb289023..839d72af7ad8 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -316,9 +316,9 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	reservation_object_assert_held(dst);
 
 	rcu_read_lock();
-	src_list = rcu_dereference(src->fence);
 
 retry:
+	reservation_object_fences(src, &new, &src_list);
 	if (src_list) {
 		unsigned shared_count = src_list->shared_count;
 
@@ -329,7 +329,7 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 			return -ENOMEM;
 
 		rcu_read_lock();
-		src_list = rcu_dereference(src->fence);
+		reservation_object_fences(src, &new, &src_list);
 		if (!src_list || src_list->shared_count > shared_count) {
 			kfree(dst_list);
 			goto retry;
@@ -346,7 +346,6 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 
 			if (!dma_fence_get_rcu(fence)) {
 				reservation_object_list_free(dst_list);
-				src_list = rcu_dereference(src->fence);
 				goto retry;
 			}
 
@@ -361,7 +360,10 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 		dst_list = NULL;
 	}
 
-	new = dma_fence_get_rcu_safe(&src->fence_excl);
+	if (new && !dma_fence_get_rcu(new)) {
+		reservation_object_list_free(dst_list);
+		goto retry;
+	}
 	rcu_read_unlock();
 
 	src_list = reservation_object_get_list(dst);
@@ -407,19 +409,17 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 
 	do {
 		struct reservation_object_list *fobj;
-		unsigned int i, seq;
+		unsigned int i;
 		size_t sz = 0;
 
 		shared_count = i = 0;
 
 		rcu_read_lock();
-		seq = read_seqcount_begin(&obj->seq);
+		reservation_object_fences(obj, &fence_excl, &fobj);
 
-		fence_excl = rcu_dereference(obj->fence_excl);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
-		fobj = rcu_dereference(obj->fence);
 		if (fobj)
 			sz += sizeof(*shared) * fobj->shared_max;
 
@@ -455,7 +455,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 			}
 		}
 
-		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+		if (i != shared_count) {
 			while (i--)
 				dma_fence_put(shared[i]);
 			dma_fence_put(fence_excl);
@@ -499,18 +499,18 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
 					 unsigned long timeout)
 {
+	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
-	unsigned seq, shared_count;
+	unsigned shared_count;
 	long ret = timeout ? timeout : 1;
 	int i;
 
 retry:
 	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
 	rcu_read_lock();
 	i = -1;
 
-	fence = rcu_dereference(obj->fence_excl);
+	reservation_object_fences(obj, &fence, &fobj);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -525,9 +525,6 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 	}
 
 	if (wait_all) {
-		struct reservation_object_list *fobj =
-						rcu_dereference(obj->fence);
-
 		if (fobj)
 			shared_count = fobj->shared_count;
 
@@ -553,11 +550,6 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 
 	rcu_read_unlock();
 	if (fence) {
-		if (read_seqcount_retry(&obj->seq, seq)) {
-			dma_fence_put(fence);
-			goto retry;
-		}
-
 		ret = dma_fence_wait_timeout(fence, intr, ret);
 		dma_fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
@@ -602,21 +594,20 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 					  bool test_all)
 {
-	unsigned seq, shared_count;
+	struct reservation_object_list *fobj;
+	struct dma_fence *fence_excl;
+	unsigned shared_count;
 	int ret;
 
 	rcu_read_lock();
 retry:
 	ret = true;
 	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
 
+	reservation_object_fences(obj, &fence_excl, &fobj);
 	if (test_all) {
 		unsigned i;
 
-		struct reservation_object_list *fobj =
-						rcu_dereference(obj->fence);
-
 		if (fobj)
 			shared_count = fobj->shared_count;
 
@@ -629,23 +620,12 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 			else if (!ret)
 				break;
 		}
-
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto retry;
 	}
 
-	if (!shared_count) {
-		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
-
-		if (fence_excl) {
-			ret = reservation_object_test_signaled_single(
-								fence_excl);
-			if (ret < 0)
-				goto retry;
-
-			if (read_seqcount_retry(&obj->seq, seq))
-				goto retry;
-		}
+	if (!shared_count && fence_excl) {
+		ret = reservation_object_test_signaled_single(fence_excl);
+		if (ret < 0)
+			goto retry;
 	}
 
 	rcu_read_unlock();
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 56b782fec49b..b8b8273eef00 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -81,6 +81,51 @@ struct reservation_object {
 #define reservation_object_assert_held(obj) \
 	lockdep_assert_held(&(obj)->lock.base)
 
+/**
+ * reservation_object_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+reservation_object_get_excl(struct reservation_object *obj)
+{
+	return rcu_dereference_protected(obj->fence_excl,
+					 reservation_object_held(obj));
+}
+
+/**
+ * reservation_object_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments it's
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *
+reservation_object_get_excl_rcu(struct reservation_object *obj)
+{
+	struct dma_fence *fence;
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
+	rcu_read_lock();
+	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+	rcu_read_unlock();
+
+	return fence;
+}
+
 /**
  * reservation_object_get_list - get the reservation object's
  * shared fence list, with update-side lock held
@@ -96,6 +141,29 @@ reservation_object_get_list(struct reservation_object *obj)
 					 reservation_object_held(obj));
 }
 
+/**
+ * reservation_object_fences - read consistent fence pointers
+ * @obj: reservation object where we get the fences from
+ * @excl: pointer for the exclusive fence
+ * @list: pointer for the shared fence list
+ *
+ * Make sure we have a consistent exclusive fence and shared fence list.
+ * Must be called with rcu read side lock held.
+ */
+static inline void
+reservation_object_fences(struct reservation_object *obj,
+			  struct dma_fence **excl,
+			  struct reservation_object_list **list)
+{
+	unsigned int seq;
+
+	do {
+		seq = read_seqcount_begin(&obj->seq);
+		*excl = rcu_dereference(obj->fence_excl);
+		*list = rcu_dereference(obj->fence);
+	} while (read_seqcount_retry(&obj->seq, seq));
+}
+
 /**
  * reservation_object_lock - lock the reservation object
  * @obj: the reservation object
@@ -239,51 +307,6 @@ reservation_object_unlock(struct reservation_object *obj)
 	ww_mutex_unlock(&obj->lock);
 }
 
-/**
- * reservation_object_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-reservation_object_get_excl(struct reservation_object *obj)
-{
-	return rcu_dereference_protected(obj->fence_excl,
-					 reservation_object_held(obj));
-}
-
-/**
- * reservation_object_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-reservation_object_get_excl_rcu(struct reservation_object *obj)
-{
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
-}
-
 void reservation_object_init(struct reservation_object *obj);
 void reservation_object_fini(struct reservation_object *obj);
 int reservation_object_reserve_shared(struct reservation_object *obj,
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 8/8] dma-buf: nuke reservation_object seq number
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
                   ` (5 preceding siblings ...)
  2019-08-06 15:01 ` [PATCH 7/8] dma-buf: add reservation_object_fences helper Christian König
@ 2019-08-06 15:01 ` Christian König
  2019-08-06 19:57   ` Chris Wilson
  2019-08-06 18:57 ` [PATCH 1/8] dma-buf: fix busy wait for new shared fences Chris Wilson
  2019-08-09 13:27 ` ✗ Fi.CI.BAT: failure for series starting with [1/8] " Patchwork
  8 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-06 15:01 UTC (permalink / raw)
  To: intel-gfx, linaro-mm-sig, dri-devel, chris

The only remaining use for this is to protect against setting a new exclusive
fence while we grab both exclusive and shared. That can also be achieved by
checking whether the exclusive fence has changed or not after completing the
operation.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/reservation.c | 20 +++-----------------
 include/linux/reservation.h   |  9 ++-------
 2 files changed, 5 insertions(+), 24 deletions(-)

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 839d72af7ad8..43549a4d6658 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -49,12 +49,6 @@
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
-struct lock_class_key reservation_seqcount_class;
-EXPORT_SYMBOL(reservation_seqcount_class);
-
-const char reservation_seqcount_string[] = "reservation_seqcount";
-EXPORT_SYMBOL(reservation_seqcount_string);
-
 /**
  * reservation_object_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
@@ -103,9 +97,6 @@ static void reservation_object_list_free(struct reservation_object_list *list)
 void reservation_object_init(struct reservation_object *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
-
-	__seqcount_init(&obj->seq, reservation_seqcount_string,
-			&reservation_seqcount_class);
 	RCU_INIT_POINTER(obj->fence, NULL);
 	RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
@@ -282,12 +273,10 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 		dma_fence_get(fence);
 
 	preempt_disable();
-	write_seqcount_begin(&obj->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
 	RCU_INIT_POINTER(obj->fence_excl, fence);
+	/* pointer update must be visible before we modify the shared_count */
 	if (old)
-		old->shared_count = 0;
-	write_seqcount_end(&obj->seq);
+		smp_store_mb(old->shared_count, 0);
 	preempt_enable();
 
 	/* inplace update, no shared fences */
@@ -370,11 +359,8 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	old = reservation_object_get_excl(dst);
 
 	preempt_disable();
-	write_seqcount_begin(&dst->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
 	RCU_INIT_POINTER(dst->fence_excl, new);
-	RCU_INIT_POINTER(dst->fence, dst_list);
-	write_seqcount_end(&dst->seq);
+	rcu_assign_pointer(dst->fence, dst_list);
 	preempt_enable();
 
 	reservation_object_list_free(src_list);
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b8b8273eef00..1dfaf7b1f1da 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -46,8 +46,6 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
 
 /**
  * struct reservation_object_list - a list of shared fences
@@ -71,7 +69,6 @@ struct reservation_object_list {
  */
 struct reservation_object {
 	struct ww_mutex lock;
-	seqcount_t seq;
 
 	struct dma_fence __rcu *fence_excl;
 	struct reservation_object_list __rcu *fence;
@@ -155,13 +152,11 @@ reservation_object_fences(struct reservation_object *obj,
 			  struct dma_fence **excl,
 			  struct reservation_object_list **list)
 {
-	unsigned int seq;
-
 	do {
-		seq = read_seqcount_begin(&obj->seq);
 		*excl = rcu_dereference(obj->fence_excl);
 		*list = rcu_dereference(obj->fence);
-	} while (read_seqcount_retry(&obj->seq, seq));
+		smp_rmb(); /* See reservation_object_add_excl_fence */
+	} while (rcu_access_pointer(obj->fence_excl) != *excl);
 }
 
 /**
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/8] dma-buf: fix busy wait for new shared fences
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
                   ` (6 preceding siblings ...)
  2019-08-06 15:01 ` [PATCH 8/8] dma-buf: nuke reservation_object seq number Christian König
@ 2019-08-06 18:57 ` Chris Wilson
  2019-08-09 13:27 ` ✗ Fi.CI.BAT: failure for series starting with [1/8] " Patchwork
  8 siblings, 0 replies; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 18:57 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:27)
> When reservation_object_add_shared_fence is replacing an old fence with a new
> one we should not drop the old one before the new one is in place.
> 
> Otherwise other cores can busy wait for the new one to appear.

I see. The reader will see a refcount==0 fence and restart, whereas by
dropping the ref later, that reader has a better chance of getting to
the end before noticing the change.

> Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences
  2019-08-06 15:01 ` [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences Christian König
@ 2019-08-06 19:06   ` Chris Wilson
  2019-08-07 10:43     ` Christian König
  0 siblings, 1 reply; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 19:06 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:28)
> Add some helpers to correctly allocate/free reservation_object_lists.
> 
> Otherwise we might forget to drop dma_fence references on list destruction.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/reservation.c | 65 +++++++++++++++++++++++++----------
>  1 file changed, 46 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
> index d59207ca72d2..c0ba05936ab6 100644
> --- a/drivers/dma-buf/reservation.c
> +++ b/drivers/dma-buf/reservation.c
> @@ -55,6 +55,47 @@ EXPORT_SYMBOL(reservation_seqcount_class);
>  const char reservation_seqcount_string[] = "reservation_seqcount";
>  EXPORT_SYMBOL(reservation_seqcount_string);
>  
> +/**
> + * reservation_object_list_alloc - allocate fence list
> + * @shared_max: number of fences we need space for
> + *
> + * Allocate a new reservation_object_list and make sure to correctly initialize
> + * shared_max.
> + */
> +static struct reservation_object_list *
> +reservation_object_list_alloc(unsigned int shared_max)
> +{
> +       struct reservation_object_list *list;
> +
> +       list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
> +       if (!list)
> +               return NULL;
> +
> +       list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
> +               sizeof(*list->shared);
> +
> +       return list;
> +}
> +
> +/**
> + * reservation_object_list_free - free fence list
> + * @list: list to free
> + *
> + * Free a reservation_object_list and make sure to drop all references.
> + */
> +static void reservation_object_list_free(struct reservation_object_list *list)
> +{
> +       unsigned int i;
> +
> +       if (!list)
> +               return;
> +
> +       for (i = 0; i < list->shared_count; ++i)
> +               dma_fence_put(rcu_dereference_protected(list->shared[i], true));
> +
> +       kfree_rcu(list, rcu);

So 2 out of 3 paths don't need another RCU grace period before freeing.
Actually, that lack of RCU inside reservation_object_fini has caught me
by surprise before. Not sure if that's worth treating as anything other
than my own bug... But if we accept it is worth preventing here then the
only odd one out is on a reservation_object_copy_fences() error path,
where the extra delay shouldn't be an issue.

So to double-RCU defer on reservation_object_fini() or not?

For the rest of the mechanical changes,
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 3/8] drm/i915: stop using seqcount for fence pruning
  2019-08-06 15:01 ` [PATCH 3/8] drm/i915: stop using seqcount for fence pruning Christian König
@ 2019-08-06 19:07   ` Chris Wilson
  0 siblings, 0 replies; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 19:07 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:29)
> After waiting for a reservation object use reservation_object_test_signaled_rcu
> to opportunistically prune the fences on the object.
> 
> This allows removal of the seqcount handling in the reservation object.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>

I like keeping the reminder about the lack of pruning on idle objects :)
-Chris
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 4/8] drm/i915: use new reservation_object_fences helper
  2019-08-06 15:01 ` [PATCH 4/8] drm/i915: use new reservation_object_fences helper Christian König
@ 2019-08-06 19:09   ` Chris Wilson
  0 siblings, 0 replies; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 19:09 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:30)
> Instead of open coding the sequence loop use the new helper.

I've missed something. What reservation_object_fences()?
-Chris
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 6/8] dma-buf: simplify reservation_object_get_fences_rcu a bit
  2019-08-06 15:01 ` [PATCH 6/8] dma-buf: simplify reservation_object_get_fences_rcu a bit Christian König
@ 2019-08-06 19:11   ` Chris Wilson
  0 siblings, 0 replies; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 19:11 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:32)
> We can add the exclusive fence to the list after making sure we got
> a consistent state.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 7/8] dma-buf: add reservation_object_fences helper
  2019-08-06 15:01 ` [PATCH 7/8] dma-buf: add reservation_object_fences helper Christian König
@ 2019-08-06 19:24   ` Chris Wilson
  2019-08-07  9:06     ` Christian König
  0 siblings, 1 reply; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 19:24 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:33)
> Add a new helper to get a consistent set of pointers from the reservation
> object. While at it group all access helpers together in the header file.

Ah, needs to be earlier :)
 
> +/**
> + * reservation_object_fences - read consistent fence pointers
> + * @obj: reservation object where we get the fences from
> + * @excl: pointer for the exclusive fence
> + * @list: pointer for the shared fence list
> + *
> + * Make sure we have a consistent exclusive fence and shared fence list.
> + * Must be called with rcu read side lock held.
> + */
> +static inline void
> +reservation_object_fences(struct reservation_object *obj,
> +                         struct dma_fence **excl,
> +                         struct reservation_object_list **list)
> +{
> +       unsigned int seq;
> +
> +       do {
> +               seq = read_seqcount_begin(&obj->seq);
> +               *excl = rcu_dereference(obj->fence_excl);
> +               *list = rcu_dereference(obj->fence);
> +       } while (read_seqcount_retry(&obj->seq, seq));
> +}

I would personally prefer return excl rather than have it as a second
outparam, but I'd leave that to gcc to decide.

Having stared at this, I agree this does the right thing. The important
point from all callers' perspective is that the combination of pointers
is consistent for this rcu_read_lock. And rcu_dereference enforces the
callers do hold rcu_read_lock.

I didn't check all the conversions, just stared at the heart of the
problem.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 5/8] dma-buf: further relax reservation_object_add_shared_fence
  2019-08-06 15:01 ` [PATCH 5/8] dma-buf: further relax reservation_object_add_shared_fence Christian König
@ 2019-08-06 19:25   ` Chris Wilson
  0 siblings, 0 replies; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 19:25 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:31)
> Other cores don't busy wait any more and we removed the last user of checking
> the seqno for changes. Drop updating the number for shared fences altogether.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/reservation.c                    | 6 ------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 +------
>  2 files changed, 1 insertion(+), 12 deletions(-)
> 
> diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
> index c0ba05936ab6..944d962ddddf 100644
> --- a/drivers/dma-buf/reservation.c
> +++ b/drivers/dma-buf/reservation.c
> @@ -237,9 +237,6 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
>         fobj = reservation_object_get_list(obj);
>         count = fobj->shared_count;
>  
> -       preempt_disable();
> -       write_seqcount_begin(&obj->seq);
> -
>         for (i = 0; i < count; ++i) {
>  
>                 old = rcu_dereference_protected(fobj->shared[i],
> @@ -257,9 +254,6 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
>         RCU_INIT_POINTER(fobj->shared[i], fence);
>         /* pointer update must be visible before we extend the shared_count */
>         smp_store_mb(fobj->shared_count, count);

Yup, that's all the mb rules we need to apply for the rcu readers to see
a consistent view.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 8/8] dma-buf: nuke reservation_object seq number
  2019-08-06 15:01 ` [PATCH 8/8] dma-buf: nuke reservation_object seq number Christian König
@ 2019-08-06 19:57   ` Chris Wilson
  2019-08-07 12:08     ` Christian König
  0 siblings, 1 reply; 22+ messages in thread
From: Chris Wilson @ 2019-08-06 19:57 UTC (permalink / raw)
  To: Christian König, dri-devel, intel-gfx, linaro-mm-sig

Quoting Christian König (2019-08-06 16:01:34)
> The only remaining use for this is to protect against setting a new exclusive
> fence while we grab both exclusive and shared. That can also be achieved by
> looking if the exclusive fence has changed or not after completing the
> operation.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/reservation.c | 20 +++-----------------
>  include/linux/reservation.h   |  9 ++-------
>  2 files changed, 5 insertions(+), 24 deletions(-)
> 
> diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
> index 839d72af7ad8..43549a4d6658 100644
> --- a/drivers/dma-buf/reservation.c
> +++ b/drivers/dma-buf/reservation.c
> @@ -49,12 +49,6 @@
>  DEFINE_WD_CLASS(reservation_ww_class);
>  EXPORT_SYMBOL(reservation_ww_class);
>  
> -struct lock_class_key reservation_seqcount_class;
> -EXPORT_SYMBOL(reservation_seqcount_class);
> -
> -const char reservation_seqcount_string[] = "reservation_seqcount";
> -EXPORT_SYMBOL(reservation_seqcount_string);
> -
>  /**
>   * reservation_object_list_alloc - allocate fence list
>   * @shared_max: number of fences we need space for
> @@ -103,9 +97,6 @@ static void reservation_object_list_free(struct reservation_object_list *list)
>  void reservation_object_init(struct reservation_object *obj)
>  {
>         ww_mutex_init(&obj->lock, &reservation_ww_class);
> -
> -       __seqcount_init(&obj->seq, reservation_seqcount_string,
> -                       &reservation_seqcount_class);
>         RCU_INIT_POINTER(obj->fence, NULL);
>         RCU_INIT_POINTER(obj->fence_excl, NULL);
>  }
> @@ -282,12 +273,10 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
>                 dma_fence_get(fence);
>  
>         preempt_disable();
> -       write_seqcount_begin(&obj->seq);
> -       /* write_seqcount_begin provides the necessary memory barrier */
>         RCU_INIT_POINTER(obj->fence_excl, fence);

I think, now has to be rcu_assign_pointer.

 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.   This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
 * 2.   The caller has taken whatever steps are required to prevent
 *      RCU readers from concurrently accessing this pointer *or*
 * 3.   The referenced data structure has already been exposed to
 *      readers either at compile time or via rcu_assign_pointer() *and*
 *
 *      a.      You have not made *any* reader-visible changes to
 *              this structure since then *or*
 *      b.      It is OK for readers accessing this structure from its
 *              new location to see the old state of the structure.  (For
 *              example, the changes were to statistical counters or to
 *              other state where exact synchronization is not required.)

We used to apply 2 from the seqcount, and 3 does not apply here.

> +       /* pointer update must be visible before we modify the shared_count */
>         if (old)
> -               old->shared_count = 0;
> -       write_seqcount_end(&obj->seq);
> +               smp_store_mb(old->shared_count, 0);

Hmm. Ok, I think.

>  {
> -       unsigned int seq;
> -
>         do {
> -               seq = read_seqcount_begin(&obj->seq);
>                 *excl = rcu_dereference(obj->fence_excl);
>                 *list = rcu_dereference(obj->fence);
> -       } while (read_seqcount_retry(&obj->seq, seq));
> +               smp_rmb(); /* See reservation_object_add_excl_fence */
> +       } while (rcu_access_pointer(obj->fence_excl) != *excl);
>  }

So, if we are add_excl_fence and cancelling a shared-list, before we
return we guarantee that the shared-list is set to zero if excl is set
as we read.

If we add to shared-list during the read, ... Hmm, actually we should
return num_list, i.e.

do {
	*list = rcu_dereference(obj->fence);
	num_list = *list ? (*list)->count : 0;
	smp_rmb();
} while (...)

return num_list.

as the relationship between the count and the fence entries is also
determined by the mb in add_shared_fence.

Oops, that was an oversight in the previous review.

>         preempt_enable();
>  
>         /* inplace update, no shared fences */
> @@ -370,11 +359,8 @@ int reservation_object_copy_fences(struct reservation_object *dst,
>         old = reservation_object_get_excl(dst);
>  
>         preempt_disable();
> -       write_seqcount_begin(&dst->seq);
> -       /* write_seqcount_begin provides the necessary memory barrier */
>         RCU_INIT_POINTER(dst->fence_excl, new);

rcu_assign_pointer again I believe.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 7/8] dma-buf: add reservation_object_fences helper
  2019-08-06 19:24   ` Chris Wilson
@ 2019-08-07  9:06     ` Christian König
  0 siblings, 0 replies; 22+ messages in thread
From: Christian König @ 2019-08-07  9:06 UTC (permalink / raw)
  To: Chris Wilson, dri-devel, intel-gfx, linaro-mm-sig

Am 06.08.19 um 21:24 schrieb Chris Wilson:
> Quoting Christian König (2019-08-06 16:01:33)
>> Add a new helper to get a consistent set of pointers from the reservation
>> object. While at it group all access helpers together in the header file.
> Ah, needs to be earlier :)

Ah, crap. That got incorrectly reordered while moving the fixes to the 
beginning of the set.

>   
>> +/**
>> + * reservation_object_fences - read consistent fence pointers
>> + * @obj: reservation object where we get the fences from
>> + * @excl: pointer for the exclusive fence
>> + * @list: pointer for the shared fence list
>> + *
>> + * Make sure we have a consistent exclusive fence and shared fence list.
>> + * Must be called with rcu read side lock held.
>> + */
>> +static inline void
>> +reservation_object_fences(struct reservation_object *obj,
>> +                         struct dma_fence **excl,
>> +                         struct reservation_object_list **list)
>> +{
>> +       unsigned int seq;
>> +
>> +       do {
>> +               seq = read_seqcount_begin(&obj->seq);
>> +               *excl = rcu_dereference(obj->fence_excl);
>> +               *list = rcu_dereference(obj->fence);
>> +       } while (read_seqcount_retry(&obj->seq, seq));
>> +}
> I would personally prefer return excl rather than have it as a second
> outparam, but I'd leave that to gcc to decide.
>
> Having stared at this, I agree this does the right thing. The important
> point from all callers' perspective is that the combination of pointers
> is consistent for this rcu_read_lock. And rcu_dereference enforces the
> callers do hold rcu_read_lock.
>
> I didn't check all the conversions, just stared at the heart of the
> problem.
>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>

Thanks.

Going to fix that up,
Christian.

> -Chris

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences
  2019-08-06 19:06   ` Chris Wilson
@ 2019-08-07 10:43     ` Christian König
  0 siblings, 0 replies; 22+ messages in thread
From: Christian König @ 2019-08-07 10:43 UTC (permalink / raw)
  To: Chris Wilson, dri-devel, intel-gfx, linaro-mm-sig

Am 06.08.19 um 21:06 schrieb Chris Wilson:
> Quoting Christian König (2019-08-06 16:01:28)
>> Add some helpers to correctly allocate/free reservation_object_lists.
>>
>> Otherwise we might forget to drop dma_fence references on list destruction.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/reservation.c | 65 +++++++++++++++++++++++++----------
>>   1 file changed, 46 insertions(+), 19 deletions(-)
>>
>> diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
>> index d59207ca72d2..c0ba05936ab6 100644
>> --- a/drivers/dma-buf/reservation.c
>> +++ b/drivers/dma-buf/reservation.c
>> @@ -55,6 +55,47 @@ EXPORT_SYMBOL(reservation_seqcount_class);
>>   const char reservation_seqcount_string[] = "reservation_seqcount";
>>   EXPORT_SYMBOL(reservation_seqcount_string);
>>   
>> +/**
>> + * reservation_object_list_alloc - allocate fence list
>> + * @shared_max: number of fences we need space for
>> + *
>> + * Allocate a new reservation_object_list and make sure to correctly initialize
>> + * shared_max.
>> + */
>> +static struct reservation_object_list *
>> +reservation_object_list_alloc(unsigned int shared_max)
>> +{
>> +       struct reservation_object_list *list;
>> +
>> +       list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
>> +       if (!list)
>> +               return NULL;
>> +
>> +       list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
>> +               sizeof(*list->shared);
>> +
>> +       return list;
>> +}
>> +
>> +/**
>> + * reservation_object_list_free - free fence list
>> + * @list: list to free
>> + *
>> + * Free a reservation_object_list and make sure to drop all references.
>> + */
>> +static void reservation_object_list_free(struct reservation_object_list *list)
>> +{
>> +       unsigned int i;
>> +
>> +       if (!list)
>> +               return;
>> +
>> +       for (i = 0; i < list->shared_count; ++i)
>> +               dma_fence_put(rcu_dereference_protected(list->shared[i], true));
>> +
>> +       kfree_rcu(list, rcu);
> So 2 out of 3 paths don't need another RCU grace period before freeing.
> Actually, that lack of RCU inside reservation_object_fini has caught me
> by surprise before. Not sure if that's worth treating as anything other
> than my own bug... But if we accept it is worth preventing here then the
> only odd one out is on a reservation_object_copy_fences() error path,
> where the extra delay shouldn't be an issue.
>
> So to double-RCU defer on reservation_object_fini() or not?

Yeah, I think in the _fini path using kfree might be legal because 
nobody else should have an extra reference to the object.

But the key point is I don't think an extra grace period would hurt us 
in any way,
Christian.

>
> For the rest of the mechanical changes,
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
> -Chris

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 8/8] dma-buf: nuke reservation_object seq number
  2019-08-06 19:57   ` Chris Wilson
@ 2019-08-07 12:08     ` Christian König
  2019-08-07 12:19       ` Chris Wilson
  0 siblings, 1 reply; 22+ messages in thread
From: Christian König @ 2019-08-07 12:08 UTC (permalink / raw)
  To: Chris Wilson, dri-devel, intel-gfx, linaro-mm-sig

Am 06.08.19 um 21:57 schrieb Chris Wilson:
> Quoting Christian König (2019-08-06 16:01:34)
>> The only remaining use for this is to protect against setting a new exclusive
>> fence while we grab both exclusive and shared. That can also be achieved by
>> looking if the exclusive fence has changed or not after completing the
>> operation.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/dma-buf/reservation.c | 20 +++-----------------
>>   include/linux/reservation.h   |  9 ++-------
>>   2 files changed, 5 insertions(+), 24 deletions(-)
>>
>> diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
>> index 839d72af7ad8..43549a4d6658 100644
>> --- a/drivers/dma-buf/reservation.c
>> +++ b/drivers/dma-buf/reservation.c
>> @@ -49,12 +49,6 @@
>>   DEFINE_WD_CLASS(reservation_ww_class);
>>   EXPORT_SYMBOL(reservation_ww_class);
>>   
>> -struct lock_class_key reservation_seqcount_class;
>> -EXPORT_SYMBOL(reservation_seqcount_class);
>> -
>> -const char reservation_seqcount_string[] = "reservation_seqcount";
>> -EXPORT_SYMBOL(reservation_seqcount_string);
>> -
>>   /**
>>    * reservation_object_list_alloc - allocate fence list
>>    * @shared_max: number of fences we need space for
>> @@ -103,9 +97,6 @@ static void reservation_object_list_free(struct reservation_object_list *list)
>>   void reservation_object_init(struct reservation_object *obj)
>>   {
>>          ww_mutex_init(&obj->lock, &reservation_ww_class);
>> -
>> -       __seqcount_init(&obj->seq, reservation_seqcount_string,
>> -                       &reservation_seqcount_class);
>>          RCU_INIT_POINTER(obj->fence, NULL);
>>          RCU_INIT_POINTER(obj->fence_excl, NULL);
>>   }
>> @@ -282,12 +273,10 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
>>                  dma_fence_get(fence);
>>   
>>          preempt_disable();
>> -       write_seqcount_begin(&obj->seq);
>> -       /* write_seqcount_begin provides the necessary memory barrier */
>>          RCU_INIT_POINTER(obj->fence_excl, fence);
> I think, now has to be rcu_assign_pointer.
>
>   * Initialize an RCU-protected pointer in special cases where readers
>   * do not need ordering constraints on the CPU or the compiler.  These
>   * special cases are:
>   *
>   * 1.   This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
>   * 2.   The caller has taken whatever steps are required to prevent
>   *      RCU readers from concurrently accessing this pointer *or*
>   * 3.   The referenced data structure has already been exposed to
>   *      readers either at compile time or via rcu_assign_pointer() *and*
>   *
>   *      a.      You have not made *any* reader-visible changes to
>   *              this structure since then *or*
>   *      b.      It is OK for readers accessing this structure from its
>   *              new location to see the old state of the structure.  (For
>   *              example, the changes were to statistical counters or to
>   *              other state where exact synchronization is not required.)
>
> We used to apply 2 from the seqcount, and 3 does not apply here.
>
>> +       /* pointer update must be visible before we modify the shared_count */
>>          if (old)
>> -               old->shared_count = 0;
>> -       write_seqcount_end(&obj->seq);
>> +               smp_store_mb(old->shared_count, 0);
> Hmm. Ok, I think.
>
>>   {
>> -       unsigned int seq;
>> -
>>          do {
>> -               seq = read_seqcount_begin(&obj->seq);
>>                  *excl = rcu_dereference(obj->fence_excl);
>>                  *list = rcu_dereference(obj->fence);
>> -       } while (read_seqcount_retry(&obj->seq, seq));
>> +               smp_rmb(); /* See reservation_object_add_excl_fence */
>> +       } while (rcu_access_pointer(obj->fence_excl) != *excl);
>>   }
> So, if we are add_excl_fence and cancelling a shared-list, before we
> return we guarantee that the shared-list is set to zero if excl is set
> as we read.
>
> If we add to shared-list during the read, ... Hmm, actually we should
> return num_list, i.e.
>
> do {
> 	*list = rcu_dereference(obj->fence);
> 	num_list = *list ? (*list)->count : 0;
> 	smp_rmb();
> } while (...)
>
> return num_list.
>
> as the relationship between the count and the fence entries is also
> determined by the mb in add_shared_fence.

I've read that multiple times now, but can't follow. Why should we do this?

The only important thing is that the readers see the new fence before 
the increment of the number of fences.

When you see this is rather irrelevant.

Christian.

>
> Oops, that was an oversight in the previous review.
>
>>          preempt_enable();
>>   
>>          /* inplace update, no shared fences */
>> @@ -370,11 +359,8 @@ int reservation_object_copy_fences(struct reservation_object *dst,
>>          old = reservation_object_get_excl(dst);
>>   
>>          preempt_disable();
>> -       write_seqcount_begin(&dst->seq);
>> -       /* write_seqcount_begin provides the necessary memory barrier */
>>          RCU_INIT_POINTER(dst->fence_excl, new);
> rcu_assign_pointer again I believe.
> -Chris

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 8/8] dma-buf: nuke reservation_object seq number
  2019-08-07 12:08     ` Christian König
@ 2019-08-07 12:19       ` Chris Wilson
  2019-08-07 13:05         ` Koenig, Christian
  0 siblings, 1 reply; 22+ messages in thread
From: Chris Wilson @ 2019-08-07 12:19 UTC (permalink / raw)
  To: Christian König, christian.koenig, dri-devel, intel-gfx,
	linaro-mm-sig

Quoting Christian König (2019-08-07 13:08:38)
> Am 06.08.19 um 21:57 schrieb Chris Wilson:
> > If we add to shared-list during the read, ... Hmm, actually we should
> > return num_list, i.e.
> >
> > do {
> >       *list = rcu_dereference(obj->fence);
> >       num_list = *list ? (*list)->count : 0;
> >       smp_rmb();
> > } while (...)
> >
> > return num_list.
> >
> > as the relationship between the count and the fence entries is also
> > determined by the mb in add_shared_fence.
> 
> I've read that multiple times now, but can't follow. Why should we do this?
> 
> The only important thing is that the readers see the new fence before 
> the increment of the number of fences.

Exactly. We order the store so that the fence is in the list before we
update the count (so that we don't read garbage because the fence isn't
there yet).

But we don't have the equivalent here for the read once the rmb is
removed from the seqcount_read_begin/end looping. We need to see the
update in the same order as was stored, and only use the coherent
portion of the list.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 8/8] dma-buf: nuke reservation_object seq number
  2019-08-07 12:19       ` Chris Wilson
@ 2019-08-07 13:05         ` Koenig, Christian
  0 siblings, 0 replies; 22+ messages in thread
From: Koenig, Christian @ 2019-08-07 13:05 UTC (permalink / raw)
  To: Chris Wilson, Christian König, dri-devel, intel-gfx, linaro-mm-sig

Am 07.08.19 um 14:19 schrieb Chris Wilson:
> Quoting Christian König (2019-08-07 13:08:38)
>> Am 06.08.19 um 21:57 schrieb Chris Wilson:
>>> If we add to shared-list during the read, ... Hmm, actually we should
>>> return num_list, i.e.
>>>
>>> do {
>>>        *list = rcu_dereference(obj->fence);
>>>        num_list = *list ? (*list)->count : 0;
>>>        smp_rmb();
>>> } while (...)
>>>
>>> return num_list.
>>>
>>> as the relationship between the count and the fence entries is also
>>> determined by the mb in add_shared_fence.
>> I've read that multiple times now, but can't follow. Why should we do this?
>>
>> The only important thing is that the readers see the new fence before
>> the increment of the number of fences.
> Exactly. We order the store so that the fence is in the list before we
> update the count (so that we don't read garbage because the fence isn't
> there yet).
>
> But we don't have the equivalent here for the read once the rmb is
> removed from the seqcount_read_begin/end looping. We need to see the
> update in the same order as was stored, and only use the coherent
> portion of the list.

Ok that makes sense. Going to fix up the code regarding to that.

Christian.

> -Chris

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 22+ messages in thread

* ✗ Fi.CI.BAT: failure for series starting with [1/8] dma-buf: fix busy wait for new shared fences
  2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
                   ` (7 preceding siblings ...)
  2019-08-06 18:57 ` [PATCH 1/8] dma-buf: fix busy wait for new shared fences Chris Wilson
@ 2019-08-09 13:27 ` Patchwork
  8 siblings, 0 replies; 22+ messages in thread
From: Patchwork @ 2019-08-09 13:27 UTC (permalink / raw)
  To: Christian König; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/8] dma-buf: fix busy wait for new shared fences
URL   : https://patchwork.freedesktop.org/series/64951/
State : failure

== Summary ==

Applying: dma-buf: fix busy wait for new shared fences
Using index info to reconstruct a base tree...
M	drivers/dma-buf/reservation.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/dma-buf/reservation.c
No changes -- Patch already applied.
Applying: dma-buf: fix shared fence list handling in reservation_object_copy_fences
Using index info to reconstruct a base tree...
M	drivers/dma-buf/reservation.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/dma-buf/reservation.c
No changes -- Patch already applied.
Applying: drm/i915: stop using seqcount for fence pruning
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/gem/i915_gem_wait.c
Falling back to patching base and 3-way merge...
No changes -- Patch already applied.
Applying: drm/i915: use new reservation_object_fences helper
Applying: dma-buf: further relax reservation_object_add_shared_fence
Applying: dma-buf: simplify reservation_object_get_fences_rcu a bit
error: sha1 information is lacking or useless (drivers/dma-buf/reservation.c).
error: could not build fake ancestor
hint: Use 'git am --show-current-patch' to see the failed patch
Patch failed at 0006 dma-buf: simplify reservation_object_get_fences_rcu a bit
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2019-08-09 13:27 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-06 15:01 [PATCH 1/8] dma-buf: fix busy wait for new shared fences Christian König
2019-08-06 15:01 ` [PATCH 2/8] dma-buf: fix shared fence list handling in reservation_object_copy_fences Christian König
2019-08-06 19:06   ` Chris Wilson
2019-08-07 10:43     ` Christian König
2019-08-06 15:01 ` [PATCH 3/8] drm/i915: stop using seqcount for fence pruning Christian König
2019-08-06 19:07   ` Chris Wilson
2019-08-06 15:01 ` [PATCH 4/8] drm/i915: use new reservation_object_fences helper Christian König
2019-08-06 19:09   ` Chris Wilson
2019-08-06 15:01 ` [PATCH 5/8] dma-buf: further relax reservation_object_add_shared_fence Christian König
2019-08-06 19:25   ` Chris Wilson
2019-08-06 15:01 ` [PATCH 6/8] dma-buf: simplify reservation_object_get_fences_rcu a bit Christian König
2019-08-06 19:11   ` Chris Wilson
2019-08-06 15:01 ` [PATCH 7/8] dma-buf: add reservation_object_fences helper Christian König
2019-08-06 19:24   ` Chris Wilson
2019-08-07  9:06     ` Christian König
2019-08-06 15:01 ` [PATCH 8/8] dma-buf: nuke reservation_object seq number Christian König
2019-08-06 19:57   ` Chris Wilson
2019-08-07 12:08     ` Christian König
2019-08-07 12:19       ` Chris Wilson
2019-08-07 13:05         ` Koenig, Christian
2019-08-06 18:57 ` [PATCH 1/8] dma-buf: fix busy wait for new shared fences Chris Wilson
2019-08-09 13:27 ` ✗ Fi.CI.BAT: failure for series starting with [1/8] " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.