All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] dma-buf: Restore seqlock around dma_resv updates
@ 2019-08-14 18:24 Chris Wilson
  2019-08-14 18:26 ` Chris Wilson
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Chris Wilson @ 2019-08-14 18:24 UTC (permalink / raw)
  To: dri-devel; +Cc: Daniel Vetter, intel-gfx

This reverts
67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
5d344f58da76 ("dma-buf: nuke reservation_object seq number")

The scenario that defeats simply grabbing a set of shared/exclusive
fences and using them blissfully under RCU is that any of those fences
may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
scenario, while keeping the rcu_read_lock we need to establish that no
fence was changed in the dma_resv after a read (or full) memory barrier.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/dma-buf/dma-buf.c                     |  31 ++++-
 drivers/dma-buf/dma-resv.c                    | 109 ++++++++++++-----
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |   7 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  24 ++--
 include/linux/dma-resv.h                      | 113 ++++++++----------
 5 files changed, 175 insertions(+), 109 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index b3400d6524ab..433d91d710e4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
-	unsigned shared_count;
+	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -213,8 +213,21 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
+retry:
+	seq = read_seqcount_begin(&resv->seq);
 	rcu_read_lock();
-	dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
+
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 		__poll_t pevents = EPOLLIN;
@@ -1144,6 +1157,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	struct dma_resv *robj;
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
+	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
 
@@ -1174,9 +1188,16 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
-		rcu_read_lock();
-		dma_resv_fences(robj, &fence, &fobj, &shared_count);
-		rcu_read_unlock();
+		while (true) {
+			seq = read_seqcount_begin(&robj->seq);
+			rcu_read_lock();
+			fobj = rcu_dereference(robj->fence);
+			shared_count = fobj ? fobj->shared_count : 0;
+			fence = rcu_dereference(robj->fence_excl);
+			if (!read_seqcount_retry(&robj->seq, seq))
+				break;
+			rcu_read_unlock();
+		}
 
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index f5142683c851..42a8f3f11681 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -49,6 +49,12 @@
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
+
 /**
  * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
@@ -96,6 +102,9 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 void dma_resv_init(struct dma_resv *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+	__seqcount_init(&obj->seq, reservation_seqcount_string,
+			&reservation_seqcount_class);
 	RCU_INIT_POINTER(obj->fence, NULL);
 	RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
@@ -225,6 +234,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 	fobj = dma_resv_get_list(obj);
 	count = fobj->shared_count;
 
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
 	for (i = 0; i < count; ++i) {
 
 		old = rcu_dereference_protected(fobj->shared[i],
@@ -242,6 +254,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 	RCU_INIT_POINTER(fobj->shared[i], fence);
 	/* pointer update must be visible before we extend the shared_count */
 	smp_store_mb(fobj->shared_count, count);
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
 	dma_fence_put(old);
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);
@@ -269,10 +284,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 		dma_fence_get(fence);
 
 	preempt_disable();
-	rcu_assign_pointer(obj->fence_excl, fence);
-	/* pointer update must be visible before we modify the shared_count */
+	write_seqcount_begin(&obj->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(obj->fence_excl, fence);
 	if (old)
-		smp_store_mb(old->shared_count, 0);
+		old->shared_count = 0;
+	write_seqcount_end(&obj->seq);
 	preempt_enable();
 
 	/* inplace update, no shared fences */
@@ -295,15 +312,17 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
 	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	unsigned int i, shared_count;
+	unsigned i;
 
 	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
+	src_list = rcu_dereference(src->fence);
 
 retry:
-	dma_resv_fences(src, &new, &src_list, &shared_count);
-	if (shared_count) {
+	if (src_list) {
+		unsigned shared_count = src_list->shared_count;
+
 		rcu_read_unlock();
 
 		dst_list = dma_resv_list_alloc(shared_count);
@@ -311,14 +330,14 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 			return -ENOMEM;
 
 		rcu_read_lock();
-		dma_resv_fences(src, &new, &src_list, &shared_count);
-		if (!src_list || shared_count > dst_list->shared_max) {
+		src_list = rcu_dereference(src->fence);
+		if (!src_list || src_list->shared_count > shared_count) {
 			kfree(dst_list);
 			goto retry;
 		}
 
 		dst_list->shared_count = 0;
-		for (i = 0; i < shared_count; ++i) {
+		for (i = 0; i < src_list->shared_count; ++i) {
 			struct dma_fence *fence;
 
 			fence = rcu_dereference(src_list->shared[i]);
@@ -328,6 +347,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 			if (!dma_fence_get_rcu(fence)) {
 				dma_resv_list_free(dst_list);
+				src_list = rcu_dereference(src->fence);
 				goto retry;
 			}
 
@@ -342,18 +362,18 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 		dst_list = NULL;
 	}
 
-	if (new && !dma_fence_get_rcu(new)) {
-		dma_resv_list_free(dst_list);
-		goto retry;
-	}
+	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
 	src_list = dma_resv_get_list(dst);
 	old = dma_resv_get_excl(dst);
 
 	preempt_disable();
-	rcu_assign_pointer(dst->fence_excl, new);
-	rcu_assign_pointer(dst->fence, dst_list);
+	write_seqcount_begin(&dst->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(dst->fence_excl, new);
+	RCU_INIT_POINTER(dst->fence, dst_list);
+	write_seqcount_end(&dst->seq);
 	preempt_enable();
 
 	dma_resv_list_free(src_list);
@@ -388,18 +408,19 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 
 	do {
 		struct dma_resv_list *fobj;
-		unsigned int i;
+		unsigned int i, seq;
 		size_t sz = 0;
 
-		i = 0;
+		shared_count = i = 0;
 
 		rcu_read_lock();
-		dma_resv_fences(obj, &fence_excl, &fobj,
-					  &shared_count);
+		seq = read_seqcount_begin(&obj->seq);
 
+		fence_excl = rcu_dereference(obj->fence_excl);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
+		fobj = rcu_dereference(obj->fence);
 		if (fobj)
 			sz += sizeof(*shared) * fobj->shared_max;
 
@@ -427,6 +448,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 				break;
 			}
 			shared = nshared;
+			shared_count = fobj ? fobj->shared_count : 0;
 			for (i = 0; i < shared_count; ++i) {
 				shared[i] = rcu_dereference(fobj->shared[i]);
 				if (!dma_fence_get_rcu(shared[i]))
@@ -434,7 +456,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 			}
 		}
 
-		if (i != shared_count) {
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
 			while (i--)
 				dma_fence_put(shared[i]);
 			dma_fence_put(fence_excl);
@@ -478,17 +500,18 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 			       bool wait_all, bool intr,
 			       unsigned long timeout)
 {
-	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
-	unsigned shared_count;
+	unsigned seq, shared_count;
 	long ret = timeout ? timeout : 1;
 	int i;
 
 retry:
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
 	rcu_read_lock();
 	i = -1;
 
-	dma_resv_fences(obj, &fence, &fobj, &shared_count);
+	fence = rcu_dereference(obj->fence_excl);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -503,6 +526,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	}
 
 	if (wait_all) {
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
 		for (i = 0; !fence && i < shared_count; ++i) {
 			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 
@@ -525,6 +553,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 
 	rcu_read_unlock();
 	if (fence) {
+		if (read_seqcount_retry(&obj->seq, seq)) {
+			dma_fence_put(fence);
+			goto retry;
+		}
+
 		ret = dma_fence_wait_timeout(fence, intr, ret);
 		dma_fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
@@ -567,19 +600,23 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence_excl;
-	unsigned shared_count;
+	unsigned seq, shared_count;
 	int ret;
 
 	rcu_read_lock();
 retry:
 	ret = true;
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
 
-	dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
 	if (test_all) {
 		unsigned i;
 
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
 		for (i = 0; i < shared_count; ++i) {
 			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
@@ -589,14 +626,24 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 			else if (!ret)
 				break;
 		}
-	}
 
-	if (!shared_count && fence_excl) {
-		ret = dma_resv_test_signaled_single(fence_excl);
-		if (ret < 0)
+		if (read_seqcount_retry(&obj->seq, seq))
 			goto retry;
 	}
 
+	if (!shared_count) {
+		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (fence_excl) {
+			ret = dma_resv_test_signaled_single(fence_excl);
+			if (ret < 0)
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
+		}
+	}
+
 	rcu_read_unlock();
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index bc4ec6b20a87..76e3516484e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -251,7 +251,12 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	new->shared_max = old->shared_max;
 	new->shared_count = k;
 
-	rcu_assign_pointer(resv->fence, new);
+	/* Install the new fence list, seqcount provides the barriers */
+	preempt_disable();
+	write_seqcount_begin(&resv->seq);
+	RCU_INIT_POINTER(resv->fence, new);
+	write_seqcount_end(&resv->seq);
+	preempt_enable();
 
 	/* Drop the references to the removed fences or move them to ef_list */
 	for (i = j, k = 0; i < old->shared_count; ++i) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index a2aff1d8290e..3d4f5775a4ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -83,8 +83,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
 	struct dma_resv_list *list;
-	unsigned int i, shared_count;
-	struct dma_fence *excl;
+	unsigned int seq;
 	int err;
 
 	err = -ENOENT;
@@ -110,18 +109,29 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-	dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
+retry:
+	seq = raw_read_seqcount(&obj->base.resv->seq);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy = busy_check_writer(excl);
+	args->busy =
+		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
 
 	/* Translate shared fences to READ set of engines */
-	for (i = 0; i < shared_count; ++i) {
-		struct dma_fence *fence = rcu_dereference(list->shared[i]);
+	list = rcu_dereference(obj->base.resv->fence);
+	if (list) {
+		unsigned int shared_count = list->shared_count, i;
 
-		args->busy |= busy_check_reader(fence);
+		for (i = 0; i < shared_count; ++i) {
+			struct dma_fence *fence =
+				rcu_dereference(list->shared[i]);
+
+			args->busy |= busy_check_reader(fence);
+		}
 	}
 
+	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
+		goto retry;
+
 	err = 0;
 out:
 	rcu_read_unlock();
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 38f2802afabb..ee50d10f052b 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,6 +46,8 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
 
 /**
  * struct dma_resv_list - a list of shared fences
@@ -69,6 +71,7 @@ struct dma_resv_list {
  */
 struct dma_resv {
 	struct ww_mutex lock;
+	seqcount_t seq;
 
 	struct dma_fence __rcu *fence_excl;
 	struct dma_resv_list __rcu *fence;
@@ -77,24 +80,6 @@ struct dma_resv {
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence_excl,
-					 dma_resv_held(obj));
-}
-
 /**
  * dma_resv_get_list - get the reservation object's
  * shared fence list, with update-side lock held
@@ -109,53 +94,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
 					 dma_resv_held(obj));
 }
 
-/**
- * dma_resv_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consisten exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void dma_resv_fences(struct dma_resv *obj,
-				   struct dma_fence **excl,
-				   struct dma_resv_list **list,
-				   u32 *shared_count)
-{
-	do {
-		*excl = rcu_dereference(obj->fence_excl);
-		*list = rcu_dereference(obj->fence);
-		*shared_count = *list ? (*list)->shared_count : 0;
-		smp_rmb(); /* See dma_resv_add_excl_fence */
-	} while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
-}
-
 /**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
@@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 	ww_mutex_unlock(&obj->lock);
 }
 
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+dma_resv_get_excl(struct dma_resv *obj)
+{
+	return rcu_dereference_protected(obj->fence_excl,
+					 dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments it's
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *
+dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+	struct dma_fence *fence;
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
+	rcu_read_lock();
+	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+	rcu_read_unlock();
+
+	return fence;
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-- 
2.23.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH] dma-buf: Restore seqlock around dma_resv updates
  2019-08-14 18:24 [PATCH] dma-buf: Restore seqlock around dma_resv updates Chris Wilson
@ 2019-08-14 18:26 ` Chris Wilson
  2019-08-14 20:07   ` Daniel Vetter
  2019-08-15  7:30   ` Koenig, Christian
  2019-08-14 19:06 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 8+ messages in thread
From: Chris Wilson @ 2019-08-14 18:26 UTC (permalink / raw)
  To: dri-devel; +Cc: Daniel Vetter, intel-gfx, Christian König

Quoting Chris Wilson (2019-08-14 19:24:01)
> This reverts
> 67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
> dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
> 0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
> 5d344f58da76 ("dma-buf: nuke reservation_object seq number")
> 
> The scenario that defeats simply grabbing a set of shared/exclusive
> fences and using them blissfully under RCU is that any of those fences
> may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
> scenario, while keeping the rcu_read_lock we need to establish that no
> fence was changed in the dma_resv after a read (or full) memory barrier.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>

I said I needed to go lie down, that proves it.

Cc: Christian König <christian.koenig@amd.com>

> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> ---
>  drivers/dma-buf/dma-buf.c                     |  31 ++++-
>  drivers/dma-buf/dma-resv.c                    | 109 ++++++++++++-----
>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |   7 +-
>  drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  24 ++--
>  include/linux/dma-resv.h                      | 113 ++++++++----------
>  5 files changed, 175 insertions(+), 109 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index b3400d6524ab..433d91d710e4 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>         struct dma_resv_list *fobj;
>         struct dma_fence *fence_excl;
>         __poll_t events;
> -       unsigned shared_count;
> +       unsigned shared_count, seq;
>  
>         dmabuf = file->private_data;
>         if (!dmabuf || !dmabuf->resv)
> @@ -213,8 +213,21 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>         if (!events)
>                 return 0;
>  
> +retry:
> +       seq = read_seqcount_begin(&resv->seq);
>         rcu_read_lock();
> -       dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
> +
> +       fobj = rcu_dereference(resv->fence);
> +       if (fobj)
> +               shared_count = fobj->shared_count;
> +       else
> +               shared_count = 0;
> +       fence_excl = rcu_dereference(resv->fence_excl);
> +       if (read_seqcount_retry(&resv->seq, seq)) {
> +               rcu_read_unlock();
> +               goto retry;
> +       }
> +
>         if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
>                 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
>                 __poll_t pevents = EPOLLIN;
> @@ -1144,6 +1157,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>         struct dma_resv *robj;
>         struct dma_resv_list *fobj;
>         struct dma_fence *fence;
> +       unsigned seq;
>         int count = 0, attach_count, shared_count, i;
>         size_t size = 0;
>  
> @@ -1174,9 +1188,16 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>                                 buf_obj->name ?: "");
>  
>                 robj = buf_obj->resv;
> -               rcu_read_lock();
> -               dma_resv_fences(robj, &fence, &fobj, &shared_count);
> -               rcu_read_unlock();
> +               while (true) {
> +                       seq = read_seqcount_begin(&robj->seq);
> +                       rcu_read_lock();
> +                       fobj = rcu_dereference(robj->fence);
> +                       shared_count = fobj ? fobj->shared_count : 0;
> +                       fence = rcu_dereference(robj->fence_excl);
> +                       if (!read_seqcount_retry(&robj->seq, seq))
> +                               break;
> +                       rcu_read_unlock();
> +               }
>  
>                 if (fence)
>                         seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index f5142683c851..42a8f3f11681 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -49,6 +49,12 @@
>  DEFINE_WD_CLASS(reservation_ww_class);
>  EXPORT_SYMBOL(reservation_ww_class);
>  
> +struct lock_class_key reservation_seqcount_class;
> +EXPORT_SYMBOL(reservation_seqcount_class);
> +
> +const char reservation_seqcount_string[] = "reservation_seqcount";
> +EXPORT_SYMBOL(reservation_seqcount_string);
> +
>  /**
>   * dma_resv_list_alloc - allocate fence list
>   * @shared_max: number of fences we need space for
> @@ -96,6 +102,9 @@ static void dma_resv_list_free(struct dma_resv_list *list)
>  void dma_resv_init(struct dma_resv *obj)
>  {
>         ww_mutex_init(&obj->lock, &reservation_ww_class);
> +
> +       __seqcount_init(&obj->seq, reservation_seqcount_string,
> +                       &reservation_seqcount_class);
>         RCU_INIT_POINTER(obj->fence, NULL);
>         RCU_INIT_POINTER(obj->fence_excl, NULL);
>  }
> @@ -225,6 +234,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>         fobj = dma_resv_get_list(obj);
>         count = fobj->shared_count;
>  
> +       preempt_disable();
> +       write_seqcount_begin(&obj->seq);
> +
>         for (i = 0; i < count; ++i) {
>  
>                 old = rcu_dereference_protected(fobj->shared[i],
> @@ -242,6 +254,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>         RCU_INIT_POINTER(fobj->shared[i], fence);
>         /* pointer update must be visible before we extend the shared_count */
>         smp_store_mb(fobj->shared_count, count);
> +
> +       write_seqcount_end(&obj->seq);
> +       preempt_enable();
>         dma_fence_put(old);
>  }
>  EXPORT_SYMBOL(dma_resv_add_shared_fence);
> @@ -269,10 +284,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>                 dma_fence_get(fence);
>  
>         preempt_disable();
> -       rcu_assign_pointer(obj->fence_excl, fence);
> -       /* pointer update must be visible before we modify the shared_count */
> +       write_seqcount_begin(&obj->seq);
> +       /* write_seqcount_begin provides the necessary memory barrier */
> +       RCU_INIT_POINTER(obj->fence_excl, fence);
>         if (old)
> -               smp_store_mb(old->shared_count, 0);
> +               old->shared_count = 0;
> +       write_seqcount_end(&obj->seq);
>         preempt_enable();
>  
>         /* inplace update, no shared fences */
> @@ -295,15 +312,17 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  {
>         struct dma_resv_list *src_list, *dst_list;
>         struct dma_fence *old, *new;
> -       unsigned int i, shared_count;
> +       unsigned i;
>  
>         dma_resv_assert_held(dst);
>  
>         rcu_read_lock();
> +       src_list = rcu_dereference(src->fence);
>  
>  retry:
> -       dma_resv_fences(src, &new, &src_list, &shared_count);
> -       if (shared_count) {
> +       if (src_list) {
> +               unsigned shared_count = src_list->shared_count;
> +
>                 rcu_read_unlock();
>  
>                 dst_list = dma_resv_list_alloc(shared_count);
> @@ -311,14 +330,14 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>                         return -ENOMEM;
>  
>                 rcu_read_lock();
> -               dma_resv_fences(src, &new, &src_list, &shared_count);
> -               if (!src_list || shared_count > dst_list->shared_max) {
> +               src_list = rcu_dereference(src->fence);
> +               if (!src_list || src_list->shared_count > shared_count) {
>                         kfree(dst_list);
>                         goto retry;
>                 }
>  
>                 dst_list->shared_count = 0;
> -               for (i = 0; i < shared_count; ++i) {
> +               for (i = 0; i < src_list->shared_count; ++i) {
>                         struct dma_fence *fence;
>  
>                         fence = rcu_dereference(src_list->shared[i]);
> @@ -328,6 +347,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>  
>                         if (!dma_fence_get_rcu(fence)) {
>                                 dma_resv_list_free(dst_list);
> +                               src_list = rcu_dereference(src->fence);
>                                 goto retry;
>                         }
>  
> @@ -342,18 +362,18 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>                 dst_list = NULL;
>         }
>  
> -       if (new && !dma_fence_get_rcu(new)) {
> -               dma_resv_list_free(dst_list);
> -               goto retry;
> -       }
> +       new = dma_fence_get_rcu_safe(&src->fence_excl);
>         rcu_read_unlock();
>  
>         src_list = dma_resv_get_list(dst);
>         old = dma_resv_get_excl(dst);
>  
>         preempt_disable();
> -       rcu_assign_pointer(dst->fence_excl, new);
> -       rcu_assign_pointer(dst->fence, dst_list);
> +       write_seqcount_begin(&dst->seq);
> +       /* write_seqcount_begin provides the necessary memory barrier */
> +       RCU_INIT_POINTER(dst->fence_excl, new);
> +       RCU_INIT_POINTER(dst->fence, dst_list);
> +       write_seqcount_end(&dst->seq);
>         preempt_enable();
>  
>         dma_resv_list_free(src_list);
> @@ -388,18 +408,19 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>  
>         do {
>                 struct dma_resv_list *fobj;
> -               unsigned int i;
> +               unsigned int i, seq;
>                 size_t sz = 0;
>  
> -               i = 0;
> +               shared_count = i = 0;
>  
>                 rcu_read_lock();
> -               dma_resv_fences(obj, &fence_excl, &fobj,
> -                                         &shared_count);
> +               seq = read_seqcount_begin(&obj->seq);
>  
> +               fence_excl = rcu_dereference(obj->fence_excl);
>                 if (fence_excl && !dma_fence_get_rcu(fence_excl))
>                         goto unlock;
>  
> +               fobj = rcu_dereference(obj->fence);
>                 if (fobj)
>                         sz += sizeof(*shared) * fobj->shared_max;
>  
> @@ -427,6 +448,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>                                 break;
>                         }
>                         shared = nshared;
> +                       shared_count = fobj ? fobj->shared_count : 0;
>                         for (i = 0; i < shared_count; ++i) {
>                                 shared[i] = rcu_dereference(fobj->shared[i]);
>                                 if (!dma_fence_get_rcu(shared[i]))
> @@ -434,7 +456,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>                         }
>                 }
>  
> -               if (i != shared_count) {
> +               if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
>                         while (i--)
>                                 dma_fence_put(shared[i]);
>                         dma_fence_put(fence_excl);
> @@ -478,17 +500,18 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>                                bool wait_all, bool intr,
>                                unsigned long timeout)
>  {
> -       struct dma_resv_list *fobj;
>         struct dma_fence *fence;
> -       unsigned shared_count;
> +       unsigned seq, shared_count;
>         long ret = timeout ? timeout : 1;
>         int i;
>  
>  retry:
> +       shared_count = 0;
> +       seq = read_seqcount_begin(&obj->seq);
>         rcu_read_lock();
>         i = -1;
>  
> -       dma_resv_fences(obj, &fence, &fobj, &shared_count);
> +       fence = rcu_dereference(obj->fence_excl);
>         if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
>                 if (!dma_fence_get_rcu(fence))
>                         goto unlock_retry;
> @@ -503,6 +526,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>         }
>  
>         if (wait_all) {
> +               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> +
> +               if (fobj)
> +                       shared_count = fobj->shared_count;
> +
>                 for (i = 0; !fence && i < shared_count; ++i) {
>                         struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
>  
> @@ -525,6 +553,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>  
>         rcu_read_unlock();
>         if (fence) {
> +               if (read_seqcount_retry(&obj->seq, seq)) {
> +                       dma_fence_put(fence);
> +                       goto retry;
> +               }
> +
>                 ret = dma_fence_wait_timeout(fence, intr, ret);
>                 dma_fence_put(fence);
>                 if (ret > 0 && wait_all && (i + 1 < shared_count))
> @@ -567,19 +600,23 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>   */
>  bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>  {
> -       struct dma_resv_list *fobj;
> -       struct dma_fence *fence_excl;
> -       unsigned shared_count;
> +       unsigned seq, shared_count;
>         int ret;
>  
>         rcu_read_lock();
>  retry:
>         ret = true;
> +       shared_count = 0;
> +       seq = read_seqcount_begin(&obj->seq);
>  
> -       dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
>         if (test_all) {
>                 unsigned i;
>  
> +               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> +
> +               if (fobj)
> +                       shared_count = fobj->shared_count;
> +
>                 for (i = 0; i < shared_count; ++i) {
>                         struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
>  
> @@ -589,14 +626,24 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>                         else if (!ret)
>                                 break;
>                 }
> -       }
>  
> -       if (!shared_count && fence_excl) {
> -               ret = dma_resv_test_signaled_single(fence_excl);
> -               if (ret < 0)
> +               if (read_seqcount_retry(&obj->seq, seq))
>                         goto retry;
>         }
>  
> +       if (!shared_count) {
> +               struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
> +
> +               if (fence_excl) {
> +                       ret = dma_resv_test_signaled_single(fence_excl);
> +                       if (ret < 0)
> +                               goto retry;
> +
> +                       if (read_seqcount_retry(&obj->seq, seq))
> +                               goto retry;
> +               }
> +       }
> +
>         rcu_read_unlock();
>         return ret;
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index bc4ec6b20a87..76e3516484e7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -251,7 +251,12 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>         new->shared_max = old->shared_max;
>         new->shared_count = k;
>  
> -       rcu_assign_pointer(resv->fence, new);
> +       /* Install the new fence list, seqcount provides the barriers */
> +       preempt_disable();
> +       write_seqcount_begin(&resv->seq);
> +       RCU_INIT_POINTER(resv->fence, new);
> +       write_seqcount_end(&resv->seq);
> +       preempt_enable();
>  
>         /* Drop the references to the removed fences or move them to ef_list */
>         for (i = j, k = 0; i < old->shared_count; ++i) {
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> index a2aff1d8290e..3d4f5775a4ba 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> @@ -83,8 +83,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>         struct drm_i915_gem_busy *args = data;
>         struct drm_i915_gem_object *obj;
>         struct dma_resv_list *list;
> -       unsigned int i, shared_count;
> -       struct dma_fence *excl;
> +       unsigned int seq;
>         int err;
>  
>         err = -ENOENT;
> @@ -110,18 +109,29 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>          * to report the overall busyness. This is what the wait-ioctl does.
>          *
>          */
> -       dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
> +retry:
> +       seq = raw_read_seqcount(&obj->base.resv->seq);
>  
>         /* Translate the exclusive fence to the READ *and* WRITE engine */
> -       args->busy = busy_check_writer(excl);
> +       args->busy =
> +               busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
>  
>         /* Translate shared fences to READ set of engines */
> -       for (i = 0; i < shared_count; ++i) {
> -               struct dma_fence *fence = rcu_dereference(list->shared[i]);
> +       list = rcu_dereference(obj->base.resv->fence);
> +       if (list) {
> +               unsigned int shared_count = list->shared_count, i;
>  
> -               args->busy |= busy_check_reader(fence);
> +               for (i = 0; i < shared_count; ++i) {
> +                       struct dma_fence *fence =
> +                               rcu_dereference(list->shared[i]);
> +
> +                       args->busy |= busy_check_reader(fence);
> +               }
>         }
>  
> +       if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
> +               goto retry;
> +
>         err = 0;
>  out:
>         rcu_read_unlock();
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 38f2802afabb..ee50d10f052b 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -46,6 +46,8 @@
>  #include <linux/rcupdate.h>
>  
>  extern struct ww_class reservation_ww_class;
> +extern struct lock_class_key reservation_seqcount_class;
> +extern const char reservation_seqcount_string[];
>  
>  /**
>   * struct dma_resv_list - a list of shared fences
> @@ -69,6 +71,7 @@ struct dma_resv_list {
>   */
>  struct dma_resv {
>         struct ww_mutex lock;
> +       seqcount_t seq;
>  
>         struct dma_fence __rcu *fence_excl;
>         struct dma_resv_list __rcu *fence;
> @@ -77,24 +80,6 @@ struct dma_resv {
>  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>  
> -/**
> - * dma_resv_get_excl - get the reservation object's
> - * exclusive fence, with update-side lock held
> - * @obj: the reservation object
> - *
> - * Returns the exclusive fence (if any).  Does NOT take a
> - * reference. Writers must hold obj->lock, readers may only
> - * hold a RCU read side lock.
> - *
> - * RETURNS
> - * The exclusive fence or NULL
> - */
> -static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
> -{
> -       return rcu_dereference_protected(obj->fence_excl,
> -                                        dma_resv_held(obj));
> -}
> -
>  /**
>   * dma_resv_get_list - get the reservation object's
>   * shared fence list, with update-side lock held
> @@ -109,53 +94,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
>                                          dma_resv_held(obj));
>  }
>  
> -/**
> - * dma_resv_fences - read consistent fence pointers
> - * @obj: reservation object where we get the fences from
> - * @excl: pointer for the exclusive fence
> - * @list: pointer for the shared fence list
> - *
> - * Make sure we have a consisten exclusive fence and shared fence list.
> - * Must be called with rcu read side lock held.
> - */
> -static inline void dma_resv_fences(struct dma_resv *obj,
> -                                  struct dma_fence **excl,
> -                                  struct dma_resv_list **list,
> -                                  u32 *shared_count)
> -{
> -       do {
> -               *excl = rcu_dereference(obj->fence_excl);
> -               *list = rcu_dereference(obj->fence);
> -               *shared_count = *list ? (*list)->shared_count : 0;
> -               smp_rmb(); /* See dma_resv_add_excl_fence */
> -       } while (rcu_access_pointer(obj->fence_excl) != *excl);
> -}
> -
> -/**
> - * dma_resv_get_excl_rcu - get the reservation object's
> - * exclusive fence, without lock held.
> - * @obj: the reservation object
> - *
> - * If there is an exclusive fence, this atomically increments it's
> - * reference count and returns it.
> - *
> - * RETURNS
> - * The exclusive fence or NULL if none
> - */
> -static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
> -{
> -       struct dma_fence *fence;
> -
> -       if (!rcu_access_pointer(obj->fence_excl))
> -               return NULL;
> -
> -       rcu_read_lock();
> -       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
> -       rcu_read_unlock();
> -
> -       return fence;
> -}
> -
>  /**
>   * dma_resv_lock - lock the reservation object
>   * @obj: the reservation object
> @@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
>         ww_mutex_unlock(&obj->lock);
>  }
>  
> +/**
> + * dma_resv_get_excl - get the reservation object's
> + * exclusive fence, with update-side lock held
> + * @obj: the reservation object
> + *
> + * Returns the exclusive fence (if any).  Does NOT take a
> + * reference. Writers must hold obj->lock, readers may only
> + * hold a RCU read side lock.
> + *
> + * RETURNS
> + * The exclusive fence or NULL
> + */
> +static inline struct dma_fence *
> +dma_resv_get_excl(struct dma_resv *obj)
> +{
> +       return rcu_dereference_protected(obj->fence_excl,
> +                                        dma_resv_held(obj));
> +}
> +
> +/**
> + * dma_resv_get_excl_rcu - get the reservation object's
> + * exclusive fence, without lock held.
> + * @obj: the reservation object
> + *
> + * If there is an exclusive fence, this atomically increments it's
> + * reference count and returns it.
> + *
> + * RETURNS
> + * The exclusive fence or NULL if none
> + */
> +static inline struct dma_fence *
> +dma_resv_get_excl_rcu(struct dma_resv *obj)
> +{
> +       struct dma_fence *fence;
> +
> +       if (!rcu_access_pointer(obj->fence_excl))
> +               return NULL;
> +
> +       rcu_read_lock();
> +       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
> +       rcu_read_unlock();
> +
> +       return fence;
> +}
> +
>  void dma_resv_init(struct dma_resv *obj);
>  void dma_resv_fini(struct dma_resv *obj);
>  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> -- 
> 2.23.0.rc1
> 
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for dma-buf: Restore seqlock around dma_resv updates
  2019-08-14 18:24 [PATCH] dma-buf: Restore seqlock around dma_resv updates Chris Wilson
  2019-08-14 18:26 ` Chris Wilson
@ 2019-08-14 19:06 ` Patchwork
  2019-08-14 19:27 ` ✓ Fi.CI.BAT: success " Patchwork
  2019-08-15 10:58 ` ✓ Fi.CI.IGT: " Patchwork
  3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2019-08-14 19:06 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: dma-buf: Restore seqlock around dma_resv updates
URL   : https://patchwork.freedesktop.org/series/65196/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
cffdefd474fc dma-buf: Restore seqlock around dma_resv updates
-:7: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 67c97fb79a7f ("dma-buf: add reservation_object_fences helper")'
#7: 
67c97fb79a7f ("dma-buf: add reservation_object_fences helper")

-:8: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")'
#8: 
dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")

-:9: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")'
#9: 
0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")

-:10: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 5d344f58da76 ("dma-buf: nuke reservation_object seq number")'
#10: 
5d344f58da76 ("dma-buf: nuke reservation_object seq number")

-:31: WARNING:UNSPECIFIED_INT: Prefer 'unsigned int' to bare use of 'unsigned'
#31: FILE: drivers/dma-buf/dma-buf.c:202:
+	unsigned shared_count, seq;

-:62: WARNING:UNSPECIFIED_INT: Prefer 'unsigned int' to bare use of 'unsigned'
#62: FILE: drivers/dma-buf/dma-buf.c:1160:
+	unsigned seq;

-:97: WARNING:STATIC_CONST_CHAR_ARRAY: const array should probably be static const
#97: FILE: drivers/dma-buf/dma-resv.c:55:
+const char reservation_seqcount_string[] = "reservation_seqcount";

-:154: WARNING:UNSPECIFIED_INT: Prefer 'unsigned int' to bare use of 'unsigned'
#154: FILE: drivers/dma-buf/dma-resv.c:315:
+	unsigned i;

-:165: WARNING:UNSPECIFIED_INT: Prefer 'unsigned int' to bare use of 'unsigned'
#165: FILE: drivers/dma-buf/dma-resv.c:324:
+		unsigned shared_count = src_list->shared_count;

-:230: CHECK:MULTIPLE_ASSIGNMENTS: multiple assignments should be avoided
#230: FILE: drivers/dma-buf/dma-resv.c:414:
+		shared_count = i = 0;

-:269: WARNING:UNSPECIFIED_INT: Prefer 'unsigned int' to bare use of 'unsigned'
#269: FILE: drivers/dma-buf/dma-resv.c:504:
+	unsigned seq, shared_count;

-:315: WARNING:UNSPECIFIED_INT: Prefer 'unsigned int' to bare use of 'unsigned'
#315: FILE: drivers/dma-buf/dma-resv.c:603:
+	unsigned seq, shared_count;

total: 4 errors, 7 warnings, 1 checks, 513 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* ✓ Fi.CI.BAT: success for dma-buf: Restore seqlock around dma_resv updates
  2019-08-14 18:24 [PATCH] dma-buf: Restore seqlock around dma_resv updates Chris Wilson
  2019-08-14 18:26 ` Chris Wilson
  2019-08-14 19:06 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
@ 2019-08-14 19:27 ` Patchwork
  2019-08-15 10:58 ` ✓ Fi.CI.IGT: " Patchwork
  3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2019-08-14 19:27 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: dma-buf: Restore seqlock around dma_resv updates
URL   : https://patchwork.freedesktop.org/series/65196/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6709 -> Patchwork_14016
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/

Known issues
------------

  Here are the changes found in Patchwork_14016 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_create@basic-files:
    - fi-bxt-dsi:         [PASS][1] -> [INCOMPLETE][2] ([fdo#103927])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-bxt-dsi/igt@gem_ctx_create@basic-files.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-bxt-dsi/igt@gem_ctx_create@basic-files.html

  * igt@i915_pm_rpm@module-reload:
    - fi-skl-6770hq:      [PASS][3] -> [FAIL][4] ([fdo#108511])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-skl-6770hq/igt@i915_pm_rpm@module-reload.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-skl-6770hq/igt@i915_pm_rpm@module-reload.html

  * igt@i915_selftest@live_execlists:
    - fi-skl-gvtdvm:      [PASS][5] -> [DMESG-FAIL][6] ([fdo#111108])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-skl-gvtdvm/igt@i915_selftest@live_execlists.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-skl-gvtdvm/igt@i915_selftest@live_execlists.html

  * igt@i915_selftest@live_requests:
    - fi-byt-j1900:       [PASS][7] -> [INCOMPLETE][8] ([fdo#102657])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-byt-j1900/igt@i915_selftest@live_requests.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-byt-j1900/igt@i915_selftest@live_requests.html

  * igt@kms_busy@basic-flip-a:
    - fi-kbl-7567u:       [PASS][9] -> [SKIP][10] ([fdo#109271] / [fdo#109278]) +2 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-kbl-7567u/igt@kms_busy@basic-flip-a.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-kbl-7567u/igt@kms_busy@basic-flip-a.html

  * igt@kms_chamelium@dp-edid-read:
    - fi-icl-u2:          [PASS][11] -> [FAIL][12] ([fdo#106766])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-icl-u2/igt@kms_chamelium@dp-edid-read.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-icl-u2/igt@kms_chamelium@dp-edid-read.html

  * igt@prime_vgem@basic-fence-wait-default:
    - fi-icl-u3:          [PASS][13] -> [DMESG-WARN][14] ([fdo#107724])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-icl-u3/igt@prime_vgem@basic-fence-wait-default.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-icl-u3/igt@prime_vgem@basic-fence-wait-default.html

  
#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s4-devices:
    - fi-blb-e6850:       [INCOMPLETE][15] ([fdo#107718]) -> [PASS][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-blb-e6850/igt@gem_exec_suspend@basic-s4-devices.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-blb-e6850/igt@gem_exec_suspend@basic-s4-devices.html

  * igt@i915_selftest@live_sanitycheck:
    - fi-icl-u3:          [DMESG-WARN][17] ([fdo#107724]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7567u:       [FAIL][19] ([fdo#109485]) -> [PASS][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/fi-kbl-7567u/igt@kms_chamelium@hdmi-hpd-fast.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/fi-kbl-7567u/igt@kms_chamelium@hdmi-hpd-fast.html

  
  [fdo#102657]: https://bugs.freedesktop.org/show_bug.cgi?id=102657
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#106766]: https://bugs.freedesktop.org/show_bug.cgi?id=106766
  [fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108511]: https://bugs.freedesktop.org/show_bug.cgi?id=108511
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
  [fdo#109485]: https://bugs.freedesktop.org/show_bug.cgi?id=109485
  [fdo#111108]: https://bugs.freedesktop.org/show_bug.cgi?id=111108


Participating hosts (53 -> 43)
------------------------------

  Additional (1): fi-kbl-guc 
  Missing    (11): fi-kbl-soraka fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-skl-6260u fi-cfl-8109u fi-icl-y fi-bdw-samus fi-byt-clapper fi-skl-6700k2 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6709 -> Patchwork_14016

  CI-20190529: 20190529
  CI_DRM_6709: 4c9976607118e10dfc9f9feb3b9be2b3579631c9 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5134: 81df2f22385bc275975cf199d962eed9bc10f916 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14016: cffdefd474fc9ecf39ce403c3733e03d5326c0be @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

cffdefd474fc dma-buf: Restore seqlock around dma_resv updates

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] dma-buf: Restore seqlock around dma_resv updates
  2019-08-14 18:26 ` Chris Wilson
@ 2019-08-14 20:07   ` Daniel Vetter
  2019-08-15  9:36     ` Koenig, Christian
  2019-08-15  7:30   ` Koenig, Christian
  1 sibling, 1 reply; 8+ messages in thread
From: Daniel Vetter @ 2019-08-14 20:07 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Daniel Vetter, intel-gfx, Christian König, dri-devel

On Wed, Aug 14, 2019 at 07:26:44PM +0100, Chris Wilson wrote:
> Quoting Chris Wilson (2019-08-14 19:24:01)
> > This reverts
> > 67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
> > dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
> > 0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
> > 5d344f58da76 ("dma-buf: nuke reservation_object seq number")

Oh I didn't realize they landed already.

> > The scenario that defeats simply grabbing a set of shared/exclusive
> > fences and using them blissfully under RCU is that any of those fences
> > may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
> > scenario, while keeping the rcu_read_lock we need to establish that no
> > fence was changed in the dma_resv after a read (or full) memory barrier.

So if I'm reading correctly what Chris is saying here I guess my half
comment in that other thread pointed at a real oversight. Since I still
haven't checked and it's too late for real review, no more for now.

> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Chris Wilson <chris@chris-wilson.co.uk>
> 
> I said I needed to go lie down, that proves it.

Yeah I guess we need to wait for Christian to wake up and have a working
brain.
-Daniel

> 
> Cc: Christian König <christian.koenig@amd.com>
> 
> > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > ---
> >  drivers/dma-buf/dma-buf.c                     |  31 ++++-
> >  drivers/dma-buf/dma-resv.c                    | 109 ++++++++++++-----
> >  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |   7 +-
> >  drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  24 ++--
> >  include/linux/dma-resv.h                      | 113 ++++++++----------
> >  5 files changed, 175 insertions(+), 109 deletions(-)
> > 
> > diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> > index b3400d6524ab..433d91d710e4 100644
> > --- a/drivers/dma-buf/dma-buf.c
> > +++ b/drivers/dma-buf/dma-buf.c
> > @@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
> >         struct dma_resv_list *fobj;
> >         struct dma_fence *fence_excl;
> >         __poll_t events;
> > -       unsigned shared_count;
> > +       unsigned shared_count, seq;
> >  
> >         dmabuf = file->private_data;
> >         if (!dmabuf || !dmabuf->resv)
> > @@ -213,8 +213,21 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
> >         if (!events)
> >                 return 0;
> >  
> > +retry:
> > +       seq = read_seqcount_begin(&resv->seq);
> >         rcu_read_lock();
> > -       dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
> > +
> > +       fobj = rcu_dereference(resv->fence);
> > +       if (fobj)
> > +               shared_count = fobj->shared_count;
> > +       else
> > +               shared_count = 0;
> > +       fence_excl = rcu_dereference(resv->fence_excl);
> > +       if (read_seqcount_retry(&resv->seq, seq)) {
> > +               rcu_read_unlock();
> > +               goto retry;
> > +       }
> > +
> >         if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
> >                 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
> >                 __poll_t pevents = EPOLLIN;
> > @@ -1144,6 +1157,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
> >         struct dma_resv *robj;
> >         struct dma_resv_list *fobj;
> >         struct dma_fence *fence;
> > +       unsigned seq;
> >         int count = 0, attach_count, shared_count, i;
> >         size_t size = 0;
> >  
> > @@ -1174,9 +1188,16 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
> >                                 buf_obj->name ?: "");
> >  
> >                 robj = buf_obj->resv;
> > -               rcu_read_lock();
> > -               dma_resv_fences(robj, &fence, &fobj, &shared_count);
> > -               rcu_read_unlock();
> > +               while (true) {
> > +                       seq = read_seqcount_begin(&robj->seq);
> > +                       rcu_read_lock();
> > +                       fobj = rcu_dereference(robj->fence);
> > +                       shared_count = fobj ? fobj->shared_count : 0;
> > +                       fence = rcu_dereference(robj->fence_excl);
> > +                       if (!read_seqcount_retry(&robj->seq, seq))
> > +                               break;
> > +                       rcu_read_unlock();
> > +               }
> >  
> >                 if (fence)
> >                         seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
> > diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> > index f5142683c851..42a8f3f11681 100644
> > --- a/drivers/dma-buf/dma-resv.c
> > +++ b/drivers/dma-buf/dma-resv.c
> > @@ -49,6 +49,12 @@
> >  DEFINE_WD_CLASS(reservation_ww_class);
> >  EXPORT_SYMBOL(reservation_ww_class);
> >  
> > +struct lock_class_key reservation_seqcount_class;
> > +EXPORT_SYMBOL(reservation_seqcount_class);
> > +
> > +const char reservation_seqcount_string[] = "reservation_seqcount";
> > +EXPORT_SYMBOL(reservation_seqcount_string);
> > +
> >  /**
> >   * dma_resv_list_alloc - allocate fence list
> >   * @shared_max: number of fences we need space for
> > @@ -96,6 +102,9 @@ static void dma_resv_list_free(struct dma_resv_list *list)
> >  void dma_resv_init(struct dma_resv *obj)
> >  {
> >         ww_mutex_init(&obj->lock, &reservation_ww_class);
> > +
> > +       __seqcount_init(&obj->seq, reservation_seqcount_string,
> > +                       &reservation_seqcount_class);
> >         RCU_INIT_POINTER(obj->fence, NULL);
> >         RCU_INIT_POINTER(obj->fence_excl, NULL);
> >  }
> > @@ -225,6 +234,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
> >         fobj = dma_resv_get_list(obj);
> >         count = fobj->shared_count;
> >  
> > +       preempt_disable();
> > +       write_seqcount_begin(&obj->seq);
> > +
> >         for (i = 0; i < count; ++i) {
> >  
> >                 old = rcu_dereference_protected(fobj->shared[i],
> > @@ -242,6 +254,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
> >         RCU_INIT_POINTER(fobj->shared[i], fence);
> >         /* pointer update must be visible before we extend the shared_count */
> >         smp_store_mb(fobj->shared_count, count);
> > +
> > +       write_seqcount_end(&obj->seq);
> > +       preempt_enable();
> >         dma_fence_put(old);
> >  }
> >  EXPORT_SYMBOL(dma_resv_add_shared_fence);
> > @@ -269,10 +284,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
> >                 dma_fence_get(fence);
> >  
> >         preempt_disable();
> > -       rcu_assign_pointer(obj->fence_excl, fence);
> > -       /* pointer update must be visible before we modify the shared_count */
> > +       write_seqcount_begin(&obj->seq);
> > +       /* write_seqcount_begin provides the necessary memory barrier */
> > +       RCU_INIT_POINTER(obj->fence_excl, fence);
> >         if (old)
> > -               smp_store_mb(old->shared_count, 0);
> > +               old->shared_count = 0;
> > +       write_seqcount_end(&obj->seq);
> >         preempt_enable();
> >  
> >         /* inplace update, no shared fences */
> > @@ -295,15 +312,17 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >  {
> >         struct dma_resv_list *src_list, *dst_list;
> >         struct dma_fence *old, *new;
> > -       unsigned int i, shared_count;
> > +       unsigned i;
> >  
> >         dma_resv_assert_held(dst);
> >  
> >         rcu_read_lock();
> > +       src_list = rcu_dereference(src->fence);
> >  
> >  retry:
> > -       dma_resv_fences(src, &new, &src_list, &shared_count);
> > -       if (shared_count) {
> > +       if (src_list) {
> > +               unsigned shared_count = src_list->shared_count;
> > +
> >                 rcu_read_unlock();
> >  
> >                 dst_list = dma_resv_list_alloc(shared_count);
> > @@ -311,14 +330,14 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >                         return -ENOMEM;
> >  
> >                 rcu_read_lock();
> > -               dma_resv_fences(src, &new, &src_list, &shared_count);
> > -               if (!src_list || shared_count > dst_list->shared_max) {
> > +               src_list = rcu_dereference(src->fence);
> > +               if (!src_list || src_list->shared_count > shared_count) {
> >                         kfree(dst_list);
> >                         goto retry;
> >                 }
> >  
> >                 dst_list->shared_count = 0;
> > -               for (i = 0; i < shared_count; ++i) {
> > +               for (i = 0; i < src_list->shared_count; ++i) {
> >                         struct dma_fence *fence;
> >  
> >                         fence = rcu_dereference(src_list->shared[i]);
> > @@ -328,6 +347,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >  
> >                         if (!dma_fence_get_rcu(fence)) {
> >                                 dma_resv_list_free(dst_list);
> > +                               src_list = rcu_dereference(src->fence);
> >                                 goto retry;
> >                         }
> >  
> > @@ -342,18 +362,18 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
> >                 dst_list = NULL;
> >         }
> >  
> > -       if (new && !dma_fence_get_rcu(new)) {
> > -               dma_resv_list_free(dst_list);
> > -               goto retry;
> > -       }
> > +       new = dma_fence_get_rcu_safe(&src->fence_excl);
> >         rcu_read_unlock();
> >  
> >         src_list = dma_resv_get_list(dst);
> >         old = dma_resv_get_excl(dst);
> >  
> >         preempt_disable();
> > -       rcu_assign_pointer(dst->fence_excl, new);
> > -       rcu_assign_pointer(dst->fence, dst_list);
> > +       write_seqcount_begin(&dst->seq);
> > +       /* write_seqcount_begin provides the necessary memory barrier */
> > +       RCU_INIT_POINTER(dst->fence_excl, new);
> > +       RCU_INIT_POINTER(dst->fence, dst_list);
> > +       write_seqcount_end(&dst->seq);
> >         preempt_enable();
> >  
> >         dma_resv_list_free(src_list);
> > @@ -388,18 +408,19 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
> >  
> >         do {
> >                 struct dma_resv_list *fobj;
> > -               unsigned int i;
> > +               unsigned int i, seq;
> >                 size_t sz = 0;
> >  
> > -               i = 0;
> > +               shared_count = i = 0;
> >  
> >                 rcu_read_lock();
> > -               dma_resv_fences(obj, &fence_excl, &fobj,
> > -                                         &shared_count);
> > +               seq = read_seqcount_begin(&obj->seq);
> >  
> > +               fence_excl = rcu_dereference(obj->fence_excl);
> >                 if (fence_excl && !dma_fence_get_rcu(fence_excl))
> >                         goto unlock;
> >  
> > +               fobj = rcu_dereference(obj->fence);
> >                 if (fobj)
> >                         sz += sizeof(*shared) * fobj->shared_max;
> >  
> > @@ -427,6 +448,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
> >                                 break;
> >                         }
> >                         shared = nshared;
> > +                       shared_count = fobj ? fobj->shared_count : 0;
> >                         for (i = 0; i < shared_count; ++i) {
> >                                 shared[i] = rcu_dereference(fobj->shared[i]);
> >                                 if (!dma_fence_get_rcu(shared[i]))
> > @@ -434,7 +456,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
> >                         }
> >                 }
> >  
> > -               if (i != shared_count) {
> > +               if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
> >                         while (i--)
> >                                 dma_fence_put(shared[i]);
> >                         dma_fence_put(fence_excl);
> > @@ -478,17 +500,18 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> >                                bool wait_all, bool intr,
> >                                unsigned long timeout)
> >  {
> > -       struct dma_resv_list *fobj;
> >         struct dma_fence *fence;
> > -       unsigned shared_count;
> > +       unsigned seq, shared_count;
> >         long ret = timeout ? timeout : 1;
> >         int i;
> >  
> >  retry:
> > +       shared_count = 0;
> > +       seq = read_seqcount_begin(&obj->seq);
> >         rcu_read_lock();
> >         i = -1;
> >  
> > -       dma_resv_fences(obj, &fence, &fobj, &shared_count);
> > +       fence = rcu_dereference(obj->fence_excl);
> >         if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
> >                 if (!dma_fence_get_rcu(fence))
> >                         goto unlock_retry;
> > @@ -503,6 +526,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> >         }
> >  
> >         if (wait_all) {
> > +               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> > +
> > +               if (fobj)
> > +                       shared_count = fobj->shared_count;
> > +
> >                 for (i = 0; !fence && i < shared_count; ++i) {
> >                         struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
> >  
> > @@ -525,6 +553,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
> >  
> >         rcu_read_unlock();
> >         if (fence) {
> > +               if (read_seqcount_retry(&obj->seq, seq)) {
> > +                       dma_fence_put(fence);
> > +                       goto retry;
> > +               }
> > +
> >                 ret = dma_fence_wait_timeout(fence, intr, ret);
> >                 dma_fence_put(fence);
> >                 if (ret > 0 && wait_all && (i + 1 < shared_count))
> > @@ -567,19 +600,23 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
> >   */
> >  bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> >  {
> > -       struct dma_resv_list *fobj;
> > -       struct dma_fence *fence_excl;
> > -       unsigned shared_count;
> > +       unsigned seq, shared_count;
> >         int ret;
> >  
> >         rcu_read_lock();
> >  retry:
> >         ret = true;
> > +       shared_count = 0;
> > +       seq = read_seqcount_begin(&obj->seq);
> >  
> > -       dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
> >         if (test_all) {
> >                 unsigned i;
> >  
> > +               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
> > +
> > +               if (fobj)
> > +                       shared_count = fobj->shared_count;
> > +
> >                 for (i = 0; i < shared_count; ++i) {
> >                         struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
> >  
> > @@ -589,14 +626,24 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
> >                         else if (!ret)
> >                                 break;
> >                 }
> > -       }
> >  
> > -       if (!shared_count && fence_excl) {
> > -               ret = dma_resv_test_signaled_single(fence_excl);
> > -               if (ret < 0)
> > +               if (read_seqcount_retry(&obj->seq, seq))
> >                         goto retry;
> >         }
> >  
> > +       if (!shared_count) {
> > +               struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
> > +
> > +               if (fence_excl) {
> > +                       ret = dma_resv_test_signaled_single(fence_excl);
> > +                       if (ret < 0)
> > +                               goto retry;
> > +
> > +                       if (read_seqcount_retry(&obj->seq, seq))
> > +                               goto retry;
> > +               }
> > +       }
> > +
> >         rcu_read_unlock();
> >         return ret;
> >  }
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > index bc4ec6b20a87..76e3516484e7 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > @@ -251,7 +251,12 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
> >         new->shared_max = old->shared_max;
> >         new->shared_count = k;
> >  
> > -       rcu_assign_pointer(resv->fence, new);
> > +       /* Install the new fence list, seqcount provides the barriers */
> > +       preempt_disable();
> > +       write_seqcount_begin(&resv->seq);
> > +       RCU_INIT_POINTER(resv->fence, new);
> > +       write_seqcount_end(&resv->seq);
> > +       preempt_enable();
> >  
> >         /* Drop the references to the removed fences or move them to ef_list */
> >         for (i = j, k = 0; i < old->shared_count; ++i) {
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> > index a2aff1d8290e..3d4f5775a4ba 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
> > @@ -83,8 +83,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
> >         struct drm_i915_gem_busy *args = data;
> >         struct drm_i915_gem_object *obj;
> >         struct dma_resv_list *list;
> > -       unsigned int i, shared_count;
> > -       struct dma_fence *excl;
> > +       unsigned int seq;
> >         int err;
> >  
> >         err = -ENOENT;
> > @@ -110,18 +109,29 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
> >          * to report the overall busyness. This is what the wait-ioctl does.
> >          *
> >          */
> > -       dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
> > +retry:
> > +       seq = raw_read_seqcount(&obj->base.resv->seq);
> >  
> >         /* Translate the exclusive fence to the READ *and* WRITE engine */
> > -       args->busy = busy_check_writer(excl);
> > +       args->busy =
> > +               busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
> >  
> >         /* Translate shared fences to READ set of engines */
> > -       for (i = 0; i < shared_count; ++i) {
> > -               struct dma_fence *fence = rcu_dereference(list->shared[i]);
> > +       list = rcu_dereference(obj->base.resv->fence);
> > +       if (list) {
> > +               unsigned int shared_count = list->shared_count, i;
> >  
> > -               args->busy |= busy_check_reader(fence);
> > +               for (i = 0; i < shared_count; ++i) {
> > +                       struct dma_fence *fence =
> > +                               rcu_dereference(list->shared[i]);
> > +
> > +                       args->busy |= busy_check_reader(fence);
> > +               }
> >         }
> >  
> > +       if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
> > +               goto retry;
> > +
> >         err = 0;
> >  out:
> >         rcu_read_unlock();
> > diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> > index 38f2802afabb..ee50d10f052b 100644
> > --- a/include/linux/dma-resv.h
> > +++ b/include/linux/dma-resv.h
> > @@ -46,6 +46,8 @@
> >  #include <linux/rcupdate.h>
> >  
> >  extern struct ww_class reservation_ww_class;
> > +extern struct lock_class_key reservation_seqcount_class;
> > +extern const char reservation_seqcount_string[];
> >  
> >  /**
> >   * struct dma_resv_list - a list of shared fences
> > @@ -69,6 +71,7 @@ struct dma_resv_list {
> >   */
> >  struct dma_resv {
> >         struct ww_mutex lock;
> > +       seqcount_t seq;
> >  
> >         struct dma_fence __rcu *fence_excl;
> >         struct dma_resv_list __rcu *fence;
> > @@ -77,24 +80,6 @@ struct dma_resv {
> >  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
> >  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
> >  
> > -/**
> > - * dma_resv_get_excl - get the reservation object's
> > - * exclusive fence, with update-side lock held
> > - * @obj: the reservation object
> > - *
> > - * Returns the exclusive fence (if any).  Does NOT take a
> > - * reference. Writers must hold obj->lock, readers may only
> > - * hold a RCU read side lock.
> > - *
> > - * RETURNS
> > - * The exclusive fence or NULL
> > - */
> > -static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
> > -{
> > -       return rcu_dereference_protected(obj->fence_excl,
> > -                                        dma_resv_held(obj));
> > -}
> > -
> >  /**
> >   * dma_resv_get_list - get the reservation object's
> >   * shared fence list, with update-side lock held
> > @@ -109,53 +94,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
> >                                          dma_resv_held(obj));
> >  }
> >  
> > -/**
> > - * dma_resv_fences - read consistent fence pointers
> > - * @obj: reservation object where we get the fences from
> > - * @excl: pointer for the exclusive fence
> > - * @list: pointer for the shared fence list
> > - *
> > - * Make sure we have a consistent exclusive fence and shared fence list.
> > - * Must be called with rcu read side lock held.
> > - */
> > -static inline void dma_resv_fences(struct dma_resv *obj,
> > -                                  struct dma_fence **excl,
> > -                                  struct dma_resv_list **list,
> > -                                  u32 *shared_count)
> > -{
> > -       do {
> > -               *excl = rcu_dereference(obj->fence_excl);
> > -               *list = rcu_dereference(obj->fence);
> > -               *shared_count = *list ? (*list)->shared_count : 0;
> > -               smp_rmb(); /* See dma_resv_add_excl_fence */
> > -       } while (rcu_access_pointer(obj->fence_excl) != *excl);
> > -}
> > -
> > -/**
> > - * dma_resv_get_excl_rcu - get the reservation object's
> > - * exclusive fence, without lock held.
> > - * @obj: the reservation object
> > - *
> > - * If there is an exclusive fence, this atomically increments it's
> > - * reference count and returns it.
> > - *
> > - * RETURNS
> > - * The exclusive fence or NULL if none
> > - */
> > -static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
> > -{
> > -       struct dma_fence *fence;
> > -
> > -       if (!rcu_access_pointer(obj->fence_excl))
> > -               return NULL;
> > -
> > -       rcu_read_lock();
> > -       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
> > -       rcu_read_unlock();
> > -
> > -       return fence;
> > -}
> > -
> >  /**
> >   * dma_resv_lock - lock the reservation object
> >   * @obj: the reservation object
> > @@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
> >         ww_mutex_unlock(&obj->lock);
> >  }
> >  
> > +/**
> > + * dma_resv_get_excl - get the reservation object's
> > + * exclusive fence, with update-side lock held
> > + * @obj: the reservation object
> > + *
> > + * Returns the exclusive fence (if any).  Does NOT take a
> > + * reference. Writers must hold obj->lock, readers may only
> > + * hold a RCU read side lock.
> > + *
> > + * RETURNS
> > + * The exclusive fence or NULL
> > + */
> > +static inline struct dma_fence *
> > +dma_resv_get_excl(struct dma_resv *obj)
> > +{
> > +       return rcu_dereference_protected(obj->fence_excl,
> > +                                        dma_resv_held(obj));
> > +}
> > +
> > +/**
> > + * dma_resv_get_excl_rcu - get the reservation object's
> > + * exclusive fence, without lock held.
> > + * @obj: the reservation object
> > + *
> > + * If there is an exclusive fence, this atomically increments its
> > + * reference count and returns it.
> > + *
> > + * RETURNS
> > + * The exclusive fence or NULL if none
> > + */
> > +static inline struct dma_fence *
> > +dma_resv_get_excl_rcu(struct dma_resv *obj)
> > +{
> > +       struct dma_fence *fence;
> > +
> > +       if (!rcu_access_pointer(obj->fence_excl))
> > +               return NULL;
> > +
> > +       rcu_read_lock();
> > +       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
> > +       rcu_read_unlock();
> > +
> > +       return fence;
> > +}
> > +
> >  void dma_resv_init(struct dma_resv *obj);
> >  void dma_resv_fini(struct dma_resv *obj);
> >  int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
> > -- 
> > 2.23.0.rc1
> > 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] dma-buf: Restore seqlock around dma_resv updates
  2019-08-14 18:26 ` Chris Wilson
  2019-08-14 20:07   ` Daniel Vetter
@ 2019-08-15  7:30   ` Koenig, Christian
  1 sibling, 0 replies; 8+ messages in thread
From: Koenig, Christian @ 2019-08-15  7:30 UTC (permalink / raw)
  To: Chris Wilson, dri-devel; +Cc: Daniel Vetter, intel-gfx

Am 14.08.19 um 20:26 schrieb Chris Wilson:
> Quoting Chris Wilson (2019-08-14 19:24:01)
>> This reverts
>> 67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
>> dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
>> 0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
>> 5d344f58da76 ("dma-buf: nuke reservation_object seq number")
>>
>> The scenario that defeats simply grabbing a set of shared/exclusive
>> fences and using them blissfully under RCU is that any of those fences
>> may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
>> scenario, while keeping the rcu_read_lock we need to establish that no
>> fence was changed in the dma_resv after a read (or full) memory barrier.
>>
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Acked-by: Christian König <christian.koenig@amd.com>

>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> I said I needed to go lie down, that proves it.
>
> Cc: Christian König <christian.koenig@amd.com>
>
>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>> ---
>>   drivers/dma-buf/dma-buf.c                     |  31 ++++-
>>   drivers/dma-buf/dma-resv.c                    | 109 ++++++++++++-----
>>   .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |   7 +-
>>   drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  24 ++--
>>   include/linux/dma-resv.h                      | 113 ++++++++----------
>>   5 files changed, 175 insertions(+), 109 deletions(-)
>>
>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>> index b3400d6524ab..433d91d710e4 100644
>> --- a/drivers/dma-buf/dma-buf.c
>> +++ b/drivers/dma-buf/dma-buf.c
>> @@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>>          struct dma_resv_list *fobj;
>>          struct dma_fence *fence_excl;
>>          __poll_t events;
>> -       unsigned shared_count;
>> +       unsigned shared_count, seq;
>>   
>>          dmabuf = file->private_data;
>>          if (!dmabuf || !dmabuf->resv)
>> @@ -213,8 +213,21 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>>          if (!events)
>>                  return 0;
>>   
>> +retry:
>> +       seq = read_seqcount_begin(&resv->seq);
>>          rcu_read_lock();
>> -       dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
>> +
>> +       fobj = rcu_dereference(resv->fence);
>> +       if (fobj)
>> +               shared_count = fobj->shared_count;
>> +       else
>> +               shared_count = 0;
>> +       fence_excl = rcu_dereference(resv->fence_excl);
>> +       if (read_seqcount_retry(&resv->seq, seq)) {
>> +               rcu_read_unlock();
>> +               goto retry;
>> +       }
>> +
>>          if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
>>                  struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
>>                  __poll_t pevents = EPOLLIN;
>> @@ -1144,6 +1157,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>>          struct dma_resv *robj;
>>          struct dma_resv_list *fobj;
>>          struct dma_fence *fence;
>> +       unsigned seq;
>>          int count = 0, attach_count, shared_count, i;
>>          size_t size = 0;
>>   
>> @@ -1174,9 +1188,16 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
>>                                  buf_obj->name ?: "");
>>   
>>                  robj = buf_obj->resv;
>> -               rcu_read_lock();
>> -               dma_resv_fences(robj, &fence, &fobj, &shared_count);
>> -               rcu_read_unlock();
>> +               while (true) {
>> +                       seq = read_seqcount_begin(&robj->seq);
>> +                       rcu_read_lock();
>> +                       fobj = rcu_dereference(robj->fence);
>> +                       shared_count = fobj ? fobj->shared_count : 0;
>> +                       fence = rcu_dereference(robj->fence_excl);
>> +                       if (!read_seqcount_retry(&robj->seq, seq))
>> +                               break;
>> +                       rcu_read_unlock();
>> +               }
>>   
>>                  if (fence)
>>                          seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
>> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
>> index f5142683c851..42a8f3f11681 100644
>> --- a/drivers/dma-buf/dma-resv.c
>> +++ b/drivers/dma-buf/dma-resv.c
>> @@ -49,6 +49,12 @@
>>   DEFINE_WD_CLASS(reservation_ww_class);
>>   EXPORT_SYMBOL(reservation_ww_class);
>>   
>> +struct lock_class_key reservation_seqcount_class;
>> +EXPORT_SYMBOL(reservation_seqcount_class);
>> +
>> +const char reservation_seqcount_string[] = "reservation_seqcount";
>> +EXPORT_SYMBOL(reservation_seqcount_string);
>> +
>>   /**
>>    * dma_resv_list_alloc - allocate fence list
>>    * @shared_max: number of fences we need space for
>> @@ -96,6 +102,9 @@ static void dma_resv_list_free(struct dma_resv_list *list)
>>   void dma_resv_init(struct dma_resv *obj)
>>   {
>>          ww_mutex_init(&obj->lock, &reservation_ww_class);
>> +
>> +       __seqcount_init(&obj->seq, reservation_seqcount_string,
>> +                       &reservation_seqcount_class);
>>          RCU_INIT_POINTER(obj->fence, NULL);
>>          RCU_INIT_POINTER(obj->fence_excl, NULL);
>>   }
>> @@ -225,6 +234,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>>          fobj = dma_resv_get_list(obj);
>>          count = fobj->shared_count;
>>   
>> +       preempt_disable();
>> +       write_seqcount_begin(&obj->seq);
>> +
>>          for (i = 0; i < count; ++i) {
>>   
>>                  old = rcu_dereference_protected(fobj->shared[i],
>> @@ -242,6 +254,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
>>          RCU_INIT_POINTER(fobj->shared[i], fence);
>>          /* pointer update must be visible before we extend the shared_count */
>>          smp_store_mb(fobj->shared_count, count);
>> +
>> +       write_seqcount_end(&obj->seq);
>> +       preempt_enable();
>>          dma_fence_put(old);
>>   }
>>   EXPORT_SYMBOL(dma_resv_add_shared_fence);
>> @@ -269,10 +284,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
>>                  dma_fence_get(fence);
>>   
>>          preempt_disable();
>> -       rcu_assign_pointer(obj->fence_excl, fence);
>> -       /* pointer update must be visible before we modify the shared_count */
>> +       write_seqcount_begin(&obj->seq);
>> +       /* write_seqcount_begin provides the necessary memory barrier */
>> +       RCU_INIT_POINTER(obj->fence_excl, fence);
>>          if (old)
>> -               smp_store_mb(old->shared_count, 0);
>> +               old->shared_count = 0;
>> +       write_seqcount_end(&obj->seq);
>>          preempt_enable();
>>   
>>          /* inplace update, no shared fences */
>> @@ -295,15 +312,17 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   {
>>          struct dma_resv_list *src_list, *dst_list;
>>          struct dma_fence *old, *new;
>> -       unsigned int i, shared_count;
>> +       unsigned i;
>>   
>>          dma_resv_assert_held(dst);
>>   
>>          rcu_read_lock();
>> +       src_list = rcu_dereference(src->fence);
>>   
>>   retry:
>> -       dma_resv_fences(src, &new, &src_list, &shared_count);
>> -       if (shared_count) {
>> +       if (src_list) {
>> +               unsigned shared_count = src_list->shared_count;
>> +
>>                  rcu_read_unlock();
>>   
>>                  dst_list = dma_resv_list_alloc(shared_count);
>> @@ -311,14 +330,14 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>                          return -ENOMEM;
>>   
>>                  rcu_read_lock();
>> -               dma_resv_fences(src, &new, &src_list, &shared_count);
>> -               if (!src_list || shared_count > dst_list->shared_max) {
>> +               src_list = rcu_dereference(src->fence);
>> +               if (!src_list || src_list->shared_count > shared_count) {
>>                          kfree(dst_list);
>>                          goto retry;
>>                  }
>>   
>>                  dst_list->shared_count = 0;
>> -               for (i = 0; i < shared_count; ++i) {
>> +               for (i = 0; i < src_list->shared_count; ++i) {
>>                          struct dma_fence *fence;
>>   
>>                          fence = rcu_dereference(src_list->shared[i]);
>> @@ -328,6 +347,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>   
>>                          if (!dma_fence_get_rcu(fence)) {
>>                                  dma_resv_list_free(dst_list);
>> +                               src_list = rcu_dereference(src->fence);
>>                                  goto retry;
>>                          }
>>   
>> @@ -342,18 +362,18 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
>>                  dst_list = NULL;
>>          }
>>   
>> -       if (new && !dma_fence_get_rcu(new)) {
>> -               dma_resv_list_free(dst_list);
>> -               goto retry;
>> -       }
>> +       new = dma_fence_get_rcu_safe(&src->fence_excl);
>>          rcu_read_unlock();
>>   
>>          src_list = dma_resv_get_list(dst);
>>          old = dma_resv_get_excl(dst);
>>   
>>          preempt_disable();
>> -       rcu_assign_pointer(dst->fence_excl, new);
>> -       rcu_assign_pointer(dst->fence, dst_list);
>> +       write_seqcount_begin(&dst->seq);
>> +       /* write_seqcount_begin provides the necessary memory barrier */
>> +       RCU_INIT_POINTER(dst->fence_excl, new);
>> +       RCU_INIT_POINTER(dst->fence, dst_list);
>> +       write_seqcount_end(&dst->seq);
>>          preempt_enable();
>>   
>>          dma_resv_list_free(src_list);
>> @@ -388,18 +408,19 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>>   
>>          do {
>>                  struct dma_resv_list *fobj;
>> -               unsigned int i;
>> +               unsigned int i, seq;
>>                  size_t sz = 0;
>>   
>> -               i = 0;
>> +               shared_count = i = 0;
>>   
>>                  rcu_read_lock();
>> -               dma_resv_fences(obj, &fence_excl, &fobj,
>> -                                         &shared_count);
>> +               seq = read_seqcount_begin(&obj->seq);
>>   
>> +               fence_excl = rcu_dereference(obj->fence_excl);
>>                  if (fence_excl && !dma_fence_get_rcu(fence_excl))
>>                          goto unlock;
>>   
>> +               fobj = rcu_dereference(obj->fence);
>>                  if (fobj)
>>                          sz += sizeof(*shared) * fobj->shared_max;
>>   
>> @@ -427,6 +448,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>>                                  break;
>>                          }
>>                          shared = nshared;
>> +                       shared_count = fobj ? fobj->shared_count : 0;
>>                          for (i = 0; i < shared_count; ++i) {
>>                                  shared[i] = rcu_dereference(fobj->shared[i]);
>>                                  if (!dma_fence_get_rcu(shared[i]))
>> @@ -434,7 +456,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
>>                          }
>>                  }
>>   
>> -               if (i != shared_count) {
>> +               if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
>>                          while (i--)
>>                                  dma_fence_put(shared[i]);
>>                          dma_fence_put(fence_excl);
>> @@ -478,17 +500,18 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>>                                 bool wait_all, bool intr,
>>                                 unsigned long timeout)
>>   {
>> -       struct dma_resv_list *fobj;
>>          struct dma_fence *fence;
>> -       unsigned shared_count;
>> +       unsigned seq, shared_count;
>>          long ret = timeout ? timeout : 1;
>>          int i;
>>   
>>   retry:
>> +       shared_count = 0;
>> +       seq = read_seqcount_begin(&obj->seq);
>>          rcu_read_lock();
>>          i = -1;
>>   
>> -       dma_resv_fences(obj, &fence, &fobj, &shared_count);
>> +       fence = rcu_dereference(obj->fence_excl);
>>          if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
>>                  if (!dma_fence_get_rcu(fence))
>>                          goto unlock_retry;
>> @@ -503,6 +526,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>>          }
>>   
>>          if (wait_all) {
>> +               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
>> +
>> +               if (fobj)
>> +                       shared_count = fobj->shared_count;
>> +
>>                  for (i = 0; !fence && i < shared_count; ++i) {
>>                          struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
>>   
>> @@ -525,6 +553,11 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
>>   
>>          rcu_read_unlock();
>>          if (fence) {
>> +               if (read_seqcount_retry(&obj->seq, seq)) {
>> +                       dma_fence_put(fence);
>> +                       goto retry;
>> +               }
>> +
>>                  ret = dma_fence_wait_timeout(fence, intr, ret);
>>                  dma_fence_put(fence);
>>                  if (ret > 0 && wait_all && (i + 1 < shared_count))
>> @@ -567,19 +600,23 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
>>    */
>>   bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>>   {
>> -       struct dma_resv_list *fobj;
>> -       struct dma_fence *fence_excl;
>> -       unsigned shared_count;
>> +       unsigned seq, shared_count;
>>          int ret;
>>   
>>          rcu_read_lock();
>>   retry:
>>          ret = true;
>> +       shared_count = 0;
>> +       seq = read_seqcount_begin(&obj->seq);
>>   
>> -       dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
>>          if (test_all) {
>>                  unsigned i;
>>   
>> +               struct dma_resv_list *fobj = rcu_dereference(obj->fence);
>> +
>> +               if (fobj)
>> +                       shared_count = fobj->shared_count;
>> +
>>                  for (i = 0; i < shared_count; ++i) {
>>                          struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
>>   
>> @@ -589,14 +626,24 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
>>                          else if (!ret)
>>                                  break;
>>                  }
>> -       }
>>   
>> -       if (!shared_count && fence_excl) {
>> -               ret = dma_resv_test_signaled_single(fence_excl);
>> -               if (ret < 0)
>> +               if (read_seqcount_retry(&obj->seq, seq))
>>                          goto retry;
>>          }
>>   
>> +       if (!shared_count) {
>> +               struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
>> +
>> +               if (fence_excl) {
>> +                       ret = dma_resv_test_signaled_single(fence_excl);
>> +                       if (ret < 0)
>> +                               goto retry;
>> +
>> +                       if (read_seqcount_retry(&obj->seq, seq))
>> +                               goto retry;
>> +               }
>> +       }
>> +
>>          rcu_read_unlock();
>>          return ret;
>>   }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> index bc4ec6b20a87..76e3516484e7 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> @@ -251,7 +251,12 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
>>          new->shared_max = old->shared_max;
>>          new->shared_count = k;
>>   
>> -       rcu_assign_pointer(resv->fence, new);
>> +       /* Install the new fence list, seqcount provides the barriers */
>> +       preempt_disable();
>> +       write_seqcount_begin(&resv->seq);
>> +       RCU_INIT_POINTER(resv->fence, new);
>> +       write_seqcount_end(&resv->seq);
>> +       preempt_enable();
>>   
>>          /* Drop the references to the removed fences or move them to ef_list */
>>          for (i = j, k = 0; i < old->shared_count; ++i) {
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> index a2aff1d8290e..3d4f5775a4ba 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
>> @@ -83,8 +83,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>>          struct drm_i915_gem_busy *args = data;
>>          struct drm_i915_gem_object *obj;
>>          struct dma_resv_list *list;
>> -       unsigned int i, shared_count;
>> -       struct dma_fence *excl;
>> +       unsigned int seq;
>>          int err;
>>   
>>          err = -ENOENT;
>> @@ -110,18 +109,29 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>>           * to report the overall busyness. This is what the wait-ioctl does.
>>           *
>>           */
>> -       dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
>> +retry:
>> +       seq = raw_read_seqcount(&obj->base.resv->seq);
>>   
>>          /* Translate the exclusive fence to the READ *and* WRITE engine */
>> -       args->busy = busy_check_writer(excl);
>> +       args->busy =
>> +               busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
>>   
>>          /* Translate shared fences to READ set of engines */
>> -       for (i = 0; i < shared_count; ++i) {
>> -               struct dma_fence *fence = rcu_dereference(list->shared[i]);
>> +       list = rcu_dereference(obj->base.resv->fence);
>> +       if (list) {
>> +               unsigned int shared_count = list->shared_count, i;
>>   
>> -               args->busy |= busy_check_reader(fence);
>> +               for (i = 0; i < shared_count; ++i) {
>> +                       struct dma_fence *fence =
>> +                               rcu_dereference(list->shared[i]);
>> +
>> +                       args->busy |= busy_check_reader(fence);
>> +               }
>>          }
>>   
>> +       if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
>> +               goto retry;
>> +
>>          err = 0;
>>   out:
>>          rcu_read_unlock();
>> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
>> index 38f2802afabb..ee50d10f052b 100644
>> --- a/include/linux/dma-resv.h
>> +++ b/include/linux/dma-resv.h
>> @@ -46,6 +46,8 @@
>>   #include <linux/rcupdate.h>
>>   
>>   extern struct ww_class reservation_ww_class;
>> +extern struct lock_class_key reservation_seqcount_class;
>> +extern const char reservation_seqcount_string[];
>>   
>>   /**
>>    * struct dma_resv_list - a list of shared fences
>> @@ -69,6 +71,7 @@ struct dma_resv_list {
>>    */
>>   struct dma_resv {
>>          struct ww_mutex lock;
>> +       seqcount_t seq;
>>   
>>          struct dma_fence __rcu *fence_excl;
>>          struct dma_resv_list __rcu *fence;
>> @@ -77,24 +80,6 @@ struct dma_resv {
>>   #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>>   #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>>   
>> -/**
>> - * dma_resv_get_excl - get the reservation object's
>> - * exclusive fence, with update-side lock held
>> - * @obj: the reservation object
>> - *
>> - * Returns the exclusive fence (if any).  Does NOT take a
>> - * reference. Writers must hold obj->lock, readers may only
>> - * hold a RCU read side lock.
>> - *
>> - * RETURNS
>> - * The exclusive fence or NULL
>> - */
>> -static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
>> -{
>> -       return rcu_dereference_protected(obj->fence_excl,
>> -                                        dma_resv_held(obj));
>> -}
>> -
>>   /**
>>    * dma_resv_get_list - get the reservation object's
>>    * shared fence list, with update-side lock held
>> @@ -109,53 +94,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
>>                                           dma_resv_held(obj));
>>   }
>>   
>> -/**
>> - * dma_resv_fences - read consistent fence pointers
>> - * @obj: reservation object where we get the fences from
>> - * @excl: pointer for the exclusive fence
>> - * @list: pointer for the shared fence list
>> - *
>> - * Make sure we have a consisten exclusive fence and shared fence list.
>> - * Must be called with rcu read side lock held.
>> - */
>> -static inline void dma_resv_fences(struct dma_resv *obj,
>> -                                  struct dma_fence **excl,
>> -                                  struct dma_resv_list **list,
>> -                                  u32 *shared_count)
>> -{
>> -       do {
>> -               *excl = rcu_dereference(obj->fence_excl);
>> -               *list = rcu_dereference(obj->fence);
>> -               *shared_count = *list ? (*list)->shared_count : 0;
>> -               smp_rmb(); /* See dma_resv_add_excl_fence */
>> -       } while (rcu_access_pointer(obj->fence_excl) != *excl);
>> -}
>> -
>> -/**
>> - * dma_resv_get_excl_rcu - get the reservation object's
>> - * exclusive fence, without lock held.
>> - * @obj: the reservation object
>> - *
>> - * If there is an exclusive fence, this atomically increments it's
>> - * reference count and returns it.
>> - *
>> - * RETURNS
>> - * The exclusive fence or NULL if none
>> - */
>> -static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
>> -{
>> -       struct dma_fence *fence;
>> -
>> -       if (!rcu_access_pointer(obj->fence_excl))
>> -               return NULL;
>> -
>> -       rcu_read_lock();
>> -       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
>> -       rcu_read_unlock();
>> -
>> -       return fence;
>> -}
>> -
>>   /**
>>    * dma_resv_lock - lock the reservation object
>>    * @obj: the reservation object
>> @@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
>>          ww_mutex_unlock(&obj->lock);
>>   }
>>   
>> +/**
>> + * dma_resv_get_excl - get the reservation object's
>> + * exclusive fence, with update-side lock held
>> + * @obj: the reservation object
>> + *
>> + * Returns the exclusive fence (if any).  Does NOT take a
>> + * reference. Writers must hold obj->lock, readers may only
>> + * hold a RCU read side lock.
>> + *
>> + * RETURNS
>> + * The exclusive fence or NULL
>> + */
>> +static inline struct dma_fence *
>> +dma_resv_get_excl(struct dma_resv *obj)
>> +{
>> +       return rcu_dereference_protected(obj->fence_excl,
>> +                                        dma_resv_held(obj));
>> +}
>> +
>> +/**
>> + * dma_resv_get_excl_rcu - get the reservation object's
>> + * exclusive fence, without lock held.
>> + * @obj: the reservation object
>> + *
>> + * If there is an exclusive fence, this atomically increments it's
>> + * reference count and returns it.
>> + *
>> + * RETURNS
>> + * The exclusive fence or NULL if none
>> + */
>> +static inline struct dma_fence *
>> +dma_resv_get_excl_rcu(struct dma_resv *obj)
>> +{
>> +       struct dma_fence *fence;
>> +
>> +       if (!rcu_access_pointer(obj->fence_excl))
>> +               return NULL;
>> +
>> +       rcu_read_lock();
>> +       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
>> +       rcu_read_unlock();
>> +
>> +       return fence;
>> +}
>> +
>>   void dma_resv_init(struct dma_resv *obj);
>>   void dma_resv_fini(struct dma_resv *obj);
>>   int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
>> -- 
>> 2.23.0.rc1
>>

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] dma-buf: Restore seqlock around dma_resv updates
  2019-08-14 20:07   ` Daniel Vetter
@ 2019-08-15  9:36     ` Koenig, Christian
  0 siblings, 0 replies; 8+ messages in thread
From: Koenig, Christian @ 2019-08-15  9:36 UTC (permalink / raw)
  To: Daniel Vetter, Chris Wilson; +Cc: Daniel Vetter, intel-gfx, dri-devel

Am 14.08.19 um 22:07 schrieb Daniel Vetter:
> On Wed, Aug 14, 2019 at 07:26:44PM +0100, Chris Wilson wrote:
>> Quoting Chris Wilson (2019-08-14 19:24:01)
>>> This reverts
>>> 67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
>>> dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
>>> 0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
>>> 5d344f58da76 ("dma-buf: nuke reservation_object seq number")
> Oh I didn't realize they landed already.
>
>>> The scenario that defeats simply grabbing a set of shared/exclusive
>>> fences and using them blissfully under RCU is that any of those fences
>>> may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
>>> scenario, while keeping the rcu_read_lock we need to establish that no
>>> fence was changed in the dma_resv after a read (or full) memory barrier.
> So if I'm reading correctly what Chris is saying here I guess my half
> comment in that other thread pointed at a real oversight. Since I still
> haven't checked and it's too late for real review not more for now.

Yeah, the root of the problem is that I didn't realize that fences 
could be reused while in the RCU grace period.

Need to go a step back and try to come up with a complete new approach 
for synchronization.

>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>> I said I needed to go lie down, that proves it.
> Yeah I guess we need to wait for Christian to wake up and have a working
> brain.

Well in that case you will need to wait for a few more years for my kids 
to enter school age :)

Cheers,
Christian.

> -Daniel
>

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* ✓ Fi.CI.IGT: success for dma-buf: Restore seqlock around dma_resv updates
  2019-08-14 18:24 [PATCH] dma-buf: Restore seqlock around dma_resv updates Chris Wilson
                   ` (2 preceding siblings ...)
  2019-08-14 19:27 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-08-15 10:58 ` Patchwork
  3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2019-08-15 10:58 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: dma-buf: Restore seqlock around dma_resv updates
URL   : https://patchwork.freedesktop.org/series/65196/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6709_full -> Patchwork_14016_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_14016_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_eio@reset-stress:
    - shard-apl:          [PASS][1] -> [FAIL][2] ([fdo#109661])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-apl6/igt@gem_eio@reset-stress.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-apl1/igt@gem_eio@reset-stress.html

  * igt@gem_exec_schedule@preempt-contexts-bsd2:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#109276]) +19 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb2/igt@gem_exec_schedule@preempt-contexts-bsd2.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb5/igt@gem_exec_schedule@preempt-contexts-bsd2.html

  * igt@gem_exec_schedule@preemptive-hang-bsd:
    - shard-iclb:         [PASS][5] -> [SKIP][6] ([fdo#111325]) +6 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb8/igt@gem_exec_schedule@preemptive-hang-bsd.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html

  * igt@gem_workarounds@suspend-resume-context:
    - shard-apl:          [PASS][7] -> [DMESG-WARN][8] ([fdo#108566]) +5 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-apl5/igt@gem_workarounds@suspend-resume-context.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-apl2/igt@gem_workarounds@suspend-resume-context.html

  * igt@kms_flip@2x-flip-vs-expired-vblank-interruptible:
    - shard-glk:          [PASS][9] -> [FAIL][10] ([fdo#105363])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-glk6/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-glk1/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][11] -> [FAIL][12] ([fdo#105363])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-skl2/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-skl8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite:
    - shard-iclb:         [PASS][13] -> [FAIL][14] ([fdo#103167]) +6 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite.html

  * igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min:
    - shard-skl:          [PASS][15] -> [FAIL][16] ([fdo#108145])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-skl4/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html

  * igt@kms_plane_lowres@pipe-a-tiling-x:
    - shard-iclb:         [PASS][17] -> [FAIL][18] ([fdo#103166])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb7/igt@kms_plane_lowres@pipe-a-tiling-x.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb7/igt@kms_plane_lowres@pipe-a-tiling-x.html

  * igt@kms_psr@psr2_sprite_plane_move:
    - shard-iclb:         [PASS][19] -> [SKIP][20] ([fdo#109441]) +4 similar issues
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb5/igt@kms_psr@psr2_sprite_plane_move.html

  * igt@kms_setmode@basic:
    - shard-apl:          [PASS][21] -> [FAIL][22] ([fdo#99912])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-apl4/igt@kms_setmode@basic.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-apl6/igt@kms_setmode@basic.html

  * igt@perf@enable-disable:
    - shard-iclb:         [PASS][23] -> [INCOMPLETE][24] ([fdo#107713])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb4/igt@perf@enable-disable.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb7/igt@perf@enable-disable.html

  
#### Possible fixes ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-apl:          [DMESG-WARN][25] ([fdo#108566]) -> [PASS][26] +5 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-apl7/igt@gem_ctx_isolation@rcs0-s3.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-apl1/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_exec_schedule@in-order-bsd:
    - shard-iclb:         [SKIP][27] ([fdo#111325]) -> [PASS][28] +1 similar issue
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb2/igt@gem_exec_schedule@in-order-bsd.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb5/igt@gem_exec_schedule@in-order-bsd.html

  * igt@kms_cursor_legacy@cursor-vs-flip-varying-size:
    - shard-apl:          [INCOMPLETE][29] ([fdo#103927]) -> [PASS][30] +2 similar issues
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-apl4/igt@kms_cursor_legacy@cursor-vs-flip-varying-size.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-apl7/igt@kms_cursor_legacy@cursor-vs-flip-varying-size.html

  * igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite:
    - shard-iclb:         [FAIL][31] ([fdo#103167]) -> [PASS][32] +3 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite.html

  * igt@kms_plane_lowres@pipe-a-tiling-y:
    - shard-iclb:         [FAIL][33] ([fdo#103166]) -> [PASS][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb7/igt@kms_plane_lowres@pipe-a-tiling-y.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb7/igt@kms_plane_lowres@pipe-a-tiling-y.html

  * igt@kms_psr@psr2_cursor_mmap_gtt:
    - shard-iclb:         [SKIP][35] ([fdo#109441]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb8/igt@kms_psr@psr2_cursor_mmap_gtt.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_gtt.html

  * igt@prime_busy@hang-bsd2:
    - shard-iclb:         [SKIP][37] ([fdo#109276]) -> [PASS][38] +17 similar issues
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb7/igt@prime_busy@hang-bsd2.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb1/igt@prime_busy@hang-bsd2.html

  
#### Warnings ####

  * igt@gem_mocs_settings@mocs-reset-bsd2:
    - shard-iclb:         [SKIP][39] ([fdo#109276]) -> [FAIL][40] ([fdo#111330]) +1 similar issue
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb3/igt@gem_mocs_settings@mocs-reset-bsd2.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb2/igt@gem_mocs_settings@mocs-reset-bsd2.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [DMESG-WARN][41] ([fdo#107724]) -> [SKIP][42] ([fdo#109349])
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-iclb4/igt@kms_dp_dsc@basic-dsc-enable-edp.html

  * igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-fullscreen:
    - shard-apl:          [INCOMPLETE][43] ([fdo#103927]) -> [SKIP][44] ([fdo#109271])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6709/shard-apl2/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-fullscreen.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/shard-apl5/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-fullscreen.html

  
  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109661]: https://bugs.freedesktop.org/show_bug.cgi?id=109661
  [fdo#111325]: https://bugs.freedesktop.org/show_bug.cgi?id=111325
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6709 -> Patchwork_14016

  CI-20190529: 20190529
  CI_DRM_6709: 4c9976607118e10dfc9f9feb3b9be2b3579631c9 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5134: 81df2f22385bc275975cf199d962eed9bc10f916 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14016: cffdefd474fc9ecf39ce403c3733e03d5326c0be @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14016/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2019-08-15 10:58 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-14 18:24 [PATCH] dma-buf: Restore seqlock around dma_resv updates Chris Wilson
2019-08-14 18:26 ` Chris Wilson
2019-08-14 20:07   ` Daniel Vetter
2019-08-15  9:36     ` Koenig, Christian
2019-08-15  7:30   ` Koenig, Christian
2019-08-14 19:06 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2019-08-14 19:27 ` ✓ Fi.CI.BAT: success " Patchwork
2019-08-15 10:58 ` ✓ Fi.CI.IGT: " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.