dri-devel.lists.freedesktop.org archive mirror
* [PATCH 01/10] dma-buf: add new dma_fence_chain container v4
@ 2018-12-07 15:54 Chunming Zhou
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Christian König

From: Christian König <ckoenig.leichtzumerken@gmail.com>

Lockless container implementation similar to a dma_fence_array, but with
only two elements per node and automatic garbage collection.

v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno,
    drop prev reference during garbage collection if it's not a chain fence.
v3: use head and iterator for dma_fence_chain_for_each
v4: fix reference count in dma_fence_chain_enable_signaling
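
As a rough usage sketch (the my_add_point helper and its arguments are
hypothetical placeholders, not part of this patch): allocate a node,
link a fence in as a new point, then iterate over the timeline:

    static int my_add_point(struct dma_fence *prev,
                            struct dma_fence *fence, u64 seqno)
    {
            struct dma_fence_chain *chain;
            struct dma_fence *iter;

            chain = kmalloc(sizeof(*chain), GFP_KERNEL);
            if (!chain)
                    return -ENOMEM;

            /* References to prev and fence move into the chain node. */
            dma_fence_chain_init(chain, prev, fence, seqno);

            /* Walk all points; signaled nodes are garbage collected. */
            dma_fence_chain_for_each(iter, &chain->base) {
                    /* call dma_fence_put(iter) before any early break */
            }

            /* chain->base now represents point seqno on the timeline. */
            return 0;
    }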

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/Makefile          |   3 +-
 drivers/dma-buf/dma-fence-chain.c | 241 ++++++++++++++++++++++++++++++
 include/linux/dma-fence-chain.h   |  81 ++++++++++
 3 files changed, 324 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma-buf/dma-fence-chain.c
 create mode 100644 include/linux/dma-fence-chain.h

diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 0913a6ccab5a..1f006e083eb9 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,4 +1,5 @@
-obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
+	 reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
new file mode 100644
index 000000000000..0c5e3c902fa0
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -0,0 +1,241 @@
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-fence-chain.h>
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
+ * @chain: chain node to get the previous node from
+ *
+ * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
+ * chain node.
+ */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
+{
+	struct dma_fence *prev;
+
+	rcu_read_lock();
+	prev = dma_fence_get_rcu_safe(&chain->prev);
+	rcu_read_unlock();
+	return prev;
+}
+
+/**
+ * dma_fence_chain_walk - chain walking function
+ * @fence: current chain node
+ *
+ * Walk the chain to the next node. Returns the next fence or NULL if we are at
+ * the end of the chain. Garbage collects chain nodes which are already
+ * signaled.
+ */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
+{
+	struct dma_fence_chain *chain, *prev_chain;
+	struct dma_fence *prev, *replacement, *tmp;
+
+	chain = to_dma_fence_chain(fence);
+	if (!chain) {
+		dma_fence_put(fence);
+		return NULL;
+	}
+
+	while ((prev = dma_fence_chain_get_prev(chain))) {
+
+		prev_chain = to_dma_fence_chain(prev);
+		if (prev_chain) {
+			if (!dma_fence_is_signaled(prev_chain->fence))
+				break;
+
+			replacement = dma_fence_chain_get_prev(prev_chain);
+		} else {
+			if (!dma_fence_is_signaled(prev))
+				break;
+
+			replacement = NULL;
+		}
+
+		tmp = cmpxchg(&chain->prev, prev, replacement);
+		if (tmp == prev)
+			dma_fence_put(tmp);
+		else
+			dma_fence_put(replacement);
+		dma_fence_put(prev);
+	}
+
+	dma_fence_put(fence);
+	return prev;
+}
+EXPORT_SYMBOL(dma_fence_chain_walk);
+
+/**
+ * dma_fence_chain_find_seqno - find fence chain node by seqno
+ * @pfence: pointer to the chain node where to start
+ * @seqno: the sequence number to search for
+ *
+ * Advance the fence pointer to the chain node which will signal this sequence
+ * number. If no sequence number is provided then this is a no-op.
+ *
+ * Returns -EINVAL if the fence is not a chain node or the sequence number has
+ * not yet advanced far enough.
+ */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
+{
+	struct dma_fence_chain *chain;
+
+	if (!seqno)
+		return 0;
+
+	chain = to_dma_fence_chain(*pfence);
+	if (!chain || chain->base.seqno < seqno)
+		return -EINVAL;
+
+	dma_fence_chain_for_each(*pfence, &chain->base) {
+		if ((*pfence)->context != chain->base.context ||
+		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
+			break;
+	}
+	dma_fence_put(&chain->base);
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
+{
+	return "dma_fence_chain";
+}
+
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
+{
+	return "unbound";
+}
+
+static void dma_fence_chain_irq_work(struct irq_work *work)
+{
+	struct dma_fence_chain *chain;
+
+	chain = container_of(work, typeof(*chain), work);
+
+	/* Try to rearm the callback */
+	if (!dma_fence_chain_enable_signaling(&chain->base))
+		/* Ok, we are done. No more unsignaled fences left */
+		dma_fence_signal(&chain->base);
+	dma_fence_put(&chain->base);
+}
+
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct dma_fence_chain *chain;
+
+	chain = container_of(cb, typeof(*chain), cb);
+	irq_work_queue(&chain->work);
+	dma_fence_put(f);
+}
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
+{
+	struct dma_fence_chain *head = to_dma_fence_chain(fence);
+
+	dma_fence_get(&head->base);
+	dma_fence_chain_for_each(fence, &head->base) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+		struct dma_fence *f = chain ? chain->fence : fence;
+
+		dma_fence_get(f);
+		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
+			dma_fence_put(fence);
+			return true;
+		}
+		dma_fence_put(f);
+	}
+	dma_fence_put(&head->base);
+	return false;
+}
+
+static bool dma_fence_chain_signaled(struct dma_fence *fence)
+{
+	dma_fence_chain_for_each(fence, fence) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+		struct dma_fence *f = chain ? chain->fence : fence;
+
+		if (!dma_fence_is_signaled(f)) {
+			dma_fence_put(fence);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void dma_fence_chain_release(struct dma_fence *fence)
+{
+	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+	dma_fence_put(chain->prev);
+	dma_fence_put(chain->fence);
+	dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_chain_ops = {
+	.get_driver_name = dma_fence_chain_get_driver_name,
+	.get_timeline_name = dma_fence_chain_get_timeline_name,
+	.enable_signaling = dma_fence_chain_enable_signaling,
+	.signaled = dma_fence_chain_signaled,
+	.release = dma_fence_chain_release,
+};
+EXPORT_SYMBOL(dma_fence_chain_ops);
+
+/**
+ * dma_fence_chain_init - initialize a fence chain
+ * @chain: the chain node to initialize
+ * @prev: the previous fence
+ * @fence: the current fence
+ *
+ * Initialize a new chain node and either start a new chain or add the node to
+ * the existing chain of the previous fence.
+ */
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+			  struct dma_fence *prev,
+			  struct dma_fence *fence,
+			  uint64_t seqno)
+{
+	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
+	uint64_t context;
+
+	spin_lock_init(&chain->lock);
+	chain->prev = prev;
+	chain->fence = fence;
+	chain->prev_seqno = 0;
+	init_irq_work(&chain->work, dma_fence_chain_irq_work);
+
+	/* Try to reuse the context of the previous chain node. */
+	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno)) {
+		context = prev->context;
+		chain->prev_seqno = prev->seqno;
+	} else {
+		context = dma_fence_context_alloc(1);
+		/* Make sure that we always have a valid sequence number. */
+		if (prev_chain)
+			seqno = max(prev->seqno, seqno);
+	}
+
+	dma_fence_init(&chain->base, &dma_fence_chain_ops,
+		       &chain->lock, context, seqno);
+}
+EXPORT_SYMBOL(dma_fence_chain_init);
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
new file mode 100644
index 000000000000..a5c2e8c6915c
--- /dev/null
+++ b/include/linux/dma-fence-chain.h
@@ -0,0 +1,81 @@
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_CHAIN_H
+#define __LINUX_DMA_FENCE_CHAIN_H
+
+#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
+
+/**
+ * struct dma_fence_chain - fence to represent a node of a fence chain
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @prev: previous fence of the chain
+ * @prev_seqno: original previous seqno before garbage collection
+ * @fence: encapsulated fence
+ * @cb: callback structure for signaling
+ * @work: irq work item for signaling
+ */
+struct dma_fence_chain {
+	struct dma_fence base;
+	spinlock_t lock;
+	struct dma_fence *prev;
+	u64 prev_seqno;
+	struct dma_fence *fence;
+	struct dma_fence_cb cb;
+	struct irq_work work;
+};
+
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * to_dma_fence_chain - cast a fence to a dma_fence_chain
+ * @fence: fence to cast to a dma_fence_chain
+ *
+ * Returns NULL if the fence is not a dma_fence_chain,
+ * or the dma_fence_chain otherwise.
+ */
+static inline struct dma_fence_chain *
+to_dma_fence_chain(struct dma_fence *fence)
+{
+	if (!fence || fence->ops != &dma_fence_chain_ops)
+		return NULL;
+
+	return container_of(fence, struct dma_fence_chain, base);
+}
+
+/**
+ * dma_fence_chain_for_each - iterate over all fences in chain
+ * @iter: current fence
+ * @head: starting point
+ *
+ * Iterate over all fences in the chain. We keep a reference to the current
+ * fence while inside the loop which must be dropped when breaking out.
+ */
+#define dma_fence_chain_for_each(iter, head)	\
+	for (iter = dma_fence_get(head); iter; \
+	     iter = dma_fence_chain_walk(iter))
+
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence);
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno);
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+			  struct dma_fence *prev,
+			  struct dma_fence *fence,
+			  uint64_t seqno);
+
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
-- 
2.17.1


* [PATCH 02/10] drm/syncobj: remove drm_syncobj_cb and cleanup
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Christian König

From: Christian König <ckoenig.leichtzumerken@gmail.com>

This completes "drm/syncobj: Drop add/remove_callback from driver
interface" and cleans up the implementation a bit.
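
With the callback machinery gone, a waiter is just a list entry that
syncobj_wait_syncobj_func() fills in once a fence shows up; roughly
(illustrative sketch, not part of the diff):

    struct syncobj_wait_entry wait = { .task = current };

    drm_syncobj_fence_add_wait(syncobj, &wait);
    /* ...sleep until wait.fence is set... */
    drm_syncobj_remove_wait(syncobj, &wait);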

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_syncobj.c | 91 ++++++++++++-----------------------
 include/drm/drm_syncobj.h     | 21 --------
 2 files changed, 30 insertions(+), 82 deletions(-)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index db30a0e89db8..e19525af0cce 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,6 +56,16 @@
 #include "drm_internal.h"
 #include <drm/drm_syncobj.h>
 
+struct syncobj_wait_entry {
+	struct list_head node;
+	struct task_struct *task;
+	struct dma_fence *fence;
+	struct dma_fence_cb fence_cb;
+};
+
+static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
+				      struct syncobj_wait_entry *wait);
+
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -82,58 +92,33 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
 }
 EXPORT_SYMBOL(drm_syncobj_find);
 
-static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
-					    struct drm_syncobj_cb *cb,
-					    drm_syncobj_func_t func)
+static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
+				       struct syncobj_wait_entry *wait)
 {
-	cb->func = func;
-	list_add_tail(&cb->node, &syncobj->cb_list);
-}
-
-static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
-						 struct dma_fence **fence,
-						 struct drm_syncobj_cb *cb,
-						 drm_syncobj_func_t func)
-{
-	int ret;
-
-	*fence = drm_syncobj_fence_get(syncobj);
-	if (*fence)
-		return 1;
+	if (wait->fence)
+		return;
 
 	spin_lock(&syncobj->lock);
 	/* We've already tried once to get a fence and failed.  Now that we
 	 * have the lock, try one more time just to be sure we don't add a
 	 * callback when a fence has already been set.
 	 */
-	if (syncobj->fence) {
-		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
-								 lockdep_is_held(&syncobj->lock)));
-		ret = 1;
-	} else {
-		*fence = NULL;
-		drm_syncobj_add_callback_locked(syncobj, cb, func);
-		ret = 0;
-	}
+	if (syncobj->fence)
+		wait->fence = dma_fence_get(
+			rcu_dereference_protected(syncobj->fence, 1));
+	else
+		list_add_tail(&wait->node, &syncobj->cb_list);
 	spin_unlock(&syncobj->lock);
-
-	return ret;
 }
 
-void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
-			      struct drm_syncobj_cb *cb,
-			      drm_syncobj_func_t func)
+static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
+				    struct syncobj_wait_entry *wait)
 {
-	spin_lock(&syncobj->lock);
-	drm_syncobj_add_callback_locked(syncobj, cb, func);
-	spin_unlock(&syncobj->lock);
-}
+	if (!wait->node.next)
+		return;
 
-void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
-				 struct drm_syncobj_cb *cb)
-{
 	spin_lock(&syncobj->lock);
-	list_del_init(&cb->node);
+	list_del_init(&wait->node);
 	spin_unlock(&syncobj->lock);
 }
 
@@ -148,7 +133,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence)
 {
 	struct dma_fence *old_fence;
-	struct drm_syncobj_cb *cur, *tmp;
+	struct syncobj_wait_entry *cur, *tmp;
 
 	if (fence)
 		dma_fence_get(fence);
@@ -162,7 +147,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 	if (fence != old_fence) {
 		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
 			list_del_init(&cur->node);
-			cur->func(syncobj, cur);
+			syncobj_wait_syncobj_func(syncobj, cur);
 		}
 	}
 
@@ -608,13 +593,6 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 					&args->handle);
 }
 
-struct syncobj_wait_entry {
-	struct task_struct *task;
-	struct dma_fence *fence;
-	struct dma_fence_cb fence_cb;
-	struct drm_syncobj_cb syncobj_cb;
-};
-
 static void syncobj_wait_fence_func(struct dma_fence *fence,
 				    struct dma_fence_cb *cb)
 {
@@ -625,11 +603,8 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
 }
 
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
-				      struct drm_syncobj_cb *cb)
+				      struct syncobj_wait_entry *wait)
 {
-	struct syncobj_wait_entry *wait =
-		container_of(cb, struct syncobj_wait_entry, syncobj_cb);
-
 	/* This happens inside the syncobj lock */
 	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
 							      lockdep_is_held(&syncobj->lock)));
@@ -688,12 +663,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	 */
 
 	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
-		for (i = 0; i < count; ++i) {
-			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
-							      &entries[i].fence,
-							      &entries[i].syncobj_cb,
-							      syncobj_wait_syncobj_func);
-		}
+		for (i = 0; i < count; ++i)
+			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
 	}
 
 	do {
@@ -742,9 +713,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 
 cleanup_entries:
 	for (i = 0; i < count; ++i) {
-		if (entries[i].syncobj_cb.func)
-			drm_syncobj_remove_callback(syncobjs[i],
-						    &entries[i].syncobj_cb);
+		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
 		if (entries[i].fence_cb.func)
 			dma_fence_remove_callback(entries[i].fence,
 						  &entries[i].fence_cb);
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index b1fe921f8e8f..7c6ed845c70d 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -28,8 +28,6 @@
 
 #include "linux/dma-fence.h"
 
-struct drm_syncobj_cb;
-
 /**
  * struct drm_syncobj - sync object.
  *
@@ -62,25 +60,6 @@ struct drm_syncobj {
 	struct file *file;
 };
 
-typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj,
-				   struct drm_syncobj_cb *cb);
-
-/**
- * struct drm_syncobj_cb - callback for drm_syncobj_add_callback
- * @node: used by drm_syncob_add_callback to append this struct to
- *	  &drm_syncobj.cb_list
- * @func: drm_syncobj_func_t to call
- *
- * This struct will be initialized by drm_syncobj_add_callback, additional
- * data can be passed along by embedding drm_syncobj_cb in another struct.
- * The callback will get called the next time drm_syncobj_replace_fence is
- * called.
- */
-struct drm_syncobj_cb {
-	struct list_head node;
-	drm_syncobj_func_t func;
-};
-
 void drm_syncobj_free(struct kref *kref);
 
 /**
-- 
2.17.1


* [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Christian König

From: Christian König <ckoenig.leichtzumerken@gmail.com>

Use the dma_fence_chain object to create a timeline of fence objects
instead of just replacing the existing fence.

v2: rebase and cleanup
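
Callers pre-allocate the chain node so that adding the point itself
cannot fail; a hedged sketch of the intended calling pattern:

    struct dma_fence_chain *chain = kmalloc(sizeof(*chain), GFP_KERNEL);

    if (!chain)
            return -ENOMEM;
    /* chain is consumed; the syncobj takes its own reference on fence */
    drm_syncobj_add_point(syncobj, chain, fence, point);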

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++
 include/drm/drm_syncobj.h     |  5 +++++
 2 files changed, 42 insertions(+)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e19525af0cce..51f798e2194f 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
 	spin_unlock(&syncobj->lock);
 }
 
+/**
+ * drm_syncobj_add_point - add new timeline point to the syncobj
+ * @syncobj: sync object to add timeline point do
+ * @chain: chain node to use to add the point
+ * @fence: fence to encapsulate in the chain node
+ * @point: sequence number to use for the point
+ *
+ * Add the chain node as new timeline point to the syncobj.
+ */
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+			   struct dma_fence_chain *chain,
+			   struct dma_fence *fence,
+			   uint64_t point)
+{
+	struct syncobj_wait_entry *cur, *tmp;
+	struct dma_fence *prev;
+
+	dma_fence_get(fence);
+
+	spin_lock(&syncobj->lock);
+
+	prev = rcu_dereference_protected(syncobj->fence,
+					 lockdep_is_held(&syncobj->lock));
+	dma_fence_chain_init(chain, prev, fence, point);
+	rcu_assign_pointer(syncobj->fence, &chain->base);
+
+	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
+		list_del_init(&cur->node);
+		syncobj_wait_syncobj_func(syncobj, cur);
+	}
+	spin_unlock(&syncobj->lock);
+
+	/* Walk the chain once to trigger garbage collection */
+	dma_fence_chain_for_each(prev, fence);
+}
+EXPORT_SYMBOL(drm_syncobj_add_point);
+
 /**
  * drm_syncobj_replace_fence - replace fence in a sync object.
  * @syncobj: Sync object to replace fence in
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 7c6ed845c70d..8acb4ae4f311 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -27,6 +27,7 @@
 #define __DRM_SYNCOBJ_H__
 
 #include "linux/dma-fence.h"
+#include "linux/dma-fence-chain.h"
 
 /**
  * struct drm_syncobj - sync object.
@@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
 
 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
 				     u32 handle);
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+			   struct dma_fence_chain *chain,
+			   struct dma_fence *fence,
+			   uint64_t point);
 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence);
 int drm_syncobj_find_fence(struct drm_file *file_private,
-- 
2.17.1


* [PATCH 04/10] drm/syncobj: add support for timeline point wait v8
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Chunming Zhou, Chris Wilson, Daniel Rakos, Jason Ekstrand,
	Bas Nieuwenhuizen, Dave Airlie, Christian König

The points array is a one-to-one match with the syncobjs array.
v2:
add separate ioctl for timeline point wait, otherwise it breaks the uapi.
v3:
userspace can specify two kinds of waits:
a. wait for a time point to be completed.
b. wait for a time point to become available.
v4:
rebase
v5:
add comment for xxx_WAIT_AVAILABLE
v6: rebase and rework on new container
v7: drop _WAIT_COMPLETED, it is the default anyway
v8: correctly handle garbage collected fences
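
From userspace the new ioctl is used roughly like this (a sketch
against this uapi; fd, handles, points and count are caller state):

    struct drm_syncobj_timeline_wait wait = {
            .handles = (__u64)(uintptr_t)handles,  /* __u32 array */
            .points = (__u64)(uintptr_t)points,    /* __u64 array, 1:1 */
            .timeout_nsec = deadline_ns,           /* absolute time */
            .count_handles = count,
            .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
    };
    int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);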

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: Daniel Rakos <Daniel.Rakos@amd.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/drm_internal.h |   2 +
 drivers/gpu/drm/drm_ioctl.c    |   2 +
 drivers/gpu/drm/drm_syncobj.c  | 153 ++++++++++++++++++++++++++-------
 include/uapi/drm/drm.h         |  15 ++++
 4 files changed, 143 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index c7a7d7ce5d1c..18b41e10195c 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -178,6 +178,8 @@ int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
 int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_private);
+int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_private);
 int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 94bd872d56c4..a9a17ed35cc4 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -675,6 +675,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 51f798e2194f..348079bb0965 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -61,6 +61,7 @@ struct syncobj_wait_entry {
 	struct task_struct *task;
 	struct dma_fence *fence;
 	struct dma_fence_cb fence_cb;
+	u64    point;
 };
 
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
@@ -95,6 +96,8 @@ EXPORT_SYMBOL(drm_syncobj_find);
 static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
 				       struct syncobj_wait_entry *wait)
 {
+	struct dma_fence *fence;
+
 	if (wait->fence)
 		return;
 
@@ -103,11 +106,15 @@ static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
 	 * have the lock, try one more time just to be sure we don't add a
 	 * callback when a fence has already been set.
 	 */
-	if (syncobj->fence)
-		wait->fence = dma_fence_get(
-			rcu_dereference_protected(syncobj->fence, 1));
-	else
+	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+		dma_fence_put(fence);
 		list_add_tail(&wait->node, &syncobj->cb_list);
+	} else if (!fence) {
+		wait->fence = dma_fence_get_stub();
+	} else {
+		wait->fence = fence;
+	}
 	spin_unlock(&syncobj->lock);
 }
 
@@ -148,10 +155,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
 	dma_fence_chain_init(chain, prev, fence, point);
 	rcu_assign_pointer(syncobj->fence, &chain->base);
 
-	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
-		list_del_init(&cur->node);
+	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
 		syncobj_wait_syncobj_func(syncobj, cur);
-	}
 	spin_unlock(&syncobj->lock);
 
 	/* Walk the chain once to trigger garbage collection */
@@ -182,10 +187,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 	rcu_assign_pointer(syncobj->fence, fence);
 
 	if (fence != old_fence) {
-		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
-			list_del_init(&cur->node);
+		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
 			syncobj_wait_syncobj_func(syncobj, cur);
-		}
 	}
 
 	spin_unlock(&syncobj->lock);
@@ -642,13 +645,27 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
 				      struct syncobj_wait_entry *wait)
 {
+	struct dma_fence *fence;
+
 	/* This happens inside the syncobj lock */
-	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
-							      lockdep_is_held(&syncobj->lock)));
+	fence = rcu_dereference_protected(syncobj->fence,
+					  lockdep_is_held(&syncobj->lock));
+	dma_fence_get(fence);
+	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+		dma_fence_put(fence);
+		return;
+	} else if (!fence) {
+		wait->fence = dma_fence_get_stub();
+	} else {
+		wait->fence = fence;
+	}
+
 	wake_up_process(wait->task);
+	list_del_init(&wait->node);
 }
 
 static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+						  void __user *user_points,
 						  uint32_t count,
 						  uint32_t flags,
 						  signed long timeout,
@@ -656,12 +673,27 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 {
 	struct syncobj_wait_entry *entries;
 	struct dma_fence *fence;
+	uint64_t *points;
 	uint32_t signaled_count, i;
 
-	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
-	if (!entries)
+	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+	if (points == NULL)
 		return -ENOMEM;
 
+	if (!user_points) {
+		memset(points, 0, count * sizeof(uint64_t));
+
+	} else if (copy_from_user(points, user_points,
+				  sizeof(uint64_t) * count)) {
+		timeout = -EFAULT;
+		goto err_free_points;
+	}
+
+	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+	if (!entries) {
+		timeout = -ENOMEM;
+		goto err_free_points;
+	}
 	/* Walk the list of sync objects and initialize entries.  We do
 	 * this up-front so that we can properly return -EINVAL if there is
 	 * a syncobj with a missing fence and then never have the chance of
@@ -669,9 +701,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	 */
 	signaled_count = 0;
 	for (i = 0; i < count; ++i) {
+		struct dma_fence *fence;
+
 		entries[i].task = current;
-		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
-		if (!entries[i].fence) {
+		entries[i].point = points[i];
+		fence = drm_syncobj_fence_get(syncobjs[i]);
+		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+			dma_fence_put(fence);
 			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
 				continue;
 			} else {
@@ -680,7 +716,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 			}
 		}
 
-		if (dma_fence_is_signaled(entries[i].fence)) {
+		if (fence)
+			entries[i].fence = fence;
+		else
+			entries[i].fence = dma_fence_get_stub();
+
+		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+		    dma_fence_is_signaled(entries[i].fence)) {
 			if (signaled_count == 0 && idx)
 				*idx = i;
 			signaled_count++;
@@ -713,7 +755,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 			if (!fence)
 				continue;
 
-			if (dma_fence_is_signaled(fence) ||
+			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+			    dma_fence_is_signaled(fence) ||
 			    (!entries[i].fence_cb.func &&
 			     dma_fence_add_callback(fence,
 						    &entries[i].fence_cb,
@@ -758,6 +801,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	}
 	kfree(entries);
 
+err_free_points:
+	kfree(points);
+
 	return timeout;
 }
 
@@ -796,19 +842,33 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
 static int drm_syncobj_array_wait(struct drm_device *dev,
 				  struct drm_file *file_private,
 				  struct drm_syncobj_wait *wait,
-				  struct drm_syncobj **syncobjs)
+				  struct drm_syncobj_timeline_wait *timeline_wait,
+				  struct drm_syncobj **syncobjs, bool timeline)
 {
-	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+	signed long timeout = 0;
 	uint32_t first = ~0;
 
-	timeout = drm_syncobj_array_wait_timeout(syncobjs,
-						 wait->count_handles,
-						 wait->flags,
-						 timeout, &first);
-	if (timeout < 0)
-		return timeout;
-
-	wait->first_signaled = first;
+	if (!timeline) {
+		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+		timeout = drm_syncobj_array_wait_timeout(syncobjs,
+							 NULL,
+							 wait->count_handles,
+							 wait->flags,
+							 timeout, &first);
+		if (timeout < 0)
+			return timeout;
+		wait->first_signaled = first;
+	} else {
+		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
+		timeout = drm_syncobj_array_wait_timeout(syncobjs,
+							 u64_to_user_ptr(timeline_wait->points),
+							 timeline_wait->count_handles,
+							 timeline_wait->flags,
+							 timeout, &first);
+		if (timeout < 0)
+			return timeout;
+		timeline_wait->first_signaled = first;
+	}
 	return 0;
 }
 
@@ -894,13 +954,48 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	ret = drm_syncobj_array_wait(dev, file_private,
-				     args, syncobjs);
+				     args, NULL, syncobjs, false);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
 	return ret;
 }
 
+int
+drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_private)
+{
+	struct drm_syncobj_timeline_wait *args = data;
+	struct drm_syncobj **syncobjs;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -ENODEV;
+
+	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+		return -EINVAL;
+
+	if (args->count_handles == 0)
+		return -EINVAL;
+
+	ret = drm_syncobj_array_find(file_private,
+				     u64_to_user_ptr(args->handles),
+				     args->count_handles,
+				     &syncobjs);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_syncobj_array_wait(dev, file_private,
+				     NULL, args, syncobjs, true);
+
+	drm_syncobj_array_free(syncobjs, args->count_handles);
+
+	return ret;
+}
+
+
 int
 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_private)
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 300f336633f2..0092111d002c 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -737,6 +737,7 @@ struct drm_syncobj_handle {
 
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2)
 struct drm_syncobj_wait {
 	__u64 handles;
 	/* absolute timeout */
@@ -747,6 +748,19 @@ struct drm_syncobj_wait {
 	__u32 pad;
 };
 
+struct drm_syncobj_timeline_wait {
+	__u64 handles;
+	/* wait on a specific timeline point for each handle */
+	__u64 points;
+	/* absolute timeout */
+	__s64 timeout_nsec;
+	__u32 count_handles;
+	__u32 flags;
+	__u32 first_signaled; /* only valid when not waiting all */
+	__u32 pad;
+};
+
+
 struct drm_syncobj_array {
 	__u64 handles;
 	__u32 count_handles;
@@ -909,6 +923,7 @@ extern "C" {
 #define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
 #define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
 
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
-- 
2.17.1


* [PATCH 05/10] drm/syncobj: add timeline payload query ioctl v4
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Chunming Zhou, Daniel Rakos, Bas Nieuwenhuizen, Dave Airlie,
	Christian König

User mode can query the timeline payload.
v2: check return value of copy_to_user
v3: handle querying entry by entry
v4: rebase on new chain container, simplify interface
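
A rough userspace sketch; the kernel fills the caller-provided points
array with the current payload (latest chain point) of each syncobj:

    struct drm_syncobj_timeline_array query = {
            .handles = (__u64)(uintptr_t)handles,
            .points = (__u64)(uintptr_t)points,
            .count_handles = count,
    };
    int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &query);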

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Cc: Daniel Rakos <Daniel.Rakos@amd.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/drm_internal.h |  2 ++
 drivers/gpu/drm/drm_ioctl.c    |  2 ++
 drivers/gpu/drm/drm_syncobj.c  | 43 ++++++++++++++++++++++++++++++++++
 include/uapi/drm/drm.h         | 10 ++++++++
 4 files changed, 57 insertions(+)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 18b41e10195c..dab4d5936441 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -184,6 +184,8 @@ int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_private);
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_private);
 
 /* drm_framebuffer.c */
 void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index a9a17ed35cc4..7578ef6dc1d1 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -681,6 +681,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 348079bb0965..f97fa00ca1d0 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1061,3 +1061,46 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 
 	return ret;
 }
+
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_private)
+{
+	struct drm_syncobj_timeline_array *args = data;
+	struct drm_syncobj **syncobjs;
+	uint64_t __user *points = u64_to_user_ptr(args->points);
+	uint32_t i;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -ENODEV;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	if (args->count_handles == 0)
+		return -EINVAL;
+
+	ret = drm_syncobj_array_find(file_private,
+				     u64_to_user_ptr(args->handles),
+				     args->count_handles,
+				     &syncobjs);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < args->count_handles; i++) {
+		struct dma_fence_chain *chain;
+		struct dma_fence *fence;
+		uint64_t point;
+
+		fence = drm_syncobj_fence_get(syncobjs[i]);
+		chain = to_dma_fence_chain(fence);
+		point = chain ? fence->seqno : 0;
+		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
+		ret = ret ? -EFAULT : 0;
+		if (ret)
+			break;
+	}
+	drm_syncobj_array_free(syncobjs, args->count_handles);
+
+	return ret;
+}
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 0092111d002c..b2c36f2b2599 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -767,6 +767,14 @@ struct drm_syncobj_array {
 	__u32 pad;
 };
 
+struct drm_syncobj_timeline_array {
+	__u64 handles;
+	__u64 points;
+	__u32 count_handles;
+	__u32 pad;
+};
+
+
 /* Query current scanout sequence number */
 struct drm_crtc_get_sequence {
 	__u32 crtc_id;		/* requested crtc_id */
@@ -924,6 +932,8 @@ extern "C" {
 #define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
 
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
-- 
2.17.1


* [PATCH 06/10] drm/syncobj: use the timeline point in drm_syncobj_find_fence v3
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Christian König

From: Christian König <ckoenig.leichtzumerken@gmail.com>

Implement finding the right timeline point in drm_syncobj_find_fence.

v2: return -EINVAL when the point is not submitted yet.
v3: fix reference counting bug, add flags handling as well
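
In-kernel callers can then do something like the following
(illustrative only; handle and file_private are caller state):

    struct dma_fence *fence;
    int r;

    /* block until point 5 is submitted, then return its fence */
    r = drm_syncobj_find_fence(file_private, handle, 5,
                               DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                               &fence);
    if (!r)
            dma_fence_put(fence);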

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_syncobj.c | 43 ++++++++++++++++++++++++++++++++---
 1 file changed, 40 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index f97fa00ca1d0..282982e58dbd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -231,16 +231,53 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 			   struct dma_fence **fence)
 {
 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
-	int ret = 0;
+	struct syncobj_wait_entry wait;
+	int ret;
 
 	if (!syncobj)
 		return -ENOENT;
 
 	*fence = drm_syncobj_fence_get(syncobj);
-	if (!*fence) {
+	drm_syncobj_put(syncobj);
+
+	if (*fence) {
+		ret = dma_fence_chain_find_seqno(fence, point);
+		if (!ret)
+			return 0;
+		dma_fence_put(*fence);
+	} else {
 		ret = -EINVAL;
 	}
-	drm_syncobj_put(syncobj);
+
+	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+		return ret;
+
+	memset(&wait, 0, sizeof(wait));
+	wait.task = current;
+	wait.point = point;
+	drm_syncobj_fence_add_wait(syncobj, &wait);
+
+	do {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (wait.fence) {
+			ret = 0;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		schedule();
+	} while (1);
+
+	__set_current_state(TASK_RUNNING);
+	*fence = wait.fence;
+
+	if (wait.node.next)
+		drm_syncobj_remove_wait(syncobj, &wait);
+
 	return ret;
 }
 EXPORT_SYMBOL(drm_syncobj_find_fence);
-- 
2.17.1


* [PATCH 07/10] drm/amdgpu: add timeline support in amdgpu CS v2
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Chunming Zhou, Daniel Rakos, Bas Nieuwenhuizen, Dave Airlie,
	Christian König

Syncobj wait/signal operations are appended at command submission time.
v2: separate into two kinds of in/out_deps functions
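
A userspace submission carries the new chunk roughly like this (sketch;
syncobj_handle and point are caller state):

    struct drm_amdgpu_cs_chunk_syncobj dep = {
            .handle = syncobj_handle,
            .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
            .point = point,
    };
    struct drm_amdgpu_cs_chunk chunk = {
            .chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT,
            .length_dw = sizeof(dep) / 4,
            .chunk_data = (__u64)(uintptr_t)&dep,
    };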

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Cc: Daniel Rakos <Daniel.Rakos@amd.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 147 +++++++++++++++++++++----
 include/uapi/drm/amdgpu_drm.h          |   8 ++
 3 files changed, 140 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 42f882c633ee..f9160ea1396a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -545,6 +545,12 @@ struct amdgpu_cs_chunk {
 	void			*kdata;
 };
 
+struct amdgpu_cs_post_dep {
+	struct drm_syncobj *syncobj;
+	struct dma_fence_chain *chain;
+	u64 point;
+};
+
 struct amdgpu_cs_parser {
 	struct amdgpu_device	*adev;
 	struct drm_file		*filp;
@@ -574,8 +580,8 @@ struct amdgpu_cs_parser {
 	/* user fence */
 	struct amdgpu_bo_list_entry	uf_entry;
 
-	unsigned num_post_dep_syncobjs;
-	struct drm_syncobj **post_dep_syncobjs;
+	unsigned			num_post_deps;
+	struct amdgpu_cs_post_dep	*post_deps;
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dc54e9efd910..580f1ea27157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -213,6 +213,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 			break;
 
 		default:
@@ -792,9 +794,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 
-	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
-		drm_syncobj_put(parser->post_dep_syncobjs[i]);
-	kfree(parser->post_dep_syncobjs);
+	for (i = 0; i < parser->num_post_deps; i++) {
+		drm_syncobj_put(parser->post_deps[i].syncobj);
+		kfree(parser->post_deps[i].chain);
+	}
+	kfree(parser->post_deps);
 
 	dma_fence_put(parser->fence);
 
@@ -1100,13 +1104,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 }
 
 static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
-						 uint32_t handle)
+						 uint32_t handle, u64 point,
+						 u64 flags)
 {
-	int r;
 	struct dma_fence *fence;
-	r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
-	if (r)
+	int r;
+
+	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+	if (r) {
+		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+			  handle, point, r);
 		return r;
+	}
 
 	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
 	dma_fence_put(fence);
@@ -1117,46 +1126,115 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
 					    struct amdgpu_cs_chunk *chunk)
 {
+	struct drm_amdgpu_cs_chunk_sem *deps;
 	unsigned num_deps;
 	int i, r;
-	struct drm_amdgpu_cs_chunk_sem *deps;
 
 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
+	for (i = 0; i < num_deps; ++i) {
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
+							  0, 0);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
 
+
+static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
+						     struct amdgpu_cs_chunk *chunk)
+{
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	unsigned num_deps;
+	int i, r;
+
+	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 	for (i = 0; i < num_deps; ++i) {
-		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
+							  syncobj_deps[i].handle,
+							  syncobj_deps[i].point,
+							  syncobj_deps[i].flags);
 		if (r)
 			return r;
 	}
+
 	return 0;
 }
 
 static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
 					     struct amdgpu_cs_chunk *chunk)
 {
+	struct drm_amdgpu_cs_chunk_sem *deps;
 	unsigned num_deps;
 	int i;
-	struct drm_amdgpu_cs_chunk_sem *deps;
+
 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
 
-	p->post_dep_syncobjs = kmalloc_array(num_deps,
-					     sizeof(struct drm_syncobj *),
-					     GFP_KERNEL);
-	p->num_post_dep_syncobjs = 0;
+	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+				     GFP_KERNEL);
+	p->num_post_deps = 0;
+
+	if (!p->post_deps)
+		return -ENOMEM;
+
+
+	for (i = 0; i < num_deps; ++i) {
+		p->post_deps[i].syncobj =
+			drm_syncobj_find(p->filp, deps[i].handle);
+		if (!p->post_deps[i].syncobj)
+			return -EINVAL;
+		p->post_deps[i].chain = NULL;
+		p->post_deps[i].point = 0;
+		p->num_post_deps++;
+	}
+
+	return 0;
+}
+
+
+static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+						      struct amdgpu_cs_chunk
+						      *chunk)
+{
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	unsigned num_deps;
+	int i;
+
+	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+				     GFP_KERNEL);
+	p->num_post_deps = 0;
 
-	if (!p->post_dep_syncobjs)
+	if (!p->post_deps)
 		return -ENOMEM;
 
 	for (i = 0; i < num_deps; ++i) {
-		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
-		if (!p->post_dep_syncobjs[i])
+		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+		dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
+		if (!dep->chain)
+			return -ENOMEM;
+
+		dep->syncobj = drm_syncobj_find(p->filp,
+						syncobj_deps[i].handle);
+		if (!dep->syncobj) {
+			kfree(dep->chain);
 			return -EINVAL;
-		p->num_post_dep_syncobjs++;
+		}
+		dep->point = syncobj_deps[i].point;
+		p->num_post_deps++;
 	}
+
 	return 0;
 }
 
@@ -1170,18 +1248,32 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 
 		chunk = &p->chunks[i];
 
-		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
+		switch (chunk->chunk_id) {
+		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 			r = amdgpu_cs_process_fence_dep(p, chunk);
 			if (r)
 				return r;
-		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
 			if (r)
 				return r;
-		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
 			if (r)
 				return r;
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
+			if (r)
+				return r;
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
+			if (r)
+				return r;
+			break;
 		}
 	}
 
@@ -1192,8 +1284,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 {
 	int i;
 
-	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
-		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+	for (i = 0; i < p->num_post_deps; ++i) {
+		if (p->post_deps[i].chain) {
+			drm_syncobj_add_point(p->post_deps[i].syncobj,
+					      p->post_deps[i].chain,
+					      p->fence, p->post_deps[i].point);
+			p->post_deps[i].chain = NULL;
+		} else {
+			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
+						  p->fence);
+		}
+	}
 }
 
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index be84e43c1e19..997222bc1afe 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -523,6 +523,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
 #define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT    0x07
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL  0x08
 
 struct drm_amdgpu_cs_chunk {
 	__u32		chunk_id;
@@ -598,6 +600,12 @@ struct drm_amdgpu_cs_chunk_sem {
 	__u32 handle;
 };
 
+struct drm_amdgpu_cs_chunk_syncobj {
+	__u32 handle;
+	__u32 flags;
+	__u64 point;
+};
+
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2
-- 
2.17.1


* [PATCH 08/10] drm/syncobj: add transition ioctls between binary and timeline v2
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig, dri-devel, amd-gfx, intel-gfx
  Cc: Chunming Zhou

We need to import/export timeline points.

v2: unify to one transfer ioctl
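
Exactly one of src_point/dst_point may be non-zero, which selects the
direction; e.g. importing a binary payload as a new timeline point
(illustrative sketch):

    struct drm_syncobj_transfer xfer = {
            .src_handle = binary_handle,
            .dst_handle = timeline_handle,
            .src_point = 0,           /* source is a binary syncobj */
            .dst_point = point,       /* new point on the timeline */
    };
    int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer);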

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
---
 drivers/gpu/drm/drm_internal.h |  2 +
 drivers/gpu/drm/drm_ioctl.c    |  2 +
 drivers/gpu/drm/drm_syncobj.c  | 79 ++++++++++++++++++++++++++++++++++
 include/uapi/drm/drm.h         | 10 +++++
 4 files changed, 93 insertions(+)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index dab4d5936441..06c2adc4950e 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -176,6 +176,8 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
 int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
+int drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_private);
 int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_private);
 int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 7578ef6dc1d1..e9d4bed12783 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -673,6 +673,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 282982e58dbd..82f0ab96813e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -670,6 +670,85 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 					&args->handle);
 }
 
+static int drm_syncobj_binary_to_timeline(struct drm_file *file_private,
+					  struct drm_syncobj_transfer *args)
+{
+	struct drm_syncobj *timeline_syncobj = NULL;
+	struct dma_fence *fence;
+	struct dma_fence_chain *chain;
+	int ret;
+
+	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+	if (!timeline_syncobj)
+		return -ENOENT;
+	ret = drm_syncobj_find_fence(file_private, args->src_handle, 0, 0,
+				     &fence);
+	if (ret)
+		goto err;
+	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+	if (!chain) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
+err1:
+	dma_fence_put(fence);
+err:
+	drm_syncobj_put(timeline_syncobj);
+
+	return ret;
+}
+
+static int
+drm_syncobj_timeline_to_binary(struct drm_file *file_private,
+			       struct drm_syncobj_transfer *args)
+{
+	struct drm_syncobj *binary_syncobj = NULL;
+	struct dma_fence *fence;
+	int ret;
+
+	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+	if (!binary_syncobj)
+		return -ENOENT;
+	ret = drm_syncobj_find_fence(file_private, args->src_handle,
+				     args->src_point, args->flags, &fence);
+	if (ret)
+		goto err;
+	drm_syncobj_replace_fence(binary_syncobj, fence);
+	dma_fence_put(fence);
+err:
+	drm_syncobj_put(binary_syncobj);
+
+	return ret;
+}
+
+int
+drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_private)
+{
+	struct drm_syncobj_transfer *args = data;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -ENODEV;
+
+	if (args->pad)
+		return -EINVAL;
+
+	if (args->src_point && args->dst_point)
+		return -EINVAL;
+
+	if (!args->src_point && !args->dst_point)
+		return -EINVAL;
+
+	if (!args->src_point)
+		ret = drm_syncobj_binary_to_timeline(file_private, args);
+	else
+		ret = drm_syncobj_timeline_to_binary(file_private, args);
+
+	return ret;
+}
+
 static void syncobj_wait_fence_func(struct dma_fence *fence,
 				    struct dma_fence_cb *cb)
 {
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b2c36f2b2599..4c1e2e6579fa 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -735,6 +735,15 @@ struct drm_syncobj_handle {
 	__u32 pad;
 };
 
+struct drm_syncobj_transfer {
+	__u32 src_handle;
+	__u32 dst_handle;
+	__u64 src_point;
+	__u64 dst_point;
+	__u32 flags;
+	__u32 pad;
+};
+
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2)
@@ -933,6 +942,7 @@ extern "C" {
 
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
 #define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
 
 /**
  * Device specific ioctls should only be in their respective headers
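
For illustration, the two directions of the new ioctl could be driven from
userspace roughly as in the sketch below (fd and the handle variables are
hypothetical; drmIoctl is the usual libdrm wrapper):

  struct drm_syncobj_transfer xfer = { 0 };

  /* Binary -> timeline: src_point stays 0, dst_point picks the timeline
   * point that the binary fence becomes. */
  xfer.src_handle = binary_handle;
  xfer.dst_handle = timeline_handle;
  xfer.src_point  = 0;
  xfer.dst_point  = 10;
  drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer);

  /* Timeline -> binary: src_point picks the point to extract, dst_point
   * stays 0. */
  xfer.src_handle = timeline_handle;
  xfer.dst_handle = binary_handle;
  xfer.src_point  = 10;
  xfer.dst_point  = 0;
  drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer);
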
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 09/10] drm/syncobj: add timeline signal ioctl for syncobj v2
       [not found] ` <20181207155422.15967-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
                     ` (2 preceding siblings ...)
  2018-12-07 15:54   ` [PATCH 08/10] drm/syncobj: add transition ioctls between binary and timeline v2 Chunming Zhou
@ 2018-12-07 15:54   ` Chunming Zhou
  2018-12-07 15:54   ` [PATCH 10/10] drm/amdgpu: update version for timeline syncobj support in amdgpu Chunming Zhou
  4 siblings, 0 replies; 18+ messages in thread
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig-5C7GfCeVMHo,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	intel-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Chunming Zhou

v2: individually allocate the chain nodes, since each chain node is freed independently.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
---
 drivers/gpu/drm/drm_internal.h |  2 +
 drivers/gpu/drm/drm_ioctl.c    |  2 +
 drivers/gpu/drm/drm_syncobj.c  | 81 ++++++++++++++++++++++++++++++++++
 include/uapi/drm/drm.h         |  1 +
 4 files changed, 86 insertions(+)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 06c2adc4950e..d7a19bd89cb2 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -186,6 +186,8 @@ int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_private);
+int drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_private);
 int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index e9d4bed12783..a50dc62dc87b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -683,6 +683,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 82f0ab96813e..84a52da5ed7b 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1178,6 +1178,87 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 	return ret;
 }
 
+int
+drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_private)
+{
+	struct drm_syncobj_timeline_array *args = data;
+	struct drm_syncobj **syncobjs;
+	struct dma_fence_chain **chains;
+	uint64_t *points;
+	uint32_t i, j, timeline_count = 0;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -EOPNOTSUPP;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	if (args->count_handles == 0)
+		return -EINVAL;
+
+	ret = drm_syncobj_array_find(file_private,
+				     u64_to_user_ptr(args->handles),
+				     args->count_handles,
+				     &syncobjs);
+	if (ret < 0)
+		return ret;
+
+	points = kmalloc_array(args->count_handles, sizeof(*points),
+			       GFP_KERNEL);
+	if (!points) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	if (!args->points) {
+		memset(points, 0, args->count_handles * sizeof(uint64_t));
+	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
+				  sizeof(uint64_t) * args->count_handles)) {
+		ret = -EFAULT;
+		goto err_points;
+	}
+
+	for (i = 0; i < args->count_handles; i++) {
+		if (points[i])
+			timeline_count++;
+	}
+	chains = kmalloc_array(timeline_count, sizeof(void *), GFP_KERNEL);
+	if (!chains) {
+		ret = -ENOMEM;
+		goto err_points;
+	}
+	for (i = 0; i < timeline_count; i++) {
+		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+		if (!chains[i]) {
+			for (j = 0; j < i; j++)
+				kfree(chains[j]);
+			ret = -ENOMEM;
+			goto err_chains;
+		}
+	}
+
+	for (i = 0, j = 0; i < args->count_handles; i++) {
+		if (points[i]) {
+			struct dma_fence *fence = dma_fence_get_stub();
+
+			drm_syncobj_add_point(syncobjs[i], chains[j++],
+					      fence, points[i]);
+			dma_fence_put(fence);
+		} else {
+			drm_syncobj_assign_null_handle(syncobjs[i]);
+		}
+	}
+err_chains:
+	kfree(chains);
+err_points:
+	kfree(points);
+out:
+	drm_syncobj_array_free(syncobjs, args->count_handles);
+
+	return ret;
+}
+
 int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private)
 {
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 4c1e2e6579fa..fe00b74268eb 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -943,6 +943,7 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
 #define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
 #define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
 
 /**
  * Device specific ioctls should only be in their respective headers
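
For illustration, signaling a mix of timeline and binary syncobjs in a single
call could look roughly like the sketch below (fd and the handles are
hypothetical, and the drm_syncobj_timeline_array layout is the one introduced
earlier in this series; a point of 0 takes the plain binary signal path):

  uint32_t handles[2] = { timeline_handle, binary_handle };
  uint64_t points[2]  = { 5, 0 };   /* 0 -> plain binary signal */

  struct drm_syncobj_timeline_array args = {
          .handles       = (uintptr_t)handles,
          .points        = (uintptr_t)points,
          .count_handles = 2,
  };
  drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
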
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 10/10] drm/amdgpu: update version for timeline syncobj support in amdgpu
       [not found] ` <20181207155422.15967-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
                     ` (3 preceding siblings ...)
  2018-12-07 15:54   ` [PATCH 09/10] drm/syncobj: add timeline signal ioctl for syncobj v2 Chunming Zhou
@ 2018-12-07 15:54   ` Chunming Zhou
  4 siblings, 0 replies; 18+ messages in thread
From: Chunming Zhou @ 2018-12-07 15:54 UTC (permalink / raw)
  To: Christian.Koenig-5C7GfCeVMHo,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	intel-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Chunming Zhou

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7f1a3a..cafafdb1d03f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -71,9 +71,10 @@
  * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
  * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
+ * - 3.28.0 - Add syncobj timeline support to AMDGPU_CS.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	27
+#define KMS_DRIVER_MINOR	28
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
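
For illustration, userspace would typically gate the new timeline CS chunks on
this bump, roughly like the sketch below (using libdrm's drmGetVersion; error
handling omitted):

  drmVersionPtr ver = drmGetVersion(fd);
  bool has_timeline = ver && (ver->version_major > 3 ||
                              (ver->version_major == 3 &&
                               ver->version_minor >= 28));
  drmFreeVersion(ver);
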
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
       [not found]   ` <20181207155422.15967-3-david1.zhou-5C7GfCeVMHo@public.gmane.org>
@ 2018-12-12 10:49     ` Daniel Vetter
  2018-12-12 11:08       ` Koenig, Christian
  0 siblings, 1 reply; 18+ messages in thread
From: Daniel Vetter @ 2018-12-12 10:49 UTC (permalink / raw)
  To: Chunming Zhou
  Cc: Christian König, intel-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	Christian.Koenig-5C7GfCeVMHo,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
> From: Christian König <ckoenig.leichtzumerken@gmail.com>
> 
> Use the dma_fence_chain object to create a timeline of fence objects
> instead of just replacing the existing fence.
> 
> v2: rebase and cleanup
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

Somewhat jumping back into this. Not sure we discussed this already or
not. I'm a bit unclear on why we have to chain the fences in the timeline:

- The timeline stuff is modelled after the WDDM2 monitored fences. Which
  really are just u64 counters in memory somewhere (I think it could be
  system ram or vram). Because WDDM2 has the memory management entirely
  separated from rendering synchronization it totally allows userspace to
  create loops and deadlocks and everything else nasty using this - the
  memory manager won't deadlock because these monitored fences never leak
  into the buffer manager. And if CSes deadlock, gpu reset takes care of the
  mess.

- This has a few consequences, as in they seem to indeed work like a
  memory location: Userspace incrementing out-of-order (because they run
  batches updating the same fence on different engines) is totally fine,
  as is doing anything else "stupid".

- Now on linux we can't allow just anything, because we need to make sure that
  deadlocks don't leak into the memory manager. But as long as we block
  until the underlying dma_fence has materialized, nothing userspace can
  do will lead to such a deadlock. Even if userspace ends up submitting
  jobs without enough built-in synchronization, leading to out-of-order
  signalling of fences on that "timeline". And I don't think that would
  pose a problem for us.

Essentially I think we can look at timeline syncobj as a dma_fence
container indexed through an integer, and there's no need to enforce that
the timeline works like a real dma_fence timeline, with all its
guarantees. It's just a pile of (possibly, if userspace is stupid)
unrelated dma_fences. You could implement the entire thing in userspace
after all, except for the "we want to share these timeline objects between
processes" problem.

tldr; I think we can drop the dma_fence_chain complexity completely. Or at
least I'm not really understanding why it's needed.

Of course that means drivers cannot treat a drm_syncobj timeline as a
dma_fence timeline. But given the future fences stuff and all that, that's
already out of the window anyway.

What am I missing?
-Daniel

> ---
>  drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++
>  include/drm/drm_syncobj.h     |  5 +++++
>  2 files changed, 42 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
> index e19525af0cce..51f798e2194f 100644
> --- a/drivers/gpu/drm/drm_syncobj.c
> +++ b/drivers/gpu/drm/drm_syncobj.c
> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
>  	spin_unlock(&syncobj->lock);
>  }
>  
> +/**
> + * drm_syncobj_add_point - add new timeline point to the syncobj
> + * @syncobj: sync object to add timeline point do
> + * @chain: chain node to use to add the point
> + * @fence: fence to encapsulate in the chain node
> + * @point: sequence number to use for the point
> + *
> + * Add the chain node as new timeline point to the syncobj.
> + */
> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> +			   struct dma_fence_chain *chain,
> +			   struct dma_fence *fence,
> +			   uint64_t point)
> +{
> +	struct syncobj_wait_entry *cur, *tmp;
> +	struct dma_fence *prev;
> +
> +	dma_fence_get(fence);
> +
> +	spin_lock(&syncobj->lock);
> +
> +	prev = rcu_dereference_protected(syncobj->fence,
> +					 lockdep_is_held(&syncobj->lock));
> +	dma_fence_chain_init(chain, prev, fence, point);
> +	rcu_assign_pointer(syncobj->fence, &chain->base);
> +
> +	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
> +		list_del_init(&cur->node);
> +		syncobj_wait_syncobj_func(syncobj, cur);
> +	}
> +	spin_unlock(&syncobj->lock);
> +
> +	/* Walk the chain once to trigger garbage collection */
> +	dma_fence_chain_for_each(prev, fence);
> +}
> +EXPORT_SYMBOL(drm_syncobj_add_point);
> +
>  /**
>   * drm_syncobj_replace_fence - replace fence in a sync object.
>   * @syncobj: Sync object to replace fence in
> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
> index 7c6ed845c70d..8acb4ae4f311 100644
> --- a/include/drm/drm_syncobj.h
> +++ b/include/drm/drm_syncobj.h
> @@ -27,6 +27,7 @@
>  #define __DRM_SYNCOBJ_H__
>  
>  #include "linux/dma-fence.h"
> +#include "linux/dma-fence-chain.h"
>  
>  /**
>   * struct drm_syncobj - sync object.
> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
>  
>  struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
>  				     u32 handle);
> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> +			   struct dma_fence_chain *chain,
> +			   struct dma_fence *fence,
> +			   uint64_t point);
>  void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
>  			       struct dma_fence *fence);
>  int drm_syncobj_find_fence(struct drm_file *file_private,
> -- 
> 2.17.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
  2018-12-12 10:49     ` [Intel-gfx] " Daniel Vetter
@ 2018-12-12 11:08       ` Koenig, Christian
       [not found]         ` <12badb5a-f2c1-f819-c30a-f274d8a9401b-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Koenig, Christian @ 2018-12-12 11:08 UTC (permalink / raw)
  To: Daniel Vetter, Zhou, David(ChunMing)
  Cc: Christian König, intel-gfx, amd-gfx, dri-devel

On 12.12.18 at 11:49, Daniel Vetter wrote:
> On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
>> From: Christian König <ckoenig.leichtzumerken@gmail.com>
>>
>> Use the dma_fence_chain object to create a timeline of fence objects
>> instead of just replacing the existing fence.
>>
>> v2: rebase and cleanup
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
> Somewhat jumping back into this. Not sure we discussed this already or
> not. I'm a bit unclear on why we have to chain the fences in the timeline:
>
> - The timeline stuff is modelled after the WDDM2 monitored fences. Which
>    really are just u64 counters in memory somewhere (I think it could be
>    system ram or vram). Because WDDM2 has the memory management entirely
>    separated from rendering synchronization it totally allows userspace to
>    create loops and deadlocks and everything else nasty using this - the
>    memory manager won't deadlock because these monitored fences never leak
>    into the buffer manager. And if CSes deadlock, gpu reset takes care of the
>    mess.
>
> - This has a few consequences, as in they seem to indeed work like a
>    memory location: Userspace incrementing out-of-order (because they run
>    batches updating the same fence on different engines) is totally fine,
>    as is doing anything else "stupid".
>
> - Now on linux we can't allow just anything, because we need to make sure that
>    deadlocks don't leak into the memory manager. But as long as we block
>    until the underlying dma_fence has materialized, nothing userspace can
>    do will lead to such a deadlock. Even if userspace ends up submitting
>    jobs without enough built-in synchronization, leading to out-of-order
>    signalling of fences on that "timeline". And I don't think that would
>    pose a problem for us.
>
> Essentially I think we can look at timeline syncobj as a dma_fence
> container indexed through an integer, and there's no need to enforce that
> the timeline works like a real dma_fence timeline, with all its
> guarantees. It's just a pile of (possibly, if userspace is stupid)
> unrelated dma_fences. You could implement the entire thing in userspace
> after all, except for the "we want to share these timeline objects between
> processes" problem.
>
> tldr; I think we can drop the dma_fence_chain complexity completely. Or at
> least I'm not really understanding why it's needed.
>
> Of course that means drivers cannot treat a drm_syncobj timeline as a
> dma_fence timeline. But given the future fences stuff and all that, that's
> already out of the window anyway.
>
> What am I missing?

Good question, since that was exactly my initial idea as well.

Key point is that our Vulkan guys came back and said that this wouldn't 
be sufficient, but I honestly don't fully understand why.

Anyway that's why David came up with using the fence array to wait for 
all previously added fences, which I then later on extended into this 
chain container.

I have to admit that it is way more defensively implemented this way. E.g. 
there are far fewer things userspace can do wrong.

The principal idea is that when they mess things up they are always 
going to wait more than necessary, but never less.
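
E.g. resolving a wait for a given point then boils down to roughly the sketch
below (built on the helpers from patch 1; dma_fence_chain_find_seqno settles
on a node with seqno >= point when the exact point was skipped):

  /* Sketch: may end up waiting on a later point than requested, but
   * never on an earlier one. */
  struct dma_fence *fence = drm_syncobj_fence_get(syncobj);

  if (!dma_fence_chain_find_seqno(&fence, point) && fence)
          dma_fence_wait(fence, true);
  dma_fence_put(fence);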

Christian.

> -Daniel
>
>> ---
>>   drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++
>>   include/drm/drm_syncobj.h     |  5 +++++
>>   2 files changed, 42 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
>> index e19525af0cce..51f798e2194f 100644
>> --- a/drivers/gpu/drm/drm_syncobj.c
>> +++ b/drivers/gpu/drm/drm_syncobj.c
>> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
>>   	spin_unlock(&syncobj->lock);
>>   }
>>   
>> +/**
>> + * drm_syncobj_add_point - add new timeline point to the syncobj
>> + * @syncobj: sync object to add timeline point do
>> + * @chain: chain node to use to add the point
>> + * @fence: fence to encapsulate in the chain node
>> + * @point: sequence number to use for the point
>> + *
>> + * Add the chain node as new timeline point to the syncobj.
>> + */
>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
>> +			   struct dma_fence_chain *chain,
>> +			   struct dma_fence *fence,
>> +			   uint64_t point)
>> +{
>> +	struct syncobj_wait_entry *cur, *tmp;
>> +	struct dma_fence *prev;
>> +
>> +	dma_fence_get(fence);
>> +
>> +	spin_lock(&syncobj->lock);
>> +
>> +	prev = rcu_dereference_protected(syncobj->fence,
>> +					 lockdep_is_held(&syncobj->lock));
>> +	dma_fence_chain_init(chain, prev, fence, point);
>> +	rcu_assign_pointer(syncobj->fence, &chain->base);
>> +
>> +	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
>> +		list_del_init(&cur->node);
>> +		syncobj_wait_syncobj_func(syncobj, cur);
>> +	}
>> +	spin_unlock(&syncobj->lock);
>> +
>> +	/* Walk the chain once to trigger garbage collection */
>> +	dma_fence_chain_for_each(prev, fence);
>> +}
>> +EXPORT_SYMBOL(drm_syncobj_add_point);
>> +
>>   /**
>>    * drm_syncobj_replace_fence - replace fence in a sync object.
>>    * @syncobj: Sync object to replace fence in
>> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
>> index 7c6ed845c70d..8acb4ae4f311 100644
>> --- a/include/drm/drm_syncobj.h
>> +++ b/include/drm/drm_syncobj.h
>> @@ -27,6 +27,7 @@
>>   #define __DRM_SYNCOBJ_H__
>>   
>>   #include "linux/dma-fence.h"
>> +#include "linux/dma-fence-chain.h"
>>   
>>   /**
>>    * struct drm_syncobj - sync object.
>> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
>>   
>>   struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
>>   				     u32 handle);
>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
>> +			   struct dma_fence_chain *chain,
>> +			   struct dma_fence *fence,
>> +			   uint64_t point);
>>   void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
>>   			       struct dma_fence *fence);
>>   int drm_syncobj_find_fence(struct drm_file *file_private,
>> -- 
>> 2.17.1
>>
>> _______________________________________________
>> Intel-gfx mailing list
>> Intel-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
       [not found]         ` <12badb5a-f2c1-f819-c30a-f274d8a9401b-5C7GfCeVMHo@public.gmane.org>
@ 2018-12-12 11:15           ` Daniel Vetter
       [not found]             ` <CAKMK7uEDuYmuYTbCr3fP-_bVKWehMMWn+SbJkEUAB_uWn6X1Gg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Daniel Vetter @ 2018-12-12 11:15 UTC (permalink / raw)
  To: Christian König
  Cc: Chunming Zhou, intel-gfx, amd-gfx list, dri-devel, Christian König

On Wed, Dec 12, 2018 at 12:08 PM Koenig, Christian
<Christian.Koenig@amd.com> wrote:
>
> On 12.12.18 at 11:49, Daniel Vetter wrote:
> > On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
> >> From: Christian König <ckoenig.leichtzumerken@gmail.com>
> >>
> >> Use the dma_fence_chain object to create a timeline of fence objects
> >> instead of just replacing the existing fence.
> >>
> >> v2: rebase and cleanup
> >>
> >> Signed-off-by: Christian König <christian.koenig@amd.com>
> > Somewhat jumping back into this. Not sure we discussed this already or
> > not. I'm a bit unclear on why we have to chain the fences in the timeline:
> >
> > - The timeline stuff is modelled after the WDDM2 monitored fences. Which
> >    really are just u64 counters in memory somewhere (I think it could be
> >    system ram or vram). Because WDDM2 has the memory management entirely
> >    separated from rendering synchronization it totally allows userspace to
> >    create loops and deadlocks and everything else nasty using this - the
> >    memory manager won't deadlock because these monitored fences never leak
> >    into the buffer manager. And if CSes deadlock, gpu reset takes care of the
> >    mess.
> >
> > - This has a few consequences, as in they seem to indeed work like a
> >    memory location: Userspace incrementing out-of-order (because they run
> >    batches updating the same fence on different engines) is totally fine,
> >    as is doing anything else "stupid".
> >
> > - Now on linux we can't allow just anything, because we need to make sure that
> >    deadlocks don't leak into the memory manager. But as long as we block
> >    until the underlying dma_fence has materialized, nothing userspace can
> >    do will lead to such a deadlock. Even if userspace ends up submitting
> >    jobs without enough built-in synchronization, leading to out-of-order
> >    signalling of fences on that "timeline". And I don't think that would
> >    pose a problem for us.
> >
> > Essentially I think we can look at timeline syncobj as a dma_fence
> > container indexed through an integer, and there's no need to enforce that
> > the timeline works like a real dma_fence timeline, with all its
> > guarantees. It's just a pile of (possibly, if userspace is stupid)
> > unrelated dma_fences. You could implement the entire thing in userspace
> > after all, except for the "we want to share these timeline objects between
> > processes" problem.
> >
> > tldr; I think we can drop the dma_fence_chain complexity completely. Or at
> > least I'm not really understanding why it's needed.
> >
> > Of course that means drivers cannot treat a drm_syncobj timeline as a
> > dma_fence timeline. But given the future fences stuff and all that, that's
> > already out of the window anyway.
> >
> > What am I missing?
>
> Good question, since that was exactly my initial idea as well.
>
> Key point is that our Vulkan guys came back and said that this wouldn't
> be sufficient, but I honestly don't fully understand why.

Hm, sounds like we really need those testcases (vk cts on top of
mesa, igt) so we can talk about the exact corner cases we care about
and why.

I guess one thing that might happen is that userspace leaves out a
number and never sets that fence, relying on the >= semantics of the
monitored fence to unblock that thread. E.g. when skipping a frame in
one of the auxiliary workloads. For that case we'd need to make sure
we don't just wait for the given fence to materialize, but also any
fences later in the timeline.
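
Sketch of that scenario from the userspace side (the handle is hypothetical;
the waiter on point 3 must unblock once any point >= 3 signals):

  uint32_t handle = timeline_handle;  /* hypothetical */
  uint64_t point  = 3;                /* never signaled directly */

  struct drm_syncobj_timeline_wait wait = {
          .handles       = (uintptr_t)&handle,
          .points        = (uintptr_t)&point,
          .timeout_nsec  = INT64_MAX,
          .count_handles = 1,
          .flags         = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
  };
  /* Unblocks when e.g. point 4 signals, even though 3 was skipped. */
  drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);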

But we can't decide that without understanding the actual use-case
that needs to be supported at the other end of the stack, and what all
the bits in between should look like.

I guess we're back to "uapi design without userspace doesn't make sense" ...

> Anyway that's why David came up with using the fence array to wait for
> all previously added fences, which I then later on extended into this
> chain container.
>
> I have to admit that it is way more defensively implemented this way. E.g.
> there are far fewer things userspace can do wrong.
>
> The principal idea is that when they mess things up they are always
> going to wait more than necessary, but never less.

That seems against the spirit of vulkan, which is very much about "you
get all the pieces". It also might dig us a hole in the future, if we
ever get around to moving towards a WDDM2 style memory management
model. For future proofing I think it would make sense if we implement
the minimal uapi we need for vk timelines, not the strictest
guarantees we can get away with (without performance impact) with
current drivers.
-Daniel


> Christian.
>
> > -Daniel
> >
> >> ---
> >>   drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++
> >>   include/drm/drm_syncobj.h     |  5 +++++
> >>   2 files changed, 42 insertions(+)
> >>
> >> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
> >> index e19525af0cce..51f798e2194f 100644
> >> --- a/drivers/gpu/drm/drm_syncobj.c
> >> +++ b/drivers/gpu/drm/drm_syncobj.c
> >> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
> >>      spin_unlock(&syncobj->lock);
> >>   }
> >>
> >> +/**
> >> + * drm_syncobj_add_point - add new timeline point to the syncobj
> >> + * @syncobj: sync object to add timeline point do
> >> + * @chain: chain node to use to add the point
> >> + * @fence: fence to encapsulate in the chain node
> >> + * @point: sequence number to use for the point
> >> + *
> >> + * Add the chain node as new timeline point to the syncobj.
> >> + */
> >> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> >> +                       struct dma_fence_chain *chain,
> >> +                       struct dma_fence *fence,
> >> +                       uint64_t point)
> >> +{
> >> +    struct syncobj_wait_entry *cur, *tmp;
> >> +    struct dma_fence *prev;
> >> +
> >> +    dma_fence_get(fence);
> >> +
> >> +    spin_lock(&syncobj->lock);
> >> +
> >> +    prev = rcu_dereference_protected(syncobj->fence,
> >> +                                     lockdep_is_held(&syncobj->lock));
> >> +    dma_fence_chain_init(chain, prev, fence, point);
> >> +    rcu_assign_pointer(syncobj->fence, &chain->base);
> >> +
> >> +    list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
> >> +            list_del_init(&cur->node);
> >> +            syncobj_wait_syncobj_func(syncobj, cur);
> >> +    }
> >> +    spin_unlock(&syncobj->lock);
> >> +
> >> +    /* Walk the chain once to trigger garbage collection */
> >> +    dma_fence_chain_for_each(prev, fence);
> >> +}
> >> +EXPORT_SYMBOL(drm_syncobj_add_point);
> >> +
> >>   /**
> >>    * drm_syncobj_replace_fence - replace fence in a sync object.
> >>    * @syncobj: Sync object to replace fence in
> >> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
> >> index 7c6ed845c70d..8acb4ae4f311 100644
> >> --- a/include/drm/drm_syncobj.h
> >> +++ b/include/drm/drm_syncobj.h
> >> @@ -27,6 +27,7 @@
> >>   #define __DRM_SYNCOBJ_H__
> >>
> >>   #include "linux/dma-fence.h"
> >> +#include "linux/dma-fence-chain.h"
> >>
> >>   /**
> >>    * struct drm_syncobj - sync object.
> >> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
> >>
> >>   struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
> >>                                   u32 handle);
> >> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> >> +                       struct dma_fence_chain *chain,
> >> +                       struct dma_fence *fence,
> >> +                       uint64_t point);
> >>   void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
> >>                             struct dma_fence *fence);
> >>   int drm_syncobj_find_fence(struct drm_file *file_private,
> >> --
> >> 2.17.1
> >>
> >> _______________________________________________
> >> Intel-gfx mailing list
> >> Intel-gfx@lists.freedesktop.org
> >> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
>


-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 18+ messages in thread

* RE: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
       [not found]             ` <CAKMK7uEDuYmuYTbCr3fP-_bVKWehMMWn+SbJkEUAB_uWn6X1Gg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-12-12 11:39               ` Zhou, David(ChunMing)
       [not found]                 ` <BY1PR12MB0502201CA782F5F2DBB661B8B4A70-PicGAnIBOobrCwm+z9iKNgdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  2018-12-12 12:24                 ` Daniel Vetter
  0 siblings, 2 replies; 18+ messages in thread
From: Zhou, David(ChunMing) @ 2018-12-12 11:39 UTC (permalink / raw)
  To: Daniel Vetter, Koenig, Christian, Rakos, Daniel, Jason Ekstrand
  Cc: Christian König, intel-gfx, amd-gfx list, dri-devel

+ Daniel Rakos and Jason Ekstrand.

 Below is the background from Daniel R, which should explain why: 
" ISVs, especially those coming from D3D12, are unsatisfied with the behavior of the Vulkan semaphores as they are unhappy with the fact that for every single dependency they need to use separate semaphores due to their binary nature.
Compared to that a synchronization primitive like D3D12 monitored fences enable one of those to be used to track a sequence of operations by simply associating timeline values to the completion of individual operations. This allows them to track the lifetime and usage of resources and the ordered completion of sequences.
Besides that, they also want to use a single synchronization primitive to be able to handle GPU-to-GPU and GPU-to-CPU dependencies, compared to using semaphores for the former and fences for the latter.
In addition, compared to legacy semaphores, timeline semaphores are proposed to support wait-before-signal, i.e. allow enqueueing a semaphore wait operation with a wait value that is larger than any of the already enqueued signal values. This seems to be a hard requirement for ISVs, and it cannot be supported without UMD-side queue batching; even UMD-side queue batching doesn’t help the situation when such a semaphore is externally shared with another API. Thus in order to properly support wait-before-signal the KMD implementation also has to be able to support such dependencies.
"

Btw, we already added a test case to igt, and this has been tested by many existing tests, like the libdrm unit tests, related igt tests, the Vulkan CTS, and Steam games.

-David
> -----Original Message-----
> From: Daniel Vetter <daniel@ffwll.ch>
> Sent: Wednesday, December 12, 2018 7:15 PM
> To: Koenig, Christian <Christian.Koenig@amd.com>
> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; dri-devel <dri-
> devel@lists.freedesktop.org>; amd-gfx list <amd-gfx@lists.freedesktop.org>;
> intel-gfx <intel-gfx@lists.freedesktop.org>; Christian König
> <ckoenig.leichtzumerken@gmail.com>
> Subject: Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new
> drm_syncobj_add_point interface v2
> 
> On Wed, Dec 12, 2018 at 12:08 PM Koenig, Christian
> <Christian.Koenig@amd.com> wrote:
> >
> > On 12.12.18 at 11:49, Daniel Vetter wrote:
> > > On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
> > >> From: Christian König <ckoenig.leichtzumerken@gmail.com>
> > >>
> > >> Use the dma_fence_chain object to create a timeline of fence
> > >> objects instead of just replacing the existing fence.
> > >>
> > >> v2: rebase and cleanup
> > >>
> > >> Signed-off-by: Christian König <christian.koenig@amd.com>
> > > Somewhat jumping back into this. Not sure we discussed this already
> > > or not. I'm a bit unclear on why we have to chain the fences in the
> timeline:
> > >
> > > - The timeline stuff is modelled after the WDDM2 monitored fences.
> Which
> > >    really are just u64 counters in memory somewhere (I think it could be
> > >    system ram or vram). Because WDDM2 has the memory management
> entirely
> > >    separated from rendering synchronization it totally allows userspace to
> > >    create loops and deadlocks and everything else nasty using this - the
> > >    memory manager won't deadlock because these monitored fences
> never leak
> > >    into the buffer manager. And if CSes deadlock, gpu reset takes care of the
> > >    mess.
> > >
> > > - This has a few consequences, as in they seem to indeed work like a
> > >    memory location: Userspace incrementing out-of-order (because they
> run
> > >    batches updating the same fence on different engines) is totally fine,
> > >    as is doing anything else "stupid".
> > >
> > > - Now on linux we can't allow just anything, because we need to make sure
> that
> > >    deadlocks don't leak into the memory manager. But as long as we block
> > >    until the underlying dma_fence has materialized, nothing userspace can
> > >    do will lead to such a deadlock. Even if userspace ends up submitting
> > >    jobs without enough built-in synchronization, leading to out-of-order
> > >    signalling of fences on that "timeline". And I don't think that would
> > >    pose a problem for us.
> > >
> > > Essentially I think we can look at timeline syncobj as a dma_fence
> > > container indexed through an integer, and there's no need to enforce
> > > that the timeline works like a real dma_fence timeline, with all its
> > > guarantees. It's just a pile of (possibly, if userspace is stupid)
> > > unrelated dma_fences. You could implement the entire thing in
> > > userspace after all, except for the "we want to share these timeline
> > > objects between processes" problem.
> > >
> > > tldr; I think we can drop the dma_fence_chain complexity completely.
> > > Or at least I'm not really understanding why it's needed.
> > >
> > > Of course that means drivers cannot treat a drm_syncobj timeline as
> > > a dma_fence timeline. But given the future fences stuff and all
> > > that, that's already out of the window anyway.
> > >
> > > What am I missing?
> >
> > Good question, since that was exactly my initial idea as well.
> >
> > Key point is that our Vulkan guys came back and said that this
> > wouldn't be sufficient, but I honestly don't fully understand why.
> 
> Hm, sounds like we really need those testcases (vk cts on top of mesa, igt)
> so we can talk about the exact corner cases we care about and why.
> 
> I guess one thing that might happen is that userspace leaves out a number
> and never sets that fence, relying on the >= semantics of the monitored
> fence to unblock that thread. E.g. when skipping a frame in one of the
> auxiliary workloads. For that case we'd need to make sure we don't just wait
> for the given fence to materialize, but also any fences later in the timeline.
> 
> But we can't decide that without understanding the actual use-case that
> needs to be supported at the other end of the stack, and what all the bits in
> between should look like.
> 
> I guess we're back to "uapi design without userspace doesn't make sense" ...
> 
> > Anyway that's why David came up with using the fence array to wait for
> > all previously added fences, which I then later on extended into this
> > chain container.
> >
> > I have to admit that it is way more defensively implemented this way. E.g.
> > there are far fewer things userspace can do wrong.
> >
> > The principal idea is that when they mess things up they are always
> > going to wait more than necessary, but never less.
> 
> That seems against the spirit of vulkan, which is very much about "you get all
> the pieces". It also might dig us a hole in the future, if we ever get around to
> moving towards a WDDM2 style memory management model. For future
> proofing I think it would make sense if we implement the minimal uapi we
> need for vk timelines, not the strictest guarantees we can get away with
> (without performance impact) with current drivers.
> -Daniel
> 
> 
> > Christian.
> >
> > > -Daniel
> > >
> > >> ---
> > >>   drivers/gpu/drm/drm_syncobj.c | 37
> +++++++++++++++++++++++++++++++++++
> > >>   include/drm/drm_syncobj.h     |  5 +++++
> > >>   2 files changed, 42 insertions(+)
> > >>
> > >> diff --git a/drivers/gpu/drm/drm_syncobj.c
> > >> b/drivers/gpu/drm/drm_syncobj.c index e19525af0cce..51f798e2194f
> > >> 100644
> > >> --- a/drivers/gpu/drm/drm_syncobj.c
> > >> +++ b/drivers/gpu/drm/drm_syncobj.c
> > >> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct
> drm_syncobj *syncobj,
> > >>      spin_unlock(&syncobj->lock);
> > >>   }
> > >>
> > >> +/**
> > >> + * drm_syncobj_add_point - add new timeline point to the syncobj
> > >> + * @syncobj: sync object to add timeline point do
> > >> + * @chain: chain node to use to add the point
> > >> + * @fence: fence to encapsulate in the chain node
> > >> + * @point: sequence number to use for the point
> > >> + *
> > >> + * Add the chain node as new timeline point to the syncobj.
> > >> + */
> > >> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> > >> +                       struct dma_fence_chain *chain,
> > >> +                       struct dma_fence *fence,
> > >> +                       uint64_t point) {
> > >> +    struct syncobj_wait_entry *cur, *tmp;
> > >> +    struct dma_fence *prev;
> > >> +
> > >> +    dma_fence_get(fence);
> > >> +
> > >> +    spin_lock(&syncobj->lock);
> > >> +
> > >> +    prev = rcu_dereference_protected(syncobj->fence,
> > >> +                                     lockdep_is_held(&syncobj->lock));
> > >> +    dma_fence_chain_init(chain, prev, fence, point);
> > >> +    rcu_assign_pointer(syncobj->fence, &chain->base);
> > >> +
> > >> +    list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
> > >> +            list_del_init(&cur->node);
> > >> +            syncobj_wait_syncobj_func(syncobj, cur);
> > >> +    }
> > >> +    spin_unlock(&syncobj->lock);
> > >> +
> > >> +    /* Walk the chain once to trigger garbage collection */
> > >> +    dma_fence_chain_for_each(prev, fence); }
> > >> +EXPORT_SYMBOL(drm_syncobj_add_point);
> > >> +
> > >>   /**
> > >>    * drm_syncobj_replace_fence - replace fence in a sync object.
> > >>    * @syncobj: Sync object to replace fence in diff --git
> > >> a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index
> > >> 7c6ed845c70d..8acb4ae4f311 100644
> > >> --- a/include/drm/drm_syncobj.h
> > >> +++ b/include/drm/drm_syncobj.h
> > >> @@ -27,6 +27,7 @@
> > >>   #define __DRM_SYNCOBJ_H__
> > >>
> > >>   #include "linux/dma-fence.h"
> > >> +#include "linux/dma-fence-chain.h"
> > >>
> > >>   /**
> > >>    * struct drm_syncobj - sync object.
> > >> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj
> > >> *syncobj)
> > >>
> > >>   struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
> > >>                                   u32 handle);
> > >> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> > >> +                       struct dma_fence_chain *chain,
> > >> +                       struct dma_fence *fence,
> > >> +                       uint64_t point);
> > >>   void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
> > >>                             struct dma_fence *fence);
> > >>   int drm_syncobj_find_fence(struct drm_file *file_private,
> > >> --
> > >> 2.17.1
> > >>
> > >> _______________________________________________
> > >> Intel-gfx mailing list
> > >> Intel-gfx@lists.freedesktop.org
> > >> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> >
> 
> 
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
       [not found]                 ` <BY1PR12MB0502201CA782F5F2DBB661B8B4A70-PicGAnIBOobrCwm+z9iKNgdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-12-12 12:00                   ` Koenig, Christian
  2018-12-12 12:20                     ` Daniel Vetter
  0 siblings, 1 reply; 18+ messages in thread
From: Koenig, Christian @ 2018-12-12 12:00 UTC (permalink / raw)
  To: Zhou, David(ChunMing), Daniel Vetter, Rakos, Daniel, Jason Ekstrand
  Cc: Christian König, intel-gfx, amd-gfx list, dri-devel

> Key point is that our Vulkan guys came back and said that this
> wouldn't be sufficient, but I honestly don't fully understand why.
> Hm, sounds like we really need those testcases (vk cts on top of mesa, igt)
> so we can talk about the exact corner cases we care about and why.
Yes, that's why I made it mandatory that David provides an igt test case 
alongside the ones in libdrm.

> I guess one thing that might happen is that userspace leaves out a number
> and never sets that fence, relying on the >= semantics of the monitored
> fence to unblock that thread. E.g. when skipping a frame in one of the
> auxiliary workloads. For that case we'd need to make sure we don't just wait
> for the given fence to materialize, but also any fences later in the timeline.
Correct and that's also how we have implemented it.

> But we can't decide that without understanding the actual use-case that
> needs to be supported at the other end of the stack, and what all the bits in
> between should look like.
>
> I guess we're back to "uapi design without userspace doesn't make sense" ...
Yeah, well, chicken and egg problem. Amdvlk probably won't make the code 
that supports this public until the kernel has accepted it, and the kernel 
doesn't accept it until the amdvlk patches are public.

David, can you take care of this and release the userspace patches as well?

In addition to that, except for a bit of polishing, the UAPI has stayed the 
same from the very beginning while being reviewed multiple times now. So 
it seems to be rather sane.

> That seems against the spirit of vulkan, which is very much about "you get all
> the pieces". It also might dig us a hole in the future, if we ever get around to
> moving towards a WDDM2 style memory management model. For future
> proofing I think it would make sense if we implement the minimal uapi we
> need for vk timelines, not the strictest guarantees we can get away with
> (without performance impact) with current drivers.
Well I'm repeating myself, but while this seems to be a good idea for a 
userspace API it is not necessary for a kernel API.

In other words userspace can make all the mess it wants as long as it 
stays inside the same process, but when it starts to mess with 
inter-process communication (e.g. X or Wayland) the stuff should be 
waterproof and not allow the mess to leak between processes.

And we can always make the restrictions looser later on, but tightening 
them when userspace already depends on a behavior is not possible any 
more.

Regards,
Christian.

On 12.12.18 at 12:39, Zhou, David(ChunMing) wrote:
> + Daniel Rakos and Jason Ekstrand.
>
>   Below is the background from Daniel R, which should explain why:
> " ISVs, especially those coming from D3D12, are unsatisfied with the behavior of the Vulkan semaphores as they are unhappy with the fact that for every single dependency they need to use separate semaphores due to their binary nature.
> Compared to that a synchronization primitive like D3D12 monitored fences enable one of those to be used to track a sequence of operations by simply associating timeline values to the completion of individual operations. This allows them to track the lifetime and usage of resources and the ordered completion of sequences.
> Besides that, they also want to use a single synchronization primitive to be able to handle GPU-to-GPU and GPU-to-CPU dependencies, compared to using semaphores for the former and fences for the latter.
> In addition, compared to legacy semaphores, timeline semaphores are proposed to support wait-before-signal, i.e. allow enqueueing a semaphore wait operation with a wait value that is larger than any of the already enqueued signal values. This seems to be a hard requirement for ISVs, and it cannot be supported without UMD-side queue batching; even UMD-side queue batching doesn’t help the situation when such a semaphore is externally shared with another API. Thus in order to properly support wait-before-signal the KMD implementation also has to be able to support such dependencies.
> "
>
> Btw, we already added a test case to igt, and this has been tested by many existing tests, like the libdrm unit tests, related igt tests, the Vulkan CTS, and Steam games.
>
> -David
>> -----Original Message-----
>> From: Daniel Vetter <daniel@ffwll.ch>
>> Sent: Wednesday, December 12, 2018 7:15 PM
>> To: Koenig, Christian <Christian.Koenig@amd.com>
>> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; dri-devel <dri-
>> devel@lists.freedesktop.org>; amd-gfx list <amd-gfx@lists.freedesktop.org>;
>> intel-gfx <intel-gfx@lists.freedesktop.org>; Christian König
>> <ckoenig.leichtzumerken@gmail.com>
>> Subject: Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new
>> drm_syncobj_add_point interface v2
>>
>> On Wed, Dec 12, 2018 at 12:08 PM Koenig, Christian
>> <Christian.Koenig@amd.com> wrote:
>>> On 12.12.18 at 11:49, Daniel Vetter wrote:
>>>> On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
>>>>> From: Christian König <ckoenig.leichtzumerken@gmail.com>
>>>>>
>>>>> Use the dma_fence_chain object to create a timeline of fence
>>>>> objects instead of just replacing the existing fence.
>>>>>
>>>>> v2: rebase and cleanup
>>>>>
>>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> Somewhat jumping back into this. Not sure we discussed this already
>>>> or not. I'm a bit unclear on why we have to chain the fences in the
>> timeline:
>>>> - The timeline stuff is modelled after the WDDM2 monitored fences.
>> Which
>>>>     really are just u64 counters in memory somewhere (I think it could be
>>>>     system ram or vram). Because WDDM2 has the memory management
>> entirely
>>>>     separated from rendering synchronization it totally allows userspace to
>>>>     create loops and deadlocks and everything else nasty using this - the
>>>>     memory manager won't deadlock because these monitored fences
>> never leak
>>>>     into the buffer manager. And if CSes deadlock, gpu reset takes care of the
>>>>     mess.
>>>>
>>>> - This has a few consequences, as in they seem to indeed work like a
>>>>     memory location: Userspace incrementing out-of-order (because they
>> run
>>>>     batches updating the same fence on different engines) is totally fine,
>>>>     as is doing anything else "stupid".
>>>>
>>>> - Now on linux we can't allow just anything, because we need to make sure
>> that
>>>>     deadlocks don't leak into the memory manager. But as long as we block
>>>>     until the underlying dma_fence has materialized, nothing userspace can
>>>>     do will lead to such a deadlock. Even if userspace ends up submitting
>>>>     jobs without enough built-in synchronization, leading to out-of-order
>>>>     signalling of fences on that "timeline". And I don't think that would
>>>>     pose a problem for us.
>>>>
>>>> Essentially I think we can look at timeline syncobj as a dma_fence
>>>> container indexed through an integer, and there's no need to enforce
>>>> that the timeline works like a real dma_fence timeline, with all its
>>>> guarantees. It's just a pile of (possibly, if userspace is stupid)
>>>> unrelated dma_fences. You could implement the entire thing in
>>>> userspace after all, except for the "we want to share these timeline
>>>> objects between processes" problem.
>>>>
>>>> tldr; I think we can drop the dma_fence_chain complexity completely.
>>>> Or at least I'm not really understanding why it's needed.
>>>>
>>>> Of course that means drivers cannot treat a drm_syncobj timeline as
>>>> a dma_fence timeline. But given the future fences stuff and all
>>>> that, that's already out of the window anyway.
>>>>
>>>> What am I missing?
>>> Good question, since that was exactly my initial idea as well.
>>>
>>> Key point is that our Vulkan guys came back and said that this
>>> wouldn't be sufficient, but I honestly don't fully understand why.
>> Hm, sounds like we really need those testcases (vk cts on top of mesa, igt)
>> so we can talk about the exact corner cases we care about and why.
>>
>> I guess one thing that might happen is that userspace leaves out a number
>> and never sets that fence, relying on the >= semantics of the monitored
>> fence to unblock that thread. E.g. when skipping a frame in one of the
>> auxiliary workloads. For that case we'd need to make sure we don't just wait
>> for the given fence to materialize, but also any fences later in the timeline.
>>
>> But we can't decide that without understanding the actual use-case that
>> needs to be supported at the other end of the stack, and what all the bits in
>> between should look like.
>>
>> I guess we're back to "uapi design without userspace doesn't make sense" ...
>>
>>> Anyway that's why David came up with using the fence array to wait for
>>> all previously added fences, which I then later on extended into this
>>> chain container.
>>>
>>> I have to admit that it is way more defensively implemented this way. E.g.
>>> there are far fewer things userspace can do wrong.
>>>
>>> The principal idea is that when they mess things up they are always
>>> going to wait more than necessary, but never less.
>> That seems against the spirit of vulkan, which is very much about "you get all
>> the pieces". It also might dig us a hole in the future, if we ever get around to
>> moving towards a WDDM2 style memory management model. For future
>> proofing I think it would make sense if we implement the minimal uapi we
>> need for vk timelines, not the strictest guarantees we can get away with
>> (without performance impact) with current drivers.
>> -Daniel
>>
>>
>>> Christian.
>>>
>>>> -Daniel
>>>>
>>>>> ---
>>>>>    drivers/gpu/drm/drm_syncobj.c | 37
>> +++++++++++++++++++++++++++++++++++
>>>>>    include/drm/drm_syncobj.h     |  5 +++++
>>>>>    2 files changed, 42 insertions(+)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/drm_syncobj.c
>>>>> b/drivers/gpu/drm/drm_syncobj.c index e19525af0cce..51f798e2194f
>>>>> 100644
>>>>> --- a/drivers/gpu/drm/drm_syncobj.c
>>>>> +++ b/drivers/gpu/drm/drm_syncobj.c
>>>>> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct
>> drm_syncobj *syncobj,
>>>>>       spin_unlock(&syncobj->lock);
>>>>>    }
>>>>>
>>>>> +/**
>>>>> + * drm_syncobj_add_point - add new timeline point to the syncobj
>>>>> + * @syncobj: sync object to add timeline point do
>>>>> + * @chain: chain node to use to add the point
>>>>> + * @fence: fence to encapsulate in the chain node
>>>>> + * @point: sequence number to use for the point
>>>>> + *
>>>>> + * Add the chain node as new timeline point to the syncobj.
>>>>> + */
>>>>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
>>>>> +                       struct dma_fence_chain *chain,
>>>>> +                       struct dma_fence *fence,
>>>>> +                       uint64_t point) {
>>>>> +    struct syncobj_wait_entry *cur, *tmp;
>>>>> +    struct dma_fence *prev;
>>>>> +
>>>>> +    dma_fence_get(fence);
>>>>> +
>>>>> +    spin_lock(&syncobj->lock);
>>>>> +
>>>>> +    prev = rcu_dereference_protected(syncobj->fence,
>>>>> +                                     lockdep_is_held(&syncobj->lock));
>>>>> +    dma_fence_chain_init(chain, prev, fence, point);
>>>>> +    rcu_assign_pointer(syncobj->fence, &chain->base);
>>>>> +
>>>>> +    list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
>>>>> +            list_del_init(&cur->node);
>>>>> +            syncobj_wait_syncobj_func(syncobj, cur);
>>>>> +    }
>>>>> +    spin_unlock(&syncobj->lock);
>>>>> +
>>>>> +    /* Walk the chain once to trigger garbage collection */
>>>>> +    dma_fence_chain_for_each(prev, fence); }
>>>>> +EXPORT_SYMBOL(drm_syncobj_add_point);
>>>>> +
>>>>>    /**
>>>>>     * drm_syncobj_replace_fence - replace fence in a sync object.
>>>>>     * @syncobj: Sync object to replace fence in diff --git
>>>>> a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index
>>>>> 7c6ed845c70d..8acb4ae4f311 100644
>>>>> --- a/include/drm/drm_syncobj.h
>>>>> +++ b/include/drm/drm_syncobj.h
>>>>> @@ -27,6 +27,7 @@
>>>>>    #define __DRM_SYNCOBJ_H__
>>>>>
>>>>>    #include "linux/dma-fence.h"
>>>>> +#include "linux/dma-fence-chain.h"
>>>>>
>>>>>    /**
>>>>>     * struct drm_syncobj - sync object.
>>>>> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
>>>>>
>>>>>    struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
>>>>>                                    u32 handle);
>>>>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
>>>>> +                       struct dma_fence_chain *chain,
>>>>> +                       struct dma_fence *fence,
>>>>> +                       uint64_t point);
>>>>>    void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
>>>>>                              struct dma_fence *fence);
>>>>>    int drm_syncobj_find_fence(struct drm_file *file_private,
>>>>> --
>>>>> 2.17.1
>>>>>
>>>>> _______________________________________________
>>>>> Intel-gfx mailing list
>>>>> Intel-gfx@lists.freedesktop.org
>>>>> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
>>
>> --
>> Daniel Vetter
>> Software Engineer, Intel Corporation
>> +41 (0) 79 365 57 48 - http://blog.ffwll.ch

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
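As an illustration of how a driver would consume the interface above, here is
a minimal sketch of attaching a submission fence as a timeline point. The
wrapper function and its error handling are hypothetical; only
drm_syncobj_find(), drm_syncobj_put() and drm_syncobj_add_point() are
interfaces from this series and existing DRM code:

#include <drm/drm_syncobj.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>

/* Hypothetical helper, not part of the series: attach @job_fence as
 * timeline point @point on the syncobj behind @handle. */
static int example_attach_timeline_point(struct drm_file *file_private,
					 u32 handle, u64 point,
					 struct dma_fence *job_fence)
{
	struct drm_syncobj *syncobj;
	struct dma_fence_chain *chain;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj)
		return -ENOENT;

	/* The chain node is preallocated by the caller because
	 * drm_syncobj_add_point() itself cannot fail. */
	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain) {
		drm_syncobj_put(syncobj);
		return -ENOMEM;
	}

	/* Takes its own reference on @job_fence and consumes @chain. */
	drm_syncobj_add_point(syncobj, chain, job_fence, point);

	drm_syncobj_put(syncobj);
	return 0;
}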

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
  2018-12-12 12:00                   ` Koenig, Christian
@ 2018-12-12 12:20                     ` Daniel Vetter
  0 siblings, 0 replies; 18+ messages in thread
From: Daniel Vetter @ 2018-12-12 12:20 UTC (permalink / raw)
  To: Christian König
  Cc: Chunming Zhou, Christian König, intel-gfx, amd-gfx list,
	Rakos, Daniel, dri-devel

On Wed, Dec 12, 2018 at 1:00 PM Koenig, Christian
<Christian.Koenig@amd.com> wrote:
>
> > Key point is that our Vulkan guys came back and said that this
> > wouldn't be sufficient, but I honestly don't fully understand why.
> > Hm, sounds like we really need those test cases (vk cts on top of mesa, igt)
> > so we can talk about the exact corner cases we care about and why.
> Yes, that's why I made it mandatory that David provides an igt test case
> along with the ones in libdrm.
>
> > I guess one thing that might happen is that userspace leaves out a number
> > and never sets that fence, relying on the >= semantics of the monitored
> > fence to unblock that thread. E.g. when skipping a frame in one of the
> > auxiliary workloads. For that case we'd need to make sure we don't just wait
> > for the given fence to materialize, but also any fences later in the timeline.
> Correct and that's also how we have implemented it.
>
> > But we can't decide that without understanding the actual use-case that
> > needs to be supported at the other end of the stack, and how all the bits in
> > between should look.
> >
> > I guess we're back to "uapi design without userspace doesn't make sense" ...
> Yeah, well, a chicken-and-egg problem. Amdvlk probably won't make the code
> to support this public until the kernel has accepted it and the kernel
> doesn't accept it until the amdvlk patches are public.

That's not how we do uapi development.

> David, can you take care of this and release the userspace patches as well?

Please also drag the people typing the code onto the mailing list, not
just the code. Code alone doesn't make for a useful discussion :-)

Also, someone needs to drag the radv/anv side into this, because I
also don't want to mix up the technical issues here with the entire
"is amdvlk good enough for uapi development" question ... (as-is it
seems to fall short on the technicality of simply not doing development
openly, but I hope that's fixable).

> In addition to that, except for a bit of polishing, the UAPI has stayed the
> same from the very beginning while being reviewed multiple times now. So it
> seems to be rather sane.
>
> > That seems against the spirit of vulkan, which is very much about "you get all
> > the pieces". It also might dig us a hole in the future, if we ever get around to
> > moving towards a WDDM2 style memory management model. For
> > future-proofing I think it would make sense if we implement the minimal uapi we
> > need for vk timelines, not the strictest guarantees we can get away with
> > (without performance impact) with current drivers.
> Well, I'm repeating myself, but while this seems to be a good idea for a
> userspace API it is not necessary for a kernel API.
>
> In other words userspace can make all the mess it wants as long as it
> stays inside the same process, but when it starts to mess with
> inter-process communication (e.g. X or Wayland) the stuff should be
> waterproof and not allow the mess to leak between processes.
>
> And what we can always do is make the restriction looser, but
> tightening it once userspace already depends on a behavior is not
> possible any more.

The point of vk timelines seems to be that userspace wants the mess,
even across processes. E.g. for VR compositors, which run in some
other address space. If you don't want to leak the mess, don't use vk
timelines, use normal fences. Which is what all the X/wayland
protocols seem to be doing. So the mess is strictly opt-in for
userspace, but seems entirely desired.
-Daniel

>
> Regards,
> Christian.
>
> On 12.12.18 at 12:39, Zhou, David(ChunMing) wrote:
> > + Daniel Rakos and Jason Ekstrand.
> >
> >   Below is the background from Daniel R, which should be able to explain why:
> > " ISVs, especially those coming from D3D12, are unsatisfied with the behavior of the Vulkan semaphores as they are unhappy with the fact that for every single dependency they need to use separate semaphores due to their binary nature.
> > Compared to that a synchronization primitive like D3D12 monitored fences enable one of those to be used to track a sequence of operations by simply associating timeline values to the completion of individual operations. This allows them to track the lifetime and usage of resources and the ordered completion of sequences.
> > Besides that, they also want to use a single synchronization primitive to be able to handle GPU-to-GPU and GPU-to-CPU dependencies, compared to using semaphores for the former and fences for the latter.
> > In addition, compared to legacy semaphores, timeline semaphores are proposed to support wait-before-signal, i.e. allow enqueueing a semaphore wait operation with a wait value that is larger than any of the already enqueued signal values. This seems to be a hard requirement for ISVs, one that cannot be met without UMD-side queue batching, and even UMD-side queue batching doesn’t help the situation when such a semaphore is externally shared with another API. Thus, in order to properly support wait-before-signal, the KMD implementation has to also be able to support such dependencies.
> > "
> >
> > Btw, we have already added a test case to igt, and this has been tested by many existing tests, like the libdrm unit tests, the related igt tests, the Vulkan CTS, and Steam games.
> >
> > -David
> >> -----Original Message-----
> >> From: Daniel Vetter <daniel@ffwll.ch>
> >> Sent: Wednesday, December 12, 2018 7:15 PM
> >> To: Koenig, Christian <Christian.Koenig@amd.com>
> >> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; dri-devel <dri-
> >> devel@lists.freedesktop.org>; amd-gfx list <amd-gfx@lists.freedesktop.org>;
> >> intel-gfx <intel-gfx@lists.freedesktop.org>; Christian König
> >> <ckoenig.leichtzumerken@gmail.com>
> >> Subject: Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new
> >> drm_syncobj_add_point interface v2
> >>
> >> On Wed, Dec 12, 2018 at 12:08 PM Koenig, Christian
> >> <Christian.Koenig@amd.com> wrote:
> >>> On 12.12.18 at 11:49, Daniel Vetter wrote:
> >>>> On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
> >>>>> From: Christian König <ckoenig.leichtzumerken@gmail.com>
> >>>>>
> >>>>> Use the dma_fence_chain object to create a timeline of fence
> >>>>> objects instead of just replacing the existing fence.
> >>>>>
> >>>>> v2: rebase and cleanup
> >>>>>
> >>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
> >>>> Somewhat jumping back into this. Not sure whether we discussed this
> >>>> already or not. I'm a bit unclear on why we have to chain the fences
> >>>> in the timeline:
> >>>> - The timeline stuff is modelled after the WDDM2 monitored fences,
> >>>>     which really are just u64 counters in memory somewhere (I think it
> >>>>     could be system ram or vram). Because WDDM2 has the memory
> >>>>     management entirely separated from rendering synchronization it
> >>>>     totally allows userspace to create loops and deadlocks and
> >>>>     everything else nasty using this - the memory manager won't
> >>>>     deadlock because these monitored fences never leak into the buffer
> >>>>     manager. And if CSes deadlock, gpu reset takes care of the mess.
> >>>>
> >>>> - This has a few consequences, as in they seem to indeed work like a
> >>>>     memory location: Userspace incrementing out-of-order (because they
> >>>>     run batches updating the same fence on different engines) is
> >>>>     totally fine, as is doing anything else "stupid".
> >>>>
> >>>> - Now on linux we can't allow just anything, because we need to make
> >>>>     sure that deadlocks don't leak into the memory manager. But as long
> >>>>     as we block until the underlying dma_fence has materialized,
> >>>>     nothing userspace can do will lead to such a deadlock. Even if
> >>>>     userspace ends up submitting jobs without enough built-in
> >>>>     synchronization, leading to out-of-order signalling of fences on
> >>>>     that "timeline". And I don't think that would pose a problem for
> >>>>     us.
> >>>>
> >>>> Essentially I think we can look at timeline syncobj as a dma_fence
> >>>> container indexed through an integer, and there's no need to enforce
> >>>> that the timeline works like a real dma_fence timeline, with all its
> >>>> guarantees. It's just a pile of (possibly, if userspace is stupid)
> >>>> unrelated dma_fences. You could implement the entire thing in
> >>>> userspace after all, except for the "we want to share these timeline
> >>>> objects between processes" problem.
> >>>>
> >>>> tldr; I think we can drop the dma_fence_chain complexity completely.
> >>>> Or at least I'm not really understanding why it's needed.
> >>>>
> >>>> Of course that means drivers cannot treat a drm_syncobj timeline as
> >>>> a dma_fence timeline. But given the future fences stuff and all
> >>>> that, that's already out of the window anyway.
> >>>>
> >>>> What am I missing?
> >>> Good question, since that was exactly my initial idea as well.
> >>>
> >>> Key point is that our Vulkan guys came back and said that this
> >>> wouldn't be sufficient, but I honestly don't fully understand why.
> >> Hm, sounds like we really need those test cases (vk cts on top of mesa, igt)
> >> so we can talk about the exact corner cases we care about and why.
> >>
> >> I guess one thing that might happen is that userspace leaves out a number
> >> and never sets that fence, relying on the >= semantics of the monitored
> >> fence to unblock that thread. E.g. when skipping a frame in one of the
> >> auxiliary workloads. For that case we'd need to make sure we don't just wait
> >> for the given fence to materialize, but also any fences later in the timeline.
> >>
> >> But we can't decide that without understanding the actual use-case that
> >> needs to be supported at the other end of the stack, and how all the bits in
> >> between should look.
> >>
> >> I guess we're back to "uapi design without userspace doesn't make sense" ...
> >>
> >>> Anyway that's why David came up with using the fence array to wait for
> >>> all previously added fences, which I then later on extended into this
> >>> chain container.
> >>>
> >>> I have to admit that it is implemented much more defensively this way. E.g.
> >>> there are far fewer things userspace can do wrong.
> >>>
> >>> The principal idea is that when they mess things up they are always
> >>> going to wait more than necessary, but never less.
> >> That seems against the spirit of vulkan, which is very much about "you get all
> >> the pieces". It also might dig us a hole in the future, if we ever get around to
> >> moving towards a WDDM2 style memory management model. For
> >> future-proofing I think it would make sense if we implement the minimal uapi we
> >> need for vk timelines, not the strictest guarantees we can get away with
> >> (without performance impact) with current drivers.
> >> -Daniel
> >>
> >>
> >>> Christian.
> >>>
> >>>> -Daniel
> >>>>
> >>>>> ---
> >>>>>    drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++
> >>>>>    include/drm/drm_syncobj.h     |  5 +++++
> >>>>>    2 files changed, 42 insertions(+)
> >>>>>
> >>>>> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
> >>>>> index e19525af0cce..51f798e2194f 100644
> >>>>> --- a/drivers/gpu/drm/drm_syncobj.c
> >>>>> +++ b/drivers/gpu/drm/drm_syncobj.c
> >>>>> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
> >>>>>       spin_unlock(&syncobj->lock);
> >>>>>    }
> >>>>>
> >>>>> +/**
> >>>>> + * drm_syncobj_add_point - add new timeline point to the syncobj
> >>>>> + * @syncobj: sync object to add the timeline point to
> >>>>> + * @chain: chain node to use to add the point
> >>>>> + * @fence: fence to encapsulate in the chain node
> >>>>> + * @point: sequence number to use for the point
> >>>>> + *
> >>>>> + * Add the chain node as a new timeline point to the syncobj.
> >>>>> + */
> >>>>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> >>>>> +                       struct dma_fence_chain *chain,
> >>>>> +                       struct dma_fence *fence,
> >>>>> +                       uint64_t point)
> >>>>> +{
> >>>>> +    struct syncobj_wait_entry *cur, *tmp;
> >>>>> +    struct dma_fence *prev;
> >>>>> +
> >>>>> +    dma_fence_get(fence);
> >>>>> +
> >>>>> +    spin_lock(&syncobj->lock);
> >>>>> +
> >>>>> +    prev = rcu_dereference_protected(syncobj->fence,
> >>>>> +                                     lockdep_is_held(&syncobj->lock));
> >>>>> +    dma_fence_chain_init(chain, prev, fence, point);
> >>>>> +    rcu_assign_pointer(syncobj->fence, &chain->base);
> >>>>> +
> >>>>> +    list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
> >>>>> +            list_del_init(&cur->node);
> >>>>> +            syncobj_wait_syncobj_func(syncobj, cur);
> >>>>> +    }
> >>>>> +    spin_unlock(&syncobj->lock);
> >>>>> +
> >>>>> +    /* Walk the chain once to trigger garbage collection */
> >>>>> +    dma_fence_chain_for_each(prev, fence);
> >>>>> +}
> >>>>> +EXPORT_SYMBOL(drm_syncobj_add_point);
> >>>>> +
> >>>>>    /**
> >>>>>     * drm_syncobj_replace_fence - replace fence in a sync object.
> >>>>>     * @syncobj: Sync object to replace fence in
> >>>>> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
> >>>>> index 7c6ed845c70d..8acb4ae4f311 100644
> >>>>> --- a/include/drm/drm_syncobj.h
> >>>>> +++ b/include/drm/drm_syncobj.h
> >>>>> @@ -27,6 +27,7 @@
> >>>>>    #define __DRM_SYNCOBJ_H__
> >>>>>
> >>>>>    #include "linux/dma-fence.h"
> >>>>> +#include "linux/dma-fence-chain.h"
> >>>>>
> >>>>>    /**
> >>>>>     * struct drm_syncobj - sync object.
> >>>>> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
> >>>>>
> >>>>>    struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
> >>>>>                                    u32 handle);
> >>>>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> >>>>> +                       struct dma_fence_chain *chain,
> >>>>> +                       struct dma_fence *fence,
> >>>>> +                       uint64_t point);
> >>>>>    void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
> >>>>>                              struct dma_fence *fence);
> >>>>>    int drm_syncobj_find_fence(struct drm_file *file_private,
> >>>>> --
> >>>>> 2.17.1
> >>>>>
> >>>>> _______________________________________________
> >>>>> Intel-gfx mailing list
> >>>>> Intel-gfx@lists.freedesktop.org
> >>>>> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> >>
> >> --
> >> Daniel Vetter
> >> Software Engineer, Intel Corporation
> >> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
>


-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
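The WDDM2 monitored-fence model that Daniel describes in the quoted bullet
points boils down to a shared, monotonically increasing 64-bit counter
compared with >=. A hedged illustration of just the concept, in portable C
rather than actual WDDM2 or kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Conceptually, a monitored fence is nothing more than this: a counter
 * in memory shared between engines and the CPU that only ever grows. */
struct monitored_fence {
	_Atomic uint64_t value;
};

/* An engine publishes the new payload when its work completes. */
static void fence_signal(struct monitored_fence *f, uint64_t point)
{
	atomic_store_explicit(&f->value, point, memory_order_release);
}

/* Waiters check for value >= point, so signaling a later point
 * implicitly satisfies every earlier wait - the property the syncobj
 * timeline emulates with the dma_fence_chain container. */
static bool fence_is_signaled(struct monitored_fence *f, uint64_t point)
{
	return atomic_load_explicit(&f->value, memory_order_acquire) >= point;
}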

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
  2018-12-12 11:39               ` Zhou, David(ChunMing)
       [not found]                 ` <BY1PR12MB0502201CA782F5F2DBB661B8B4A70-PicGAnIBOobrCwm+z9iKNgdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-12-12 12:24                 ` Daniel Vetter
  2018-12-12 13:06                   ` [Intel-gfx] " Chunming Zhou
  1 sibling, 1 reply; 18+ messages in thread
From: Daniel Vetter @ 2018-12-12 12:24 UTC (permalink / raw)
  To: Chunming Zhou
  Cc: Christian König, intel-gfx, dri-devel, Rakos, Daniel,
	amd-gfx list, Christian König

On Wed, Dec 12, 2018 at 12:40 PM Zhou, David(ChunMing)
<David1.Zhou@amd.com> wrote:
>
> + Daniel Rakos and Jason Ekstrand.
>
>  Below is the background from Daniel R, which should be able to explain why:
> " ISVs, especially those coming from D3D12, are unsatisfied with the behavior of the Vulkan semaphores as they are unhappy with the fact that for every single dependency they need to use separate semaphores due to their binary nature.
> Compared to that a synchronization primitive like D3D12 monitored fences enable one of those to be used to track a sequence of operations by simply associating timeline values to the completion of individual operations. This allows them to track the lifetime and usage of resources and the ordered completion of sequences.
> Besides that, they also want to use a single synchronization primitive to be able to handle GPU-to-GPU and GPU-to-CPU dependencies, compared to using semaphores for the former and fences for the latter.
> In addition, compared to legacy semaphores, timeline semaphores are proposed to support wait-before-signal, i.e. allow enqueueing a semaphore wait operation with a wait value that is larger than any of the already enqueued signal values. This seems to be a hard requirement for ISVs, one that cannot be met without UMD-side queue batching, and even UMD-side queue batching doesn’t help the situation when such a semaphore is externally shared with another API. Thus, in order to properly support wait-before-signal, the KMD implementation has to also be able to support such dependencies.
> "

I was tangentially involved in that wg too; I understand the overall
use-case of vk timelines. I don't understand the exact corner case
here, because I wasn't deeply involved in the details.
-Daniel

> Btw, we have already added a test case to igt, and this has been tested by many existing tests, like the libdrm unit tests, the related igt tests, the Vulkan CTS, and Steam games.
>
> -David
> > -----Original Message-----
> > From: Daniel Vetter <daniel@ffwll.ch>
> > Sent: Wednesday, December 12, 2018 7:15 PM
> > To: Koenig, Christian <Christian.Koenig@amd.com>
> > Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; dri-devel <dri-
> > devel@lists.freedesktop.org>; amd-gfx list <amd-gfx@lists.freedesktop.org>;
> > intel-gfx <intel-gfx@lists.freedesktop.org>; Christian König
> > <ckoenig.leichtzumerken@gmail.com>
> > Subject: Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new
> > drm_syncobj_add_point interface v2
> >
> > On Wed, Dec 12, 2018 at 12:08 PM Koenig, Christian
> > <Christian.Koenig@amd.com> wrote:
> > >
> > > On 12.12.18 at 11:49, Daniel Vetter wrote:
> > > > On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
> > > >> From: Christian König <ckoenig.leichtzumerken@gmail.com>
> > > >>
> > > >> Use the dma_fence_chain object to create a timeline of fence
> > > >> objects instead of just replacing the existing fence.
> > > >>
> > > >> v2: rebase and cleanup
> > > >>
> > > >> Signed-off-by: Christian König <christian.koenig@amd.com>
> > > > Somewhat jumping back into this. Not sure whether we discussed this
> > > > already or not. I'm a bit unclear on why we have to chain the fences
> > > > in the timeline:
> > > >
> > > > - The timeline stuff is modelled after the WDDM2 monitored fences,
> > > >    which really are just u64 counters in memory somewhere (I think it
> > > >    could be system ram or vram). Because WDDM2 has the memory
> > > >    management entirely separated from rendering synchronization it
> > > >    totally allows userspace to create loops and deadlocks and
> > > >    everything else nasty using this - the memory manager won't
> > > >    deadlock because these monitored fences never leak into the buffer
> > > >    manager. And if CSes deadlock, gpu reset takes care of the mess.
> > > >
> > > > - This has a few consequences, as in they seem to indeed work like a
> > > >    memory location: Userspace incrementing out-of-order (because they
> > > >    run batches updating the same fence on different engines) is
> > > >    totally fine, as is doing anything else "stupid".
> > > >
> > > > - Now on linux we can't allow just anything, because we need to make
> > > >    sure that deadlocks don't leak into the memory manager. But as long
> > > >    as we block until the underlying dma_fence has materialized,
> > > >    nothing userspace can do will lead to such a deadlock. Even if
> > > >    userspace ends up submitting jobs without enough built-in
> > > >    synchronization, leading to out-of-order signalling of fences on
> > > >    that "timeline". And I don't think that would pose a problem for
> > > >    us.
> > > >
> > > > Essentially I think we can look at timeline syncobj as a dma_fence
> > > > container indexed through an integer, and there's no need to enforce
> > > > that the timeline works like a real dma_fence timeline, with all its
> > > > guarantees. It's just a pile of (possibly, if userspace is stupid)
> > > > unrelated dma_fences. You could implement the entire thing in
> > > > userspace after all, except for the "we want to share these timeline
> > > > objects between processes" problem.
> > > >
> > > > tldr; I think we can drop the dma_fence_chain complexity completely.
> > > > Or at least I'm not really understanding why it's needed.
> > > >
> > > > Of course that means drivers cannot treat a drm_syncobj timeline as
> > > > a dma_fence timeline. But given the future fences stuff and all
> > > > that, that's already out of the window anyway.
> > > >
> > > > What am I missing?
> > >
> > > Good question, since that was exactly my initial idea as well.
> > >
> > > Key point is that our Vulkan guys came back and said that this
> > > wouldn't be sufficient, but I honestly don't fully understand why.
> >
> > Hm, sounds like we really need those test cases (vk cts on top of mesa, igt)
> > so we can talk about the exact corner cases we care about and why.
> >
> > I guess one thing that might happen is that userspace leaves out a number
> > and never sets that fence, relying on the >= semantics of the monitored
> > fence to unblock that thread. E.g. when skipping a frame in one of the
> > auxiliary workloads. For that case we'd need to make sure we don't just wait
> > for the given fence to materialize, but also any fences later in the timeline.
> >
> > But we can't decide that without understanding the actual use-case that
> > needs to be supported at the other end of the stack, and how all the bits in
> > between should look.
> >
> > I guess we're back to "uapi design without userspace doesn't make sense" ...
> >
> > > Anyway that's why David came up with using the fence array to wait for
> > > all previously added fences, which I then later on extended into this
> > > chain container.
> > >
> > > I have to admit that it is implemented much more defensively this way. E.g.
> > > there are far fewer things userspace can do wrong.
> > >
> > > The principal idea is that when they mess things up they are always
> > > going to wait more than necessary, but never less.
> >
> > That seems against the spirit of vulkan, which is very much about "you get all
> > the pieces". It also might dig us a hole in the future, if we ever get around to
> > moving towards a WDDM2 style memory management model. For
> > future-proofing I think it would make sense if we implement the minimal uapi we
> > need for vk timelines, not the strictest guarantees we can get away with
> > (without performance impact) with current drivers.
> > -Daniel
> >
> >
> > > Christian.
> > >
> > > > -Daniel
> > > >
> > > >> ---
> > > >>   drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++
> > > >>   include/drm/drm_syncobj.h     |  5 +++++
> > > >>   2 files changed, 42 insertions(+)
> > > >>
> > > >> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
> > > >> index e19525af0cce..51f798e2194f 100644
> > > >> --- a/drivers/gpu/drm/drm_syncobj.c
> > > >> +++ b/drivers/gpu/drm/drm_syncobj.c
> > > >> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
> > > >>      spin_unlock(&syncobj->lock);
> > > >>   }
> > > >>
> > > >> +/**
> > > >> + * drm_syncobj_add_point - add new timeline point to the syncobj
> > > >> + * @syncobj: sync object to add the timeline point to
> > > >> + * @chain: chain node to use to add the point
> > > >> + * @fence: fence to encapsulate in the chain node
> > > >> + * @point: sequence number to use for the point
> > > >> + *
> > > >> + * Add the chain node as a new timeline point to the syncobj.
> > > >> + */
> > > >> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> > > >> +                       struct dma_fence_chain *chain,
> > > >> +                       struct dma_fence *fence,
> > > >> +                       uint64_t point)
> > > >> +{
> > > >> +    struct syncobj_wait_entry *cur, *tmp;
> > > >> +    struct dma_fence *prev;
> > > >> +
> > > >> +    dma_fence_get(fence);
> > > >> +
> > > >> +    spin_lock(&syncobj->lock);
> > > >> +
> > > >> +    prev = rcu_dereference_protected(syncobj->fence,
> > > >> +                                     lockdep_is_held(&syncobj->lock));
> > > >> +    dma_fence_chain_init(chain, prev, fence, point);
> > > >> +    rcu_assign_pointer(syncobj->fence, &chain->base);
> > > >> +
> > > >> +    list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
> > > >> +            list_del_init(&cur->node);
> > > >> +            syncobj_wait_syncobj_func(syncobj, cur);
> > > >> +    }
> > > >> +    spin_unlock(&syncobj->lock);
> > > >> +
> > > >> +    /* Walk the chain once to trigger garbage collection */
> > > >> +    dma_fence_chain_for_each(prev, fence);
> > > >> +}
> > > >> +EXPORT_SYMBOL(drm_syncobj_add_point);
> > > >> +
> > > >>   /**
> > > >>    * drm_syncobj_replace_fence - replace fence in a sync object.
> > > >>    * @syncobj: Sync object to replace fence in
> > > >> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
> > > >> index 7c6ed845c70d..8acb4ae4f311 100644
> > > >> --- a/include/drm/drm_syncobj.h
> > > >> +++ b/include/drm/drm_syncobj.h
> > > >> @@ -27,6 +27,7 @@
> > > >>   #define __DRM_SYNCOBJ_H__
> > > >>
> > > >>   #include "linux/dma-fence.h"
> > > >> +#include "linux/dma-fence-chain.h"
> > > >>
> > > >>   /**
> > > >>    * struct drm_syncobj - sync object.
> > > >> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
> > > >>
> > > >>   struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
> > > >>                                   u32 handle);
> > > >> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
> > > >> +                       struct dma_fence_chain *chain,
> > > >> +                       struct dma_fence *fence,
> > > >> +                       uint64_t point);
> > > >>   void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
> > > >>                             struct dma_fence *fence);
> > > >>   int drm_syncobj_find_fence(struct drm_file *file_private,
> > > >> --
> > > >> 2.17.1
> > > >>
> > > >> _______________________________________________
> > > >> Intel-gfx mailing list
> > > >> Intel-gfx@lists.freedesktop.org
> > > >> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> > >
> >
> >
> > --
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > +41 (0) 79 365 57 48 - http://blog.ffwll.ch



-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
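The "wait more than necessary, but never less" guarantee discussed above can
be made concrete with a sketch of how a waiter resolves a timeline point
against the chain, using dma_fence_chain_find_seqno() from patch 1 of this
series. The helper itself is hypothetical and the error handling is
simplified:

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

/* Hypothetical helper: block until timeline point @point has signalled.
 * dma_fence_chain_find_seqno() advances @fence to a node covering
 * @point; because every chain node's seqno is at least that of its
 * predecessors, messy userspace can make a waiter over-wait here,
 * but never under-wait. */
static int example_wait_point(struct dma_fence *timeline_head, u64 point)
{
	struct dma_fence *fence = dma_fence_get(timeline_head);
	long ret;

	ret = dma_fence_chain_find_seqno(&fence, point);
	if (!ret && fence)
		ret = dma_fence_wait(fence, true);

	dma_fence_put(fence);
	return ret < 0 ? ret : 0;
}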

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2
  2018-12-12 12:24                 ` Daniel Vetter
@ 2018-12-12 13:06                   ` Chunming Zhou
  0 siblings, 0 replies; 18+ messages in thread
From: Chunming Zhou @ 2018-12-12 13:06 UTC (permalink / raw)
  To: Daniel Vetter, Zhou, David(ChunMing)
  Cc: Christian König, intel-gfx, dri-devel, Rakos, Daniel,
	amd-gfx list, Jason Ekstrand, Koenig, Christian


On 2018/12/12 20:24, Daniel Vetter wrote:
> On Wed, Dec 12, 2018 at 12:40 PM Zhou, David(ChunMing)
> <David1.Zhou@amd.com> wrote:
>> + Daniel Rakos and Jason Ekstrand.
>>
>>   Below is the background from Daniel R, which should be able to explain why:
>> " ISVs, especially those coming from D3D12, are unsatisfied with the behavior of the Vulkan semaphores as they are unhappy with the fact that for every single dependency they need to use separate semaphores due to their binary nature.
>> Compared to that a synchronization primitive like D3D12 monitored fences enable one of those to be used to track a sequence of operations by simply associating timeline values to the completion of individual operations. This allows them to track the lifetime and usage of resources and the ordered completion of sequences.
>> Besides that, they also want to use a single synchronization primitive to be able to handle GPU-to-GPU and GPU-to-CPU dependencies, compared to using semaphores for the former and fences for the latter.
>> In addition, compared to legacy semaphores, timeline semaphores are proposed to support wait-before-signal, i.e. allow enqueueing a semaphore wait operation with a wait value that is larger than any of the already enqueued signal values. This seems to be a hard requirement for ISVs, one that cannot be met without UMD-side queue batching, and even UMD-side queue batching doesn’t help the situation when such a semaphore is externally shared with another API. Thus, in order to properly support wait-before-signal, the KMD implementation has to also be able to support such dependencies.
>> "
> I was tangentially involved in that wg too; I understand the overall
> use-case of vk timelines. I don't understand the exact corner case
> here, because I wasn't deeply involved in the details.


All the details are here:
https://gitlab.khronos.org/vulkan/vulkan/merge_requests/2696

-David

> -Daniel
>
>> Btw, we have already added a test case to igt, and this has been tested by many existing tests, like the libdrm unit tests, the related igt tests, the Vulkan CTS, and Steam games.
>>
>> -David
>>> -----Original Message-----
>>> From: Daniel Vetter <daniel@ffwll.ch>
>>> Sent: Wednesday, December 12, 2018 7:15 PM
>>> To: Koenig, Christian <Christian.Koenig@amd.com>
>>> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; dri-devel <dri-
>>> devel@lists.freedesktop.org>; amd-gfx list <amd-gfx@lists.freedesktop.org>;
>>> intel-gfx <intel-gfx@lists.freedesktop.org>; Christian König
>>> <ckoenig.leichtzumerken@gmail.com>
>>> Subject: Re: [Intel-gfx] [PATCH 03/10] drm/syncobj: add new
>>> drm_syncobj_add_point interface v2
>>>
>>> On Wed, Dec 12, 2018 at 12:08 PM Koenig, Christian
>>> <Christian.Koenig@amd.com> wrote:
>>>> On 12.12.18 at 11:49, Daniel Vetter wrote:
>>>>> On Fri, Dec 07, 2018 at 11:54:15PM +0800, Chunming Zhou wrote:
>>>>>> From: Christian König <ckoenig.leichtzumerken@gmail.com>
>>>>>>
>>>>>> Use the dma_fence_chain object to create a timeline of fence
>>>>>> objects instead of just replacing the existing fence.
>>>>>>
>>>>>> v2: rebase and cleanup
>>>>>>
>>>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>>> Somewhat jumping back into this. Not sure whether we discussed this
>>>>> already or not. I'm a bit unclear on why we have to chain the fences
>>>>> in the timeline:
>>>>> - The timeline stuff is modelled after the WDDM2 monitored fences,
>>>>>     which really are just u64 counters in memory somewhere (I think it
>>>>>     could be system ram or vram). Because WDDM2 has the memory
>>>>>     management entirely separated from rendering synchronization it
>>>>>     totally allows userspace to create loops and deadlocks and
>>>>>     everything else nasty using this - the memory manager won't
>>>>>     deadlock because these monitored fences never leak into the buffer
>>>>>     manager. And if CSes deadlock, gpu reset takes care of the mess.
>>>>>
>>>>> - This has a few consequences, as in they seem to indeed work like a
>>>>>     memory location: Userspace incrementing out-of-order (because they
>>>>>     run batches updating the same fence on different engines) is
>>>>>     totally fine, as is doing anything else "stupid".
>>>>>
>>>>> - Now on linux we can't allow just anything, because we need to make
>>>>>     sure that deadlocks don't leak into the memory manager. But as long
>>>>>     as we block until the underlying dma_fence has materialized,
>>>>>     nothing userspace can do will lead to such a deadlock. Even if
>>>>>     userspace ends up submitting jobs without enough built-in
>>>>>     synchronization, leading to out-of-order signalling of fences on
>>>>>     that "timeline". And I don't think that would pose a problem for
>>>>>     us.
>>>>>
>>>>> Essentially I think we can look at timeline syncobj as a dma_fence
>>>>> container indexed through an integer, and there's no need to enforce
>>>>> that the timeline works like a real dma_fence timeline, with all its
>>>>> guarantees. It's just a pile of (possibly, if userspace is stupid)
>>>>> unrelated dma_fences. You could implement the entire thing in
>>>>> userspace after all, except for the "we want to share these timeline
>>>>> objects between processes" problem.
>>>>>
>>>>> tldr; I think we can drop the dma_fence_chain complexity completely.
>>>>> Or at least I'm not really understanding why it's needed.
>>>>>
>>>>> Of course that means drivers cannot treat a drm_syncobj timeline as
>>>>> a dma_fence timeline. But given the future fences stuff and all
>>>>> that, that's already out of the window anyway.
>>>>>
>>>>> What am I missing?
>>>> Good question, since that was exactly my initial idea as well.
>>>>
>>>> Key point is that our Vulkan guys came back and said that this
>>>> wouldn't be sufficient, but I honestly don't fully understand why.
>>> Hm, sounds like we really need those test cases (vk cts on top of mesa, igt)
>>> so we can talk about the exact corner cases we care about and why.
>>>
>>> I guess one thing that might happen is that userspace leaves out a number
>>> and never sets that fence, relying on the >= semantics of the monitored
>>> fence to unblock that thread. E.g. when skipping a frame in one of the
>>> auxiliary workloads. For that case we'd need to make sure we don't just wait
>>> for the given fence to materialize, but also any fences later in the timeline.
>>>
>>> But we can't decide that without understanding the actual use-case that
>>> needs to be supported at the other end of the stack, and how all the bits in
>>> between should look.
>>>
>>> I guess we're back to "uapi design without userspace doesn't make sense" ...
>>>
>>>> Anyway that's why David came up with using the fence array to wait for
>>>> all previously added fences, which I then later on extended into this
>>>> chain container.
>>>>
>>>> I have to admit that it is implemented much more defensively this way. E.g.
>>>> there are far fewer things userspace can do wrong.
>>>>
>>>> The principal idea is that when they mess things up they are always
>>>> going to wait more than necessary, but never less.
>>> That seems against the spirit of vulkan, which is very much about "you get all
>>> the pieces". It also might dig us a hole in the future, if we ever get around to
>>> moving towards a WDDM2 style memory management model. For
>>> future-proofing I think it would make sense if we implement the minimal uapi we
>>> need for vk timelines, not the strictest guarantees we can get away with
>>> (without performance impact) with current drivers.
>>> -Daniel
>>>
>>>
>>>> Christian.
>>>>
>>>>> -Daniel
>>>>>
>>>>>> ---
>>>>>>    drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++
>>>>>>    include/drm/drm_syncobj.h     |  5 +++++
>>>>>>    2 files changed, 42 insertions(+)
>>>>>>
>>>>>> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
>>>>>> index e19525af0cce..51f798e2194f 100644
>>>>>> --- a/drivers/gpu/drm/drm_syncobj.c
>>>>>> +++ b/drivers/gpu/drm/drm_syncobj.c
>>>>>> @@ -122,6 +122,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
>>>>>>       spin_unlock(&syncobj->lock);
>>>>>>    }
>>>>>>
>>>>>> +/**
>>>>>> + * drm_syncobj_add_point - add new timeline point to the syncobj
>>>>>> + * @syncobj: sync object to add the timeline point to
>>>>>> + * @chain: chain node to use to add the point
>>>>>> + * @fence: fence to encapsulate in the chain node
>>>>>> + * @point: sequence number to use for the point
>>>>>> + *
>>>>>> + * Add the chain node as a new timeline point to the syncobj.
>>>>>> + */
>>>>>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
>>>>>> +                       struct dma_fence_chain *chain,
>>>>>> +                       struct dma_fence *fence,
>>>>>> +                       uint64_t point)
>>>>>> +{
>>>>>> +    struct syncobj_wait_entry *cur, *tmp;
>>>>>> +    struct dma_fence *prev;
>>>>>> +
>>>>>> +    dma_fence_get(fence);
>>>>>> +
>>>>>> +    spin_lock(&syncobj->lock);
>>>>>> +
>>>>>> +    prev = rcu_dereference_protected(syncobj->fence,
>>>>>> +                                     lockdep_is_held(&syncobj->lock));
>>>>>> +    dma_fence_chain_init(chain, prev, fence, point);
>>>>>> +    rcu_assign_pointer(syncobj->fence, &chain->base);
>>>>>> +
>>>>>> +    list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
>>>>>> +            list_del_init(&cur->node);
>>>>>> +            syncobj_wait_syncobj_func(syncobj, cur);
>>>>>> +    }
>>>>>> +    spin_unlock(&syncobj->lock);
>>>>>> +
>>>>>> +    /* Walk the chain once to trigger garbage collection */
>>>>>> +    dma_fence_chain_for_each(prev, fence);
>>>>>> +}
>>>>>> +EXPORT_SYMBOL(drm_syncobj_add_point);
>>>>>> +
>>>>>>    /**
>>>>>>     * drm_syncobj_replace_fence - replace fence in a sync object.
>>>>>>     * @syncobj: Sync object to replace fence in
>>>>>> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
>>>>>> index 7c6ed845c70d..8acb4ae4f311 100644
>>>>>> --- a/include/drm/drm_syncobj.h
>>>>>> +++ b/include/drm/drm_syncobj.h
>>>>>> @@ -27,6 +27,7 @@
>>>>>>    #define __DRM_SYNCOBJ_H__
>>>>>>
>>>>>>    #include "linux/dma-fence.h"
>>>>>> +#include "linux/dma-fence-chain.h"
>>>>>>
>>>>>>    /**
>>>>>>     * struct drm_syncobj - sync object.
>>>>>> @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
>>>>>>
>>>>>>    struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
>>>>>>                                    u32 handle);
>>>>>> +void drm_syncobj_add_point(struct drm_syncobj *syncobj,
>>>>>> +                       struct dma_fence_chain *chain,
>>>>>> +                       struct dma_fence *fence,
>>>>>> +                       uint64_t point);
>>>>>>    void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
>>>>>>                              struct dma_fence *fence);
>>>>>>    int drm_syncobj_find_fence(struct drm_file *file_private,
>>>>>> --
>>>>>> 2.17.1
>>>>>>
>>>>>> _______________________________________________
>>>>>> Intel-gfx mailing list
>>>>>> Intel-gfx@lists.freedesktop.org
>>>>>> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
>>>
>>> --
>>> Daniel Vetter
>>> Software Engineer, Intel Corporation
>>> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
>
>
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
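One last note on the "walk the chain once to trigger garbage collection"
comment in the quoted patch: dma_fence_chain_for_each() takes a reference on
each node and drops the reference to the previous one, and the walk unlinks
nodes whose fences have already signalled. So pruning the timeline is just an
empty iteration, as in this illustrative sketch:

#include <linux/dma-fence-chain.h>

/* Illustrative only: walking the chain is enough to prune it. */
static void example_prune_timeline(struct dma_fence *head)
{
	struct dma_fence *iter;

	dma_fence_chain_for_each(iter, head)
		;	/* the walk itself performs the collection */
}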

^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2018-12-12 13:06 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-12-07 15:54 [PATCH 01/10] dma-buf: add new dma_fence_chain container v4 Chunming Zhou
2018-12-07 15:54 ` [PATCH 03/10] drm/syncobj: add new drm_syncobj_add_point interface v2 Chunming Zhou
     [not found]   ` <20181207155422.15967-3-david1.zhou-5C7GfCeVMHo@public.gmane.org>
2018-12-12 10:49     ` [Intel-gfx] " Daniel Vetter
2018-12-12 11:08       ` Koenig, Christian
     [not found]         ` <12badb5a-f2c1-f819-c30a-f274d8a9401b-5C7GfCeVMHo@public.gmane.org>
2018-12-12 11:15           ` Daniel Vetter
     [not found]             ` <CAKMK7uEDuYmuYTbCr3fP-_bVKWehMMWn+SbJkEUAB_uWn6X1Gg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-12-12 11:39               ` Zhou, David(ChunMing)
     [not found]                 ` <BY1PR12MB0502201CA782F5F2DBB661B8B4A70-PicGAnIBOobrCwm+z9iKNgdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-12-12 12:00                   ` Koenig, Christian
2018-12-12 12:20                     ` Daniel Vetter
2018-12-12 12:24                 ` Daniel Vetter
2018-12-12 13:06                   ` [Intel-gfx] " Chunming Zhou
2018-12-07 15:54 ` [PATCH 05/10] drm/syncobj: add timeline payload query ioctl v4 Chunming Zhou
2018-12-07 15:54 ` [PATCH 06/10] drm/syncobj: use the timeline point in drm_syncobj_find_fence v3 Chunming Zhou
2018-12-07 15:54 ` [PATCH 07/10] drm/amdgpu: add timeline support in amdgpu CS v2 Chunming Zhou
     [not found] ` <20181207155422.15967-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
2018-12-07 15:54   ` [PATCH 02/10] drm/syncobj: remove drm_syncobj_cb and cleanup Chunming Zhou
2018-12-07 15:54   ` [PATCH 04/10] drm/syncobj: add support for timeline point wait v8 Chunming Zhou
2018-12-07 15:54   ` [PATCH 08/10] drm/syncobj: add transition ioctls between binary and timeline v2 Chunming Zhou
2018-12-07 15:54   ` [PATCH 09/10] drm/syncobj: add timeline signal ioctl for syncobj v2 Chunming Zhou
2018-12-07 15:54   ` [PATCH 10/10] drm/amdgpu: update version for timeline syncobj support in amdgpu Chunming Zhou

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).