dri-devel.lists.freedesktop.org archive mirror
* [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
@ 2019-03-20  5:47 Chunming Zhou
  2019-03-20  5:47 ` [PATCH 5/9] drm/syncobj: use the timeline point in drm_syncobj_find_fence v4 Chunming Zhou
                   ` (2 more replies)
  0 siblings, 3 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel, amd-gfx, lionel.g.landwerlin, jason, Christian.Koenig,
	Tobias.Hector
  Cc: Christian König, Christian König

From: Christian König <ckoenig.leichtzumerken@gmail.com>

Lockless container implementation similar to a dma_fence_array, but with
only two elements per node and automatic garbage collection.

v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno,
    drop prev reference during garbage collection if it's not a chain fence.
v3: use head and iterator for dma_fence_chain_for_each
v4: fix reference count in dma_fence_chain_enable_signaling
v5: fix iteration when walking each chain node
v6: add __rcu for member 'prev' of struct chain node

Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
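Not part of the patch itself, but a minimal usage sketch may help review.
All function and variable names below (example_append_point,
example_timeline_idle, 'timeline') are illustrative only; the sketch just
exercises dma_fence_chain_init() and dma_fence_chain_for_each() as defined
in this patch, under the usual dma_fence reference-counting rules.

#include <linux/dma-fence-chain.h>
#include <linux/slab.h>

/* Hypothetical example, not part of the patch: append 'fence' as point
 * 'seqno' on top of 'timeline' (which may be NULL for an empty timeline).
 * The chain node consumes the caller's references to both 'timeline' and
 * 'fence'.  Returns the new timeline head, or NULL if allocation fails.
 */
static struct dma_fence *example_append_point(struct dma_fence *timeline,
					      struct dma_fence *fence,
					      u64 seqno)
{
	struct dma_fence_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;

	dma_fence_chain_init(chain, timeline, fence, seqno);
	return &chain->base;
}

/* Hypothetical example: check whether every fence in the timeline signaled. */
static bool example_timeline_idle(struct dma_fence *timeline)
{
	struct dma_fence *iter;

	dma_fence_chain_for_each(iter, timeline) {
		struct dma_fence_chain *node = to_dma_fence_chain(iter);
		struct dma_fence *f = node ? node->fence : iter;

		if (!dma_fence_is_signaled(f)) {
			/* Breaking out: drop the iterator's reference. */
			dma_fence_put(iter);
			return false;
		}
	}
	return true;
}
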
 drivers/dma-buf/Makefile          |   3 +-
 drivers/dma-buf/dma-fence-chain.c | 241 ++++++++++++++++++++++++++++++
 include/linux/dma-fence-chain.h   |  81 ++++++++++
 3 files changed, 324 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma-buf/dma-fence-chain.c
 create mode 100644 include/linux/dma-fence-chain.h

diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 0913a6ccab5a..1f006e083eb9 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,4 +1,5 @@
-obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
+	 reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
new file mode 100644
index 000000000000..0c5e3c902fa0
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -0,0 +1,241 @@
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-fence-chain.h>
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
+ * @chain: chain node to get the previous node from
+ *
+ * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
+ * chain node.
+ */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
+{
+	struct dma_fence *prev;
+
+	rcu_read_lock();
+	prev = dma_fence_get_rcu_safe(&chain->prev);
+	rcu_read_unlock();
+	return prev;
+}
+
+/**
+ * dma_fence_chain_walk - chain walking function
+ * @fence: current chain node
+ *
+ * Walk the chain to the next node. Returns the next fence or NULL if we are at
+ * the end of the chain. Garbage collects chain nodes which are already
+ * signaled.
+ */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
+{
+	struct dma_fence_chain *chain, *prev_chain;
+	struct dma_fence *prev, *replacement, *tmp;
+
+	chain = to_dma_fence_chain(fence);
+	if (!chain) {
+		dma_fence_put(fence);
+		return NULL;
+	}
+
+	while ((prev = dma_fence_chain_get_prev(chain))) {
+
+		prev_chain = to_dma_fence_chain(prev);
+		if (prev_chain) {
+			if (!dma_fence_is_signaled(prev_chain->fence))
+				break;
+
+			replacement = dma_fence_chain_get_prev(prev_chain);
+		} else {
+			if (!dma_fence_is_signaled(prev))
+				break;
+
+			replacement = NULL;
+		}
+
+		tmp = cmpxchg(&chain->prev, prev, replacement);
+		if (tmp == prev)
+			dma_fence_put(tmp);
+		else
+			dma_fence_put(replacement);
+		dma_fence_put(prev);
+	}
+
+	dma_fence_put(fence);
+	return prev;
+}
+EXPORT_SYMBOL(dma_fence_chain_walk);
+
+/**
+ * dma_fence_chain_find_seqno - find fence chain node by seqno
+ * @pfence: pointer to the chain node where to start
+ * @seqno: the sequence number to search for
+ *
+ * Advance the fence pointer to the chain node which will signal this sequence
+ * number. If no sequence number is provided then this is a no-op.
+ *
+ * Returns -EINVAL if the fence is not a chain node or the sequence number has
+ * not yet advanced far enough.
+ */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
+{
+	struct dma_fence_chain *chain;
+
+	if (!seqno)
+		return 0;
+
+	chain = to_dma_fence_chain(*pfence);
+	if (!chain || chain->base.seqno < seqno)
+		return -EINVAL;
+
+	dma_fence_chain_for_each(*pfence, &chain->base) {
+		if ((*pfence)->context != chain->base.context ||
+		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
+			break;
+	}
+	dma_fence_put(&chain->base);
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
+{
+	return "dma_fence_chain";
+}
+
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
+{
+	return "unbound";
+}
+
+static void dma_fence_chain_irq_work(struct irq_work *work)
+{
+	struct dma_fence_chain *chain;
+
+	chain = container_of(work, typeof(*chain), work);
+
+	/* Try to rearm the callback */
+	if (!dma_fence_chain_enable_signaling(&chain->base))
+		/* Ok, we are done. No more unsignaled fences left */
+		dma_fence_signal(&chain->base);
+	dma_fence_put(&chain->base);
+}
+
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct dma_fence_chain *chain;
+
+	chain = container_of(cb, typeof(*chain), cb);
+	irq_work_queue(&chain->work);
+	dma_fence_put(f);
+}
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
+{
+	struct dma_fence_chain *head = to_dma_fence_chain(fence);
+
+	dma_fence_get(&head->base);
+	dma_fence_chain_for_each(fence, &head->base) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+		struct dma_fence *f = chain ? chain->fence : fence;
+
+		dma_fence_get(f);
+		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
+			dma_fence_put(fence);
+			return true;
+		}
+		dma_fence_put(f);
+	}
+	dma_fence_put(&head->base);
+	return false;
+}
+
+static bool dma_fence_chain_signaled(struct dma_fence *fence)
+{
+	dma_fence_chain_for_each(fence, fence) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+		struct dma_fence *f = chain ? chain->fence : fence;
+
+		if (!dma_fence_is_signaled(f)) {
+			dma_fence_put(fence);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void dma_fence_chain_release(struct dma_fence *fence)
+{
+	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+	dma_fence_put(chain->prev);
+	dma_fence_put(chain->fence);
+	dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_chain_ops = {
+	.get_driver_name = dma_fence_chain_get_driver_name,
+	.get_timeline_name = dma_fence_chain_get_timeline_name,
+	.enable_signaling = dma_fence_chain_enable_signaling,
+	.signaled = dma_fence_chain_signaled,
+	.release = dma_fence_chain_release,
+};
+EXPORT_SYMBOL(dma_fence_chain_ops);
+
+/**
+ * dma_fence_chain_init - initialize a fence chain
+ * @chain: the chain node to initialize
+ * @prev: the previous fence
+ * @fence: the current fence
+ * @seqno: the sequence number to use for the new fence
+ *
+ * Initialize a new chain node and either start a new chain or add the node to
+ * the existing chain of the previous fence.
+ */
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+			  struct dma_fence *prev,
+			  struct dma_fence *fence,
+			  uint64_t seqno)
+{
+	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
+	uint64_t context;
+
+	spin_lock_init(&chain->lock);
+	chain->prev = prev;
+	chain->fence = fence;
+	chain->prev_seqno = 0;
+	init_irq_work(&chain->work, dma_fence_chain_irq_work);
+
+	/* Try to reuse the context of the previous chain node. */
+	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno)) {
+		context = prev->context;
+		chain->prev_seqno = prev->seqno;
+	} else {
+		context = dma_fence_context_alloc(1);
+		/* Make sure that we always have a valid sequence number. */
+		if (prev_chain)
+			seqno = max(prev->seqno, seqno);
+	}
+
+	dma_fence_init(&chain->base, &dma_fence_chain_ops,
+		       &chain->lock, context, seqno);
+}
+EXPORT_SYMBOL(dma_fence_chain_init);
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
new file mode 100644
index 000000000000..934a442db8ac
--- /dev/null
+++ b/include/linux/dma-fence-chain.h
@@ -0,0 +1,81 @@
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_CHAIN_H
+#define __LINUX_DMA_FENCE_CHAIN_H
+
+#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
+
+/**
+ * struct dma_fence_chain - fence to represent a node of a fence chain
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @prev: previous fence of the chain
+ * @prev_seqno: original previous seqno before garbage collection
+ * @fence: encapsulated fence
+ * @cb: callback structure for signaling
+ * @work: irq work item for signaling
+ */
+struct dma_fence_chain {
+	struct dma_fence base;
+	spinlock_t lock;
+	struct dma_fence __rcu *prev;
+	u64 prev_seqno;
+	struct dma_fence *fence;
+	struct dma_fence_cb cb;
+	struct irq_work work;
+};
+
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * to_dma_fence_chain - cast a fence to a dma_fence_chain
+ * @fence: fence to cast to a dma_fence_chain
+ *
+ * Returns NULL if the fence is not a dma_fence_chain,
+ * or the dma_fence_chain otherwise.
+ */
+static inline struct dma_fence_chain *
+to_dma_fence_chain(struct dma_fence *fence)
+{
+	if (!fence || fence->ops != &dma_fence_chain_ops)
+		return NULL;
+
+	return container_of(fence, struct dma_fence_chain, base);
+}
+
+/**
+ * dma_fence_chain_for_each - iterate over all fences in chain
+ * @iter: current fence
+ * @head: starting point
+ *
+ * Iterate over all fences in the chain. We keep a reference to the current
+ * fence while inside the loop which must be dropped when breaking out.
+ */
+#define dma_fence_chain_for_each(iter, head)	\
+	for (iter = dma_fence_get(head); iter; \
+	     iter = dma_fence_chain_walk(iter))
+
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence);
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno);
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+			  struct dma_fence *prev,
+			  struct dma_fence *fence,
+			  uint64_t seqno);
+
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


* [PATCH 2/9] drm/syncobj: add new drm_syncobj_add_point interface v4
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
@ 2019-03-20  5:47   ` Chunming Zhou
  2019-03-20  5:47   ` [PATCH 3/9] drm/syncobj: add support for timeline point wait v8 Chunming Zhou
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	Tobias.Hector-5C7GfCeVMHo
  Cc: Christian König, Christian König

From: Christian König <ckoenig.leichtzumerken@gmail.com>

Use the dma_fence_chain object to create a timeline of fence objects
instead of just replacing the existing fence.

v2: rebase and cleanup
v3: fix garbage collection parameters
v4: add unordered point check, print a warning calltrace

Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
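A hypothetical driver-side sketch of the intended call pattern (the function
name example_signal_timeline_point and the 'job_fence' parameter are
illustrative, not part of the patch): the chain node is preallocated in a
context that may sleep, while drm_syncobj_add_point() itself only takes the
syncobj spinlock and grabs its own reference on the fence.

#include <drm/drm_syncobj.h>
#include <linux/slab.h>

/* Hypothetical example: publish 'job_fence' as timeline point 'point'. */
static int example_signal_timeline_point(struct drm_syncobj *syncobj,
					 struct dma_fence *job_fence,
					 u64 point)
{
	struct dma_fence_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return -ENOMEM;

	/* Ownership of the chain node transfers to the syncobj here. */
	drm_syncobj_add_point(syncobj, chain, job_fence, point);
	return 0;
}
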
 drivers/gpu/drm/drm_syncobj.c | 39 +++++++++++++++++++++++++++++++++++
 include/drm/drm_syncobj.h     |  5 +++++
 2 files changed, 44 insertions(+)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 5329e66598c6..19a9ce638119 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -122,6 +122,45 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
 	spin_unlock(&syncobj->lock);
 }
 
+/**
+ * drm_syncobj_add_point - add new timeline point to the syncobj
+ * @syncobj: sync object to add the timeline point to
+ * @chain: chain node to use to add the point
+ * @fence: fence to encapsulate in the chain node
+ * @point: sequence number to use for the point
+ *
+ * Add the chain node as new timeline point to the syncobj.
+ */
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+			   struct dma_fence_chain *chain,
+			   struct dma_fence *fence,
+			   uint64_t point)
+{
+	struct syncobj_wait_entry *cur, *tmp;
+	struct dma_fence *prev;
+
+	dma_fence_get(fence);
+
+	spin_lock(&syncobj->lock);
+
+	prev = drm_syncobj_fence_get(syncobj);
+	/* You are adding an unordered point to the timeline, which could cause the payload returned from query_ioctl to be 0! */
+	WARN_ON_ONCE(prev && prev->seqno >= point);
+	dma_fence_chain_init(chain, prev, fence, point);
+	rcu_assign_pointer(syncobj->fence, &chain->base);
+
+	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
+		list_del_init(&cur->node);
+		syncobj_wait_syncobj_func(syncobj, cur);
+	}
+	spin_unlock(&syncobj->lock);
+
+	/* Walk the chain once to trigger garbage collection */
+	dma_fence_chain_for_each(fence, prev);
+	dma_fence_put(prev);
+}
+EXPORT_SYMBOL(drm_syncobj_add_point);
+
 /**
  * drm_syncobj_replace_fence - replace fence in a sync object.
  * @syncobj: Sync object to replace fence in
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 0311c9fdbd2f..6cf7243a1dc5 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -27,6 +27,7 @@
 #define __DRM_SYNCOBJ_H__
 
 #include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
 
 struct drm_file;
 
@@ -112,6 +113,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
 
 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
 				     u32 handle);
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+			   struct dma_fence_chain *chain,
+			   struct dma_fence *fence,
+			   uint64_t point);
 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence);
 int drm_syncobj_find_fence(struct drm_file *file_private,
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 3/9] drm/syncobj: add support for timeline point wait v8
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
  2019-03-20  5:47   ` [PATCH 2/9] drm/syncobj: add new drm_syncobj_add_point interface v4 Chunming Zhou
@ 2019-03-20  5:47   ` Chunming Zhou
  2019-03-20  5:47   ` [PATCH 4/9] drm/syncobj: add timeline payload query ioctl v6 Chunming Zhou
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	Tobias.Hector-5C7GfCeVMHo
  Cc: Chunming Zhou, Chris Wilson, Christian König, Dave Airlie

The points array is a one-to-one match with the syncobjs array.
v2:
add a separate ioctl for timeline point wait, otherwise we would break the uapi.
v3:
userspace can specify two kinds of waits:
a. Wait for the time point to be completed.
b. Wait for the time point to become available.
v4:
rebase
v5:
add comment for xxx_WAIT_AVAILABLE
v6: rebase and rework on new container
v7: drop _WAIT_COMPLETED, it is the default anyway
v8: correctly handle garbage collected fences

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: Tobias Hector <Tobias.Hector@amd.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
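A hypothetical userspace sketch of the new wait ioctl (assuming libdrm's
drmIoctl() and a uapi header that already contains the hunks below;
example_wait_point is an illustrative name and error handling is trimmed):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Hypothetical example: block until 'point' on one timeline syncobj signals. */
static int example_wait_point(int drm_fd, uint32_t handle, uint64_t point,
			      int64_t abs_timeout_nsec)
{
	struct drm_syncobj_timeline_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uint64_t)(uintptr_t)&handle;
	wait.points = (uint64_t)(uintptr_t)&point;
	wait.count_handles = 1;
	wait.timeout_nsec = abs_timeout_nsec;	/* absolute CLOCK_MONOTONIC time in ns */
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
}
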
 drivers/gpu/drm/drm_internal.h |   2 +
 drivers/gpu/drm/drm_ioctl.c    |   2 +
 drivers/gpu/drm/drm_syncobj.c  | 153 ++++++++++++++++++++++++++-------
 include/uapi/drm/drm.h         |  15 ++++
 4 files changed, 143 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 251d67e04c2d..331ac6225b58 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -182,6 +182,8 @@ int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
 int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_private);
+int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_private);
 int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 687943df58e1..c984654646fa 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -688,6 +688,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 19a9ce638119..eae51978cda4 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -61,6 +61,7 @@ struct syncobj_wait_entry {
 	struct task_struct *task;
 	struct dma_fence *fence;
 	struct dma_fence_cb fence_cb;
+	u64    point;
 };
 
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
@@ -95,6 +96,8 @@ EXPORT_SYMBOL(drm_syncobj_find);
 static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
 				       struct syncobj_wait_entry *wait)
 {
+	struct dma_fence *fence;
+
 	if (wait->fence)
 		return;
 
@@ -103,11 +106,15 @@ static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
 	 * have the lock, try one more time just to be sure we don't add a
 	 * callback when a fence has already been set.
 	 */
-	if (syncobj->fence)
-		wait->fence = dma_fence_get(
-			rcu_dereference_protected(syncobj->fence, 1));
-	else
+	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+		dma_fence_put(fence);
 		list_add_tail(&wait->node, &syncobj->cb_list);
+	} else if (!fence) {
+		wait->fence = dma_fence_get_stub();
+	} else {
+		wait->fence = fence;
+	}
 	spin_unlock(&syncobj->lock);
 }
 
@@ -149,10 +156,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
 	dma_fence_chain_init(chain, prev, fence, point);
 	rcu_assign_pointer(syncobj->fence, &chain->base);
 
-	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
-		list_del_init(&cur->node);
+	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
 		syncobj_wait_syncobj_func(syncobj, cur);
-	}
 	spin_unlock(&syncobj->lock);
 
 	/* Walk the chain once to trigger garbage collection */
@@ -184,10 +189,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 	rcu_assign_pointer(syncobj->fence, fence);
 
 	if (fence != old_fence) {
-		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
-			list_del_init(&cur->node);
+		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
 			syncobj_wait_syncobj_func(syncobj, cur);
-		}
 	}
 
 	spin_unlock(&syncobj->lock);
@@ -644,13 +647,27 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
 				      struct syncobj_wait_entry *wait)
 {
+	struct dma_fence *fence;
+
 	/* This happens inside the syncobj lock */
-	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
-							      lockdep_is_held(&syncobj->lock)));
+	fence = rcu_dereference_protected(syncobj->fence,
+					  lockdep_is_held(&syncobj->lock));
+	dma_fence_get(fence);
+	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+		dma_fence_put(fence);
+		return;
+	} else if (!fence) {
+		wait->fence = dma_fence_get_stub();
+	} else {
+		wait->fence = fence;
+	}
+
 	wake_up_process(wait->task);
+	list_del_init(&wait->node);
 }
 
 static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+						  void __user *user_points,
 						  uint32_t count,
 						  uint32_t flags,
 						  signed long timeout,
@@ -658,12 +675,27 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 {
 	struct syncobj_wait_entry *entries;
 	struct dma_fence *fence;
+	uint64_t *points;
 	uint32_t signaled_count, i;
 
-	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
-	if (!entries)
+	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+	if (points == NULL)
 		return -ENOMEM;
 
+	if (!user_points) {
+		memset(points, 0, count * sizeof(uint64_t));
+
+	} else if (copy_from_user(points, user_points,
+				  sizeof(uint64_t) * count)) {
+		timeout = -EFAULT;
+		goto err_free_points;
+	}
+
+	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+	if (!entries) {
+		timeout = -ENOMEM;
+		goto err_free_points;
+	}
 	/* Walk the list of sync objects and initialize entries.  We do
 	 * this up-front so that we can properly return -EINVAL if there is
 	 * a syncobj with a missing fence and then never have the chance of
@@ -671,9 +703,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	 */
 	signaled_count = 0;
 	for (i = 0; i < count; ++i) {
+		struct dma_fence *fence;
+
 		entries[i].task = current;
-		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
-		if (!entries[i].fence) {
+		entries[i].point = points[i];
+		fence = drm_syncobj_fence_get(syncobjs[i]);
+		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+			dma_fence_put(fence);
 			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
 				continue;
 			} else {
@@ -682,7 +718,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 			}
 		}
 
-		if (dma_fence_is_signaled(entries[i].fence)) {
+		if (fence)
+			entries[i].fence = fence;
+		else
+			entries[i].fence = dma_fence_get_stub();
+
+		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+		    dma_fence_is_signaled(entries[i].fence)) {
 			if (signaled_count == 0 && idx)
 				*idx = i;
 			signaled_count++;
@@ -715,7 +757,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 			if (!fence)
 				continue;
 
-			if (dma_fence_is_signaled(fence) ||
+			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+			    dma_fence_is_signaled(fence) ||
 			    (!entries[i].fence_cb.func &&
 			     dma_fence_add_callback(fence,
 						    &entries[i].fence_cb,
@@ -760,6 +803,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	}
 	kfree(entries);
 
+err_free_points:
+	kfree(points);
+
 	return timeout;
 }
 
@@ -799,19 +845,33 @@ EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
 static int drm_syncobj_array_wait(struct drm_device *dev,
 				  struct drm_file *file_private,
 				  struct drm_syncobj_wait *wait,
-				  struct drm_syncobj **syncobjs)
+				  struct drm_syncobj_timeline_wait *timeline_wait,
+				  struct drm_syncobj **syncobjs, bool timeline)
 {
-	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+	signed long timeout = 0;
 	uint32_t first = ~0;
 
-	timeout = drm_syncobj_array_wait_timeout(syncobjs,
-						 wait->count_handles,
-						 wait->flags,
-						 timeout, &first);
-	if (timeout < 0)
-		return timeout;
-
-	wait->first_signaled = first;
+	if (!timeline) {
+		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+		timeout = drm_syncobj_array_wait_timeout(syncobjs,
+							 NULL,
+							 wait->count_handles,
+							 wait->flags,
+							 timeout, &first);
+		if (timeout < 0)
+			return timeout;
+		wait->first_signaled = first;
+	} else {
+		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
+		timeout = drm_syncobj_array_wait_timeout(syncobjs,
+							 u64_to_user_ptr(timeline_wait->points),
+							 timeline_wait->count_handles,
+							 timeline_wait->flags,
+							 timeout, &first);
+		if (timeout < 0)
+			return timeout;
+		timeline_wait->first_signaled = first;
+	}
 	return 0;
 }
 
@@ -897,13 +957,48 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	ret = drm_syncobj_array_wait(dev, file_private,
-				     args, syncobjs);
+				     args, NULL, syncobjs, false);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
 	return ret;
 }
 
+int
+drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_private)
+{
+	struct drm_syncobj_timeline_wait *args = data;
+	struct drm_syncobj **syncobjs;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -ENODEV;
+
+	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+		return -EINVAL;
+
+	if (args->count_handles == 0)
+		return -EINVAL;
+
+	ret = drm_syncobj_array_find(file_private,
+				     u64_to_user_ptr(args->handles),
+				     args->count_handles,
+				     &syncobjs);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_syncobj_array_wait(dev, file_private,
+				     NULL, args, syncobjs, true);
+
+	drm_syncobj_array_free(syncobjs, args->count_handles);
+
+	return ret;
+}
+
+
 int
 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_private)
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 300f336633f2..44ebcdd9bd1d 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -737,6 +737,7 @@ struct drm_syncobj_handle {
 
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
 struct drm_syncobj_wait {
 	__u64 handles;
 	/* absolute timeout */
@@ -747,6 +748,19 @@ struct drm_syncobj_wait {
 	__u32 pad;
 };
 
+struct drm_syncobj_timeline_wait {
+	__u64 handles;
+	/* wait on a specific timeline point for each handle */
+	__u64 points;
+	/* absolute timeout */
+	__s64 timeout_nsec;
+	__u32 count_handles;
+	__u32 flags;
+	__u32 first_signaled; /* only valid when not waiting all */
+	__u32 pad;
+};
+
+
 struct drm_syncobj_array {
 	__u64 handles;
 	__u32 count_handles;
@@ -909,6 +923,7 @@ extern "C" {
 #define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
 #define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
 
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 4/9] drm/syncobj: add timeline payload query ioctl v6
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
  2019-03-20  5:47   ` [PATCH 2/9] drm/syncobj: add new drm_syncobj_add_point interface v4 Chunming Zhou
  2019-03-20  5:47   ` [PATCH 3/9] drm/syncobj: add support for timeline point wait v8 Chunming Zhou
@ 2019-03-20  5:47   ` Chunming Zhou
  2019-03-20  5:47   ` [PATCH 6/9] drm/amdgpu: add timeline support in amdgpu CS v3 Chunming Zhou
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	Tobias.Hector-5C7GfCeVMHo
  Cc: Chunming Zhou, Chris Wilson, Dave Airlie

User mode can query the timeline payload.
v2: check return value of copy_to_user
v3: handle querying entry by entry
v4: rebase on new chain container, simplify interface
v5: query last signaled timeline point, not last point.
v6: add unordered point check

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Cc: Tobias Hector <Tobias.Hector@amd.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
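A hypothetical userspace sketch of the query ioctl (assuming libdrm's
drmIoctl() and the uapi additions below; example_query_payload is an
illustrative name):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Hypothetical example: read back the last signaled point of one syncobj. */
static int example_query_payload(int drm_fd, uint32_t handle, uint64_t *point)
{
	struct drm_syncobj_timeline_array args;

	memset(&args, 0, sizeof(args));
	args.handles = (uint64_t)(uintptr_t)&handle;
	args.points = (uint64_t)(uintptr_t)point;	/* written by the kernel */
	args.count_handles = 1;

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}
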
 drivers/gpu/drm/drm_internal.h |  2 ++
 drivers/gpu/drm/drm_ioctl.c    |  2 ++
 drivers/gpu/drm/drm_syncobj.c  | 62 ++++++++++++++++++++++++++++++++++
 include/uapi/drm/drm.h         | 10 ++++++
 4 files changed, 76 insertions(+)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 331ac6225b58..695179bb88dc 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -188,6 +188,8 @@ int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_private);
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_private);
 
 /* drm_framebuffer.c */
 void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index c984654646fa..7a534c184e52 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -694,6 +694,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index eae51978cda4..0e62a793c8dd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1064,3 +1064,65 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 
 	return ret;
 }
+
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_private)
+{
+	struct drm_syncobj_timeline_array *args = data;
+	struct drm_syncobj **syncobjs;
+	uint64_t __user *points = u64_to_user_ptr(args->points);
+	uint32_t i;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -ENODEV;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	if (args->count_handles == 0)
+		return -EINVAL;
+
+	ret = drm_syncobj_array_find(file_private,
+				     u64_to_user_ptr(args->handles),
+				     args->count_handles,
+				     &syncobjs);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < args->count_handles; i++) {
+		struct dma_fence_chain *chain;
+		struct dma_fence *fence;
+		uint64_t point;
+
+		fence = drm_syncobj_fence_get(syncobjs[i]);
+		chain = to_dma_fence_chain(fence);
+		if (chain) {
+			struct dma_fence *iter, *last_signaled = NULL;
+
+			dma_fence_chain_for_each(iter, fence) {
+				if (!iter)
+					break;
+				dma_fence_put(last_signaled);
+				last_signaled = dma_fence_get(iter);
+				if (!to_dma_fence_chain(last_signaled)->prev_seqno)
+					/* It is most likely that the timeline
+					 * has unordered points. */
+					break;
+			}
+			point = dma_fence_is_signaled(last_signaled) ?
+				last_signaled->seqno :
+				to_dma_fence_chain(last_signaled)->prev_seqno;
+			dma_fence_put(last_signaled);
+		} else {
+			point = 0;
+		}
+		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
+		ret = ret ? -EFAULT : 0;
+		if (ret)
+			break;
+	}
+	drm_syncobj_array_free(syncobjs, args->count_handles);
+
+	return ret;
+}
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 44ebcdd9bd1d..c62be0840ba5 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -767,6 +767,14 @@ struct drm_syncobj_array {
 	__u32 pad;
 };
 
+struct drm_syncobj_timeline_array {
+	__u64 handles;
+	__u64 points;
+	__u32 count_handles;
+	__u32 pad;
+};
+
+
 /* Query current scanout sequence number */
 struct drm_crtc_get_sequence {
 	__u32 crtc_id;		/* requested crtc_id */
@@ -924,6 +932,8 @@ extern "C" {
 #define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
 
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 5/9] drm/syncobj: use the timeline point in drm_syncobj_find_fence v4
  2019-03-20  5:47 [PATCH 1/9] dma-buf: add new dma_fence_chain container v6 Chunming Zhou
@ 2019-03-20  5:47 ` Chunming Zhou
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
  2019-03-21  0:24 ` [PATCH 1/9] dma-buf: add new dma_fence_chain container v6 kbuild test robot
  2 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel, amd-gfx, lionel.g.landwerlin, jason, Christian.Koenig,
	Tobias.Hector
  Cc: Christian König, Christian König

From: Christian König <ckoenig.leichtzumerken@gmail.com>

Implement finding the right timeline point in drm_syncobj_find_fence.

v2: return -EINVAL when the point is not submitted yet.
v3: fix reference counting bug, add flags handling as well
v4: add timeout for find fence

Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
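A hedged in-kernel usage sketch (mirroring how the amdgpu patch later in
this series consumes the extended lookup; example_resolve_and_wait and its
parameters are illustrative, not part of the patch):

#include <drm/drm_syncobj.h>
#include <linux/dma-fence.h>
#include <uapi/drm/drm.h>	/* DRM_SYNCOBJ_WAIT_FLAGS_* */

/* Hypothetical example: resolve timeline point 'point' of syncobj 'handle'
 * into a fence and wait for it; may block up to the 5s default while the
 * point has not been submitted yet.
 */
static int example_resolve_and_wait(struct drm_file *filp, u32 handle,
				    u64 point)
{
	struct dma_fence *fence;
	long timeout;
	int r;

	r = drm_syncobj_find_fence(filp, handle, point,
				   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
				   &fence);
	if (r)
		return r;

	timeout = dma_fence_wait(fence, true);
	dma_fence_put(fence);
	return timeout < 0 ? timeout : 0;
}
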
 drivers/gpu/drm/drm_syncobj.c | 50 ++++++++++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0e62a793c8dd..087fd4e7eaf3 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -213,6 +213,8 @@ static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 	dma_fence_put(fence);
 }
 
+/* 5s default timeout when waiting for submission */
+#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
 /**
  * drm_syncobj_find_fence - lookup and reference the fence in a sync object
  * @file_private: drm file private pointer
@@ -233,16 +235,58 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 			   struct dma_fence **fence)
 {
 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
-	int ret = 0;
+	struct syncobj_wait_entry wait;
+	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
+	int ret;
 
 	if (!syncobj)
 		return -ENOENT;
 
 	*fence = drm_syncobj_fence_get(syncobj);
-	if (!*fence) {
+	drm_syncobj_put(syncobj);
+
+	if (*fence) {
+		ret = dma_fence_chain_find_seqno(fence, point);
+		if (!ret)
+			return 0;
+		dma_fence_put(*fence);
+	} else {
 		ret = -EINVAL;
 	}
-	drm_syncobj_put(syncobj);
+
+	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+		return ret;
+
+	memset(&wait, 0, sizeof(wait));
+	wait.task = current;
+	wait.point = point;
+	drm_syncobj_fence_add_wait(syncobj, &wait);
+
+	do {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (wait.fence) {
+			ret = 0;
+			break;
+		}
+		if (timeout == 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		timeout = schedule_timeout(timeout);
+	} while (1);
+
+	__set_current_state(TASK_RUNNING);
+	*fence = wait.fence;
+
+	if (wait.node.next)
+		drm_syncobj_remove_wait(syncobj, &wait);
+
 	return ret;
 }
 EXPORT_SYMBOL(drm_syncobj_find_fence);
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


* [PATCH 6/9] drm/amdgpu: add timeline support in amdgpu CS v3
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
                     ` (2 preceding siblings ...)
  2019-03-20  5:47   ` [PATCH 4/9] drm/syncobj: add timeline payload query ioctl v6 Chunming Zhou
@ 2019-03-20  5:47   ` Chunming Zhou
  2019-03-20  5:47   ` [PATCH 7/9] drm/syncobj: add transition ioctls between binary and timeline v2 Chunming Zhou
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	Tobias.Hector-5C7GfCeVMHo
  Cc: Chunming Zhou, Chris Wilson, Dave Airlie

Syncobj wait/signal operations are appended to command submission.
v2: separate into two kinds of in/out_deps functions
v3: fix checking for timeline syncobj

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Cc: Tobias Hector <Tobias.Hector@amd.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
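A hypothetical userspace sketch of filling one of the new chunks (assuming
the amdgpu_drm.h additions below and the existing struct drm_amdgpu_cs_chunk
layout with chunk_id/length_dw/chunk_data; example_fill_signal_chunk and its
parameters are illustrative):

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical example: prepare a chunk that signals 'point' on
 * 'syncobj_handle' once the CS fence completes.  'dep' must stay alive
 * until the CS ioctl returns.
 */
static void example_fill_signal_chunk(struct drm_amdgpu_cs_chunk *chunk,
				      struct drm_amdgpu_cs_chunk_syncobj *dep,
				      uint32_t syncobj_handle, uint64_t point)
{
	dep->handle = syncobj_handle;
	dep->flags = 0;
	dep->point = point;

	chunk->chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL;
	chunk->length_dw = sizeof(*dep) / 4;	/* chunk size in dwords */
	chunk->chunk_data = (uint64_t)(uintptr_t)dep;
}
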
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 152 +++++++++++++++++++++----
 include/uapi/drm/amdgpu_drm.h          |   8 ++
 3 files changed, 144 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8d0d7f3dd5fb..deec2c796253 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -433,6 +433,12 @@ struct amdgpu_cs_chunk {
 	void			*kdata;
 };
 
+struct amdgpu_cs_post_dep {
+	struct drm_syncobj *syncobj;
+	struct dma_fence_chain *chain;
+	u64 point;
+};
+
 struct amdgpu_cs_parser {
 	struct amdgpu_device	*adev;
 	struct drm_file		*filp;
@@ -462,8 +468,8 @@ struct amdgpu_cs_parser {
 	/* user fence */
 	struct amdgpu_bo_list_entry	uf_entry;
 
-	unsigned num_post_dep_syncobjs;
-	struct drm_syncobj **post_dep_syncobjs;
+	unsigned			num_post_deps;
+	struct amdgpu_cs_post_dep	*post_deps;
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 52a5e4fdc95b..2f6239b6be6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 			break;
 
 		default:
@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 
-	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
-		drm_syncobj_put(parser->post_dep_syncobjs[i]);
-	kfree(parser->post_dep_syncobjs);
+	for (i = 0; i < parser->num_post_deps; i++) {
+		drm_syncobj_put(parser->post_deps[i].syncobj);
+		kfree(parser->post_deps[i].chain);
+	}
+	kfree(parser->post_deps);
 
 	dma_fence_put(parser->fence);
 
@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 }
 
 static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
-						 uint32_t handle)
+						 uint32_t handle, u64 point,
+						 u64 flags)
 {
-	int r;
 	struct dma_fence *fence;
-	r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
-	if (r)
+	int r;
+
+	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+	if (r) {
+		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+			  handle, point, r);
 		return r;
+	}
 
 	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
 	dma_fence_put(fence);
@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
 					    struct amdgpu_cs_chunk *chunk)
 {
+	struct drm_amdgpu_cs_chunk_sem *deps;
 	unsigned num_deps;
 	int i, r;
-	struct drm_amdgpu_cs_chunk_sem *deps;
 
 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
+	for (i = 0; i < num_deps; ++i) {
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
+							  0, 0);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
 
+static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
+						     struct amdgpu_cs_chunk *chunk)
+{
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	unsigned num_deps;
+	int i, r;
+
+	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 	for (i = 0; i < num_deps; ++i) {
-		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
+							  syncobj_deps[i].handle,
+							  syncobj_deps[i].point,
+							  syncobj_deps[i].flags);
 		if (r)
 			return r;
 	}
+
 	return 0;
 }
 
 static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
 					     struct amdgpu_cs_chunk *chunk)
 {
+	struct drm_amdgpu_cs_chunk_sem *deps;
 	unsigned num_deps;
 	int i;
-	struct drm_amdgpu_cs_chunk_sem *deps;
+
 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
 
-	p->post_dep_syncobjs = kmalloc_array(num_deps,
-					     sizeof(struct drm_syncobj *),
-					     GFP_KERNEL);
-	p->num_post_dep_syncobjs = 0;
+	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+				     GFP_KERNEL);
+	p->num_post_deps = 0;
 
-	if (!p->post_dep_syncobjs)
+	if (!p->post_deps)
 		return -ENOMEM;
 
+
 	for (i = 0; i < num_deps; ++i) {
-		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
-		if (!p->post_dep_syncobjs[i])
+		p->post_deps[i].syncobj =
+			drm_syncobj_find(p->filp, deps[i].handle);
+		if (!p->post_deps[i].syncobj)
 			return -EINVAL;
-		p->num_post_dep_syncobjs++;
+		p->post_deps[i].chain = NULL;
+		p->post_deps[i].point = 0;
+		p->num_post_deps++;
 	}
+
+	return 0;
+}
+
+
+static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+						      struct amdgpu_cs_chunk
+						      *chunk)
+{
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	unsigned num_deps;
+	int i;
+
+	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+				     GFP_KERNEL);
+	p->num_post_deps = 0;
+
+	if (!p->post_deps)
+		return -ENOMEM;
+
+	for (i = 0; i < num_deps; ++i) {
+		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+		dep->chain = NULL;
+		if (syncobj_deps[i].point) {
+			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
+			if (!dep->chain)
+				return -ENOMEM;
+		}
+
+		dep->syncobj = drm_syncobj_find(p->filp,
+						syncobj_deps[i].handle);
+		if (!dep->syncobj) {
+			kfree(dep->chain);
+			return -EINVAL;
+		}
+		dep->point = syncobj_deps[i].point;
+		p->num_post_deps++;
+	}
+
 	return 0;
 }
 
@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 
 		chunk = &p->chunks[i];
 
-		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
-		    chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+		switch (chunk->chunk_id) {
+		case AMDGPU_CHUNK_ID_DEPENDENCIES:
+		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 			r = amdgpu_cs_process_fence_dep(p, chunk);
 			if (r)
 				return r;
-		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
 			if (r)
 				return r;
-		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
 			if (r)
 				return r;
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
+			if (r)
+				return r;
+			break;
+		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
+			if (r)
+				return r;
+			break;
 		}
 	}
 
@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 {
 	int i;
 
-	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
-		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+	for (i = 0; i < p->num_post_deps; ++i) {
+		if (p->post_deps[i].chain && p->post_deps[i].point) {
+			drm_syncobj_add_point(p->post_deps[i].syncobj,
+					      p->post_deps[i].chain,
+					      p->fence, p->post_deps[i].point);
+			p->post_deps[i].chain = NULL;
+		} else {
+			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
+						  p->fence);
+		}
+	}
 }
 
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4a53f6cfa034..e928760c4c1a 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -525,6 +525,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
 #define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
 #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES	0x07
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT    0x08
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL  0x09
 
 struct drm_amdgpu_cs_chunk {
 	__u32		chunk_id;
@@ -605,6 +607,12 @@ struct drm_amdgpu_cs_chunk_sem {
 	__u32 handle;
 };
 
+struct drm_amdgpu_cs_chunk_syncobj {
+       __u32 handle;
+       __u32 flags;
+       __u64 point;
+};
+
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 7/9] drm/syncobj: add transition ioctls between binary and timeline v2
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
                     ` (3 preceding siblings ...)
  2019-03-20  5:47   ` [PATCH 6/9] drm/amdgpu: add timeline support in amdgpu CS v3 Chunming Zhou
@ 2019-03-20  5:47   ` Chunming Zhou
  2019-03-20  5:47   ` [PATCH 8/9] drm/syncobj: add timeline signal ioctl for syncobj v4 Chunming Zhou
  2019-03-20  5:47   ` [PATCH 9/9] drm/amdgpu: update version for timeline syncobj support in amdgpu Chunming Zhou
  6 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	Tobias.Hector-5C7GfCeVMHo
  Cc: Chunming Zhou

We need to import/export timeline points.

v2: unify into one transfer ioctl

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
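A hypothetical userspace sketch of the transfer ioctl (assuming libdrm's
drmIoctl() and the uapi additions below; example_export_point_to_binary is
an illustrative name).  dst_point == 0 selects the timeline-to-binary
direction, matching drm_syncobj_transfer_ioctl() in this patch:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Hypothetical example: copy 'src_point' of a timeline syncobj into a
 * binary syncobj, waiting for the point to be submitted if necessary.
 */
static int example_export_point_to_binary(int drm_fd, uint32_t src_handle,
					  uint64_t src_point,
					  uint32_t dst_handle)
{
	struct drm_syncobj_transfer xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.src_handle = src_handle;
	xfer.dst_handle = dst_handle;
	xfer.src_point = src_point;
	xfer.dst_point = 0;	/* 0 == transfer into a binary syncobj */
	xfer.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer);
}
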
 drivers/gpu/drm/drm_internal.h |  2 +
 drivers/gpu/drm/drm_ioctl.c    |  2 +
 drivers/gpu/drm/drm_syncobj.c  | 74 ++++++++++++++++++++++++++++++++++
 include/uapi/drm/drm.h         | 10 +++++
 4 files changed, 88 insertions(+)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 695179bb88dc..dd11ae5f1eef 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -180,6 +180,8 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
 int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
+int drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_private);
 int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_private);
 int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 7a534c184e52..92b3b7b2fd81 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -686,6 +686,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 087fd4e7eaf3..ee2d66e047e7 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -679,6 +679,80 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 					&args->handle);
 }
 
+static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
+					    struct drm_syncobj_transfer *args)
+{
+	struct drm_syncobj *timeline_syncobj = NULL;
+	struct dma_fence *fence;
+	struct dma_fence_chain *chain;
+	int ret;
+
+	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+	if (!timeline_syncobj) {
+		return -ENOENT;
+	}
+	ret = drm_syncobj_find_fence(file_private, args->src_handle,
+				     args->src_point, args->flags,
+				     &fence);
+	if (ret)
+		goto err;
+	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+	if (!chain) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
+err1:
+	dma_fence_put(fence);
+err:
+	drm_syncobj_put(timeline_syncobj);
+
+	return ret;
+}
+
+static int
+drm_syncobj_transfer_to_binary(struct drm_file *file_private,
+			       struct drm_syncobj_transfer *args)
+{
+	struct drm_syncobj *binary_syncobj = NULL;
+	struct dma_fence *fence;
+	int ret;
+
+	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+	if (!binary_syncobj)
+		return -ENOENT;
+	ret = drm_syncobj_find_fence(file_private, args->src_handle,
+				     args->src_point, args->flags, &fence);
+	if (ret)
+		goto err;
+	drm_syncobj_replace_fence(binary_syncobj, fence);
+	dma_fence_put(fence);
+err:
+	drm_syncobj_put(binary_syncobj);
+
+	return ret;
+}
+int
+drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_private)
+{
+	struct drm_syncobj_transfer *args = data;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -ENODEV;
+
+	if (args->pad)
+		return -EINVAL;
+
+	if (args->dst_point)
+		ret = drm_syncobj_transfer_to_timeline(file_private, args);
+	else
+		ret = drm_syncobj_transfer_to_binary(file_private, args);
+
+	return ret;
+}
+
 static void syncobj_wait_fence_func(struct dma_fence *fence,
 				    struct dma_fence_cb *cb)
 {
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index c62be0840ba5..e8d0d6b51875 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -735,6 +735,15 @@ struct drm_syncobj_handle {
 	__u32 pad;
 };
 
+struct drm_syncobj_transfer {
+	__u32 src_handle;
+	__u32 dst_handle;
+	__u64 src_point;
+	__u64 dst_point;
+	__u32 flags;
+	__u32 pad;
+};
+
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
@@ -933,6 +942,7 @@ extern "C" {
 
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
 #define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
 
 /**
  * Device specific ioctls should only be in their respective headers
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 8/9] drm/syncobj: add timeline signal ioctl for syncobj v4
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
                     ` (4 preceding siblings ...)
  2019-03-20  5:47   ` [PATCH 7/9] drm/syncobj: add transition ioctls between binary and timeline v2 Chunming Zhou
@ 2019-03-20  5:47   ` Chunming Zhou
  2019-03-20  5:47   ` [PATCH 9/9] drm/amdgpu: update version for timeline syncobj support in amdgpu Chunming Zhou
  6 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	Tobias.Hector-5C7GfCeVMHo
  Cc: Chunming Zhou, Chris Wilson, Dave Airlie

v2: individually allocate chain array, since chain nodes are freed independently.
v3: all existing points must already be signaled before the CPU performs the
    signal operation, so add a check condition for that.
v4: remove the v3 change and add a check to prevent out-of-order points

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Cc: Tobias Hector <Tobias.Hector@amd.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
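A hypothetical userspace sketch of signaling a point from the CPU (assuming
libdrm's drmIoctl() and the DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL definition this
patch adds to the uapi header; example_signal_point_cpu is an illustrative
name):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Hypothetical example: signal point 'point' on one timeline syncobj. */
static int example_signal_point_cpu(int drm_fd, uint32_t handle,
				    uint64_t point)
{
	struct drm_syncobj_timeline_array args;

	memset(&args, 0, sizeof(args));
	args.handles = (uint64_t)(uintptr_t)&handle;
	args.points = (uint64_t)(uintptr_t)&point;
	args.count_handles = 1;

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}
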
 drivers/gpu/drm/drm_internal.h |  2 +
 drivers/gpu/drm/drm_ioctl.c    |  2 +
 drivers/gpu/drm/drm_syncobj.c  | 93 ++++++++++++++++++++++++++++++++++
 include/uapi/drm/drm.h         |  1 +
 4 files changed, 98 insertions(+)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index dd11ae5f1eef..d9a483a5fce0 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -190,6 +190,8 @@ int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_private);
+int drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_private);
 int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 92b3b7b2fd81..d337f161909c 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -696,6 +696,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index ee2d66e047e7..a3702c75fd1e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1183,6 +1183,99 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 	return ret;
 }
 
+int
+drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_private)
+{
+	struct drm_syncobj_timeline_array *args = data;
+	struct drm_syncobj **syncobjs;
+	struct dma_fence_chain **chains;
+	uint64_t *points;
+	uint32_t i, j, timeline_count = 0;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -EOPNOTSUPP;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	if (args->count_handles == 0)
+		return -EINVAL;
+
+	ret = drm_syncobj_array_find(file_private,
+				     u64_to_user_ptr(args->handles),
+				     args->count_handles,
+				     &syncobjs);
+	if (ret < 0)
+		return ret;
+
+	points = kmalloc_array(args->count_handles, sizeof(*points),
+			       GFP_KERNEL);
+	if (!points) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	if (!u64_to_user_ptr(args->points)) {
+		memset(points, 0, args->count_handles * sizeof(uint64_t));
+	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
+				  sizeof(uint64_t) * args->count_handles)) {
+		ret = -EFAULT;
+		goto err_points;
+	}
+
+	for (i = 0; i < args->count_handles; i++) {
+		struct dma_fence_chain *chain;
+		struct dma_fence *fence;
+
+		fence = drm_syncobj_fence_get(syncobjs[i]);
+		chain = to_dma_fence_chain(fence);
+		if (chain) {
+			/* reject signal points that would go backwards on the timeline */
+			if (points[i] <= fence->seqno) {
+				DRM_ERROR("signal point cannot be out-of-order!\n");
+				dma_fence_put(fence);
+				ret = -EPERM;
+				goto err_points;
+			}
+		}
+		/* drop the reference taken by drm_syncobj_fence_get() */
+		dma_fence_put(fence);
+		if (points[i])
+			timeline_count++;
+	}
+
+	chains = kmalloc_array(timeline_count, sizeof(void *), GFP_KERNEL);
+	if (!chains) {
+		ret = -ENOMEM;
+		goto err_points;
+	}
+	for (i = 0; i < timeline_count; i++) {
+		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+		if (!chains[i]) {
+			for (j = 0; j < i; j++)
+				kfree(chains[j]);
+			ret = -ENOMEM;
+			goto err_chains;
+		}
+	}
+
+	for (i = 0, j = 0; i < args->count_handles; i++) {
+		if (points[i]) {
+			struct dma_fence *fence = dma_fence_get_stub();
+
+			drm_syncobj_add_point(syncobjs[i], chains[j++],
+					      fence, points[i]);
+			dma_fence_put(fence);
+		} else
+			drm_syncobj_assign_null_handle(syncobjs[i]);
+	}
+err_chains:
+	kfree(chains);
+err_points:
+	kfree(points);
+out:
+	drm_syncobj_array_free(syncobjs, args->count_handles);
+
+	return ret;
+}
+
 int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private)
 {
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index e8d0d6b51875..236b01a1fabf 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -943,6 +943,7 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
 #define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
 #define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
 
 /**
  * Device specific ioctls should only be in their respective headers
-- 
2.17.1
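
A minimal userspace sketch of the CPU signal path, assuming a libdrm-style
drmIoctl() wrapper and the drm_syncobj_timeline_array layout this series uses
(handles, points, count_handles, pad); the helper name is illustrative only:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm.h>	/* must be the patched header with DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL */

/* Signal the given timeline point on each syncobj from the CPU.  A point of
 * 0 makes the ioctl above fall back to drm_syncobj_assign_null_handle(),
 * i.e. a plain binary signal.
 */
static int syncobj_timeline_signal(int fd, const uint32_t *handles,
				   const uint64_t *points, uint32_t count)
{
	struct drm_syncobj_timeline_array args;

	memset(&args, 0, sizeof(args));
	args.handles = (uintptr_t)handles;
	args.points = (uintptr_t)points;
	args.count_handles = count;

	return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}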

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 9/9] drm/amdgpu: update version for timeline syncobj support in amdgpu
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
                     ` (5 preceding siblings ...)
  2019-03-20  5:47   ` [PATCH 8/9] drm/syncobj: add timeline signal ioctl for syncobj v4 Chunming Zhou
@ 2019-03-20  5:47   ` Chunming Zhou
  6 siblings, 0 replies; 16+ messages in thread
From: Chunming Zhou @ 2019-03-20  5:47 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	Tobias.Hector-5C7GfCeVMHo
  Cc: Chunming Zhou

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8a0732088640..4d8db87048d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -74,9 +74,10 @@
  * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
  * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
  * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
+ * - 3.31.0 - Add syncobj timeline support to AMDGPU_CS.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	30
+#define KMS_DRIVER_MINOR	31
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
-- 
2.17.1
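
Userspace would typically gate on this bump before emitting timeline syncobj
chunks in AMDGPU_CS; a sketch using libdrm's drmGetVersion() (the helper name
is illustrative only):

#include <stdbool.h>
#include <xf86drm.h>

/* True if the amdgpu KMS interface advertises timeline syncobj support,
 * i.e. version 3.31 or newer as documented above.
 */
static bool amdgpu_cs_has_timeline_syncobj(int fd)
{
	drmVersionPtr ver = drmGetVersion(fd);
	bool ok;

	if (!ver)
		return false;

	ok = ver->version_major > 3 ||
	     (ver->version_major == 3 && ver->version_minor >= 31);

	drmFreeVersion(ver);
	return ok;
}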

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
  2019-03-20  5:47 [PATCH 1/9] dma-buf: add new dma_fence_chain container v6 Chunming Zhou
  2019-03-20  5:47 ` [PATCH 5/9] drm/syncobj: use the timeline point in drm_syncobj_find_fence v4 Chunming Zhou
       [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
@ 2019-03-21  0:24 ` kbuild test robot
       [not found]   ` <201903210813.K5uuPgyD%lkp-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
  2 siblings, 1 reply; 16+ messages in thread
From: kbuild test robot @ 2019-03-21  0:24 UTC (permalink / raw)
  To: Chunming Zhou
  Cc: Christian König, dri-devel, Tobias.Hector, amd-gfx, jason,
	Christian.Koenig, kbuild-all

Hi Chunming,

I love your patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.1-rc1 next-20190320]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Chunming-Zhou/dma-buf-add-new-dma_fence_chain-container-v6/20190320-223607
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'


sparse warnings: (new ones prefixed by >>)

>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in initializer (different address spaces) @@    expected struct dma_fence [noderef] <asn:4>*__old @@    got  dma_fence [noderef] <asn:4>*__old @@
   drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct dma_fence [noderef] <asn:4>*__old
   drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence *[assigned] prev
>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in initializer (different address spaces) @@    expected struct dma_fence [noderef] <asn:4>*__new @@    got  dma_fence [noderef] <asn:4>*__new @@
   drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct dma_fence [noderef] <asn:4>*__new
   drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence *[assigned] replacement
>> drivers/dma-buf/dma-fence-chain.c:73:21: sparse: incorrect type in assignment (different address spaces) @@    expected struct dma_fence *tmp @@    got struct dma_fence [noderef] <struct dma_fence *tmp @@
   drivers/dma-buf/dma-fence-chain.c:73:21:    expected struct dma_fence *tmp
   drivers/dma-buf/dma-fence-chain.c:73:21:    got struct dma_fence [noderef] <asn:4>*[assigned] __ret
>> drivers/dma-buf/dma-fence-chain.c:190:28: sparse: incorrect type in argument 1 (different address spaces) @@    expected struct dma_fence *fence @@    got struct dma_fence struct dma_fence *fence @@
   drivers/dma-buf/dma-fence-chain.c:190:28:    expected struct dma_fence *fence
   drivers/dma-buf/dma-fence-chain.c:190:28:    got struct dma_fence [noderef] <asn:4>*prev
>> drivers/dma-buf/dma-fence-chain.c:222:21: sparse: incorrect type in assignment (different address spaces) @@    expected struct dma_fence [noderef] <asn:4>*prev @@    got [noderef] <asn:4>*prev @@
   drivers/dma-buf/dma-fence-chain.c:222:21:    expected struct dma_fence [noderef] <asn:4>*prev
   drivers/dma-buf/dma-fence-chain.c:222:21:    got struct dma_fence *prev
   drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression using sizeof(void)
   drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression using sizeof(void)

vim +73 drivers/dma-buf/dma-fence-chain.c

    38	
    39	/**
    40	 * dma_fence_chain_walk - chain walking function
    41	 * @fence: current chain node
    42	 *
    43	 * Walk the chain to the next node. Returns the next fence or NULL if we are at
    44	 * the end of the chain. Garbage collects chain nodes which are already
    45	 * signaled.
    46	 */
    47	struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
    48	{
    49		struct dma_fence_chain *chain, *prev_chain;
    50		struct dma_fence *prev, *replacement, *tmp;
    51	
    52		chain = to_dma_fence_chain(fence);
    53		if (!chain) {
    54			dma_fence_put(fence);
    55			return NULL;
    56		}
    57	
    58		while ((prev = dma_fence_chain_get_prev(chain))) {
    59	
    60			prev_chain = to_dma_fence_chain(prev);
    61			if (prev_chain) {
    62				if (!dma_fence_is_signaled(prev_chain->fence))
    63					break;
    64	
    65				replacement = dma_fence_chain_get_prev(prev_chain);
    66			} else {
    67				if (!dma_fence_is_signaled(prev))
    68					break;
    69	
    70				replacement = NULL;
    71			}
    72	
  > 73			tmp = cmpxchg(&chain->prev, prev, replacement);
    74			if (tmp == prev)
    75				dma_fence_put(tmp);
    76			else
    77				dma_fence_put(replacement);
    78			dma_fence_put(prev);
    79		}
    80	
    81		dma_fence_put(fence);
    82		return prev;
    83	}
    84	EXPORT_SYMBOL(dma_fence_chain_walk);
    85	
    86	/**
    87	 * dma_fence_chain_find_seqno - find fence chain node by seqno
    88	 * @pfence: pointer to the chain node where to start
    89	 * @seqno: the sequence number to search for
    90	 *
    91	 * Advance the fence pointer to the chain node which will signal this sequence
    92	 * number. If no sequence number is provided then this is a no-op.
    93	 *
    94	 * Returns EINVAL if the fence is not a chain node or the sequence number has
    95	 * not yet advanced far enough.
    96	 */
    97	int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
    98	{
    99		struct dma_fence_chain *chain;
   100	
   101		if (!seqno)
   102			return 0;
   103	
   104		chain = to_dma_fence_chain(*pfence);
   105		if (!chain || chain->base.seqno < seqno)
   106			return -EINVAL;
   107	
   108		dma_fence_chain_for_each(*pfence, &chain->base) {
   109			if ((*pfence)->context != chain->base.context ||
   110			    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
   111				break;
   112		}
   113		dma_fence_put(&chain->base);
   114	
   115		return 0;
   116	}
   117	EXPORT_SYMBOL(dma_fence_chain_find_seqno);
   118	
   119	static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
   120	{
   121	        return "dma_fence_chain";
   122	}
   123	
   124	static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
   125	{
   126	        return "unbound";
   127	}
   128	
   129	static void dma_fence_chain_irq_work(struct irq_work *work)
   130	{
   131		struct dma_fence_chain *chain;
   132	
   133		chain = container_of(work, typeof(*chain), work);
   134	
   135		/* Try to rearm the callback */
   136		if (!dma_fence_chain_enable_signaling(&chain->base))
   137			/* Ok, we are done. No more unsignaled fences left */
   138			dma_fence_signal(&chain->base);
   139		dma_fence_put(&chain->base);
   140	}
   141	
   142	static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
   143	{
   144		struct dma_fence_chain *chain;
   145	
   146		chain = container_of(cb, typeof(*chain), cb);
   147		irq_work_queue(&chain->work);
   148		dma_fence_put(f);
   149	}
   150	
   151	static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
   152	{
   153		struct dma_fence_chain *head = to_dma_fence_chain(fence);
   154	
   155		dma_fence_get(&head->base);
   156		dma_fence_chain_for_each(fence, &head->base) {
   157			struct dma_fence_chain *chain = to_dma_fence_chain(fence);
   158			struct dma_fence *f = chain ? chain->fence : fence;
   159	
   160			dma_fence_get(f);
   161			if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
   162				dma_fence_put(fence);
   163				return true;
   164			}
   165			dma_fence_put(f);
   166		}
   167		dma_fence_put(&head->base);
   168		return false;
   169	}
   170	
   171	static bool dma_fence_chain_signaled(struct dma_fence *fence)
   172	{
   173		dma_fence_chain_for_each(fence, fence) {
   174			struct dma_fence_chain *chain = to_dma_fence_chain(fence);
   175			struct dma_fence *f = chain ? chain->fence : fence;
   176	
   177			if (!dma_fence_is_signaled(f)) {
   178				dma_fence_put(fence);
   179				return false;
   180			}
   181		}
   182	
   183		return true;
   184	}
   185	
   186	static void dma_fence_chain_release(struct dma_fence *fence)
   187	{
   188		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
   189	
 > 190		dma_fence_put(chain->prev);
   191		dma_fence_put(chain->fence);
   192		dma_fence_free(fence);
   193	}
   194	
   195	const struct dma_fence_ops dma_fence_chain_ops = {
   196		.get_driver_name = dma_fence_chain_get_driver_name,
   197		.get_timeline_name = dma_fence_chain_get_timeline_name,
   198		.enable_signaling = dma_fence_chain_enable_signaling,
   199		.signaled = dma_fence_chain_signaled,
   200		.release = dma_fence_chain_release,
   201	};
   202	EXPORT_SYMBOL(dma_fence_chain_ops);
   203	
   204	/**
   205	 * dma_fence_chain_init - initialize a fence chain
   206	 * @chain: the chain node to initialize
   207	 * @prev: the previous fence
   208	 * @fence: the current fence
   209	 *
   210	 * Initialize a new chain node and either start a new chain or add the node to
   211	 * the existing chain of the previous fence.
   212	 */
   213	void dma_fence_chain_init(struct dma_fence_chain *chain,
   214				  struct dma_fence *prev,
   215				  struct dma_fence *fence,
   216				  uint64_t seqno)
   217	{
   218		struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
   219		uint64_t context;
   220	
   221		spin_lock_init(&chain->lock);
 > 222		chain->prev = prev;

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
       [not found]   ` <201903210813.K5uuPgyD%lkp-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
@ 2019-03-21  6:34     ` zhoucm1
  2019-03-21 11:30       ` Christian König
  0 siblings, 1 reply; 16+ messages in thread
From: zhoucm1 @ 2019-03-21  6:34 UTC (permalink / raw)
  To: kbuild test robot, Chunming Zhou
  Cc: Christian König, dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w,
	Tobias.Hector-5C7GfCeVMHo,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jason-fQELhIk9awXprZlt/sZkLg, Christian.Koenig-5C7GfCeVMHo,
	kbuild-all-JC7UmRfGjtg

Hi Lionel and Christian,

Below is the robot report for chain->prev, which was annotated with __rcu as
you suggested.

How do we fix this line: "tmp = cmpxchg(&chain->prev, prev, replacement);"?
I checked the kernel headers; there seems to be no cmpxchg variant for
RCU-annotated pointers.

Any suggestion for fixing this robot report?

Thanks,
-David

On 2019年03月21日 08:24, kbuild test robot wrote:
> Hi Chunming,
>
> I love your patch! Perhaps something to improve:
>
> [auto build test WARNING on linus/master]
> [also build test WARNING on v5.1-rc1 next-20190320]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
>
> url:    https://github.com/0day-ci/linux/commits/Chunming-Zhou/dma-buf-add-new-dma_fence_chain-container-v6/20190320-223607
> reproduce:
>          # apt-get install sparse
>          make ARCH=x86_64 allmodconfig
>          make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'
>
>
> sparse warnings: (new ones prefixed by >>)
>
>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in initializer (different address spaces) @@    expected struct dma_fence [noderef] <asn:4>*__old @@    got  dma_fence [noderef] <asn:4>*__old @@
>     drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct dma_fence [noderef] <asn:4>*__old
>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence *[assigned] prev
>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in initializer (different address spaces) @@    expected struct dma_fence [noderef] <asn:4>*__new @@    got  dma_fence [noderef] <asn:4>*__new @@
>     drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct dma_fence [noderef] <asn:4>*__new
>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence *[assigned] replacement
>>> drivers/dma-buf/dma-fence-chain.c:73:21: sparse: incorrect type in assignment (different address spaces) @@    expected struct dma_fence *tmp @@    got struct dma_fence [noderef] <struct dma_fence *tmp @@
>     drivers/dma-buf/dma-fence-chain.c:73:21:    expected struct dma_fence *tmp
>     drivers/dma-buf/dma-fence-chain.c:73:21:    got struct dma_fence [noderef] <asn:4>*[assigned] __ret
>>> drivers/dma-buf/dma-fence-chain.c:190:28: sparse: incorrect type in argument 1 (different address spaces) @@    expected struct dma_fence *fence @@    got struct dma_fence struct dma_fence *fence @@
>     drivers/dma-buf/dma-fence-chain.c:190:28:    expected struct dma_fence *fence
>     drivers/dma-buf/dma-fence-chain.c:190:28:    got struct dma_fence [noderef] <asn:4>*prev
>>> drivers/dma-buf/dma-fence-chain.c:222:21: sparse: incorrect type in assignment (different address spaces) @@    expected struct dma_fence [noderef] <asn:4>*prev @@    got [noderef] <asn:4>*prev @@
>     drivers/dma-buf/dma-fence-chain.c:222:21:    expected struct dma_fence [noderef] <asn:4>*prev
>     drivers/dma-buf/dma-fence-chain.c:222:21:    got struct dma_fence *prev
>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression using sizeof(void)
>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression using sizeof(void)
>
> vim +73 drivers/dma-buf/dma-fence-chain.c
>
>      38	
>      39	/**
>      40	 * dma_fence_chain_walk - chain walking function
>      41	 * @fence: current chain node
>      42	 *
>      43	 * Walk the chain to the next node. Returns the next fence or NULL if we are at
>      44	 * the end of the chain. Garbage collects chain nodes which are already
>      45	 * signaled.
>      46	 */
>      47	struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
>      48	{
>      49		struct dma_fence_chain *chain, *prev_chain;
>      50		struct dma_fence *prev, *replacement, *tmp;
>      51	
>      52		chain = to_dma_fence_chain(fence);
>      53		if (!chain) {
>      54			dma_fence_put(fence);
>      55			return NULL;
>      56		}
>      57	
>      58		while ((prev = dma_fence_chain_get_prev(chain))) {
>      59	
>      60			prev_chain = to_dma_fence_chain(prev);
>      61			if (prev_chain) {
>      62				if (!dma_fence_is_signaled(prev_chain->fence))
>      63					break;
>      64	
>      65				replacement = dma_fence_chain_get_prev(prev_chain);
>      66			} else {
>      67				if (!dma_fence_is_signaled(prev))
>      68					break;
>      69	
>      70				replacement = NULL;
>      71			}
>      72	
>    > 73			tmp = cmpxchg(&chain->prev, prev, replacement);
>      74			if (tmp == prev)
>      75				dma_fence_put(tmp);
>      76			else
>      77				dma_fence_put(replacement);
>      78			dma_fence_put(prev);
>      79		}
>      80	
>      81		dma_fence_put(fence);
>      82		return prev;
>      83	}
>      84	EXPORT_SYMBOL(dma_fence_chain_walk);
>      85	
>      86	/**
>      87	 * dma_fence_chain_find_seqno - find fence chain node by seqno
>      88	 * @pfence: pointer to the chain node where to start
>      89	 * @seqno: the sequence number to search for
>      90	 *
>      91	 * Advance the fence pointer to the chain node which will signal this sequence
>      92	 * number. If no sequence number is provided then this is a no-op.
>      93	 *
>      94	 * Returns EINVAL if the fence is not a chain node or the sequence number has
>      95	 * not yet advanced far enough.
>      96	 */
>      97	int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
>      98	{
>      99		struct dma_fence_chain *chain;
>     100	
>     101		if (!seqno)
>     102			return 0;
>     103	
>     104		chain = to_dma_fence_chain(*pfence);
>     105		if (!chain || chain->base.seqno < seqno)
>     106			return -EINVAL;
>     107	
>     108		dma_fence_chain_for_each(*pfence, &chain->base) {
>     109			if ((*pfence)->context != chain->base.context ||
>     110			    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
>     111				break;
>     112		}
>     113		dma_fence_put(&chain->base);
>     114	
>     115		return 0;
>     116	}
>     117	EXPORT_SYMBOL(dma_fence_chain_find_seqno);
>     118	
>     119	static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
>     120	{
>     121	        return "dma_fence_chain";
>     122	}
>     123	
>     124	static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
>     125	{
>     126	        return "unbound";
>     127	}
>     128	
>     129	static void dma_fence_chain_irq_work(struct irq_work *work)
>     130	{
>     131		struct dma_fence_chain *chain;
>     132	
>     133		chain = container_of(work, typeof(*chain), work);
>     134	
>     135		/* Try to rearm the callback */
>     136		if (!dma_fence_chain_enable_signaling(&chain->base))
>     137			/* Ok, we are done. No more unsignaled fences left */
>     138			dma_fence_signal(&chain->base);
>     139		dma_fence_put(&chain->base);
>     140	}
>     141	
>     142	static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
>     143	{
>     144		struct dma_fence_chain *chain;
>     145	
>     146		chain = container_of(cb, typeof(*chain), cb);
>     147		irq_work_queue(&chain->work);
>     148		dma_fence_put(f);
>     149	}
>     150	
>     151	static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
>     152	{
>     153		struct dma_fence_chain *head = to_dma_fence_chain(fence);
>     154	
>     155		dma_fence_get(&head->base);
>     156		dma_fence_chain_for_each(fence, &head->base) {
>     157			struct dma_fence_chain *chain = to_dma_fence_chain(fence);
>     158			struct dma_fence *f = chain ? chain->fence : fence;
>     159	
>     160			dma_fence_get(f);
>     161			if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
>     162				dma_fence_put(fence);
>     163				return true;
>     164			}
>     165			dma_fence_put(f);
>     166		}
>     167		dma_fence_put(&head->base);
>     168		return false;
>     169	}
>     170	
>     171	static bool dma_fence_chain_signaled(struct dma_fence *fence)
>     172	{
>     173		dma_fence_chain_for_each(fence, fence) {
>     174			struct dma_fence_chain *chain = to_dma_fence_chain(fence);
>     175			struct dma_fence *f = chain ? chain->fence : fence;
>     176	
>     177			if (!dma_fence_is_signaled(f)) {
>     178				dma_fence_put(fence);
>     179				return false;
>     180			}
>     181		}
>     182	
>     183		return true;
>     184	}
>     185	
>     186	static void dma_fence_chain_release(struct dma_fence *fence)
>     187	{
>     188		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
>     189	
>   > 190		dma_fence_put(chain->prev);
>     191		dma_fence_put(chain->fence);
>     192		dma_fence_free(fence);
>     193	}
>     194	
>     195	const struct dma_fence_ops dma_fence_chain_ops = {
>     196		.get_driver_name = dma_fence_chain_get_driver_name,
>     197		.get_timeline_name = dma_fence_chain_get_timeline_name,
>     198		.enable_signaling = dma_fence_chain_enable_signaling,
>     199		.signaled = dma_fence_chain_signaled,
>     200		.release = dma_fence_chain_release,
>     201	};
>     202	EXPORT_SYMBOL(dma_fence_chain_ops);
>     203	
>     204	/**
>     205	 * dma_fence_chain_init - initialize a fence chain
>     206	 * @chain: the chain node to initialize
>     207	 * @prev: the previous fence
>     208	 * @fence: the current fence
>     209	 *
>     210	 * Initialize a new chain node and either start a new chain or add the node to
>     211	 * the existing chain of the previous fence.
>     212	 */
>     213	void dma_fence_chain_init(struct dma_fence_chain *chain,
>     214				  struct dma_fence *prev,
>     215				  struct dma_fence *fence,
>     216				  uint64_t seqno)
>     217	{
>     218		struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
>     219		uint64_t context;
>     220	
>     221		spin_lock_init(&chain->lock);
>   > 222		chain->prev = prev;
>
> ---
> 0-DAY kernel test infrastructure                Open Source Technology Center
> https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
  2019-03-21  6:34     ` zhoucm1
@ 2019-03-21 11:30       ` Christian König
  2019-03-21 14:13         ` Zhou, David(ChunMing)
  0 siblings, 1 reply; 16+ messages in thread
From: Christian König @ 2019-03-21 11:30 UTC (permalink / raw)
  To: zhoucm1, kbuild test robot, Chunming Zhou
  Cc: dri-devel, Tobias.Hector, amd-gfx, jason, Christian.Koenig, kbuild-all

Hi David,

For the cmpxchg() case I don't know off hand either. It looks like nobody has
used cmpxchg() with RCU-protected structures so far.

The other cases should be replaced by RCU_INIT_POINTER() or 
rcu_dereference_protected(.., true);

Regards,
Christian.
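
A minimal sketch of what those replacements could look like for lines 190 and
222 from the report, assuming 'struct dma_fence __rcu *prev' in struct
dma_fence_chain (illustration only, not a tested patch):

#include <linux/dma-fence-chain.h>
#include <linux/rcupdate.h>

/* release path: the last reference is gone, so there are no concurrent
 * readers; rcu_dereference_protected(.., true) documents that and keeps
 * sparse quiet
 */
static void dma_fence_chain_release_sketch(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);

	dma_fence_put(rcu_dereference_protected(chain->prev, true));
	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}

/* init path: the new chain node is not visible to any reader yet, so the
 * pointer can be published without a barrier
 */
static void dma_fence_chain_set_prev_sketch(struct dma_fence_chain *chain,
					    struct dma_fence *prev)
{
	RCU_INIT_POINTER(chain->prev, prev);
}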

Am 21.03.19 um 07:34 schrieb zhoucm1:
> Hi Lionel and Christian,
>
> Below is robot report for chain->prev, which was added __rcu as you 
> suggested.
>
> How to fix this line "tmp = cmpxchg(&chain->prev, prev, replacement); "?
> I checked kernel header file, seems it has no cmpxchg for rcu.
>
> Any suggestion to fix this robot report?
>
> Thanks,
> -David
>
> On 2019年03月21日 08:24, kbuild test robot wrote:
>> Hi Chunming,
>>
>> I love your patch! Perhaps something to improve:
>>
>> [auto build test WARNING on linus/master]
>> [also build test WARNING on v5.1-rc1 next-20190320]
>> [if your patch is applied to the wrong git tree, please drop us a 
>> note to help improve the system]
>>
>> url: 
>> https://github.com/0day-ci/linux/commits/Chunming-Zhou/dma-buf-add-new-dma_fence_chain-container-v6/20190320-223607
>> reproduce:
>>          # apt-get install sparse
>>          make ARCH=x86_64 allmodconfig
>>          make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'
>>
>>
>> sparse warnings: (new ones prefixed by >>)
>>
>>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in 
>>>> initializer (different address spaces) @@    expected struct 
>>>> dma_fence [noderef] <asn:4>*__old @@    got  dma_fence [noderef] 
>>>> <asn:4>*__old @@
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct 
>> dma_fence [noderef] <asn:4>*__old
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence 
>> *[assigned] prev
>>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in 
>>>> initializer (different address spaces) @@    expected struct 
>>>> dma_fence [noderef] <asn:4>*__new @@    got  dma_fence [noderef] 
>>>> <asn:4>*__new @@
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct 
>> dma_fence [noderef] <asn:4>*__new
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence 
>> *[assigned] replacement
>>>> drivers/dma-buf/dma-fence-chain.c:73:21: sparse: incorrect type in 
>>>> assignment (different address spaces) @@    expected struct 
>>>> dma_fence *tmp @@    got struct dma_fence [noderef] <struct 
>>>> dma_fence *tmp @@
>>     drivers/dma-buf/dma-fence-chain.c:73:21:    expected struct 
>> dma_fence *tmp
>>     drivers/dma-buf/dma-fence-chain.c:73:21:    got struct dma_fence 
>> [noderef] <asn:4>*[assigned] __ret
>>>> drivers/dma-buf/dma-fence-chain.c:190:28: sparse: incorrect type in 
>>>> argument 1 (different address spaces) @@    expected struct 
>>>> dma_fence *fence @@    got struct dma_fence struct dma_fence *fence @@
>>     drivers/dma-buf/dma-fence-chain.c:190:28:    expected struct 
>> dma_fence *fence
>>     drivers/dma-buf/dma-fence-chain.c:190:28:    got struct dma_fence 
>> [noderef] <asn:4>*prev
>>>> drivers/dma-buf/dma-fence-chain.c:222:21: sparse: incorrect type in 
>>>> assignment (different address spaces) @@    expected struct 
>>>> dma_fence [noderef] <asn:4>*prev @@    got [noderef] <asn:4>*prev @@
>>     drivers/dma-buf/dma-fence-chain.c:222:21:    expected struct 
>> dma_fence [noderef] <asn:4>*prev
>>     drivers/dma-buf/dma-fence-chain.c:222:21:    got struct dma_fence 
>> *prev
>>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression 
>> using sizeof(void)
>>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression 
>> using sizeof(void)
>>
>> vim +73 drivers/dma-buf/dma-fence-chain.c
>>
>>      38
>>      39    /**
>>      40     * dma_fence_chain_walk - chain walking function
>>      41     * @fence: current chain node
>>      42     *
>>      43     * Walk the chain to the next node. Returns the next fence 
>> or NULL if we are at
>>      44     * the end of the chain. Garbage collects chain nodes 
>> which are already
>>      45     * signaled.
>>      46     */
>>      47    struct dma_fence *dma_fence_chain_walk(struct dma_fence 
>> *fence)
>>      48    {
>>      49        struct dma_fence_chain *chain, *prev_chain;
>>      50        struct dma_fence *prev, *replacement, *tmp;
>>      51
>>      52        chain = to_dma_fence_chain(fence);
>>      53        if (!chain) {
>>      54            dma_fence_put(fence);
>>      55            return NULL;
>>      56        }
>>      57
>>      58        while ((prev = dma_fence_chain_get_prev(chain))) {
>>      59
>>      60            prev_chain = to_dma_fence_chain(prev);
>>      61            if (prev_chain) {
>>      62                if (!dma_fence_is_signaled(prev_chain->fence))
>>      63                    break;
>>      64
>>      65                replacement = 
>> dma_fence_chain_get_prev(prev_chain);
>>      66            } else {
>>      67                if (!dma_fence_is_signaled(prev))
>>      68                    break;
>>      69
>>      70                replacement = NULL;
>>      71            }
>>      72
>>    > 73            tmp = cmpxchg(&chain->prev, prev, replacement);
>>      74            if (tmp == prev)
>>      75                dma_fence_put(tmp);
>>      76            else
>>      77                dma_fence_put(replacement);
>>      78            dma_fence_put(prev);
>>      79        }
>>      80
>>      81        dma_fence_put(fence);
>>      82        return prev;
>>      83    }
>>      84    EXPORT_SYMBOL(dma_fence_chain_walk);
>>      85
>>      86    /**
>>      87     * dma_fence_chain_find_seqno - find fence chain node by 
>> seqno
>>      88     * @pfence: pointer to the chain node where to start
>>      89     * @seqno: the sequence number to search for
>>      90     *
>>      91     * Advance the fence pointer to the chain node which will 
>> signal this sequence
>>      92     * number. If no sequence number is provided then this is 
>> a no-op.
>>      93     *
>>      94     * Returns EINVAL if the fence is not a chain node or the 
>> sequence number has
>>      95     * not yet advanced far enough.
>>      96     */
>>      97    int dma_fence_chain_find_seqno(struct dma_fence **pfence, 
>> uint64_t seqno)
>>      98    {
>>      99        struct dma_fence_chain *chain;
>>     100
>>     101        if (!seqno)
>>     102            return 0;
>>     103
>>     104        chain = to_dma_fence_chain(*pfence);
>>     105        if (!chain || chain->base.seqno < seqno)
>>     106            return -EINVAL;
>>     107
>>     108        dma_fence_chain_for_each(*pfence, &chain->base) {
>>     109            if ((*pfence)->context != chain->base.context ||
>>     110 to_dma_fence_chain(*pfence)->prev_seqno < seqno)
>>     111                break;
>>     112        }
>>     113        dma_fence_put(&chain->base);
>>     114
>>     115        return 0;
>>     116    }
>>     117    EXPORT_SYMBOL(dma_fence_chain_find_seqno);
>>     118
>>     119    static const char *dma_fence_chain_get_driver_name(struct 
>> dma_fence *fence)
>>     120    {
>>     121            return "dma_fence_chain";
>>     122    }
>>     123
>>     124    static const char 
>> *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
>>     125    {
>>     126            return "unbound";
>>     127    }
>>     128
>>     129    static void dma_fence_chain_irq_work(struct irq_work *work)
>>     130    {
>>     131        struct dma_fence_chain *chain;
>>     132
>>     133        chain = container_of(work, typeof(*chain), work);
>>     134
>>     135        /* Try to rearm the callback */
>>     136        if (!dma_fence_chain_enable_signaling(&chain->base))
>>     137            /* Ok, we are done. No more unsignaled fences left */
>>     138            dma_fence_signal(&chain->base);
>>     139        dma_fence_put(&chain->base);
>>     140    }
>>     141
>>     142    static void dma_fence_chain_cb(struct dma_fence *f, struct 
>> dma_fence_cb *cb)
>>     143    {
>>     144        struct dma_fence_chain *chain;
>>     145
>>     146        chain = container_of(cb, typeof(*chain), cb);
>>     147        irq_work_queue(&chain->work);
>>     148        dma_fence_put(f);
>>     149    }
>>     150
>>     151    static bool dma_fence_chain_enable_signaling(struct 
>> dma_fence *fence)
>>     152    {
>>     153        struct dma_fence_chain *head = to_dma_fence_chain(fence);
>>     154
>>     155        dma_fence_get(&head->base);
>>     156        dma_fence_chain_for_each(fence, &head->base) {
>>     157            struct dma_fence_chain *chain = 
>> to_dma_fence_chain(fence);
>>     158            struct dma_fence *f = chain ? chain->fence : fence;
>>     159
>>     160            dma_fence_get(f);
>>     161            if (!dma_fence_add_callback(f, &head->cb, 
>> dma_fence_chain_cb)) {
>>     162                dma_fence_put(fence);
>>     163                return true;
>>     164            }
>>     165            dma_fence_put(f);
>>     166        }
>>     167        dma_fence_put(&head->base);
>>     168        return false;
>>     169    }
>>     170
>>     171    static bool dma_fence_chain_signaled(struct dma_fence *fence)
>>     172    {
>>     173        dma_fence_chain_for_each(fence, fence) {
>>     174            struct dma_fence_chain *chain = 
>> to_dma_fence_chain(fence);
>>     175            struct dma_fence *f = chain ? chain->fence : fence;
>>     176
>>     177            if (!dma_fence_is_signaled(f)) {
>>     178                dma_fence_put(fence);
>>     179                return false;
>>     180            }
>>     181        }
>>     182
>>     183        return true;
>>     184    }
>>     185
>>     186    static void dma_fence_chain_release(struct dma_fence *fence)
>>     187    {
>>     188        struct dma_fence_chain *chain = 
>> to_dma_fence_chain(fence);
>>     189
>>   > 190        dma_fence_put(chain->prev);
>>     191        dma_fence_put(chain->fence);
>>     192        dma_fence_free(fence);
>>     193    }
>>     194
>>     195    const struct dma_fence_ops dma_fence_chain_ops = {
>>     196        .get_driver_name = dma_fence_chain_get_driver_name,
>>     197        .get_timeline_name = dma_fence_chain_get_timeline_name,
>>     198        .enable_signaling = dma_fence_chain_enable_signaling,
>>     199        .signaled = dma_fence_chain_signaled,
>>     200        .release = dma_fence_chain_release,
>>     201    };
>>     202    EXPORT_SYMBOL(dma_fence_chain_ops);
>>     203
>>     204    /**
>>     205     * dma_fence_chain_init - initialize a fence chain
>>     206     * @chain: the chain node to initialize
>>     207     * @prev: the previous fence
>>     208     * @fence: the current fence
>>     209     *
>>     210     * Initialize a new chain node and either start a new 
>> chain or add the node to
>>     211     * the existing chain of the previous fence.
>>     212     */
>>     213    void dma_fence_chain_init(struct dma_fence_chain *chain,
>>     214                  struct dma_fence *prev,
>>     215                  struct dma_fence *fence,
>>     216                  uint64_t seqno)
>>     217    {
>>     218        struct dma_fence_chain *prev_chain = 
>> to_dma_fence_chain(prev);
>>     219        uint64_t context;
>>     220
>>     221        spin_lock_init(&chain->lock);
>>   > 222        chain->prev = prev;
>>
>> ---
>> 0-DAY kernel test infrastructure                Open Source 
>> Technology Center
>> https://lists.01.org/pipermail/kbuild-all Intel Corporation
>

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re:[PATCH 1/9] dma-buf: add new dma_fence_chain container v6
  2019-03-21 11:30       ` Christian König
@ 2019-03-21 14:13         ` Zhou, David(ChunMing)
       [not found]           ` <2q1nzdv6akhy5260mi-2nbzia-ttsfc8-dz76ft-4ifnnm-oz4kfp-uezs6vwoq2oq-wd0h34-v7m6m7-cd3muc-v4wzqd-yktmtp-2338r6riorqe-ez3yxk-8jr766yyjh0ob0v5e8-33h712k31w5b1ntkih.1553177522341-2ueSQiBKiTY7tOexoI0I+QC/G2K4zDHf@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Zhou, David(ChunMing) @ 2019-03-21 14:13 UTC (permalink / raw)
  To: Koenig, Christian, Zhou, David(ChunMing), kbuild test robot
  Cc: dri-devel, Hector, Tobias, amd-gfx, jason, kbuild-all


[-- Attachment #1.1: Type: text/plain, Size: 12243 bytes --]

Can cmpxchg be replaced by some simple C statement?
Otherwise we have to remove __rcu from chain->prev.

-David

-------- Original Message --------
Subject: Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
From: Christian König
To: "Zhou, David(ChunMing)" ,kbuild test robot ,"Zhou, David(ChunMing)"
CC: kbuild-all@01.org,dri-devel@lists.freedesktop.org,amd-gfx@lists.freedesktop.org,lionel.g.landwerlin@intel.com,jason@jlekstrand.net,"Koenig, Christian" ,"Hector, Tobias"

Hi David,

For the cmpxchg() case I of hand don't know either. Looks like so far
nobody has used cmpxchg() with rcu protected structures.

The other cases should be replaced by RCU_INIT_POINTER() or
rcu_dereference_protected(.., true);

Regards,
Christian.

Am 21.03.19 um 07:34 schrieb zhoucm1:
> Hi Lionel and Christian,
>
> Below is robot report for chain->prev, which was added __rcu as you
> suggested.
>
> How to fix this line "tmp = cmpxchg(&chain->prev, prev, replacement); "?
> I checked kernel header file, seems it has no cmpxchg for rcu.
>
> Any suggestion to fix this robot report?
>
> Thanks,
> -David
>
> On 2019年03月21日 08:24, kbuild test robot wrote:
>> Hi Chunming,
>>
>> I love your patch! Perhaps something to improve:
>>
>> [auto build test WARNING on linus/master]
>> [also build test WARNING on v5.1-rc1 next-20190320]
>> [if your patch is applied to the wrong git tree, please drop us a
>> note to help improve the system]
>>
>> url:
>> https://github.com/0day-ci/linux/commits/Chunming-Zhou/dma-buf-add-new-dma_fence_chain-container-v6/20190320-223607
>> reproduce:
>>          # apt-get install sparse
>>          make ARCH=x86_64 allmodconfig
>>          make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'
>>
>>
>> sparse warnings: (new ones prefixed by >>)
>>
>>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
>>>> initializer (different address spaces) @@    expected struct
>>>> dma_fence [noderef] <asn:4>*__old @@    got  dma_fence [noderef]
>>>> <asn:4>*__old @@
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct
>> dma_fence [noderef] <asn:4>*__old
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence
>> *[assigned] prev
>>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
>>>> initializer (different address spaces) @@    expected struct
>>>> dma_fence [noderef] <asn:4>*__new @@    got  dma_fence [noderef]
>>>> <asn:4>*__new @@
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    expected struct
>> dma_fence [noderef] <asn:4>*__new
>>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence
>> *[assigned] replacement
>>>> drivers/dma-buf/dma-fence-chain.c:73:21: sparse: incorrect type in
>>>> assignment (different address spaces) @@    expected struct
>>>> dma_fence *tmp @@    got struct dma_fence [noderef] <struct
>>>> dma_fence *tmp @@
>>     drivers/dma-buf/dma-fence-chain.c:73:21:    expected struct
>> dma_fence *tmp
>>     drivers/dma-buf/dma-fence-chain.c:73:21:    got struct dma_fence
>> [noderef] <asn:4>*[assigned] __ret
>>>> drivers/dma-buf/dma-fence-chain.c:190:28: sparse: incorrect type in
>>>> argument 1 (different address spaces) @@    expected struct
>>>> dma_fence *fence @@    got struct dma_fence struct dma_fence *fence @@
>>     drivers/dma-buf/dma-fence-chain.c:190:28:    expected struct
>> dma_fence *fence
>>     drivers/dma-buf/dma-fence-chain.c:190:28:    got struct dma_fence
>> [noderef] <asn:4>*prev
>>>> drivers/dma-buf/dma-fence-chain.c:222:21: sparse: incorrect type in
>>>> assignment (different address spaces) @@    expected struct
>>>> dma_fence [noderef] <asn:4>*prev @@    got [noderef] <asn:4>*prev @@
>>     drivers/dma-buf/dma-fence-chain.c:222:21:    expected struct
>> dma_fence [noderef] <asn:4>*prev
>>     drivers/dma-buf/dma-fence-chain.c:222:21:    got struct dma_fence
>> *prev
>>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
>> using sizeof(void)
>>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
>> using sizeof(void)
>>
>> vim +73 drivers/dma-buf/dma-fence-chain.c
>>
>>      38
>>      39    /**
>>      40     * dma_fence_chain_walk - chain walking function
>>      41     * @fence: current chain node
>>      42     *
>>      43     * Walk the chain to the next node. Returns the next fence
>> or NULL if we are at
>>      44     * the end of the chain. Garbage collects chain nodes
>> which are already
>>      45     * signaled.
>>      46     */
>>      47    struct dma_fence *dma_fence_chain_walk(struct dma_fence
>> *fence)
>>      48    {
>>      49        struct dma_fence_chain *chain, *prev_chain;
>>      50        struct dma_fence *prev, *replacement, *tmp;
>>      51
>>      52        chain = to_dma_fence_chain(fence);
>>      53        if (!chain) {
>>      54            dma_fence_put(fence);
>>      55            return NULL;
>>      56        }
>>      57
>>      58        while ((prev = dma_fence_chain_get_prev(chain))) {
>>      59
>>      60            prev_chain = to_dma_fence_chain(prev);
>>      61            if (prev_chain) {
>>      62                if (!dma_fence_is_signaled(prev_chain->fence))
>>      63                    break;
>>      64
>>      65                replacement =
>> dma_fence_chain_get_prev(prev_chain);
>>      66            } else {
>>      67                if (!dma_fence_is_signaled(prev))
>>      68                    break;
>>      69
>>      70                replacement = NULL;
>>      71            }
>>      72
>>    > 73            tmp = cmpxchg(&chain->prev, prev, replacement);
>>      74            if (tmp == prev)
>>      75                dma_fence_put(tmp);
>>      76            else
>>      77                dma_fence_put(replacement);
>>      78            dma_fence_put(prev);
>>      79        }
>>      80
>>      81        dma_fence_put(fence);
>>      82        return prev;
>>      83    }
>>      84    EXPORT_SYMBOL(dma_fence_chain_walk);
>>      85
>>      86    /**
>>      87     * dma_fence_chain_find_seqno - find fence chain node by
>> seqno
>>      88     * @pfence: pointer to the chain node where to start
>>      89     * @seqno: the sequence number to search for
>>      90     *
>>      91     * Advance the fence pointer to the chain node which will
>> signal this sequence
>>      92     * number. If no sequence number is provided then this is
>> a no-op.
>>      93     *
>>      94     * Returns EINVAL if the fence is not a chain node or the
>> sequence number has
>>      95     * not yet advanced far enough.
>>      96     */
>>      97    int dma_fence_chain_find_seqno(struct dma_fence **pfence,
>> uint64_t seqno)
>>      98    {
>>      99        struct dma_fence_chain *chain;
>>     100
>>     101        if (!seqno)
>>     102            return 0;
>>     103
>>     104        chain = to_dma_fence_chain(*pfence);
>>     105        if (!chain || chain->base.seqno < seqno)
>>     106            return -EINVAL;
>>     107
>>     108        dma_fence_chain_for_each(*pfence, &chain->base) {
>>     109            if ((*pfence)->context != chain->base.context ||
>>     110 to_dma_fence_chain(*pfence)->prev_seqno < seqno)
>>     111                break;
>>     112        }
>>     113        dma_fence_put(&chain->base);
>>     114
>>     115        return 0;
>>     116    }
>>     117    EXPORT_SYMBOL(dma_fence_chain_find_seqno);
>>     118
>>     119    static const char *dma_fence_chain_get_driver_name(struct
>> dma_fence *fence)
>>     120    {
>>     121            return "dma_fence_chain";
>>     122    }
>>     123
>>     124    static const char
>> *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
>>     125    {
>>     126            return "unbound";
>>     127    }
>>     128
>>     129    static void dma_fence_chain_irq_work(struct irq_work *work)
>>     130    {
>>     131        struct dma_fence_chain *chain;
>>     132
>>     133        chain = container_of(work, typeof(*chain), work);
>>     134
>>     135        /* Try to rearm the callback */
>>     136        if (!dma_fence_chain_enable_signaling(&chain->base))
>>     137            /* Ok, we are done. No more unsignaled fences left */
>>     138            dma_fence_signal(&chain->base);
>>     139        dma_fence_put(&chain->base);
>>     140    }
>>     141
>>     142    static void dma_fence_chain_cb(struct dma_fence *f, struct
>> dma_fence_cb *cb)
>>     143    {
>>     144        struct dma_fence_chain *chain;
>>     145
>>     146        chain = container_of(cb, typeof(*chain), cb);
>>     147        irq_work_queue(&chain->work);
>>     148        dma_fence_put(f);
>>     149    }
>>     150
>>     151    static bool dma_fence_chain_enable_signaling(struct
>> dma_fence *fence)
>>     152    {
>>     153        struct dma_fence_chain *head = to_dma_fence_chain(fence);
>>     154
>>     155        dma_fence_get(&head->base);
>>     156        dma_fence_chain_for_each(fence, &head->base) {
>>     157            struct dma_fence_chain *chain =
>> to_dma_fence_chain(fence);
>>     158            struct dma_fence *f = chain ? chain->fence : fence;
>>     159
>>     160            dma_fence_get(f);
>>     161            if (!dma_fence_add_callback(f, &head->cb,
>> dma_fence_chain_cb)) {
>>     162                dma_fence_put(fence);
>>     163                return true;
>>     164            }
>>     165            dma_fence_put(f);
>>     166        }
>>     167        dma_fence_put(&head->base);
>>     168        return false;
>>     169    }
>>     170
>>     171    static bool dma_fence_chain_signaled(struct dma_fence *fence)
>>     172    {
>>     173        dma_fence_chain_for_each(fence, fence) {
>>     174            struct dma_fence_chain *chain =
>> to_dma_fence_chain(fence);
>>     175            struct dma_fence *f = chain ? chain->fence : fence;
>>     176
>>     177            if (!dma_fence_is_signaled(f)) {
>>     178                dma_fence_put(fence);
>>     179                return false;
>>     180            }
>>     181        }
>>     182
>>     183        return true;
>>     184    }
>>     185
>>     186    static void dma_fence_chain_release(struct dma_fence *fence)
>>     187    {
>>     188        struct dma_fence_chain *chain =
>> to_dma_fence_chain(fence);
>>     189
>>   > 190        dma_fence_put(chain->prev);
>>     191        dma_fence_put(chain->fence);
>>     192        dma_fence_free(fence);
>>     193    }
>>     194
>>     195    const struct dma_fence_ops dma_fence_chain_ops = {
>>     196        .get_driver_name = dma_fence_chain_get_driver_name,
>>     197        .get_timeline_name = dma_fence_chain_get_timeline_name,
>>     198        .enable_signaling = dma_fence_chain_enable_signaling,
>>     199        .signaled = dma_fence_chain_signaled,
>>     200        .release = dma_fence_chain_release,
>>     201    };
>>     202    EXPORT_SYMBOL(dma_fence_chain_ops);
>>     203
>>     204    /**
>>     205     * dma_fence_chain_init - initialize a fence chain
>>     206     * @chain: the chain node to initialize
>>     207     * @prev: the previous fence
>>     208     * @fence: the current fence
>>     209     *
>>     210     * Initialize a new chain node and either start a new
>> chain or add the node to
>>     211     * the existing chain of the previous fence.
>>     212     */
>>     213    void dma_fence_chain_init(struct dma_fence_chain *chain,
>>     214                  struct dma_fence *prev,
>>     215                  struct dma_fence *fence,
>>     216                  uint64_t seqno)
>>     217    {
>>     218        struct dma_fence_chain *prev_chain =
>> to_dma_fence_chain(prev);
>>     219        uint64_t context;
>>     220
>>     221        spin_lock_init(&chain->lock);
>>   > 222        chain->prev = prev;
>>
>> ---
>> 0-DAY kernel test infrastructure                Open Source
>> Technology Center
>> https://lists.01.org/pipermail/kbuild-all Intel Corporation
>


[-- Attachment #1.2: Type: text/html, Size: 25471 bytes --]

[-- Attachment #2: Type: text/plain, Size: 159 bytes --]

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
       [not found]           ` <2q1nzdv6akhy5260mi-2nbzia-ttsfc8-dz76ft-4ifnnm-oz4kfp-uezs6vwoq2oq-wd0h34-v7m6m7-cd3muc-v4wzqd-yktmtp-2338r6riorqe-ez3yxk-8jr766yyjh0ob0v5e8-33h712k31w5b1ntkih.1553177522341-2ueSQiBKiTY7tOexoI0I+QC/G2K4zDHf@public.gmane.org>
@ 2019-03-21 14:40             ` Christian König
       [not found]               ` <6bd1f7e6-965b-3bd5-e4e0-f3b04e0638fd-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Christian König @ 2019-03-21 14:40 UTC (permalink / raw)
  To: Zhou, David(ChunMing), Koenig, Christian, kbuild test robot
  Cc: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Hector, Tobias,
	kbuild-all-JC7UmRfGjtg, jason-fQELhIk9awXprZlt/sZkLg,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w


[-- Attachment #1.1: Type: text/plain, Size: 14885 bytes --]

No, an atomic cmpxchg is a hardware operation; if you want to replace it, you
need a lock again.

Maybe just add a comment and use an explicit cast to void *? Not sure if that
silences the warning.

Christian.
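
If the atomic exchange is kept, one sparse-friendly variant of line 73 would
be an explicit __force cast on the __rcu pointer instead of a void * cast
(again only a sketch, assuming the __rcu annotation stays):

		/* same hardware cmpxchg as before; the __force cast tells
		 * sparse that dropping the __rcu address-space annotation
		 * here is intentional
		 */
		tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
			      prev, replacement);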

Am 21.03.19 um 15:13 schrieb Zhou, David(ChunMing):
> cmpxchg be replaced by some simple c sentance?
> otherwise we have to remove __rcu of chian->prev.
>
> -David
>
> -------- Original Message --------
> Subject: Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
> From: Christian König
> To: "Zhou, David(ChunMing)" ,kbuild test robot ,"Zhou, David(ChunMing)"
> CC: 
> kbuild-all-JC7UmRfGjtg@public.gmane.org,dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org,jason-fQELhIk9awXprZlt/sZkLg@public.gmane.org,"Koenig, 
> Christian" ,"Hector, Tobias"
>
> Hi David,
>
> For the cmpxchg() case I of hand don't know either. Looks like so far
> nobody has used cmpxchg() with rcu protected structures.
>
> The other cases should be replaced by RCU_INIT_POINTER() or
> rcu_dereference_protected(.., true);
>
> Regards,
> Christian.
>
> Am 21.03.19 um 07:34 schrieb zhoucm1:
> > Hi Lionel and Christian,
> >
> > Below is robot report for chain->prev, which was added __rcu as you
> > suggested.
> >
> > How to fix this line "tmp = cmpxchg(&chain->prev, prev, replacement); "?
> > I checked kernel header file, seems it has no cmpxchg for rcu.
> >
> > Any suggestion to fix this robot report?
> >
> > Thanks,
> > -David
> >
> > On 2019年03月21日 08:24, kbuild test robot wrote:
> >> Hi Chunming,
> >>
> >> I love your patch! Perhaps something to improve:
> >>
> >> [auto build test WARNING on linus/master]
> >> [also build test WARNING on v5.1-rc1 next-20190320]
> >> [if your patch is applied to the wrong git tree, please drop us a
> >> note to help improve the system]
> >>
> >> url:
> >> 
> https://github.com/0day-ci/linux/commits/Chunming-Zhou/dma-buf-add-new-dma_fence_chain-container-v6/20190320-223607
> >> reproduce:
> >>          # apt-get install sparse
> >>          make ARCH=x86_64 allmodconfig
> >>          make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'
> >>
> >>
> >> sparse warnings: (new ones prefixed by >>)
> >>
> >>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
> >>>> initializer (different address spaces) @@    expected struct
> >>>> dma_fence [noderef] <asn:4>*__old @@    got  dma_fence [noderef]
> >>>> <asn:4>*__old @@
> >>     drivers/dma-buf/dma-fence-chain.c:73:23: expected struct
> >> dma_fence [noderef] <asn:4>*__old
> >>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence
> >> *[assigned] prev
> >>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
> >>>> initializer (different address spaces) @@    expected struct
> >>>> dma_fence [noderef] <asn:4>*__new @@    got  dma_fence [noderef]
> >>>> <asn:4>*__new @@
> >>     drivers/dma-buf/dma-fence-chain.c:73:23: expected struct
> >> dma_fence [noderef] <asn:4>*__new
> >>     drivers/dma-buf/dma-fence-chain.c:73:23:    got struct dma_fence
> >> *[assigned] replacement
> >>>> drivers/dma-buf/dma-fence-chain.c:73:21: sparse: incorrect type in
> >>>> assignment (different address spaces) @@ expected struct
> >>>> dma_fence *tmp @@    got struct dma_fence [noderef] <struct
> >>>> dma_fence *tmp @@
> >>     drivers/dma-buf/dma-fence-chain.c:73:21: expected struct
> >> dma_fence *tmp
> >>     drivers/dma-buf/dma-fence-chain.c:73:21:    got struct dma_fence
> >> [noderef] <asn:4>*[assigned] __ret
> >>>> drivers/dma-buf/dma-fence-chain.c:190:28: sparse: incorrect type in
> >>>> argument 1 (different address spaces) @@ expected struct
> >>>> dma_fence *fence @@    got struct dma_fence struct dma_fence 
> *fence @@
> >>     drivers/dma-buf/dma-fence-chain.c:190:28: expected struct
> >> dma_fence *fence
> >>     drivers/dma-buf/dma-fence-chain.c:190:28: got struct dma_fence
> >> [noderef] <asn:4>*prev
> >>>> drivers/dma-buf/dma-fence-chain.c:222:21: sparse: incorrect type in
> >>>> assignment (different address spaces) @@ expected struct
> >>>> dma_fence [noderef] <asn:4>*prev @@    got [noderef] <asn:4>*prev @@
> >>     drivers/dma-buf/dma-fence-chain.c:222:21: expected struct
> >> dma_fence [noderef] <asn:4>*prev
> >>     drivers/dma-buf/dma-fence-chain.c:222:21: got struct dma_fence
> >> *prev
> >>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
> >> using sizeof(void)
> >>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
> >> using sizeof(void)
> >>
> >> vim +73 drivers/dma-buf/dma-fence-chain.c
> >>
> >>      38
> >>      39    /**
> >>      40     * dma_fence_chain_walk - chain walking function
> >>      41     * @fence: current chain node
> >>      42     *
> >>      43     * Walk the chain to the next node. Returns the next fence
> >> or NULL if we are at
> >>      44     * the end of the chain. Garbage collects chain nodes
> >> which are already
> >>      45     * signaled.
> >>      46     */
> >>      47    struct dma_fence *dma_fence_chain_walk(struct dma_fence
> >> *fence)
> >>      48    {
> >>      49        struct dma_fence_chain *chain, *prev_chain;
> >>      50        struct dma_fence *prev, *replacement, *tmp;
> >>      51
> >>      52        chain = to_dma_fence_chain(fence);
> >>      53        if (!chain) {
> >>      54            dma_fence_put(fence);
> >>      55            return NULL;
> >>      56        }
> >>      57
> >>      58        while ((prev = dma_fence_chain_get_prev(chain))) {
> >>      59
> >>      60            prev_chain = to_dma_fence_chain(prev);
> >>      61            if (prev_chain) {
> >>      62                if (!dma_fence_is_signaled(prev_chain->fence))
> >>      63                    break;
> >>      64
> >>      65                replacement =
> >> dma_fence_chain_get_prev(prev_chain);
> >>      66            } else {
> >>      67                if (!dma_fence_is_signaled(prev))
> >>      68                    break;
> >>      69
> >>      70                replacement = NULL;
> >>      71            }
> >>      72
> >>    > 73            tmp = cmpxchg(&chain->prev, prev, replacement);
> >>      74            if (tmp == prev)
> >>      75                dma_fence_put(tmp);
> >>      76            else
> >>      77                dma_fence_put(replacement);
> >>      78            dma_fence_put(prev);
> >>      79        }
> >>      80
> >>      81        dma_fence_put(fence);
> >>      82        return prev;
> >>      83    }
> >>      84    EXPORT_SYMBOL(dma_fence_chain_walk);
> >>      85
> >>      86    /**
> >>      87     * dma_fence_chain_find_seqno - find fence chain node by
> >> seqno
> >>      88     * @pfence: pointer to the chain node where to start
> >>      89     * @seqno: the sequence number to search for
> >>      90     *
> >>      91     * Advance the fence pointer to the chain node which will
> >> signal this sequence
> >>      92     * number. If no sequence number is provided then this is
> >> a no-op.
> >>      93     *
> >>      94     * Returns EINVAL if the fence is not a chain node or the
> >> sequence number has
> >>      95     * not yet advanced far enough.
> >>      96     */
> >>      97    int dma_fence_chain_find_seqno(struct dma_fence **pfence,
> >> uint64_t seqno)
> >>      98    {
> >>      99        struct dma_fence_chain *chain;
> >>     100
> >>     101        if (!seqno)
> >>     102            return 0;
> >>     103
> >>     104        chain = to_dma_fence_chain(*pfence);
> >>     105        if (!chain || chain->base.seqno < seqno)
> >>     106            return -EINVAL;
> >>     107
> >>     108        dma_fence_chain_for_each(*pfence, &chain->base) {
> >>     109            if ((*pfence)->context != chain->base.context ||
> >>     110 to_dma_fence_chain(*pfence)->prev_seqno < seqno)
> >>     111                break;
> >>     112        }
> >>     113        dma_fence_put(&chain->base);
> >>     114
> >>     115        return 0;
> >>     116    }
> >>     117 EXPORT_SYMBOL(dma_fence_chain_find_seqno);
> >>     118
> >>     119    static const char *dma_fence_chain_get_driver_name(struct
> >> dma_fence *fence)
> >>     120    {
> >>     121            return "dma_fence_chain";
> >>     122    }
> >>     123
> >>     124    static const char
> >> *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
> >>     125    {
> >>     126            return "unbound";
> >>     127    }
> >>     128
> >>     129    static void dma_fence_chain_irq_work(struct irq_work *work)
> >>     130    {
> >>     131        struct dma_fence_chain *chain;
> >>     132
> >>     133        chain = container_of(work, typeof(*chain), work);
> >>     134
> >>     135        /* Try to rearm the callback */
> >>     136        if (!dma_fence_chain_enable_signaling(&chain->base))
> >>     137            /* Ok, we are done. No more unsignaled fences 
> left */
> >>     138 dma_fence_signal(&chain->base);
> >>     139        dma_fence_put(&chain->base);
> >>     140    }
> >>     141
> >>     142    static void dma_fence_chain_cb(struct dma_fence *f, struct
> >> dma_fence_cb *cb)
> >>     143    {
> >>     144        struct dma_fence_chain *chain;
> >>     145
> >>     146        chain = container_of(cb, typeof(*chain), cb);
> >>     147        irq_work_queue(&chain->work);
> >>     148        dma_fence_put(f);
> >>     149    }
> >>     150
> >>     151    static bool dma_fence_chain_enable_signaling(struct
> >> dma_fence *fence)
> >>     152    {
> >>     153        struct dma_fence_chain *head = 
> to_dma_fence_chain(fence);
> >>     154
> >>     155        dma_fence_get(&head->base);
> >>     156        dma_fence_chain_for_each(fence, &head->base) {
> >>     157            struct dma_fence_chain *chain =
> >> to_dma_fence_chain(fence);
> >>     158            struct dma_fence *f = chain ? chain->fence : fence;
> >>     159
> >>     160            dma_fence_get(f);
> >>     161            if (!dma_fence_add_callback(f, &head->cb,
> >> dma_fence_chain_cb)) {
> >>     162                dma_fence_put(fence);
> >>     163                return true;
> >>     164            }
> >>     165            dma_fence_put(f);
> >>     166        }
> >>     167        dma_fence_put(&head->base);
> >>     168        return false;
> >>     169    }
> >>     170
> >>     171    static bool dma_fence_chain_signaled(struct dma_fence 
> *fence)
> >>     172    {
> >>     173        dma_fence_chain_for_each(fence, fence) {
> >>     174            struct dma_fence_chain *chain =
> >> to_dma_fence_chain(fence);
> >>     175            struct dma_fence *f = chain ? chain->fence : fence;
> >>     176
> >>     177            if (!dma_fence_is_signaled(f)) {
> >>     178                dma_fence_put(fence);
> >>     179                return false;
> >>     180            }
> >>     181        }
> >>     182
> >>     183        return true;
> >>     184    }
> >>     185
> >>     186    static void dma_fence_chain_release(struct dma_fence *fence)
> >>     187    {
> >>     188        struct dma_fence_chain *chain =
> >> to_dma_fence_chain(fence);
> >>     189
> >>   > 190        dma_fence_put(chain->prev);
> >>     191        dma_fence_put(chain->fence);
> >>     192        dma_fence_free(fence);
> >>     193    }
> >>     194
> >>     195    const struct dma_fence_ops dma_fence_chain_ops = {
> >>     196        .get_driver_name = dma_fence_chain_get_driver_name,
> >>     197        .get_timeline_name = dma_fence_chain_get_timeline_name,
> >>     198        .enable_signaling = dma_fence_chain_enable_signaling,
> >>     199        .signaled = dma_fence_chain_signaled,
> >>     200        .release = dma_fence_chain_release,
> >>     201    };
> >>     202    EXPORT_SYMBOL(dma_fence_chain_ops);
> >>     203
> >>     204    /**
> >>     205     * dma_fence_chain_init - initialize a fence chain
> >>     206     * @chain: the chain node to initialize
> >>     207     * @prev: the previous fence
> >>     208     * @fence: the current fence
> >>     209     *
> >>     210     * Initialize a new chain node and either start a new
> >> chain or add the node to
> >>     211     * the existing chain of the previous fence.
> >>     212     */
> >>     213    void dma_fence_chain_init(struct dma_fence_chain *chain,
> >>     214                  struct dma_fence *prev,
> >>     215                  struct dma_fence *fence,
> >>     216                  uint64_t seqno)
> >>     217    {
> >>     218        struct dma_fence_chain *prev_chain =
> >> to_dma_fence_chain(prev);
> >>     219        uint64_t context;
> >>     220
> >>     221        spin_lock_init(&chain->lock);
> >>   > 222        chain->prev = prev;
> >>
> >> ---
> >> 0-DAY kernel test infrastructure Open Source
> >> Technology Center
> >> https://lists.01.org/pipermail/kbuild-all Intel Corporation
> >
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx



[-- Attachment #2: Type: text/plain, Size: 153 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
       [not found]               ` <6bd1f7e6-965b-3bd5-e4e0-f3b04e0638fd-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-03-22  7:34                 ` zhoucm1
       [not found]                   ` <5cb29811-a383-f473-5443-25e9d968c516-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: zhoucm1 @ 2019-03-22  7:34 UTC (permalink / raw)
  To: christian.koenig-5C7GfCeVMHo, Zhou, David(ChunMing), kbuild test robot
  Cc: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Hector, Tobias,
	kbuild-all-JC7UmRfGjtg, jason-fQELhIk9awXprZlt/sZkLg,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w


[-- Attachment #1.1: Type: text/plain, Size: 15268 bytes --]

How about the attached?

If OK, I will merge it into patch #1.


-David


On 2019-03-21 22:40, Christian König wrote:
> No, atomic cmpxchg is a hardware operation. If you want to replace
> that, you need a lock again.
>
> Maybe just add a comment and use an explicit cast to void*? Not sure
> if that silences the warning.
>
> Christian.
>
> On 21.03.19 at 15:13, Zhou, David(ChunMing) wrote:
>> Can cmpxchg be replaced by some simple C statement?
>> Otherwise we have to remove __rcu from chain->prev.
>>
>> -David
>>
>> -------- Original Message --------
>> Subject: Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
>> From: Christian König
>> To: "Zhou, David(ChunMing)" ,kbuild test robot ,"Zhou, David(ChunMing)"
>> CC: 
>> kbuild-all-JC7UmRfGjtg@public.gmane.org,dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org,jason-fQELhIk9awXprZlt/sZkLg@public.gmane.org,"Koenig, 
>> Christian" ,"Hector, Tobias"
>>
>> Hi David,
>>
>> For the cmpxchg() case I don't know offhand either. Looks like so far
>> nobody has used cmpxchg() with RCU-protected structures.
>>
>> The other cases should be replaced by RCU_INIT_POINTER() or
>> rcu_dereference_protected(.., true);
>>
>> Regards,
>> Christian.
>>
>> On 21.03.19 at 07:34, zhoucm1 wrote:
>> > Hi Lionel and Christian,
>> >
>> > Below is the robot report for chain->prev, to which __rcu was added as you
>> > suggested.
>> >
>> > How can I fix this line: "tmp = cmpxchg(&chain->prev, prev, replacement);"?
>> > I checked the kernel header files; it seems there is no cmpxchg variant for RCU pointers.
>> >
>> > Any suggestions for fixing this robot report?
>> >
>> > Thanks,
>> > -David
>> >
>> > On 2019-03-21 08:24, kbuild test robot wrote:
>> >> Hi Chunming,
>> >>
>> >> I love your patch! Perhaps something to improve:
>> >>
>> >> [auto build test WARNING on linus/master]
>> >> [also build test WARNING on v5.1-rc1 next-20190320]
>> >> [if your patch is applied to the wrong git tree, please drop us a
>> >> note to help improve the system]
>> >>
>> >> url:
>> >> 
>> https://github.com/0day-ci/linux/commits/Chunming-Zhou/dma-buf-add-new-dma_fence_chain-container-v6/20190320-223607
>> >> reproduce:
>> >>          # apt-get install sparse
>> >>          make ARCH=x86_64 allmodconfig
>> >>          make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'
>> >>
>> >>
>> >> sparse warnings: (new ones prefixed by >>)
>> >>
>> >>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
>> >>>> initializer (different address spaces) @@    expected struct
>> >>>> dma_fence [noderef] <asn:4>*__old @@    got  dma_fence [noderef]
>> >>>> <asn:4>*__old @@
>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: expected struct
>> >> dma_fence [noderef] <asn:4>*__old
>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: got struct dma_fence
>> >> *[assigned] prev
>> >>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
>> >>>> initializer (different address spaces) @@    expected struct
>> >>>> dma_fence [noderef] <asn:4>*__new @@    got  dma_fence [noderef]
>> >>>> <asn:4>*__new @@
>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: expected struct
>> >> dma_fence [noderef] <asn:4>*__new
>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: got struct dma_fence
>> >> *[assigned] replacement
>> >>>> drivers/dma-buf/dma-fence-chain.c:73:21: sparse: incorrect type in
>> >>>> assignment (different address spaces) @@    expected struct
>> >>>> dma_fence *tmp @@    got struct dma_fence [noderef] <struct
>> >>>> dma_fence *tmp @@
>> >>     drivers/dma-buf/dma-fence-chain.c:73:21: expected struct
>> >> dma_fence *tmp
>> >>     drivers/dma-buf/dma-fence-chain.c:73:21: got struct dma_fence
>> >> [noderef] <asn:4>*[assigned] __ret
>> >>>> drivers/dma-buf/dma-fence-chain.c:190:28: sparse: incorrect type in
>> >>>> argument 1 (different address spaces) @@    expected struct
>> >>>> dma_fence *fence @@    got struct dma_fence struct dma_fence 
>> *fence @@
>> >>     drivers/dma-buf/dma-fence-chain.c:190:28: expected struct
>> >> dma_fence *fence
>> >>     drivers/dma-buf/dma-fence-chain.c:190:28: got struct dma_fence
>> >> [noderef] <asn:4>*prev
>> >>>> drivers/dma-buf/dma-fence-chain.c:222:21: sparse: incorrect type in
>> >>>> assignment (different address spaces) @@    expected struct
>> >>>> dma_fence [noderef] <asn:4>*prev @@    got [noderef] <asn:4>*prev @@
>> >>     drivers/dma-buf/dma-fence-chain.c:222:21: expected struct
>> >> dma_fence [noderef] <asn:4>*prev
>> >>     drivers/dma-buf/dma-fence-chain.c:222:21: got struct dma_fence
>> >> *prev
>> >>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
>> >> using sizeof(void)
>> >>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
>> >> using sizeof(void)
>> >>
>> >> vim +73 drivers/dma-buf/dma-fence-chain.c
>> >>
>> >>      38
>> >>      39    /**
>> >>      40     * dma_fence_chain_walk - chain walking function
>> >>      41     * @fence: current chain node
>> >>      42     *
>> >>      43     * Walk the chain to the next node. Returns the next fence
>> >> or NULL if we are at
>> >>      44     * the end of the chain. Garbage collects chain nodes
>> >> which are already
>> >>      45     * signaled.
>> >>      46     */
>> >>      47    struct dma_fence *dma_fence_chain_walk(struct dma_fence
>> >> *fence)
>> >>      48    {
>> >>      49        struct dma_fence_chain *chain, *prev_chain;
>> >>      50        struct dma_fence *prev, *replacement, *tmp;
>> >>      51
>> >>      52        chain = to_dma_fence_chain(fence);
>> >>      53        if (!chain) {
>> >>      54            dma_fence_put(fence);
>> >>      55            return NULL;
>> >>      56        }
>> >>      57
>> >>      58        while ((prev = dma_fence_chain_get_prev(chain))) {
>> >>      59
>> >>      60            prev_chain = to_dma_fence_chain(prev);
>> >>      61            if (prev_chain) {
>> >>      62                if (!dma_fence_is_signaled(prev_chain->fence))
>> >>      63                    break;
>> >>      64
>> >>      65                replacement =
>> >> dma_fence_chain_get_prev(prev_chain);
>> >>      66            } else {
>> >>      67                if (!dma_fence_is_signaled(prev))
>> >>      68                    break;
>> >>      69
>> >>      70                replacement = NULL;
>> >>      71            }
>> >>      72
>> >>    > 73            tmp = cmpxchg(&chain->prev, prev, replacement);
>> >>      74            if (tmp == prev)
>> >>      75                dma_fence_put(tmp);
>> >>      76            else
>> >>      77 dma_fence_put(replacement);
>> >>      78            dma_fence_put(prev);
>> >>      79        }
>> >>      80
>> >>      81        dma_fence_put(fence);
>> >>      82        return prev;
>> >>      83    }
>> >>      84    EXPORT_SYMBOL(dma_fence_chain_walk);
>> >>      85
>> >>      86    /**
>> >>      87     * dma_fence_chain_find_seqno - find fence chain node by
>> >> seqno
>> >>      88     * @pfence: pointer to the chain node where to start
>> >>      89     * @seqno: the sequence number to search for
>> >>      90     *
>> >>      91     * Advance the fence pointer to the chain node which will
>> >> signal this sequence
>> >>      92     * number. If no sequence number is provided then this is
>> >> a no-op.
>> >>      93     *
>> >>      94     * Returns EINVAL if the fence is not a chain node or the
>> >> sequence number has
>> >>      95     * not yet advanced far enough.
>> >>      96     */
>> >>      97    int dma_fence_chain_find_seqno(struct dma_fence **pfence,
>> >> uint64_t seqno)
>> >>      98    {
>> >>      99        struct dma_fence_chain *chain;
>> >>     100
>> >>     101        if (!seqno)
>> >>     102            return 0;
>> >>     103
>> >>     104        chain = to_dma_fence_chain(*pfence);
>> >>     105        if (!chain || chain->base.seqno < seqno)
>> >>     106            return -EINVAL;
>> >>     107
>> >>     108        dma_fence_chain_for_each(*pfence, &chain->base) {
>> >>     109            if ((*pfence)->context != chain->base.context ||
>> >>     110 to_dma_fence_chain(*pfence)->prev_seqno < seqno)
>> >>     111                break;
>> >>     112        }
>> >>     113 dma_fence_put(&chain->base);
>> >>     114
>> >>     115        return 0;
>> >>     116    }
>> >>     117 EXPORT_SYMBOL(dma_fence_chain_find_seqno);
>> >>     118
>> >>     119    static const char *dma_fence_chain_get_driver_name(struct
>> >> dma_fence *fence)
>> >>     120    {
>> >>     121            return "dma_fence_chain";
>> >>     122    }
>> >>     123
>> >>     124    static const char
>> >> *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
>> >>     125    {
>> >>     126            return "unbound";
>> >>     127    }
>> >>     128
>> >>     129    static void dma_fence_chain_irq_work(struct irq_work *work)
>> >>     130    {
>> >>     131        struct dma_fence_chain *chain;
>> >>     132
>> >>     133        chain = container_of(work, typeof(*chain), work);
>> >>     134
>> >>     135        /* Try to rearm the callback */
>> >>     136        if (!dma_fence_chain_enable_signaling(&chain->base))
>> >>     137            /* Ok, we are done. No more unsignaled fences 
>> left */
>> >>     138 dma_fence_signal(&chain->base);
>> >>     139 dma_fence_put(&chain->base);
>> >>     140    }
>> >>     141
>> >>     142    static void dma_fence_chain_cb(struct dma_fence *f, struct
>> >> dma_fence_cb *cb)
>> >>     143    {
>> >>     144        struct dma_fence_chain *chain;
>> >>     145
>> >>     146        chain = container_of(cb, typeof(*chain), cb);
>> >>     147 irq_work_queue(&chain->work);
>> >>     148        dma_fence_put(f);
>> >>     149    }
>> >>     150
>> >>     151    static bool dma_fence_chain_enable_signaling(struct
>> >> dma_fence *fence)
>> >>     152    {
>> >>     153        struct dma_fence_chain *head = 
>> to_dma_fence_chain(fence);
>> >>     154
>> >>     155        dma_fence_get(&head->base);
>> >>     156        dma_fence_chain_for_each(fence, &head->base) {
>> >>     157            struct dma_fence_chain *chain =
>> >> to_dma_fence_chain(fence);
>> >>     158            struct dma_fence *f = chain ? chain->fence : fence;
>> >>     159
>> >>     160            dma_fence_get(f);
>> >>     161            if (!dma_fence_add_callback(f, &head->cb,
>> >> dma_fence_chain_cb)) {
>> >>     162                dma_fence_put(fence);
>> >>     163                return true;
>> >>     164            }
>> >>     165            dma_fence_put(f);
>> >>     166        }
>> >>     167        dma_fence_put(&head->base);
>> >>     168        return false;
>> >>     169    }
>> >>     170
>> >>     171    static bool dma_fence_chain_signaled(struct dma_fence 
>> *fence)
>> >>     172    {
>> >>     173        dma_fence_chain_for_each(fence, fence) {
>> >>     174            struct dma_fence_chain *chain =
>> >> to_dma_fence_chain(fence);
>> >>     175            struct dma_fence *f = chain ? chain->fence : fence;
>> >>     176
>> >>     177            if (!dma_fence_is_signaled(f)) {
>> >>     178                dma_fence_put(fence);
>> >>     179                return false;
>> >>     180            }
>> >>     181        }
>> >>     182
>> >>     183        return true;
>> >>     184    }
>> >>     185
>> >>     186    static void dma_fence_chain_release(struct dma_fence 
>> *fence)
>> >>     187    {
>> >>     188        struct dma_fence_chain *chain =
>> >> to_dma_fence_chain(fence);
>> >>     189
>> >>   > 190        dma_fence_put(chain->prev);
>> >>     191        dma_fence_put(chain->fence);
>> >>     192        dma_fence_free(fence);
>> >>     193    }
>> >>     194
>> >>     195    const struct dma_fence_ops dma_fence_chain_ops = {
>> >>     196        .get_driver_name = dma_fence_chain_get_driver_name,
>> >>     197        .get_timeline_name = dma_fence_chain_get_timeline_name,
>> >>     198        .enable_signaling = dma_fence_chain_enable_signaling,
>> >>     199        .signaled = dma_fence_chain_signaled,
>> >>     200        .release = dma_fence_chain_release,
>> >>     201    };
>> >>     202    EXPORT_SYMBOL(dma_fence_chain_ops);
>> >>     203
>> >>     204    /**
>> >>     205     * dma_fence_chain_init - initialize a fence chain
>> >>     206     * @chain: the chain node to initialize
>> >>     207     * @prev: the previous fence
>> >>     208     * @fence: the current fence
>> >>     209     *
>> >>     210     * Initialize a new chain node and either start a new
>> >> chain or add the node to
>> >>     211     * the existing chain of the previous fence.
>> >>     212     */
>> >>     213    void dma_fence_chain_init(struct dma_fence_chain *chain,
>> >>     214                  struct dma_fence *prev,
>> >>     215                  struct dma_fence *fence,
>> >>     216                  uint64_t seqno)
>> >>     217    {
>> >>     218        struct dma_fence_chain *prev_chain =
>> >> to_dma_fence_chain(prev);
>> >>     219        uint64_t context;
>> >>     220
>> >>     221 spin_lock_init(&chain->lock);
>> >>   > 222        chain->prev = prev;
>> >>
>> >> ---
>> >> 0-DAY kernel test infrastructure Open Source
>> >> Technology Center
>> >> https://lists.01.org/pipermail/kbuild-all Intel Corporation
>> >
>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>



[-- Attachment #2: 0001-fix-rcu-warning-from-kernel-robot.patch --]
[-- Type: text/x-patch, Size: 1503 bytes --]

From 0cb7171b2a99b425323a8e02a968c9488de29608 Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou-5C7GfCeVMHo@public.gmane.org>
Date: Fri, 22 Mar 2019 15:33:01 +0800
Subject: [PATCH] fix rcu warning from kernel robot

Signed-off-by: Chunming Zhou <david1.zhou-5C7GfCeVMHo@public.gmane.org>
---
 drivers/dma-buf/dma-fence-chain.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 0c5e3c902fa0..c729f98a7bd3 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -70,7 +70,7 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
 			replacement = NULL;
 		}
 
-		tmp = cmpxchg(&chain->prev, prev, replacement);
+		tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
 		if (tmp == prev)
 			dma_fence_put(tmp);
 		else
@@ -187,7 +187,7 @@ static void dma_fence_chain_release(struct dma_fence *fence)
 {
 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
 
-	dma_fence_put(chain->prev);
+	dma_fence_put(rcu_dereference_protected(chain->prev, true));
 	dma_fence_put(chain->fence);
 	dma_fence_free(fence);
 }
@@ -219,7 +219,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
 	uint64_t context;
 
 	spin_lock_init(&chain->lock);
-	chain->prev = prev;
+	rcu_assign_pointer(chain->prev, prev);
 	chain->fence = fence;
 	chain->prev_seqno = 0;
 	init_irq_work(&chain->work, dma_fence_chain_irq_work);
-- 
2.17.1
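
The cmpxchg() hunk above is the interesting one: the compare-and-swap stays a real atomic hardware operation, and the casts through void * only sidestep sparse's address-space check on the __rcu annotation. Below is a standalone sketch of just that step; example_chain and example_chain_replace are hypothetical names, not kernel API, and initialization and release would use the accessors sketched earlier in the thread.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct example_chain {
	struct example_chain __rcu *prev;
};

/*
 * Atomically swap prev from @old to @new; returns true on success.
 * The void * casts exist only to keep sparse quiet; the semantics of
 * cmpxchg() are unchanged.
 */
static bool example_chain_replace(struct example_chain *c,
				  struct example_chain *old,
				  struct example_chain *new)
{
	void *tmp = cmpxchg((void **)&c->prev, (void *)old, (void *)new);

	return tmp == old;
}
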


[-- Attachment #3: Type: text/plain, Size: 153 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
       [not found]                   ` <5cb29811-a383-f473-5443-25e9d968c516-5C7GfCeVMHo@public.gmane.org>
@ 2019-03-22 15:44                     ` Christian König
  0 siblings, 0 replies; 16+ messages in thread
From: Christian König @ 2019-03-22 15:44 UTC (permalink / raw)
  To: zhoucm1, christian.koenig-5C7GfCeVMHo, Zhou, David(ChunMing),
	kbuild test robot
  Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w, Hector, Tobias,
	kbuild-all-JC7UmRfGjtg, jason-fQELhIk9awXprZlt/sZkLg


[-- Attachment #1.1: Type: text/plain, Size: 15873 bytes --]

Yeah, that should work.

Christian.

On 22.03.19 at 08:34, zhoucm1 wrote:
>
> How about the attached?
>
> If OK, I will merge it into patch #1.
>
>
> -David
>
>
> On 2019-03-21 22:40, Christian König wrote:
>> No, atomic cmpxchg is a hardware operation. If you want to replace
>> that, you need a lock again.
>>
>> Maybe just add a comment and use an explicit cast to void*? Not sure
>> if that silences the warning.
>>
>> Christian.
>>
>> On 21.03.19 at 15:13, Zhou, David(ChunMing) wrote:
>>> Can cmpxchg be replaced by some simple C statement?
>>> Otherwise we have to remove __rcu from chain->prev.
>>>
>>> -David
>>>
>>> -------- Original Message --------
>>> Subject: Re: [PATCH 1/9] dma-buf: add new dma_fence_chain container v6
>>> From: Christian König
>>> To: "Zhou, David(ChunMing)" ,kbuild test robot ,"Zhou, David(ChunMing)"
>>> CC: 
>>> kbuild-all-JC7UmRfGjtg@public.gmane.org,dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,lionel.g.landwerlin-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org,jason-fQELhIk9awXprZlt/sZkLg@public.gmane.org,"Koenig, 
>>> Christian" ,"Hector, Tobias"
>>>
>>> Hi David,
>>>
>>> For the cmpxchg() case I don't know offhand either. Looks like so far
>>> nobody has used cmpxchg() with RCU-protected structures.
>>>
>>> The other cases should be replaced by RCU_INIT_POINTER() or
>>> rcu_dereference_protected(.., true);
>>>
>>> Regards,
>>> Christian.
>>>
>>> On 21.03.19 at 07:34, zhoucm1 wrote:
>>> > Hi Lionel and Christian,
>>> >
>>> > Below is the robot report for chain->prev, to which __rcu was added as you
>>> > suggested.
>>> >
>>> > How can I fix this line: "tmp = cmpxchg(&chain->prev, prev, replacement);"?
>>> > I checked the kernel header files; it seems there is no cmpxchg variant for RCU pointers.
>>> >
>>> > Any suggestions for fixing this robot report?
>>> >
>>> > Thanks,
>>> > -David
>>> >
>>> > On 2019-03-21 08:24, kbuild test robot wrote:
>>> >> Hi Chunming,
>>> >>
>>> >> I love your patch! Perhaps something to improve:
>>> >>
>>> >> [auto build test WARNING on linus/master]
>>> >> [also build test WARNING on v5.1-rc1 next-20190320]
>>> >> [if your patch is applied to the wrong git tree, please drop us a
>>> >> note to help improve the system]
>>> >>
>>> >> url:
>>> >> 
>>> https://github.com/0day-ci/linux/commits/Chunming-Zhou/dma-buf-add-new-dma_fence_chain-container-v6/20190320-223607
>>> >> reproduce:
>>> >>          # apt-get install sparse
>>> >>          make ARCH=x86_64 allmodconfig
>>> >>          make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'
>>> >>
>>> >>
>>> >> sparse warnings: (new ones prefixed by >>)
>>> >>
>>> >>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
>>> >>>> initializer (different address spaces) @@    expected struct
>>> >>>> dma_fence [noderef] <asn:4>*__old @@    got  dma_fence [noderef]
>>> >>>> <asn:4>*__old @@
>>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: expected struct
>>> >> dma_fence [noderef] <asn:4>*__old
>>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: got struct dma_fence
>>> >> *[assigned] prev
>>> >>>> drivers/dma-buf/dma-fence-chain.c:73:23: sparse: incorrect type in
>>> >>>> initializer (different address spaces) @@    expected struct
>>> >>>> dma_fence [noderef] <asn:4>*__new @@    got  dma_fence [noderef]
>>> >>>> <asn:4>*__new @@
>>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: expected struct
>>> >> dma_fence [noderef] <asn:4>*__new
>>> >>     drivers/dma-buf/dma-fence-chain.c:73:23: got struct dma_fence
>>> >> *[assigned] replacement
>>> >>>> drivers/dma-buf/dma-fence-chain.c:73:21: sparse: incorrect type in
>>> >>>> assignment (different address spaces) @@    expected struct
>>> >>>> dma_fence *tmp @@    got struct dma_fence [noderef] <struct
>>> >>>> dma_fence *tmp @@
>>> >>     drivers/dma-buf/dma-fence-chain.c:73:21: expected struct
>>> >> dma_fence *tmp
>>> >>     drivers/dma-buf/dma-fence-chain.c:73:21: got struct dma_fence
>>> >> [noderef] <asn:4>*[assigned] __ret
>>> >>>> drivers/dma-buf/dma-fence-chain.c:190:28: sparse: incorrect 
>>> type in
>>> >>>> argument 1 (different address spaces) @@    expected struct
>>> >>>> dma_fence *fence @@    got struct dma_fence struct dma_fence 
>>> *fence @@
>>> >> drivers/dma-buf/dma-fence-chain.c:190:28:    expected struct
>>> >> dma_fence *fence
>>> >> drivers/dma-buf/dma-fence-chain.c:190:28:    got struct dma_fence
>>> >> [noderef] <asn:4>*prev
>>> >>>> drivers/dma-buf/dma-fence-chain.c:222:21: sparse: incorrect 
>>> type in
>>> >>>> assignment (different address spaces) @@    expected struct
>>> >>>> dma_fence [noderef] <asn:4>*prev @@    got [noderef] 
>>> <asn:4>*prev @@
>>> >> drivers/dma-buf/dma-fence-chain.c:222:21:    expected struct
>>> >> dma_fence [noderef] <asn:4>*prev
>>> >> drivers/dma-buf/dma-fence-chain.c:222:21:    got struct dma_fence
>>> >> *prev
>>> >>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
>>> >> using sizeof(void)
>>> >>     drivers/dma-buf/dma-fence-chain.c:235:33: sparse: expression
>>> >> using sizeof(void)
>>> >>
>>> >> vim +73 drivers/dma-buf/dma-fence-chain.c
>>> >>
>>> >>      38
>>> >>      39    /**
>>> >>      40     * dma_fence_chain_walk - chain walking function
>>> >>      41     * @fence: current chain node
>>> >>      42     *
>>> >>      43     * Walk the chain to the next node. Returns the next 
>>> fence
>>> >> or NULL if we are at
>>> >>      44     * the end of the chain. Garbage collects chain nodes
>>> >> which are already
>>> >>      45     * signaled.
>>> >>      46     */
>>> >>      47    struct dma_fence *dma_fence_chain_walk(struct dma_fence
>>> >> *fence)
>>> >>      48    {
>>> >>      49        struct dma_fence_chain *chain, *prev_chain;
>>> >>      50        struct dma_fence *prev, *replacement, *tmp;
>>> >>      51
>>> >>      52        chain = to_dma_fence_chain(fence);
>>> >>      53        if (!chain) {
>>> >>      54            dma_fence_put(fence);
>>> >>      55            return NULL;
>>> >>      56        }
>>> >>      57
>>> >>      58        while ((prev = dma_fence_chain_get_prev(chain))) {
>>> >>      59
>>> >>      60            prev_chain = to_dma_fence_chain(prev);
>>> >>      61            if (prev_chain) {
>>> >>      62                if (!dma_fence_is_signaled(prev_chain->fence))
>>> >>      63                    break;
>>> >>      64
>>> >>      65                replacement =
>>> >> dma_fence_chain_get_prev(prev_chain);
>>> >>      66            } else {
>>> >>      67                if (!dma_fence_is_signaled(prev))
>>> >>      68                    break;
>>> >>      69
>>> >>      70                replacement = NULL;
>>> >>      71            }
>>> >>      72
>>> >>    > 73            tmp = cmpxchg(&chain->prev, prev, replacement);
>>> >>      74            if (tmp == prev)
>>> >>      75                dma_fence_put(tmp);
>>> >>      76            else
>>> >>      77 dma_fence_put(replacement);
>>> >>      78            dma_fence_put(prev);
>>> >>      79        }
>>> >>      80
>>> >>      81        dma_fence_put(fence);
>>> >>      82        return prev;
>>> >>      83    }
>>> >>      84    EXPORT_SYMBOL(dma_fence_chain_walk);
>>> >>      85
>>> >>      86    /**
>>> >>      87     * dma_fence_chain_find_seqno - find fence chain node by
>>> >> seqno
>>> >>      88     * @pfence: pointer to the chain node where to start
>>> >>      89     * @seqno: the sequence number to search for
>>> >>      90     *
>>> >>      91     * Advance the fence pointer to the chain node which will
>>> >> signal this sequence
>>> >>      92     * number. If no sequence number is provided then this is
>>> >> a no-op.
>>> >>      93     *
>>> >>      94     * Returns EINVAL if the fence is not a chain node or the
>>> >> sequence number has
>>> >>      95     * not yet advanced far enough.
>>> >>      96     */
>>> >>      97    int dma_fence_chain_find_seqno(struct dma_fence **pfence,
>>> >> uint64_t seqno)
>>> >>      98    {
>>> >>      99        struct dma_fence_chain *chain;
>>> >>     100
>>> >>     101        if (!seqno)
>>> >>     102            return 0;
>>> >>     103
>>> >>     104        chain = to_dma_fence_chain(*pfence);
>>> >>     105        if (!chain || chain->base.seqno < seqno)
>>> >>     106            return -EINVAL;
>>> >>     107
>>> >>     108 dma_fence_chain_for_each(*pfence, &chain->base) {
>>> >>     109            if ((*pfence)->context != chain->base.context ||
>>> >>     110 to_dma_fence_chain(*pfence)->prev_seqno < seqno)
>>> >>     111                break;
>>> >>     112        }
>>> >>     113 dma_fence_put(&chain->base);
>>> >>     114
>>> >>     115        return 0;
>>> >>     116    }
>>> >>     117 EXPORT_SYMBOL(dma_fence_chain_find_seqno);
>>> >>     118
>>> >>     119    static const char *dma_fence_chain_get_driver_name(struct
>>> >> dma_fence *fence)
>>> >>     120    {
>>> >>     121            return "dma_fence_chain";
>>> >>     122    }
>>> >>     123
>>> >>     124    static const char
>>> >> *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
>>> >>     125    {
>>> >>     126            return "unbound";
>>> >>     127    }
>>> >>     128
>>> >>     129    static void dma_fence_chain_irq_work(struct irq_work 
>>> *work)
>>> >>     130    {
>>> >>     131        struct dma_fence_chain *chain;
>>> >>     132
>>> >>     133        chain = container_of(work, typeof(*chain), work);
>>> >>     134
>>> >>     135        /* Try to rearm the callback */
>>> >>     136        if (!dma_fence_chain_enable_signaling(&chain->base))
>>> >>     137            /* Ok, we are done. No more unsignaled fences 
>>> left */
>>> >>     138 dma_fence_signal(&chain->base);
>>> >>     139 dma_fence_put(&chain->base);
>>> >>     140    }
>>> >>     141
>>> >>     142    static void dma_fence_chain_cb(struct dma_fence *f, 
>>> struct
>>> >> dma_fence_cb *cb)
>>> >>     143    {
>>> >>     144        struct dma_fence_chain *chain;
>>> >>     145
>>> >>     146        chain = container_of(cb, typeof(*chain), cb);
>>> >>     147 irq_work_queue(&chain->work);
>>> >>     148        dma_fence_put(f);
>>> >>     149    }
>>> >>     150
>>> >>     151    static bool dma_fence_chain_enable_signaling(struct
>>> >> dma_fence *fence)
>>> >>     152    {
>>> >>     153        struct dma_fence_chain *head = 
>>> to_dma_fence_chain(fence);
>>> >>     154
>>> >>     155 dma_fence_get(&head->base);
>>> >>     156        dma_fence_chain_for_each(fence, &head->base) {
>>> >>     157            struct dma_fence_chain *chain =
>>> >> to_dma_fence_chain(fence);
>>> >>     158            struct dma_fence *f = chain ? chain->fence : 
>>> fence;
>>> >>     159
>>> >>     160            dma_fence_get(f);
>>> >>     161            if (!dma_fence_add_callback(f, &head->cb,
>>> >> dma_fence_chain_cb)) {
>>> >>     162                dma_fence_put(fence);
>>> >>     163                return true;
>>> >>     164            }
>>> >>     165            dma_fence_put(f);
>>> >>     166        }
>>> >>     167 dma_fence_put(&head->base);
>>> >>     168        return false;
>>> >>     169    }
>>> >>     170
>>> >>     171    static bool dma_fence_chain_signaled(struct dma_fence 
>>> *fence)
>>> >>     172    {
>>> >>     173        dma_fence_chain_for_each(fence, fence) {
>>> >>     174            struct dma_fence_chain *chain =
>>> >> to_dma_fence_chain(fence);
>>> >>     175            struct dma_fence *f = chain ? chain->fence : 
>>> fence;
>>> >>     176
>>> >>     177            if (!dma_fence_is_signaled(f)) {
>>> >>     178                dma_fence_put(fence);
>>> >>     179                return false;
>>> >>     180            }
>>> >>     181        }
>>> >>     182
>>> >>     183        return true;
>>> >>     184    }
>>> >>     185
>>> >>     186    static void dma_fence_chain_release(struct dma_fence 
>>> *fence)
>>> >>     187    {
>>> >>     188        struct dma_fence_chain *chain =
>>> >> to_dma_fence_chain(fence);
>>> >>     189
>>> >>   > 190 dma_fence_put(chain->prev);
>>> >>     191        dma_fence_put(chain->fence);
>>> >>     192        dma_fence_free(fence);
>>> >>     193    }
>>> >>     194
>>> >>     195    const struct dma_fence_ops dma_fence_chain_ops = {
>>> >>     196        .get_driver_name = dma_fence_chain_get_driver_name,
>>> >>     197        .get_timeline_name = 
>>> dma_fence_chain_get_timeline_name,
>>> >>     198        .enable_signaling = dma_fence_chain_enable_signaling,
>>> >>     199        .signaled = dma_fence_chain_signaled,
>>> >>     200        .release = dma_fence_chain_release,
>>> >>     201    };
>>> >>     202    EXPORT_SYMBOL(dma_fence_chain_ops);
>>> >>     203
>>> >>     204    /**
>>> >>     205     * dma_fence_chain_init - initialize a fence chain
>>> >>     206     * @chain: the chain node to initialize
>>> >>     207     * @prev: the previous fence
>>> >>     208     * @fence: the current fence
>>> >>     209     *
>>> >>     210     * Initialize a new chain node and either start a new
>>> >> chain or add the node to
>>> >>     211     * the existing chain of the previous fence.
>>> >>     212     */
>>> >>     213    void dma_fence_chain_init(struct dma_fence_chain *chain,
>>> >>     214                  struct dma_fence *prev,
>>> >>     215                  struct dma_fence *fence,
>>> >>     216                  uint64_t seqno)
>>> >>     217    {
>>> >>     218        struct dma_fence_chain *prev_chain =
>>> >> to_dma_fence_chain(prev);
>>> >>     219        uint64_t context;
>>> >>     220
>>> >>     221 spin_lock_init(&chain->lock);
>>> >>   > 222        chain->prev = prev;
>>> >>
>>> >> ---
>>> >> 0-DAY kernel test infrastructure Open Source
>>> >> Technology Center
>>> >> https://lists.01.org/pipermail/kbuild-all Intel Corporation
>>> >
>>>
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx



[-- Attachment #2: Type: text/plain, Size: 153 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2019-03-22 15:44 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-03-20  5:47 [PATCH 1/9] dma-buf: add new dma_fence_chain container v6 Chunming Zhou
2019-03-20  5:47 ` [PATCH 5/9] drm/syncobj: use the timeline point in drm_syncobj_find_fence v4 Chunming Zhou
     [not found] ` <20190320054724.14636-1-david1.zhou-5C7GfCeVMHo@public.gmane.org>
2019-03-20  5:47   ` [PATCH 2/9] drm/syncobj: add new drm_syncobj_add_point interface v4 Chunming Zhou
2019-03-20  5:47   ` [PATCH 3/9] drm/syncobj: add support for timeline point wait v8 Chunming Zhou
2019-03-20  5:47   ` [PATCH 4/9] drm/syncobj: add timeline payload query ioctl v6 Chunming Zhou
2019-03-20  5:47   ` [PATCH 6/9] drm/amdgpu: add timeline support in amdgpu CS v3 Chunming Zhou
2019-03-20  5:47   ` [PATCH 7/9] drm/syncobj: add transition iotcls between binary and timeline v2 Chunming Zhou
2019-03-20  5:47   ` [PATCH 8/9] drm/syncobj: add timeline signal ioctl for syncobj v4 Chunming Zhou
2019-03-20  5:47   ` [PATCH 9/9] drm/amdgpu: update version for timeline syncobj support in amdgpu Chunming Zhou
2019-03-21  0:24 ` [PATCH 1/9] dma-buf: add new dma_fence_chain container v6 kbuild test robot
     [not found]   ` <201903210813.K5uuPgyD%lkp-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
2019-03-21  6:34     ` zhoucm1
2019-03-21 11:30       ` Christian König
2019-03-21 14:13         ` Zhou, David(ChunMing)
     [not found]           ` <2q1nzdv6akhy5260mi-2nbzia-ttsfc8-dz76ft-4ifnnm-oz4kfp-uezs6vwoq2oq-wd0h34-v7m6m7-cd3muc-v4wzqd-yktmtp-2338r6riorqe-ez3yxk-8jr766yyjh0ob0v5e8-33h712k31w5b1ntkih.1553177522341-2ueSQiBKiTY7tOexoI0I+QC/G2K4zDHf@public.gmane.org>
2019-03-21 14:40             ` [PATCH " Christian König
     [not found]               ` <6bd1f7e6-965b-3bd5-e4e0-f3b04e0638fd-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-03-22  7:34                 ` zhoucm1
     [not found]                   ` <5cb29811-a383-f473-5443-25e9d968c516-5C7GfCeVMHo@public.gmane.org>
2019-03-22 15:44                     ` Christian König

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).