* [PATCH 1/2] dma-buf: add dma_resv_for_each_fence v3
@ 2021-10-06 12:36 ` Christian König
  0 siblings, 0 replies; 11+ messages in thread
From: Christian König @ 2021-10-06 12:36 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel; +Cc: tvrtko.ursulin

A simpler version of the iterator to be used when the dma_resv object is
locked.

v2: fix index check here as well
v3: minor coding improvement, some documentation cleanup

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 51 ++++++++++++++++++++++++++++++++++++++
 include/linux/dma-resv.h   | 20 +++++++++++++++
 2 files changed, 71 insertions(+)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index a480af9581bd..2f98caa68ae5 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -423,6 +423,57 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
 }
 EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
 
+/**
+ * dma_resv_iter_first - first fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Return the first fence in the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
+{
+	struct dma_fence *fence;
+
+	dma_resv_assert_held(cursor->obj);
+
+	cursor->index = 0;
+	if (cursor->all_fences)
+		cursor->fences = dma_resv_shared_list(cursor->obj);
+	else
+		cursor->fences = NULL;
+
+	fence = dma_resv_excl_fence(cursor->obj);
+	if (!fence)
+		fence = dma_resv_iter_next(cursor);
+
+	cursor->is_restarted = true;
+	return fence;
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_first);
+
+/**
+ * dma_resv_iter_next - next fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Return the next fence from the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
+{
+	unsigned int idx;
+
+	dma_resv_assert_held(cursor->obj);
+
+	cursor->is_restarted = false;
+	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
+		return NULL;
+
+	idx = cursor->index++;
+	return rcu_dereference_protected(cursor->fences->shared[idx],
+					 dma_resv_held(cursor->obj));
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_next);
+
 /**
  * dma_resv_copy_fences - Copy all fences from src to dst.
  * @dst: the destination reservation object
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 764138ad8583..491359cea54c 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -179,6 +179,8 @@ struct dma_resv_iter {
 
 struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
 struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
 
 /**
  * dma_resv_iter_begin - initialize a dma_resv_iter object
@@ -244,6 +246,24 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
 	for (fence = dma_resv_iter_first_unlocked(cursor);		\
 	     fence; fence = dma_resv_iter_next_unlocked(cursor))
 
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @all_fences: true if all fences should be returned
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @all_fences controls whether the shared fences are returned
+ * as well. The cursor initialisation is part of the iterator, and the fence
+ * stays valid as long as the lock is held, so no extra reference to the fence
+ * is taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+	     fence = dma_resv_iter_first(cursor); fence;	\
+	     fence = dma_resv_iter_next(cursor))
+
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-- 
2.25.1
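
For illustration, a caller that already holds the reservation lock could walk
the fences like this. This is only a minimal sketch; count_unsignaled() and its
use of dma_fence_is_signaled() are made up for the example and are not part of
the patch:

/*
 * Usage sketch: count the unsignaled fences of an object whose
 * reservation lock the caller already holds via dma_resv_lock().
 */
static unsigned int count_unsignaled(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_for_each_fence(&cursor, obj, true, fence) {
		/* fence stays valid while the lock is held, no extra ref */
		if (!dma_fence_is_signaled(fence))
			count++;
	}

	return count;
}

Unlike the unlocked variant, no explicit dma_resv_iter_begin()/dma_resv_iter_end()
bracketing and no extra fence references are needed from the caller here.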


* [PATCH 2/2] dma-buf: add dma_resv selftest v3
  2021-10-06 12:36 ` [Intel-gfx] " Christian König
@ 2021-10-06 12:36   ` Christian König
  -1 siblings, 0 replies; 11+ messages in thread
From: Christian König @ 2021-10-06 12:36 UTC (permalink / raw)
  To: linaro-mm-sig, dri-devel, linux-media, intel-gfx, daniel; +Cc: tvrtko.ursulin

Just exercising a very minor subset of the functionality, but already
proven useful.

v2: add missing locking
v3: some more cleanup and consolidation, add unlocked test as well

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/Makefile      |   3 +-
 drivers/dma-buf/selftests.h   |   1 +
 drivers/dma-buf/st-dma-resv.c | 282 ++++++++++++++++++++++++++++++++++
 3 files changed, 285 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma-buf/st-dma-resv.c

diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 1ef021273a06..511805dbeb75 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
 dmabuf_selftests-y := \
 	selftest.o \
 	st-dma-fence.o \
-	st-dma-fence-chain.o
+	st-dma-fence-chain.o \
+	st-dma-resv.o
 
 obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index bc8cea67bf1e..97d73aaa31da 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -12,3 +12,4 @@
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
 selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
new file mode 100644
index 000000000000..50d3791ccb8c
--- /dev/null
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ * Copyright © 2021 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/dma-resv.h>
+
+#include "selftest.h"
+
+static struct spinlock fence_lock;
+
+static const char *fence_name(struct dma_fence *f)
+{
+	return "selftest";
+}
+
+static const struct dma_fence_ops fence_ops = {
+	.get_driver_name = fence_name,
+	.get_timeline_name = fence_name,
+};
+
+static struct dma_fence *alloc_fence(void)
+{
+	struct dma_fence *f;
+
+	f = kmalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	dma_fence_init(f, &fence_ops, &fence_lock, 0, 0);
+	return f;
+}
+
+static int sanitycheck(void *arg)
+{
+	struct dma_resv resv;
+	struct dma_fence *f;
+	int r;
+
+	f = alloc_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_signal(f);
+	dma_fence_put(f);
+
+	dma_resv_init(&resv);
+	r = dma_resv_lock(&resv, NULL);
+	if (r)
+		pr_err("Resv locking failed\n");
+	else
+		dma_resv_unlock(&resv);
+	dma_resv_fini(&resv);
+	return r;
+}
+
+static int test_signaling(void *arg, bool shared)
+{
+	struct dma_resv resv;
+	struct dma_fence *f;
+	int r;
+
+	f = alloc_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_resv_init(&resv);
+	r = dma_resv_lock(&resv, NULL);
+	if (r) {
+		pr_err("Resv locking failed\n");
+		goto err_free;
+	}
+
+	if (shared) {
+		r = dma_resv_reserve_shared(&resv, 1);
+		if (r) {
+			pr_err("Resv shared slot allocation failed\n");
+			goto err_unlock;
+		}
+
+		dma_resv_add_shared_fence(&resv, f);
+	} else {
+		dma_resv_add_excl_fence(&resv, f);
+	}
+
+	if (dma_resv_test_signaled(&resv, shared)) {
+		pr_err("Resv unexpectedly signaled\n");
+		r = -EINVAL;
+		goto err_unlock;
+	}
+	dma_fence_signal(f);
+	if (!dma_resv_test_signaled(&resv, shared)) {
+		pr_err("Resv not reporting signaled\n");
+		r = -EINVAL;
+		goto err_unlock;
+	}
+err_unlock:
+	dma_resv_unlock(&resv);
+err_free:
+	dma_resv_fini(&resv);
+	dma_fence_put(f);
+	return r;
+}
+
+static int test_excl_signaling(void *arg)
+{
+	return test_signaling(arg, false);
+}
+
+static int test_shared_signaling(void *arg)
+{
+	return test_signaling(arg, true);
+}
+
+static int test_for_each(void *arg, bool shared)
+{
+	struct dma_resv_iter cursor;
+	struct dma_fence *f, *fence;
+	struct dma_resv resv;
+	int r;
+
+	f = alloc_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_resv_init(&resv);
+	r = dma_resv_lock(&resv, NULL);
+	if (r) {
+		pr_err("Resv locking failed\n");
+		goto err_free;
+	}
+
+	if (shared) {
+		r = dma_resv_reserve_shared(&resv, 1);
+		if (r) {
+			pr_err("Resv shared slot allocation failed\n");
+			goto err_unlock;
+		}
+
+		dma_resv_add_shared_fence(&resv, f);
+	} else {
+		dma_resv_add_excl_fence(&resv, f);
+	}
+
+	r = -ENOENT;
+	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
+		if (!r) {
+			pr_err("More than one fence found\n");
+			r = -EINVAL;
+			goto err_unlock;
+		}
+		if (f != fence) {
+			pr_err("Unexpected fence\n");
+			r = -EINVAL;
+			goto err_unlock;
+		}
+		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+			pr_err("Unexpected fence usage\n");
+			r = -EINVAL;
+			goto err_unlock;
+		}
+		r = 0;
+	}
+	if (r) {
+		pr_err("No fence found\n");
+		goto err_unlock;
+	}
+	dma_fence_signal(f);
+err_unlock:
+	dma_resv_unlock(&resv);
+err_free:
+	dma_resv_fini(&resv);
+	dma_fence_put(f);
+	return r;
+}
+
+static int test_excl_for_each(void *arg)
+{
+	return test_for_each(arg, false);
+}
+
+static int test_shared_for_each(void *arg)
+{
+	return test_for_each(arg, true);
+}
+
+static int test_for_each_unlocked(void *arg, bool shared)
+{
+	struct dma_resv_iter cursor;
+	struct dma_fence *f, *fence;
+	struct dma_resv resv;
+	int r;
+
+	f = alloc_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_resv_init(&resv);
+	r = dma_resv_lock(&resv, NULL);
+	if (r) {
+		pr_err("Resv locking failed\n");
+		goto err_free;
+	}
+
+	if (shared) {
+		r = dma_resv_reserve_shared(&resv, 1);
+		if (r) {
+			pr_err("Resv shared slot allocation failed\n");
+			dma_resv_unlock(&resv);
+			goto err_free;
+		}
+
+		dma_resv_add_shared_fence(&resv, f);
+	} else {
+		dma_resv_add_excl_fence(&resv, f);
+	}
+	dma_resv_unlock(&resv);
+
+	r = -ENOENT;
+	dma_resv_iter_begin(&cursor, &resv, shared);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (!r) {
+			dma_resv_iter_end(&cursor);
+			pr_err("More than one fence found\n");
+			r = -EINVAL;
+			goto err_free;
+		}
+		if (f != fence) {
+			dma_resv_iter_end(&cursor);
+			pr_err("Unexpected fence\n");
+			r = -EINVAL;
+			goto err_free;
+		}
+		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+			dma_resv_iter_end(&cursor);
+			pr_err("Unexpected fence usage\n");
+			r = -EINVAL;
+			goto err_free;
+		}
+		r = 0;
+	}
+	dma_resv_iter_end(&cursor);
+	if (r) {
+		pr_err("No fence found\n");
+		goto err_free;
+	}
+	dma_fence_signal(f);
+err_free:
+	dma_resv_fini(&resv);
+	dma_fence_put(f);
+	return r;
+}
+
+static int test_excl_for_each_unlocked(void *arg)
+{
+	return test_for_each_unlocked(arg, false);
+}
+
+static int test_shared_for_each_unlocked(void *arg)
+{
+	return test_for_each_unlocked(arg, true);
+}
+
+int dma_resv(void)
+{
+	static const struct subtest tests[] = {
+		SUBTEST(sanitycheck),
+		SUBTEST(test_excl_signaling),
+		SUBTEST(test_shared_signaling),
+		SUBTEST(test_excl_for_each),
+		SUBTEST(test_shared_for_each),
+		SUBTEST(test_excl_for_each_unlocked),
+		SUBTEST(test_shared_for_each_unlocked),
+	};
+
+	spin_lock_init(&fence_lock);
+	return subtests(tests, NULL);
+}
-- 
2.25.1
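
As an aside, the shared vs. exclusive setup block that the signaling and
iterator tests repeat could be pulled into a small helper along these lines.
This is only a sketch; the helper name add_test_fence() is made up and not part
of the patch:

/*
 * Possible consolidation of the repeated setup block; must be called
 * with the dma_resv lock held, just like the open-coded version.
 */
static int add_test_fence(struct dma_resv *resv, struct dma_fence *f,
			  bool shared)
{
	int r;

	if (shared) {
		r = dma_resv_reserve_shared(resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			return r;
		}
		dma_resv_add_shared_fence(resv, f);
	} else {
		dma_resv_add_excl_fence(resv, f);
	}

	return 0;
}

Each test would then call add_test_fence(&resv, f, shared) while holding the
lock and jump to its unlock/free label when the return value is non-zero.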


* Re: [PATCH 1/2] dma-buf: add dma_resv_for_each_fence v3
  2021-10-06 12:36 ` [Intel-gfx] " Christian König
@ 2021-10-06 12:40   ` Tvrtko Ursulin
  -1 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2021-10-06 12:40 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media,
	intel-gfx, daniel


On 06/10/2021 13:36, Christian König wrote:
> A simpler version of the iterator to be used when the dma_resv object is
> locked.
> 
> v2: fix index check here as well
> v3: minor coding improvement, some documentation cleanup
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/dma-buf/dma-resv.c | 51 ++++++++++++++++++++++++++++++++++++++
>   include/linux/dma-resv.h   | 20 +++++++++++++++
>   2 files changed, 71 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index a480af9581bd..2f98caa68ae5 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -423,6 +423,57 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
>   }
>   EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
>   
> +/**
> + * dma_resv_iter_first - first fence from a locked dma_resv object
> + * @cursor: cursor to record the current position
> + *
> + * Return the first fence in the dma_resv object while holding the
> + * &dma_resv.lock.
> + */
> +struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
> +{
> +	struct dma_fence *fence;
> +
> +	dma_resv_assert_held(cursor->obj);
> +
> +	cursor->index = 0;
> +	if (cursor->all_fences)
> +		cursor->fences = dma_resv_shared_list(cursor->obj);
> +	else
> +		cursor->fences = NULL;
> +
> +	fence = dma_resv_excl_fence(cursor->obj);
> +	if (!fence)
> +		fence = dma_resv_iter_next(cursor);
> +
> +	cursor->is_restarted = true;
> +	return fence;
> +}
> +EXPORT_SYMBOL_GPL(dma_resv_iter_first);
> +
> +/**
> + * dma_resv_iter_next - next fence from a locked dma_resv object
> + * @cursor: cursor to record the current position
> + *
> + * Return the next fences from the dma_resv object while holding the
> + * &dma_resv.lock.
> + */
> +struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
> +{
> +	unsigned int idx;
> +
> +	dma_resv_assert_held(cursor->obj);
> +
> +	cursor->is_restarted = false;
> +	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
> +		return NULL;
> +
> +	idx = cursor->index++;
> +	return rcu_dereference_protected(cursor->fences->shared[idx],
> +					 dma_resv_held(cursor->obj));
> +}
> +EXPORT_SYMBOL_GPL(dma_resv_iter_next);
> +
>   /**
>    * dma_resv_copy_fences - Copy all fences from src to dst.
>    * @dst: the destination reservation object
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 764138ad8583..491359cea54c 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -179,6 +179,8 @@ struct dma_resv_iter {
>   
>   struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
>   struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
> +struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
> +struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
>   
>   /**
>    * dma_resv_iter_begin - initialize a dma_resv_iter object
> @@ -244,6 +246,24 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
>   	for (fence = dma_resv_iter_first_unlocked(cursor);		\
>   	     fence; fence = dma_resv_iter_next_unlocked(cursor))
>   
> +/**
> + * dma_resv_for_each_fence - fence iterator
> + * @cursor: a struct dma_resv_iter pointer
> + * @obj: a dma_resv object pointer
> + * @all_fences: true if all fences should be returned
> + * @fence: the current fence
> + *
> + * Iterate over the fences in a struct dma_resv object while holding the
> + * &dma_resv.lock. @all_fences controls if the shared fences are returned as
> + * well. The cursor initialisation is part of the iterator and the fence stays
> + * valid as long as the lock is held and so no extra reference to the fence is
> + * taken.
> + */
> +#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
> +	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
> +	     fence = dma_resv_iter_first(cursor); fence;	\
> +	     fence = dma_resv_iter_next(cursor))
> +
>   #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>   #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>   
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

* Re: [PATCH 2/2] dma-buf: add dma_resv selftest v3
  2021-10-06 12:36   ` [Intel-gfx] " Christian König
@ 2021-10-06 12:54     ` Tvrtko Ursulin
  -1 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2021-10-06 12:54 UTC (permalink / raw)
  To: Christian König, linaro-mm-sig, dri-devel, linux-media,
	intel-gfx, daniel


On 06/10/2021 13:36, Christian König wrote:
> Just exercising a very minor subset of the functionality, but already
> proven useful.
> 
> v2: add missing locking
> v3: some more cleanup and consolidation, add unlocked test as well
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/dma-buf/Makefile      |   3 +-
>   drivers/dma-buf/selftests.h   |   1 +
>   drivers/dma-buf/st-dma-resv.c | 282 ++++++++++++++++++++++++++++++++++
>   3 files changed, 285 insertions(+), 1 deletion(-)
>   create mode 100644 drivers/dma-buf/st-dma-resv.c
> 
> diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
> index 1ef021273a06..511805dbeb75 100644
> --- a/drivers/dma-buf/Makefile
> +++ b/drivers/dma-buf/Makefile
> @@ -11,6 +11,7 @@ obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
>   dmabuf_selftests-y := \
>   	selftest.o \
>   	st-dma-fence.o \
> -	st-dma-fence-chain.o
> +	st-dma-fence-chain.o \
> +	st-dma-resv.o
>   
>   obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
> diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
> index bc8cea67bf1e..97d73aaa31da 100644
> --- a/drivers/dma-buf/selftests.h
> +++ b/drivers/dma-buf/selftests.h
> @@ -12,3 +12,4 @@
>   selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
>   selftest(dma_fence, dma_fence)
>   selftest(dma_fence_chain, dma_fence_chain)
> +selftest(dma_resv, dma_resv)
> diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
> new file mode 100644
> index 000000000000..50d3791ccb8c
> --- /dev/null
> +++ b/drivers/dma-buf/st-dma-resv.c
> @@ -0,0 +1,282 @@
> +/* SPDX-License-Identifier: MIT */
> +
> +/*
> +* Copyright © 2019 Intel Corporation
> +* Copyright © 2021 Advanced Micro Devices, Inc.
> +*/
> +
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/dma-resv.h>
> +
> +#include "selftest.h"
> +
> +static struct spinlock fence_lock;
> +
> +static const char *fence_name(struct dma_fence *f)
> +{
> +	return "selftest";
> +}
> +
> +static const struct dma_fence_ops fence_ops = {
> +	.get_driver_name = fence_name,
> +	.get_timeline_name = fence_name,
> +};
> +
> +static struct dma_fence *alloc_fence(void)
> +{
> +	struct dma_fence *f;
> +
> +	f = kmalloc(sizeof(*f), GFP_KERNEL);
> +	if (!f)
> +		return NULL;
> +
> +	dma_fence_init(f, &fence_ops, &fence_lock, 0, 0);
> +	return f;
> +}
> +
> +static int sanitycheck(void *arg)
> +{
> +	struct dma_resv resv;
> +	struct dma_fence *f;
> +	int r;
> +
> +	f = alloc_fence();
> +	if (!f)
> +		return -ENOMEM;
> +
> +	dma_fence_signal(f);
> +	dma_fence_put(f);
> +
> +	dma_resv_init(&resv);
> +	r = dma_resv_lock(&resv, NULL);
> +	if (r)
> +		pr_err("Resv locking failed\n");
> +	else
> +		dma_resv_unlock(&resv);
> +	dma_resv_fini(&resv);
> +	return r;
> +}
> +
> +static int test_signaling(void *arg, bool shared)
> +{
> +	struct dma_resv resv;
> +	struct dma_fence *f;
> +	int r;
> +
> +	f = alloc_fence();
> +	if (!f)
> +		return -ENOMEM;
> +
> +	dma_resv_init(&resv);
> +	r = dma_resv_lock(&resv, NULL);
> +	if (r) {
> +		pr_err("Resv locking failed\n");
> +		goto err_free;
> +	}
> +
> +	if (shared) {
> +		r = dma_resv_reserve_shared(&resv, 1);
> +		if (r) {
> +			pr_err("Resv shared slot allocation failed\n");
> +			goto err_unlock;
> +		}
> +
> +		dma_resv_add_shared_fence(&resv, f);
> +	} else {
> +		dma_resv_add_excl_fence(&resv, f);
> +	}
> +
> +	if (dma_resv_test_signaled(&resv, shared)) {
> +		pr_err("Resv unexpectedly signaled\n");
> +		r = -EINVAL;
> +		goto err_unlock;
> +	}
> +	dma_fence_signal(f);
> +	if (!dma_resv_test_signaled(&resv, shared)) {
> +		pr_err("Resv not reporting signaled\n");
> +		r = -EINVAL;
> +		goto err_unlock;
> +	}
> +err_unlock:
> +	dma_resv_unlock(&resv);
> +err_free:
> +	dma_resv_fini(&resv);
> +	dma_fence_put(f);
> +	return r;
> +}
> +
> +static int test_excl_signaling(void *arg)
> +{
> +	return test_signaling(arg, false);
> +}
> +
> +static int test_shared_signaling(void *arg)
> +{
> +	return test_signaling(arg, true);
> +}
> +
> +static int test_for_each(void *arg, bool shared)
> +{
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *f, *fence;
> +	struct dma_resv resv;
> +	int r;
> +
> +	f = alloc_fence();
> +	if (!f)
> +		return -ENOMEM;
> +
> +	dma_resv_init(&resv);
> +	r = dma_resv_lock(&resv, NULL);
> +	if (r) {
> +		pr_err("Resv locking failed\n");
> +		goto err_free;
> +	}
> +
> +	if (shared) {
> +		r = dma_resv_reserve_shared(&resv, 1);
> +		if (r) {
> +			pr_err("Resv shared slot allocation failed\n");
> +			goto err_unlock;
> +		}
> +
> +		dma_resv_add_shared_fence(&resv, f);
> +	} else {
> +		dma_resv_add_excl_fence(&resv, f);
> +	}

This block repeats three times so it could be consolidated, but it doesn't 
matter hugely.

> +
> +	r = -ENOENT;
> +	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
> +		if (!r) {
> +			pr_err("More than one fence found\n");
> +			r = -EINVAL;
> +			goto err_unlock;
> +		}
> +		if (f != fence) {
> +			pr_err("Unexpected fence\n");
> +			r = -EINVAL;
> +			goto err_unlock;
> +		}
> +		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
> +			pr_err("Unexpected fence usage\n");
> +			r = -EINVAL;
> +			goto err_unlock;
> +		}
> +		r = 0;
> +	}
> +	if (r) {
> +		pr_err("No fence found\n");
> +		goto err_unlock;
> +	}
> +	dma_fence_signal(f);

This would warn if the loop jumps to err_unlock but I guess there are 
bigger problems in that case.

> +err_unlock:
> +	dma_resv_unlock(&resv);
> +err_free:
> +	dma_resv_fini(&resv);
> +	dma_fence_put(f);
> +	return r;
> +}
> +
> +static int test_excl_for_each(void *arg)
> +{
> +	return test_for_each(arg, false);
> +}
> +
> +static int test_shared_for_each(void *arg)
> +{
> +	return test_for_each(arg, false);

true

With that:

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

> +}
> +
> +static int test_for_each_unlocked(void *arg, bool shared)
> +{
> +	struct dma_resv_iter cursor;
> +	struct dma_fence *f, *fence;
> +	struct dma_resv resv;
> +	int r;
> +
> +	f = alloc_fence();
> +	if (!f)
> +		return -ENOMEM;
> +
> +	dma_resv_init(&resv);
> +	r = dma_resv_lock(&resv, NULL);
> +	if (r) {
> +		pr_err("Resv locking failed\n");
> +		goto err_free;
> +	}
> +
> +	if (shared) {
> +		r = dma_resv_reserve_shared(&resv, 1);
> +		if (r) {
> +			pr_err("Resv shared slot allocation failed\n");
> +			dma_resv_unlock(&resv);
> +			goto err_free;
> +		}
> +
> +		dma_resv_add_shared_fence(&resv, f);
> +	} else {
> +		dma_resv_add_excl_fence(&resv, f);
> +	}
> +	dma_resv_unlock(&resv);
> +
> +	r = -ENOENT;
> +	dma_resv_iter_begin(&cursor, &resv, shared);
> +	dma_resv_for_each_fence_unlocked(&cursor, fence) {
> +		if (!r) {
> +			dma_resv_iter_end(&cursor);
> +			pr_err("More than one fence found\n");
> +			r = -EINVAL;
> +			goto err_free;
> +		}
> +		if (f != fence) {
> +			dma_resv_iter_end(&cursor);
> +			pr_err("Unexpected fence\n");
> +			r = -EINVAL;
> +			goto err_free;
> +		}
> +		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
> +			dma_resv_iter_end(&cursor);
> +			pr_err("Unexpected fence usage\n");
> +			r = -EINVAL;
> +			goto err_free;
> +		}
> +		r = 0;
> +	}
> +	dma_resv_iter_end(&cursor);
> +	if (r) {
> +		pr_err("No fence found\n");
> +		goto err_free;
> +	}
> +	dma_fence_signal(f);
> +err_free:
> +	dma_resv_fini(&resv);
> +	dma_fence_put(f);
> +	return r;
> +}
> +
> +static int test_excl_for_each_unlocked(void *arg)
> +{
> +	return test_for_each_unlocked(arg, false);
> +}
> +
> +static int test_shared_for_each_unlocked(void *arg)
> +{
> +	return test_for_each_unlocked(arg, true);
> +}
> +
> +int dma_resv(void)
> +{
> +	static const struct subtest tests[] = {
> +		SUBTEST(sanitycheck),
> +		SUBTEST(test_excl_signaling),
> +		SUBTEST(test_shared_signaling),
> +		SUBTEST(test_excl_for_each),
> +		SUBTEST(test_shared_for_each),
> +		SUBTEST(test_excl_for_each_unlocked),
> +		SUBTEST(test_shared_for_each_unlocked),
> +	};
> +
> +	spin_lock_init(&fence_lock);
> +	return subtests(tests, NULL);
> +}
> 

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/2] dma-buf: add dma_resv_for_each_fence v3
  2021-10-06 12:36 ` [Intel-gfx] " Christian König
@ 2021-10-07 12:53 ` Patchwork
  -1 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2021-10-07 12:53 UTC (permalink / raw)
  To: Christian König; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/2] dma-buf: add dma_resv_for_each_fence v3
URL   : https://patchwork.freedesktop.org/series/95560/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
34a8413e8f2b dma-buf: add dma_resv_for_each_fence v3
-:109: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'cursor' - possible side-effects?
#109: FILE: include/linux/dma-resv.h:262:
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+	     fence = dma_resv_iter_first(cursor); fence;	\
+	     fence = dma_resv_iter_next(cursor))

-:109: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'fence' - possible side-effects?
#109: FILE: include/linux/dma-resv.h:262:
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+	     fence = dma_resv_iter_first(cursor); fence;	\
+	     fence = dma_resv_iter_next(cursor))

-:116: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 0 errors, 1 warnings, 2 checks, 89 lines checked
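
For context, the MACRO_ARG_REUSE checks above note that the cursor and fence
arguments are expanded more than once by dma_resv_for_each_fence(), so an
argument expression with side effects would be evaluated repeatedly. A
contrived example of what to avoid (iter and obj are made-up names, and the
empty loop body is just for illustration):

static void bad_usage(struct dma_resv_iter *iter, struct dma_resv *obj)
{
	struct dma_fence *fence;

	/* BAD: iter++ is expanded several times, so different iterator
	 * objects end up being used within a single loop.
	 */
	dma_resv_for_each_fence(iter++, obj, true, fence)
		;
}

Callers that pass a plain &cursor and a plain fence variable, as the selftests
do, are unaffected.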
92d8e5c71ccd dma-buf: add dma_resv selftest v3
-:40: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#40: 
new file mode 100644

-:45: WARNING:SPDX_LICENSE_TAG: Improper SPDX comment style for 'drivers/dma-buf/st-dma-resv.c', please use '//' instead
#45: FILE: drivers/dma-buf/st-dma-resv.c:1:
+/* SPDX-License-Identifier: MIT */

-:45: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#45: FILE: drivers/dma-buf/st-dma-resv.c:1:
+/* SPDX-License-Identifier: MIT */

-:48: WARNING:BLOCK_COMMENT_STYLE: Block comments should align the * on each line
#48: FILE: drivers/dma-buf/st-dma-resv.c:4:
+/*
+* Copyright © 2019 Intel Corporation

-:235: ERROR:OPEN_BRACE: that open brace { should be on the previous line
#235: FILE: drivers/dma-buf/st-dma-resv.c:191:
+static int test_for_each_unlocked(void *arg, bool shared)
+{

-:302: ERROR:OPEN_BRACE: that open brace { should be on the previous line
#302: FILE: drivers/dma-buf/st-dma-resv.c:258:
+static int test_excl_for_each_unlocked(void *arg)
+{

-:307: ERROR:OPEN_BRACE: that open brace { should be on the previous line
#307: FILE: drivers/dma-buf/st-dma-resv.c:263:
+static int test_shared_for_each_unlocked(void *arg)
+{

-:326: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: "Christian König" <ckoenig.leichtzumerken@gmail.com>' != 'Signed-off-by: Christian König <christian.koenig@amd.com>'

total: 3 errors, 5 warnings, 0 checks, 294 lines checked




* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [1/2] dma-buf: add dma_resv_for_each_fence v3
  2021-10-06 12:36 ` [Intel-gfx] " Christian König
@ 2021-10-07 12:55 ` Patchwork
  -1 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2021-10-07 12:55 UTC (permalink / raw)
  To: Christian König; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/2] dma-buf: add dma_resv_for_each_fence v3
URL   : https://patchwork.freedesktop.org/series/95560/
State : warning

== Summary ==

$ dim sparse --fast origin/drm-tip
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
-
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */



^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Intel-gfx] ✗ Fi.CI.BAT: failure for series starting with [1/2] dma-buf: add dma_resv_for_each_fence v3
  2021-10-06 12:36 ` [Intel-gfx] " Christian König
                   ` (4 preceding siblings ...)
  (?)
@ 2021-10-07 13:26 ` Patchwork
  -1 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2021-10-07 13:26 UTC (permalink / raw)
  To: Christian König; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 8441 bytes --]

== Series Details ==

Series: series starting with [1/2] dma-buf: add dma_resv_for_each_fence v3
URL   : https://patchwork.freedesktop.org/series/95560/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_10693 -> Patchwork_21277
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes introduced with Patchwork_21277 must be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_21277, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_21277:

### IGT changes ###

#### Possible regressions ####

  * igt@core_hotunplug@unbind-rebind:
    - fi-tgl-u2:          NOTRUN -> [INCOMPLETE][1]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-u2/igt@core_hotunplug@unbind-rebind.html

  
New tests
---------

  New tests have been introduced between CI_DRM_10693 and Patchwork_21277:

### New IGT tests (1) ###

  * igt@dmabuf@all@dma_resv:
    - Statuses : 32 pass(s)
    - Exec time: [0.02, 0.08] s

  

Known issues
------------

  Here are the changes found in Patchwork_21277 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@amdgpu/amd_basic@query-info:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][2] ([fdo#109315])
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@amdgpu/amd_basic@query-info.html

  * igt@amdgpu/amd_basic@semaphore:
    - fi-bdw-5557u:       NOTRUN -> [SKIP][3] ([fdo#109271]) +27 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-bdw-5557u/igt@amdgpu/amd_basic@semaphore.html

  * igt@amdgpu/amd_cs_nop@nop-gfx0:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][4] ([fdo#109315] / [i915#2575]) +16 similar issues
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@amdgpu/amd_cs_nop@nop-gfx0.html

  * igt@core_hotunplug@unbind-rebind:
    - fi-bdw-5557u:       NOTRUN -> [WARN][5] ([i915#3718])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-bdw-5557u/igt@core_hotunplug@unbind-rebind.html

  * igt@gem_huc_copy@huc-copy:
    - fi-tgl-u2:          NOTRUN -> [SKIP][6] ([i915#2190])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-u2/igt@gem_huc_copy@huc-copy.html
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][7] ([i915#2190])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@gem_huc_copy@huc-copy.html

  * igt@i915_pm_backlight@basic-brightness:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][8] ([i915#1155])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@i915_pm_backlight@basic-brightness.html

  * igt@i915_selftest@live@execlists:
    - fi-bsw-kefka:       [PASS][9] -> [INCOMPLETE][10] ([i915#2940])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10693/fi-bsw-kefka/igt@i915_selftest@live@execlists.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-bsw-kefka/igt@i915_selftest@live@execlists.html

  * igt@kms_chamelium@common-hpd-after-suspend:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][11] ([fdo#111827]) +8 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@kms_chamelium@common-hpd-after-suspend.html

  * igt@kms_chamelium@dp-crc-fast:
    - fi-bdw-5557u:       NOTRUN -> [SKIP][12] ([fdo#109271] / [fdo#111827]) +8 similar issues
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-bdw-5557u/igt@kms_chamelium@dp-crc-fast.html

  * igt@kms_chamelium@dp-hpd-fast:
    - fi-tgl-u2:          NOTRUN -> [SKIP][13] ([fdo#109284] / [fdo#111827]) +8 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-u2/igt@kms_chamelium@dp-hpd-fast.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
    - fi-tgl-u2:          NOTRUN -> [SKIP][14] ([i915#4103]) +1 similar issue
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-u2/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][15] ([i915#4103]) +1 similar issue
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html

  * igt@kms_force_connector_basic@force-load-detect:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][16] ([fdo#109285])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@kms_force_connector_basic@force-load-detect.html
    - fi-tgl-u2:          NOTRUN -> [SKIP][17] ([fdo#109285])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-u2/igt@kms_force_connector_basic@force-load-detect.html

  * igt@kms_psr@primary_mmap_gtt:
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][18] ([i915#1072]) +3 similar issues
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@kms_psr@primary_mmap_gtt.html

  * igt@prime_vgem@basic-userptr:
    - fi-tgl-u2:          NOTRUN -> [SKIP][19] ([i915#3301])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-u2/igt@prime_vgem@basic-userptr.html
    - fi-tgl-1115g4:      NOTRUN -> [SKIP][20] ([i915#3301])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-1115g4/igt@prime_vgem@basic-userptr.html

  * igt@runner@aborted:
    - fi-bsw-kefka:       NOTRUN -> [FAIL][21] ([fdo#109271] / [i915#1436] / [i915#3428])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-bsw-kefka/igt@runner@aborted.html
    - fi-tgl-u2:          NOTRUN -> [FAIL][22] ([i915#1602] / [i915#2722])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-tgl-u2/igt@runner@aborted.html

  
#### Possible fixes ####

  * igt@kms_chamelium@dp-crc-fast:
    - fi-kbl-7500u:       [FAIL][23] ([i915#1372]) -> [PASS][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10693/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html

  
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109284]: https://bugs.freedesktop.org/show_bug.cgi?id=109284
  [fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
  [fdo#109315]: https://bugs.freedesktop.org/show_bug.cgi?id=109315
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [i915#1072]: https://gitlab.freedesktop.org/drm/intel/issues/1072
  [i915#1155]: https://gitlab.freedesktop.org/drm/intel/issues/1155
  [i915#1372]: https://gitlab.freedesktop.org/drm/intel/issues/1372
  [i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
  [i915#1602]: https://gitlab.freedesktop.org/drm/intel/issues/1602
  [i915#2190]: https://gitlab.freedesktop.org/drm/intel/issues/2190
  [i915#2575]: https://gitlab.freedesktop.org/drm/intel/issues/2575
  [i915#2722]: https://gitlab.freedesktop.org/drm/intel/issues/2722
  [i915#2940]: https://gitlab.freedesktop.org/drm/intel/issues/2940
  [i915#3301]: https://gitlab.freedesktop.org/drm/intel/issues/3301
  [i915#3428]: https://gitlab.freedesktop.org/drm/intel/issues/3428
  [i915#3718]: https://gitlab.freedesktop.org/drm/intel/issues/3718
  [i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103


Participating hosts (42 -> 38)
------------------------------

  Additional (2): fi-tgl-1115g4 fi-tgl-u2 
  Missing    (6): fi-ilk-m540 bat-dg1-6 fi-hsw-4200u fi-bsw-cyan fi-ctg-p8600 bat-jsl-1 


Build changes
-------------

  * Linux: CI_DRM_10693 -> Patchwork_21277

  CI-20190529: 20190529
  CI_DRM_10693: fd46c2693c04811a4c943cef620190993bf0da76 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6235: 65dd7d484d5d09de196def254afebf41dfde1052 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_21277: 92d8e5c71ccd9badf7900ee88e7d992c133f286c @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

92d8e5c71ccd dma-buf: add dma_resv selftest v3
34a8413e8f2b dma-buf: add dma_resv_for_each_fence v3

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21277/index.html

[-- Attachment #2: Type: text/html, Size: 10205 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2021-10-07 13:26 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-06 12:36 [PATCH 1/2] dma-buf: add dma_resv_for_each_fence v3 Christian König
2021-10-06 12:36 ` [Intel-gfx] " Christian König
2021-10-06 12:36 ` [PATCH 2/2] dma-buf: add dma_resv selftest v3 Christian König
2021-10-06 12:36   ` [Intel-gfx] " Christian König
2021-10-06 12:54   ` Tvrtko Ursulin
2021-10-06 12:54     ` [Intel-gfx] " Tvrtko Ursulin
2021-10-06 12:40 ` [PATCH 1/2] dma-buf: add dma_resv_for_each_fence v3 Tvrtko Ursulin
2021-10-06 12:40   ` [Intel-gfx] " Tvrtko Ursulin
2021-10-07 12:53 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/2] " Patchwork
2021-10-07 12:55 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2021-10-07 13:26 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
