* [CI 1/7] drm/i915: support creating LMEM objects
@ 2019-10-25 15:37 ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-25 15:37 UTC (permalink / raw)
  To: intel-gfx

From: Matthew Auld <matthew.auld@intel.com>

We currently define LMEM, or local memory, as just another memory
region, like system memory or stolen memory, which we can expose to
userspace and map to the CPU via some BAR.
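
As a rough usage sketch (hypothetical, not part of this patch, and
modelled on the selftest added below), the new helper is meant to be
used much like the existing object-creation interfaces:

/* Hypothetical sketch; error handling trimmed. */
static int lmem_smoke(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	/* Allocate a page of device-local memory from the LMEM region. */
	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Back the object with pages from the region's buddy allocator. */
	err = i915_gem_object_pin_pages(obj);
	if (!err)
		i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
	return err;
}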

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile                 |  2 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c      | 57 +++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h      | 29 ++++++++++
 drivers/gpu/drm/i915/i915_drv.h               |  3 +
 drivers/gpu/drm/i915/intel_region_lmem.c      | 16 ++++++
 drivers/gpu/drm/i915/intel_region_lmem.h      | 11 ++++
 .../drm/i915/selftests/i915_live_selftests.h  |  1 +
 .../drm/i915/selftests/intel_memory_region.c  | 40 +++++++++++++
 8 files changed, 159 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9bf1c0b20370..2f64db45f8e0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -120,6 +120,7 @@ gem-y += \
 	gem/i915_gem_internal.o \
 	gem/i915_gem_object.o \
 	gem/i915_gem_object_blt.o \
+	gem/i915_gem_lmem.o \
 	gem/i915_gem_mman.o \
 	gem/i915_gem_pages.o \
 	gem/i915_gem_phys.o \
@@ -148,6 +149,7 @@ i915-y += \
 	  i915_scheduler.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
+	  intel_region_lmem.o \
 	  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index 000000000000..6a38d8028110
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+	.get_pages = i915_gem_object_get_pages_buddy,
+	.put_pages = i915_gem_object_put_pages_buddy,
+	.release = i915_gem_object_release_memory_region,
+};
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+	return obj->ops == &i915_gem_lmem_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+			    resource_size_t size,
+			    unsigned int flags)
+{
+	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+					     size, flags);
+}
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+			      resource_size_t size,
+			      unsigned int flags)
+{
+	static struct lock_class_key lock_class;
+	struct drm_i915_private *i915 = mem->i915;
+	struct drm_i915_gem_object *obj;
+
+	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+		return ERR_PTR(-E2BIG);
+
+	obj = i915_gem_object_alloc();
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(&i915->drm, &obj->base, size);
+	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
+
+	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+	i915_gem_object_init_memory_region(obj, mem, flags);
+
+	return obj;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index 000000000000..fc3f15580fe3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+			    resource_size_t size,
+			    unsigned int flags);
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+			      resource_size_t size,
+			      unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16e58a74fa6f..dc33f096200e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -101,6 +101,8 @@
 #include "i915_vma.h"
 #include "i915_irq.h"
 
+#include "intel_region_lmem.h"
+
 #include "intel_gvt.h"
 
 /* General customization:
@@ -1786,6 +1788,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)
 
 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
 
 #define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)
 
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
new file mode 100644
index 000000000000..199532056e1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "intel_region_lmem.h"
+
+const struct intel_memory_region_ops intel_region_lmem_ops = {
+	.init = intel_memory_region_init_buddy,
+	.release = intel_memory_region_release_buddy,
+	.create_object = __i915_gem_lmem_object_create,
+};
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/intel_region_lmem.h
new file mode 100644
index 000000000000..ed2a3bab6443
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_REGION_LMEM_H
+#define __INTEL_REGION_LMEM_H
+
+extern const struct intel_memory_region_ops intel_region_lmem_ops;
+
+#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 00a063730bc3..4b3cac73e291 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -33,6 +33,7 @@ selftest(gem_contexts, i915_gem_context_live_selftests)
 selftest(blt, i915_gem_object_blt_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
 selftest(reset, intel_reset_live_selftests)
+selftest(memory_region, intel_memory_region_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
 selftest(execlists, intel_execlists_live_selftests)
 selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 56091e7e599e..617a35cfac2f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -11,8 +11,10 @@
 #include "mock_gem_device.h"
 #include "mock_region.h"
 
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
 #include "selftests/i915_random.h"
 
 static void close_objects(struct intel_memory_region *mem,
@@ -252,6 +254,27 @@ static int igt_mock_contiguous(void *arg)
 	return err;
 }
 
+static int igt_lmem_create(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
+
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -280,3 +303,20 @@ int intel_memory_region_mock_selftests(void)
 	drm_dev_put(&i915->drm);
 	return err;
 }
+
+int intel_memory_region_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_lmem_create),
+	};
+
+	if (!HAS_LMEM(i915)) {
+		pr_info("device lacks LMEM support, skipping\n");
+		return 0;
+	}
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return i915_live_subtests(tests, i915);
+}
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [CI 2/7] drm/i915: setup io-mapping for LMEM
@ 2019-10-25 15:37   ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-25 15:37 UTC (permalink / raw)
  To: intel-gfx

From: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>

Create an io-mapping to describe the CPU aperture for LMEM.
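
For context, a minimal sketch (hypothetical, not part of this patch)
of how such an io-mapping is consumed once initialised, mapping a
write-combined window of the region for CPU access:

/* Hypothetical sketch; assumes mem->iomap was set up as below. */
static u32 peek_lmem_dword(struct intel_memory_region *mem,
			   resource_size_t offset)
{
	void __iomem *vaddr;
	u32 val;

	/* Map one WC page of the CPU aperture at the given offset. */
	vaddr = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
	val = readl(vaddr);
	io_mapping_unmap(vaddr);

	return val;
}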

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_region_lmem.c | 28 ++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index 199532056e1b..9a351af45ce6 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -9,8 +9,32 @@
 #include "gem/i915_gem_region.h"
 #include "intel_region_lmem.h"
 
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+	io_mapping_fini(&mem->iomap);
+	intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+	int ret;
+
+	if (!io_mapping_init_wc(&mem->iomap,
+				mem->io_start,
+				resource_size(&mem->region)))
+		return -EIO;
+
+	ret = intel_memory_region_init_buddy(mem);
+	if (ret)
+		io_mapping_fini(&mem->iomap);
+
+	return ret;
+}
+
 const struct intel_memory_region_ops intel_region_lmem_ops = {
-	.init = intel_memory_region_init_buddy,
-	.release = intel_memory_region_release_buddy,
+	.init = region_lmem_init,
+	.release = region_lmem_release,
 	.create_object = __i915_gem_lmem_object_create,
 };
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [CI 3/7] drm/i915/lmem: support kernel mapping
@ 2019-10-25 15:37   ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-25 15:37 UTC (permalink / raw)
  To: intel-gfx

From: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>

We can create LMEM objects, but we also need to support mapping them
into kernel space for internal use.
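
As a rough sketch of the intended use (hypothetical, not part of this
patch): an LMEM object can now be pinned and written through the usual
pin_map interface, with the caveat that only write-combined mappings
are supported:

/* Hypothetical sketch; only I915_MAP_WC is valid for LMEM objects. */
static void poke_lmem_object(struct drm_i915_gem_object *obj)
{
	u32 *vaddr;

	/* Pins the backing store and returns a WC mapping of the object. */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return;

	vaddr[0] = 0xdeadbeef; /* the CPU write lands directly in LMEM */

	i915_gem_object_unpin_map(obj);
}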

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Steve Hampson <steven.t.hampson@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c      |  39 ++++++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h      |   8 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     |  38 +++---
 .../drm/i915/selftests/intel_memory_region.c  | 113 ++++++++++++++++++
 5 files changed, 189 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 6a38d8028110..926f6c940e0d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,11 +9,50 @@
 #include "i915_drv.h"
 
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+	.flags = I915_GEM_OBJECT_HAS_IOMEM,
+
 	.get_pages = i915_gem_object_get_pages_buddy,
 	.put_pages = i915_gem_object_put_pages_buddy,
 	.release = i915_gem_object_release_memory_region,
 };
 
+/* XXX: Time to vfunc your life up? */
+void __iomem *
+i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+				 unsigned long n)
+{
+	resource_size_t offset;
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+					unsigned long n)
+{
+	resource_size_t offset;
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+			    unsigned long n,
+			    unsigned long size)
+{
+	resource_size_t offset;
+
+	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
 	return obj->ops == &i915_gem_lmem_obj_ops;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index fc3f15580fe3..7c176b8b7d2f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,6 +14,14 @@ struct intel_memory_region;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+					  unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+					       unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+					unsigned long n);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a387e3ee728b..96008374a412 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -31,10 +31,11 @@ struct i915_lut_handle {
 struct drm_i915_gem_object_ops {
 	unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY	BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT		BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT		BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(5)
 
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b0ec0959c13f..803bb7399cc6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
@@ -154,6 +155,16 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 	rcu_read_unlock();
 }
 
+static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
+{
+	if (i915_gem_object_is_lmem(obj))
+		io_mapping_unmap((void __force __iomem *)ptr);
+	else if (is_vmalloc_addr(ptr))
+		vunmap(ptr);
+	else
+		kunmap(kmap_to_page(ptr));
+}
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
@@ -169,14 +180,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	i915_gem_object_make_unshrinkable(obj);
 
 	if (obj->mm.mapping) {
-		void *ptr;
-
-		ptr = page_mask_bits(obj->mm.mapping);
-		if (is_vmalloc_addr(ptr))
-			vunmap(ptr);
-		else
-			kunmap(kmap_to_page(ptr));
-
+		unmap_object(obj, page_mask_bits(obj->mm.mapping));
 		obj->mm.mapping = NULL;
 	}
 
@@ -231,7 +235,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
@@ -244,6 +248,13 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 	pgprot_t pgprot;
 	void *addr;
 
+	if (i915_gem_object_is_lmem(obj)) {
+		if (type != I915_MAP_WC)
+			return NULL;
+
+		return i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+	}
+
 	/* A single page can always be kmapped */
 	if (n_pages == 1 && type == I915_MAP_WB)
 		return kmap(sg_page(sgt->sgl));
@@ -285,11 +296,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
 {
 	enum i915_map_type has_type;
+	unsigned int flags;
 	bool pinned;
 	void *ptr;
 	int err;
 
-	if (unlikely(!i915_gem_object_has_struct_page(obj)))
+	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+	if (!i915_gem_object_type_has(obj, flags))
 		return ERR_PTR(-ENXIO);
 
 	err = mutex_lock_interruptible(&obj->mm.lock);
@@ -321,10 +334,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			goto err_unpin;
 		}
 
-		if (is_vmalloc_addr(ptr))
-			vunmap(ptr);
-		else
-			kunmap(kmap_to_page(ptr));
+		unmap_object(obj, ptr);
 
 		ptr = obj->mm.mapping = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 617a35cfac2f..2e0f91fac85d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -13,8 +13,10 @@
 
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
 #include "selftests/i915_random.h"
 
 static void close_objects(struct intel_memory_region *mem,
@@ -275,6 +277,116 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_lmem_write_cpu(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	u32 bytes[] = {
+		0, /* rng placeholder */
+		sizeof(u32),
+		sizeof(u64),
+		64, /* cl */
+		PAGE_SIZE,
+		PAGE_SIZE - sizeof(u32),
+		PAGE_SIZE - sizeof(u64),
+		PAGE_SIZE - 64,
+	};
+	u32 *vaddr;
+	u32 sz;
+	u32 i;
+	int *order;
+	int count;
+	int err;
+
+	if (!HAS_ENGINE(i915, BCS0))
+		return 0;
+
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+	sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto out_put;
+	}
+
+	/* Put the pages into a known state -- from the gpu for added fun */
+	err = i915_gem_object_fill_blt(obj, i915->engine[BCS0]->kernel_context,
+				       0xdeadbeaf);
+	if (err)
+		goto out_unpin;
+
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, true);
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto out_unpin;
+
+	count = ARRAY_SIZE(bytes);
+	order = i915_random_order(count * count, &prng);
+	if (!order) {
+		err = -ENOMEM;
+		goto out_unpin;
+	}
+
+	/* We want to throw in a random width/align */
+	bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+				     sizeof(u32));
+
+	i = 0;
+	do {
+		u32 offset;
+		u32 align;
+		u32 dword;
+		u32 size;
+		u32 val;
+
+		size = bytes[order[i] % count];
+		i = (i + 1) % (count * count);
+
+		align = bytes[order[i] % count];
+		i = (i + 1) % (count * count);
+
+		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+		offset = igt_random_offset(&prng, 0, obj->base.size,
+					   size, align);
+
+		val = prandom_u32_state(&prng);
+		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+			 size / sizeof(u32));
+
+		/*
+		 * Sample random dw -- don't waste precious time reading every
+		 * single dw.
+		 */
+		dword = igt_random_offset(&prng, offset,
+					  offset + size,
+					  sizeof(u32), sizeof(u32));
+		dword /= sizeof(u32);
+		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+			       size, align, offset);
+			err = -EINVAL;
+			break;
+		}
+	} while (!__igt_timeout(end_time, NULL));
+
+out_unpin:
+	i915_gem_object_unpin_map(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -308,6 +420,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_lmem_create),
+		SUBTEST(igt_lmem_write_cpu),
 	};
 
 	if (!HAS_LMEM(i915)) {
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [CI 4/7] drm/i915/selftests: add write-dword test for LMEM
@ 2019-10-25 15:37   ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-25 15:37 UTC (permalink / raw)
  To: intel-gfx

From: Matthew Auld <matthew.auld@intel.com>

A simple test that writes dwords across an object using various
engines in a randomized order, then checks from the CPU that our
writes landed.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../drm/i915/selftests/intel_memory_region.c  | 166 ++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2e0f91fac85d..8bc6fadd14fb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -11,9 +11,11 @@
 #include "mock_gem_device.h"
 #include "mock_region.h"
 
+#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
 #include "selftests/igt_flush_test.h"
@@ -256,6 +258,125 @@ static int igt_mock_contiguous(void *arg)
 	return err;
 }
 
+static int igt_gpu_write_dw(struct intel_context *ce,
+			    struct i915_vma *vma,
+			    u32 dword,
+			    u32 value)
+{
+	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+			       vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+	unsigned long n;
+	int err;
+
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, false);
+	i915_gem_object_unlock(obj);
+	if (err)
+		return err;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
+
+	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+		u32 __iomem *base;
+		u32 read_val;
+
+		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+		read_val = ioread32(base + dword);
+		io_mapping_unmap_atomic(base);
+		if (read_val != val) {
+			pr_err("n=%lu base[%u]=%u, val=%u\n",
+			       n, dword, read_val, val);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_pages(obj);
+	return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+			 struct drm_i915_gem_object *obj)
+{
+	struct i915_gem_engines *engines;
+	struct i915_gem_engines_iter it;
+	struct i915_address_space *vm;
+	struct intel_context *ce;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	unsigned int count;
+	struct i915_vma *vma;
+	int *order;
+	int i, n;
+	int err = 0;
+
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+	n = 0;
+	count = 0;
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		count++;
+		if (!intel_engine_can_store_dword(ce->engine))
+			continue;
+
+		vm = ce->vm;
+		n++;
+	}
+	i915_gem_context_unlock_engines(ctx);
+	if (!n)
+		return 0;
+
+	order = i915_random_order(count * count, &prng);
+	if (!order)
+		return -ENOMEM;
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_free;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto out_free;
+
+	i = 0;
+	engines = i915_gem_context_lock_engines(ctx);
+	do {
+		u32 rng = prandom_u32_state(&prng);
+		u32 dword = offset_in_page(rng) / 4;
+
+		ce = engines->engines[order[i] % engines->num_engines];
+		i = (i + 1) % (count * count);
+		if (!ce || !intel_engine_can_store_dword(ce->engine))
+			continue;
+
+		err = igt_gpu_write_dw(ce, vma, dword, rng);
+		if (err)
+			break;
+
+		err = igt_cpu_check(obj, dword, rng);
+		if (err)
+			break;
+	} while (!__igt_timeout(end_time, NULL));
+	i915_gem_context_unlock_engines(ctx);
+
+out_free:
+	kfree(order);
+
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
 static int igt_lmem_create(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -277,6 +398,50 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_lmem_write_gpu(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	struct i915_gem_context *ctx;
+	struct drm_file *file;
+	I915_RND_STATE(prng);
+	u32 sz;
+	int err;
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	ctx = live_context(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_file;
+	}
+
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+	obj = i915_gem_object_create_lmem(i915, sz, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_file;
+	}
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
+
+	err = igt_gpu_write(ctx, obj);
+	if (err)
+		pr_err("igt_gpu_write failed(%d)\n", err);
+
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+out_file:
+	mock_file_free(i915, file);
+	return err;
+}
+
 static int igt_lmem_write_cpu(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -421,6 +586,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_lmem_create),
 		SUBTEST(igt_lmem_write_cpu),
+		SUBTEST(igt_lmem_write_gpu),
 	};
 
 	if (!HAS_LMEM(i915)) {
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [CI 5/7] drm/i915/selftests: extend coverage to include LMEM huge-pages
@ 2019-10-25 15:37   ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-25 15:37 UTC (permalink / raw)
  To: intel-gfx

From: Matthew Auld <matthew.auld@intel.com>

Add LMEM objects to the list of backends we test for huge-GTT-pages.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 123 +++++++++++++++++-
 1 file changed, 122 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index dac8344507c1..fa134a82083d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -9,6 +9,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
@@ -983,7 +984,8 @@ static int gpu_write(struct intel_context *ce,
 			       vma->size >> PAGE_SHIFT, val);
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int
+__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
 	unsigned int needs_flush;
 	unsigned long n;
@@ -1015,6 +1017,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+	unsigned long n;
+	int err;
+
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, false);
+	i915_gem_object_unlock(obj);
+	if (err)
+		return err;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
+
+	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+		u32 __iomem *base;
+		u32 read_val;
+
+		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+		read_val = ioread32(base + dword);
+		io_mapping_unmap_atomic(base);
+		if (read_val != val) {
+			pr_err("n=%lu base[%u]=%u, val=%u\n",
+			       n, dword, read_val, val);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_pages(obj);
+	return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+	if (i915_gem_object_has_struct_page(obj))
+		return __cpu_check_shmem(obj, dword, val);
+	else if (i915_gem_object_is_lmem(obj))
+		return __cpu_check_lmem(obj, dword, val);
+
+	return -ENODEV;
+}
+
 static int __igt_write_huge(struct intel_context *ce,
 			    struct drm_i915_gem_object *obj,
 			    u64 size, u64 offset,
@@ -1399,6 +1446,79 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 	return err;
 }
 
+static int igt_ppgtt_lmem_huge(void *arg)
+{
+	struct i915_gem_context *ctx = arg;
+	struct drm_i915_private *i915 = ctx->i915;
+	struct drm_i915_gem_object *obj;
+	static const unsigned int sizes[] = {
+		SZ_64K,
+		SZ_512K,
+		SZ_1M,
+		SZ_2M,
+	};
+	int i;
+	int err;
+
+	if (!HAS_LMEM(i915)) {
+		pr_info("device lacks LMEM support, skipping\n");
+		return 0;
+	}
+
+	/*
+	 * Sanity check that the HW uses huge pages correctly through LMEM
+	 * -- ensure that our writes land in the right place.
+	 */
+
+	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+		unsigned int size = sizes[i];
+
+		obj = i915_gem_object_create_lmem(i915, size,
+						  I915_BO_ALLOC_CONTIGUOUS);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			if (err == -E2BIG) {
+				pr_info("object too big for region!\n");
+				return 0;
+			}
+
+			return err;
+		}
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto out_put;
+
+		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+			pr_info("LMEM unable to allocate huge-page(s) with size=%u\n",
+				size);
+			goto out_unpin;
+		}
+
+		err = igt_write_huge(ctx, obj);
+		if (err) {
+			pr_err("LMEM write-huge failed with size=%u\n", size);
+			goto out_unpin;
+		}
+
+		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		i915_gem_object_put(obj);
+	}
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
@@ -1760,6 +1880,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_exhaust_huge),
 		SUBTEST(igt_ppgtt_gemfs_huge),
 		SUBTEST(igt_ppgtt_internal_huge),
+		SUBTEST(igt_ppgtt_lmem_huge),
 	};
 	struct drm_file *file;
 	struct i915_gem_context *ctx;
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [CI 6/7] drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests
@ 2019-10-25 15:37   ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-25 15:37 UTC (permalink / raw)
  To: intel-gfx

From: Matthew Auld <matthew.auld@intel.com>

Ditch the dubious static list of sizes to enumerate, in favour of
choosing a random size within the limits of each backing store. With
repeated CI runs this should give us a wider range of object sizes, and
in turn more page-size combinations, while using less machine time.
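
For reference, the size selection boils down to masking a random value
against (2 * max_page_size - 1) while keeping it page aligned, then
OR-ing in the minimum page size if the result came out too small. A
minimal standalone C sketch of that trick (pick_random_size(), rand()
and the local PAGE_SIZE/PAGE_MASK definitions are stand-ins here, not
the kernel helpers):

    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

    /* Pick a page-aligned size in [min_page_size, 2 * max_page_size),
     * assuming both bounds are powers of two. */
    static uint32_t pick_random_size(uint32_t min_page_size,
                                     uint32_t max_page_size)
    {
            /* The mask keeps the value below 2 * max_page_size and
             * page aligned. */
            uint64_t mask = (((uint64_t)max_page_size << 1) - 1) & PAGE_MASK;
            uint32_t size = (uint32_t)rand() & mask;

            /* min_page_size is a power of two, so OR-ing it in enforces
             * the lower bound without breaking the alignment. */
            if (size < min_page_size)
                    size |= min_page_size;

            return size;
    }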

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 229 ++++++------------
 1 file changed, 80 insertions(+), 149 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index fa134a82083d..b777999655b0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1317,204 +1317,137 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 	return err;
 }
 
-static int igt_ppgtt_internal_huge(void *arg)
-{
-	struct i915_gem_context *ctx = arg;
-	struct drm_i915_private *i915 = ctx->i915;
-	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_64K,
-		SZ_128K,
-		SZ_256K,
-		SZ_512K,
-		SZ_1M,
-		SZ_2M,
-	};
-	int i;
-	int err;
-
-	/*
-	 * Sanity check that the HW uses huge pages correctly through internal
-	 * -- ensure that our writes land in the right place.
-	 */
-
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
-
-		obj = i915_gem_object_create_internal(i915, size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
-
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
-			goto out_put;
-
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
-			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
-				size);
-			goto out_unpin;
-		}
-
-		err = igt_write_huge(ctx, obj);
-		if (err) {
-			pr_err("internal write-huge failed with size=%u\n",
-			       size);
-			goto out_unpin;
-		}
-
-		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		i915_gem_object_put(obj);
-	}
-
-	return 0;
-
-out_unpin:
-	i915_gem_object_unpin_pages(obj);
-out_put:
-	i915_gem_object_put(obj);
-
-	return err;
-}
+typedef struct drm_i915_gem_object *
+(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
 
 static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
 {
 	return i915->mm.gemfs && has_transparent_hugepage();
 }
 
-static int igt_ppgtt_gemfs_huge(void *arg)
+static struct drm_i915_gem_object *
+igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
 {
-	struct i915_gem_context *ctx = arg;
-	struct drm_i915_private *i915 = ctx->i915;
-	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_2M,
-		SZ_4M,
-		SZ_8M,
-		SZ_16M,
-		SZ_32M,
-	};
-	int i;
-	int err;
-
-	/*
-	 * Sanity check that the HW uses huge pages correctly through gemfs --
-	 * ensure that our writes land in the right place.
-	 */
-
 	if (!igt_can_allocate_thp(i915)) {
-		pr_info("missing THP support, skipping\n");
-		return 0;
+		pr_info("%s missing THP support, skipping\n", __func__);
+		return ERR_PTR(-ENODEV);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
-
-		obj = i915_gem_object_create_shmem(i915, size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
-
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
-			goto out_put;
+	return i915_gem_object_create_shmem(i915, size);
+}
 
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
-			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
-				size);
-			goto out_unpin;
-		}
+static struct drm_i915_gem_object *
+igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+	return i915_gem_object_create_internal(i915, size);
+}
 
-		err = igt_write_huge(ctx, obj);
-		if (err) {
-			pr_err("gemfs write-huge failed with size=%u\n",
-			       size);
-			goto out_unpin;
-		}
+static struct drm_i915_gem_object *
+igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+	return i915_gem_object_create_lmem(i915, size, flags);
+}
 
-		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		i915_gem_object_put(obj);
-	}
+static u32 igt_random_size(struct rnd_state *prng,
+			   u32 min_page_size,
+			   u32 max_page_size)
+{
+	u64 mask;
+	u32 size;
 
-	return 0;
+	GEM_BUG_ON(!is_power_of_2(min_page_size));
+	GEM_BUG_ON(!is_power_of_2(max_page_size));
+	GEM_BUG_ON(min_page_size < PAGE_SIZE);
+	GEM_BUG_ON(min_page_size > max_page_size);
 
-out_unpin:
-	i915_gem_object_unpin_pages(obj);
-out_put:
-	i915_gem_object_put(obj);
+	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
+	size = prandom_u32_state(prng) & mask;
+	if (size < min_page_size)
+		size |= min_page_size;
 
-	return err;
+	return size;
 }
 
-static int igt_ppgtt_lmem_huge(void *arg)
+static int igt_ppgtt_smoke_huge(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_64K,
-		SZ_512K,
-		SZ_1M,
-		SZ_2M,
+	I915_RND_STATE(prng);
+	struct {
+		igt_create_fn fn;
+		u32 min;
+		u32 max;
+	} backends[] = {
+		{ igt_create_internal, SZ_64K, SZ_2M,  },
+		{ igt_create_shmem,    SZ_64K, SZ_32M, },
+		{ igt_create_local,    SZ_64K, SZ_1G,  },
 	};
-	int i;
 	int err;
-
-	if (!HAS_LMEM(i915)) {
-		pr_info("device lacks LMEM support, skipping\n");
-		return 0;
-	}
+	int i;
 
 	/*
-	 * Sanity check that the HW uses huge pages correctly through LMEM
-	 * -- ensure that our writes land in the right place.
+	 * Sanity check that the HW uses huge pages correctly through our
+	 * various backends -- ensure that our writes land in the right place.
 	 */
 
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
+	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+		u32 min = backends[i].min;
+		u32 max = backends[i].max;
+		u32 size = max;
+try_again:
+		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
 
-		obj = i915_gem_object_create_lmem(i915, size,
-						  I915_BO_ALLOC_CONTIGUOUS);
+		obj = backends[i].fn(i915, size, 0);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			if (err == -E2BIG) {
-				pr_info("object too big for region!\n");
-				return 0;
+				size >>= 1;
+				goto try_again;
+			} else if (err == -ENODEV) {
+				err = 0;
+				continue;
 			}
 
 			return err;
 		}
 
 		err = i915_gem_object_pin_pages(obj);
-		if (err)
+		if (err) {
+			if (err == -ENXIO) {
+				i915_gem_object_put(obj);
+				size >>= 1;
+				goto try_again;
+			}
 			goto out_put;
+		}
 
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
-			pr_info("LMEM unable to allocate huge-page(s) with size=%u\n",
-				size);
+		if (obj->mm.page_sizes.phys < min) {
+			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
+				__func__, size, i);
+			err = -ENOMEM;
 			goto out_unpin;
 		}
 
 		err = igt_write_huge(ctx, obj);
 		if (err) {
-			pr_err("LMEM write-huge failed with size=%u\n", size);
-			goto out_unpin;
+			pr_err("%s write-huge failed with size=%u, i=%d\n",
+			       __func__, size, i);
 		}
-
+out_unpin:
 		i915_gem_object_unpin_pages(obj);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+out_put:
 		i915_gem_object_put(obj);
-	}
 
-	return 0;
+		if (err == -ENOMEM || err == -ENXIO)
+			err = 0;
 
-out_unpin:
-	i915_gem_object_unpin_pages(obj);
-out_put:
-	i915_gem_object_put(obj);
+		if (err)
+			break;
 
-	if (err == -ENOMEM)
-		err = 0;
+		cond_resched();
+	}
 
 	return err;
 }
@@ -1878,9 +1811,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_pin_update),
 		SUBTEST(igt_tmpfs_fallback),
 		SUBTEST(igt_ppgtt_exhaust_huge),
-		SUBTEST(igt_ppgtt_gemfs_huge),
-		SUBTEST(igt_ppgtt_internal_huge),
-		SUBTEST(igt_ppgtt_lmem_huge),
+		SUBTEST(igt_ppgtt_smoke_huge),
 	};
 	struct drm_file *file;
 	struct i915_gem_context *ctx;
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [CI 7/7] drm/i915/selftests: add sanity selftest for huge-GTT-pages
@ 2019-10-25 15:37   ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-25 15:37 UTC (permalink / raw)
  To: intel-gfx

From: Matthew Auld <matthew.auld@intel.com>

Now that we do randomised testing for all the relevant backends, we need
to make sure we still sanity check the obvious cases that might blow up,
so that introducing a temporary regression is less likely. Also, rather
than doing this for every backend, just limit it to our two memory
types: system and local.
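
One detail worth calling out: the "pages" column in the combinations
table is a bit mask, since every power-of-two page size doubles as its
own flag bit. That is what lets the test OR sizes together and then
prune anything the device cannot do by AND-ing against the supported
mask. A tiny standalone C illustration (the supported mask below is
hypothetical, not read from real hardware):

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_4K  0x00001000u
    #define SZ_64K 0x00010000u
    #define SZ_2M  0x00200000u

    int main(void)
    {
            /* Pretend the device only supports 4K and 64K GTT pages. */
            uint32_t supported = SZ_4K | SZ_64K;

            /* One combination asks for a mix of 2M and 64K pages... */
            uint32_t pages = SZ_2M | SZ_64K;

            /* ...but, as in the selftest, unsupported sizes are masked
             * off, so only the 64K bit survives. */
            pages &= supported;

            printf("page sizes after filtering: 0x%x\n", pages);
            return 0;
    }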

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 99 +++++++++++++++++++
 1 file changed, 99 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b777999655b0..688c49a24f32 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1342,6 +1342,12 @@ igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
 	return i915_gem_object_create_internal(i915, size);
 }
 
+static struct drm_i915_gem_object *
+igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+	return huge_pages_object(i915, size, size);
+}
+
 static struct drm_i915_gem_object *
 igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
 {
@@ -1452,6 +1458,98 @@ static int igt_ppgtt_smoke_huge(void *arg)
 	return err;
 }
 
+static int igt_ppgtt_sanity_check(void *arg)
+{
+	struct i915_gem_context *ctx = arg;
+	struct drm_i915_private *i915 = ctx->i915;
+	unsigned int supported = INTEL_INFO(i915)->page_sizes;
+	struct {
+		igt_create_fn fn;
+		unsigned int flags;
+	} backends[] = {
+		{ igt_create_system, 0,                        },
+		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
+	};
+	struct {
+		u32 size;
+		u32 pages;
+	} combos[] = {
+		{ SZ_64K,		SZ_64K		},
+		{ SZ_2M,		SZ_2M		},
+		{ SZ_2M,		SZ_64K		},
+		{ SZ_2M - SZ_64K,	SZ_64K		},
+		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
+		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
+		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
+		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
+	};
+	int i, j;
+	int err;
+
+	if (supported == I915_GTT_PAGE_SIZE_4K)
+		return 0;
+
+	/*
+	 * Sanity check that the HW behaves with a limited set of combinations.
+	 * We already have a bunch of randomised testing, which should give us
+	 * a decent amount of variation between runs, however we should keep
+	 * this to limit the chances of introducing a temporary regression, by
+	 * testing the most obvious cases that might make something blow up.
+	 */
+
+	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
+			struct drm_i915_gem_object *obj;
+			u32 size = combos[j].size;
+			u32 pages = combos[j].pages;
+
+			obj = backends[i].fn(i915, size, backends[i].flags);
+			if (IS_ERR(obj)) {
+				err = PTR_ERR(obj);
+				if (err == -ENODEV) {
+					pr_info("Device lacks local memory, skipping\n");
+					err = 0;
+					break;
+				}
+
+				return err;
+			}
+
+			err = i915_gem_object_pin_pages(obj);
+			if (err) {
+				i915_gem_object_put(obj);
+				goto out;
+			}
+
+			GEM_BUG_ON(pages > obj->base.size);
+			pages = pages & supported;
+
+			if (pages)
+				obj->mm.page_sizes.sg = pages;
+
+			err = igt_write_huge(ctx, obj);
+
+			i915_gem_object_unpin_pages(obj);
+			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			i915_gem_object_put(obj);
+
+			if (err) {
+				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
+				       __func__, size, pages, i, j);
+				goto out;
+			}
+		}
+
+		cond_resched();
+	}
+
+out:
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
@@ -1812,6 +1910,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_tmpfs_fallback),
 		SUBTEST(igt_ppgtt_exhaust_huge),
 		SUBTEST(igt_ppgtt_smoke_huge),
+		SUBTEST(igt_ppgtt_sanity_check),
 	};
 	struct drm_file *file;
 	struct i915_gem_context *ctx;
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [CI,1/7] drm/i915: support creating LMEM objects
@ 2019-10-25 20:13   ` Patchwork
  0 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2019-10-25 20:13 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [CI,1/7] drm/i915: support creating LMEM objects
URL   : https://patchwork.freedesktop.org/series/68575/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
f5c5fa5a5f44 drm/i915: support creating LMEM objects
-:36: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#36: 
new file mode 100644

total: 0 errors, 1 warnings, 0 checks, 206 lines checked
a81081abe8ac drm/i915: setup io-mapping for LMEM
f782ab26b95f drm/i915/lmem: support kernel mapping
-:227: WARNING:LINE_SPACING: Missing a blank line after declarations
#227: FILE: drivers/gpu/drm/i915/selftests/intel_memory_region.c:284:
+	struct drm_i915_gem_object *obj;
+	I915_RND_STATE(prng);

total: 0 errors, 1 warnings, 0 checks, 296 lines checked
4adec5f378e6 drm/i915/selftests: add write-dword test for LMEM
-:83: WARNING:LINE_SPACING: Missing a blank line after declarations
#83: FILE: drivers/gpu/drm/i915/selftests/intel_memory_region.c:312:
+	struct intel_context *ce;
+	I915_RND_STATE(prng);

-:164: WARNING:LINE_SPACING: Missing a blank line after declarations
#164: FILE: drivers/gpu/drm/i915/selftests/intel_memory_region.c:407:
+	struct drm_file *file;
+	I915_RND_STATE(prng);

total: 0 errors, 2 warnings, 0 checks, 193 lines checked
233d44730248 drm/i915/selftests: extend coverage to include LMEM huge-pages
4e3c1e4fdbfb drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests
-:82: WARNING:NEW_TYPEDEFS: do not add new typedefs
#82: FILE: drivers/gpu/drm/i915/gem/selftests/huge_pages.c:1320:
+typedef struct drm_i915_gem_object *

total: 0 errors, 1 warnings, 0 checks, 293 lines checked
8dc38aae2b53 drm/i915/selftests: add sanity selftest for huge-GTT-pages

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 23+ messages in thread

* ✗ Fi.CI.SPARSE: warning for series starting with [CI,1/7] drm/i915: support creating LMEM objects
@ 2019-10-25 20:16   ` Patchwork
  0 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2019-10-25 20:16 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [CI,1/7] drm/i915: support creating LMEM objects
URL   : https://patchwork.freedesktop.org/series/68575/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.6.0
Commit: drm/i915: support creating LMEM objects
Okay!

Commit: drm/i915: setup io-mapping for LMEM
Okay!

Commit: drm/i915/lmem: support kernel mapping
+drivers/gpu/drm/i915/gem/i915_gem_pages.c:255:51:    expected void *
+drivers/gpu/drm/i915/gem/i915_gem_pages.c:255:51:    got void [noderef] <asn:2> *
+drivers/gpu/drm/i915/gem/i915_gem_pages.c:255:51: warning: incorrect type in return expression (different address spaces)

Commit: drm/i915/selftests: add write-dword test for LMEM
Okay!

Commit: drm/i915/selftests: extend coverage to include LMEM huge-pages
Okay!

Commit: drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests
Okay!

Commit: drm/i915/selftests: add sanity selftest for huge-GTT-pages
Okay!

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 23+ messages in thread

* ✓ Fi.CI.BAT: success for series starting with [CI,1/7] drm/i915: support creating LMEM objects
@ 2019-10-25 20:32   ` Patchwork
  0 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2019-10-25 20:32 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [CI,1/7] drm/i915: support creating LMEM objects
URL   : https://patchwork.freedesktop.org/series/68575/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7187 -> Patchwork_14988
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/index.html

New tests
---------

  New tests have been introduced between CI_DRM_7187 and Patchwork_14988:

### New IGT tests (1) ###

  * igt@i915_selftest@live_memory_region:
    - Statuses : 42 pass(s)
    - Exec time: [0.41, 1.39] s

  

Known issues
------------

  Here are the changes found in Patchwork_14988 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_mmap@basic:
    - fi-icl-u3:          [PASS][1] -> [DMESG-WARN][2] ([fdo#107724])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/fi-icl-u3/igt@gem_mmap@basic.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/fi-icl-u3/igt@gem_mmap@basic.html

  * igt@kms_chamelium@dp-crc-fast:
    - fi-icl-u2:          [PASS][3] -> [FAIL][4] ([fdo#109635] / [fdo#110387])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/fi-icl-u2/igt@kms_chamelium@dp-crc-fast.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/fi-icl-u2/igt@kms_chamelium@dp-crc-fast.html

  
#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s4-devices:
    - fi-icl-u3:          [DMESG-WARN][5] ([fdo#107724]) -> [PASS][6] +3 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/fi-icl-u3/igt@gem_exec_suspend@basic-s4-devices.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/fi-icl-u3/igt@gem_exec_suspend@basic-s4-devices.html

  * {igt@i915_selftest@live_gt_heartbeat}:
    - {fi-icl-dsi}:       [DMESG-FAIL][7] ([fdo#112096]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/fi-icl-dsi/igt@i915_selftest@live_gt_heartbeat.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/fi-icl-dsi/igt@i915_selftest@live_gt_heartbeat.html
    - fi-cml-u2:          [DMESG-FAIL][9] ([fdo#112096]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/fi-cml-u2/igt@i915_selftest@live_gt_heartbeat.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/fi-cml-u2/igt@i915_selftest@live_gt_heartbeat.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#109635]: https://bugs.freedesktop.org/show_bug.cgi?id=109635
  [fdo#110387]: https://bugs.freedesktop.org/show_bug.cgi?id=110387
  [fdo#111747]: https://bugs.freedesktop.org/show_bug.cgi?id=111747
  [fdo#112096]: https://bugs.freedesktop.org/show_bug.cgi?id=112096


Participating hosts (49 -> 43)
------------------------------

  Additional (1): fi-pnv-d510 
  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7187 -> Patchwork_14988

  CI-20190529: 20190529
  CI_DRM_7187: 9df5aeba240a65ea80008020d3027484bc6055b3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5241: 17b87c378fa155390b13a43f141371fd899d567b @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14988: 8dc38aae2b531b03b88737c35d9247eaaad0f3e9 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

8dc38aae2b53 drm/i915/selftests: add sanity selftest for huge-GTT-pages
4e3c1e4fdbfb drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests
233d44730248 drm/i915/selftests: extend coverage to include LMEM huge-pages
4adec5f378e6 drm/i915/selftests: add write-dword test for LMEM
f782ab26b95f drm/i915/lmem: support kernel mapping
a81081abe8ac drm/i915: setup io-mapping for LMEM
f5c5fa5a5f44 drm/i915: support creating LMEM objects

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 23+ messages in thread

* ✓ Fi.CI.IGT: success for series starting with [CI,1/7] drm/i915: support creating LMEM objects
@ 2019-10-27 11:26   ` Patchwork
  0 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2019-10-27 11:26 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [CI,1/7] drm/i915: support creating LMEM objects
URL   : https://patchwork.freedesktop.org/series/68575/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7187_full -> Patchwork_14988_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_14988_full:

### IGT changes ###

#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@perf_pmu@enable-race-vecs0:
    - {shard-tglb}:       [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-tglb2/igt@perf_pmu@enable-race-vecs0.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-tglb4/igt@perf_pmu@enable-race-vecs0.html

  
Known issues
------------

  Here are the changes found in Patchwork_14988_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@vcs1-dirty-switch:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#109276] / [fdo#112080])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb2/igt@gem_ctx_isolation@vcs1-dirty-switch.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb3/igt@gem_ctx_isolation@vcs1-dirty-switch.html

  * igt@gem_exec_parallel@fds:
    - shard-hsw:          [PASS][5] -> [INCOMPLETE][6] ([fdo#103540])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-hsw8/igt@gem_exec_parallel@fds.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-hsw2/igt@gem_exec_parallel@fds.html

  * igt@gem_exec_schedule@in-order-bsd:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([fdo#111325]) +5 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb5/igt@gem_exec_schedule@in-order-bsd.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb4/igt@gem_exec_schedule@in-order-bsd.html

  * igt@gem_exec_schedule@preempt-other-bsd2:
    - shard-iclb:         [PASS][9] -> [SKIP][10] ([fdo#109276]) +9 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb1/igt@gem_exec_schedule@preempt-other-bsd2.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb8/igt@gem_exec_schedule@preempt-other-bsd2.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-iclb:         [PASS][11] -> [TIMEOUT][12] ([fdo#112068 ])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb4/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb7/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_userptr_blits@map-fixed-invalidate-busy-gup:
    - shard-snb:          [PASS][13] -> [DMESG-WARN][14] ([fdo#111870])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-snb6/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-snb6/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html

  * igt@kms_color@pipe-a-ctm-0-75:
    - shard-skl:          [PASS][15] -> [DMESG-WARN][16] ([fdo#106107]) +1 similar issue
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-skl7/igt@kms_color@pipe-a-ctm-0-75.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-skl3/igt@kms_color@pipe-a-ctm-0-75.html

  * igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite:
    - shard-iclb:         [PASS][17] -> [FAIL][18] ([fdo#103167]) +1 similar issue
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb7/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb8/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - shard-kbl:          [PASS][19] -> [DMESG-WARN][20] ([fdo#108566])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-kbl2/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-kbl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
    - shard-apl:          [PASS][21] -> [DMESG-WARN][22] ([fdo#108566]) +2 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-apl8/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-apl6/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html

  * igt@kms_plane_cursor@pipe-c-viewport-size-128:
    - shard-iclb:         [PASS][23] -> [INCOMPLETE][24] ([fdo#107713])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb6/igt@kms_plane_cursor@pipe-c-viewport-size-128.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb1/igt@kms_plane_cursor@pipe-c-viewport-size-128.html

  * igt@kms_plane_lowres@pipe-a-tiling-y:
    - shard-iclb:         [PASS][25] -> [FAIL][26] ([fdo#103166])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb6/igt@kms_plane_lowres@pipe-a-tiling-y.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb5/igt@kms_plane_lowres@pipe-a-tiling-y.html

  * igt@kms_plane_multiple@atomic-pipe-b-tiling-yf:
    - shard-skl:          [PASS][27] -> [DMESG-WARN][28] ([fdo#106885])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-skl8/igt@kms_plane_multiple@atomic-pipe-b-tiling-yf.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-skl1/igt@kms_plane_multiple@atomic-pipe-b-tiling-yf.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [PASS][29] -> [SKIP][30] ([fdo#109441]) +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb3/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@perf@short-reads:
    - shard-kbl:          [PASS][31] -> [TIMEOUT][32] ([fdo#103183])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-kbl1/igt@perf@short-reads.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-kbl2/igt@perf@short-reads.html

  * igt@perf_pmu@init-busy-vcs1:
    - shard-iclb:         [PASS][33] -> [SKIP][34] ([fdo#112080]) +7 similar issues
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb1/igt@perf_pmu@init-busy-vcs1.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb8/igt@perf_pmu@init-busy-vcs1.html

  
#### Possible fixes ####

  * igt@gem_ctx_isolation@vcs1-s3:
    - shard-iclb:         [SKIP][35] ([fdo#109276] / [fdo#112080]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb8/igt@gem_ctx_isolation@vcs1-s3.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb2/igt@gem_ctx_isolation@vcs1-s3.html

  * igt@gem_ctx_shared@q-smoketest-blt:
    - {shard-tglb}:       [INCOMPLETE][37] ([fdo#111735]) -> [PASS][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-tglb6/igt@gem_ctx_shared@q-smoketest-blt.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-tglb3/igt@gem_ctx_shared@q-smoketest-blt.html

  * igt@gem_ctx_switch@vcs1:
    - shard-iclb:         [SKIP][39] ([fdo#112080]) -> [PASS][40] +13 similar issues
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb5/igt@gem_ctx_switch@vcs1.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb2/igt@gem_ctx_switch@vcs1.html

  * igt@gem_exec_blt@dumb-buf-max:
    - shard-apl:          [INCOMPLETE][41] ([fdo#103927]) -> [PASS][42] +1 similar issue
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-apl4/igt@gem_exec_blt@dumb-buf-max.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-apl8/igt@gem_exec_blt@dumb-buf-max.html

  * igt@gem_exec_schedule@preempt-contexts-bsd2:
    - shard-iclb:         [SKIP][43] ([fdo#109276]) -> [PASS][44] +15 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb5/igt@gem_exec_schedule@preempt-contexts-bsd2.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb2/igt@gem_exec_schedule@preempt-contexts-bsd2.html

  * igt@gem_exec_schedule@wide-bsd:
    - shard-iclb:         [SKIP][45] ([fdo#111325]) -> [PASS][46] +1 similar issue
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb1/igt@gem_exec_schedule@wide-bsd.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb8/igt@gem_exec_schedule@wide-bsd.html

  * igt@gem_mmap_gtt@hang:
    - shard-snb:          [INCOMPLETE][47] ([fdo#105411]) -> [PASS][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-snb1/igt@gem_mmap_gtt@hang.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-snb7/igt@gem_mmap_gtt@hang.html

  * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
    - shard-hsw:          [FAIL][49] ([fdo#112037]) -> [PASS][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-hsw8/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-hsw8/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html

  * igt@gem_userptr_blits@sync-unmap-after-close:
    - shard-hsw:          [DMESG-WARN][51] ([fdo#111870]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-hsw2/igt@gem_userptr_blits@sync-unmap-after-close.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-hsw4/igt@gem_userptr_blits@sync-unmap-after-close.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-snb:          [DMESG-WARN][53] ([fdo#111870]) -> [PASS][54] +2 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-snb1/igt@gem_userptr_blits@sync-unmap-cycles.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-snb5/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@gem_workarounds@suspend-resume-context:
    - shard-apl:          [DMESG-WARN][55] ([fdo#108566]) -> [PASS][56] +4 similar issues
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-apl4/igt@gem_workarounds@suspend-resume-context.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-apl3/igt@gem_workarounds@suspend-resume-context.html

  * igt@kms_color@pipe-a-ctm-0-5:
    - shard-skl:          [DMESG-WARN][57] ([fdo#106107]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-skl5/igt@kms_color@pipe-a-ctm-0-5.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-skl8/igt@kms_color@pipe-a-ctm-0-5.html

  * igt@kms_cursor_crc@pipe-a-cursor-suspend:
    - shard-iclb:         [INCOMPLETE][59] ([fdo#107713]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb3/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb5/igt@kms_cursor_crc@pipe-a-cursor-suspend.html

  * igt@kms_cursor_legacy@pipe-c-single-bo:
    - {shard-tglb}:       [INCOMPLETE][61] ([fdo#111747] / [fdo#112035]) -> [PASS][62]
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-tglb3/igt@kms_cursor_legacy@pipe-c-single-bo.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-tglb1/igt@kms_cursor_legacy@pipe-c-single-bo.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff:
    - {shard-tglb}:       [FAIL][63] ([fdo#103167]) -> [PASS][64] +2 similar issues
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-tglb4/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-tglb5/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff.html

  * igt@kms_frontbuffer_tracking@fbc-indfb-scaledprimary:
    - shard-iclb:         [FAIL][65] ([fdo#103167]) -> [PASS][66] +4 similar issues
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb6/igt@kms_frontbuffer_tracking@fbc-indfb-scaledprimary.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-indfb-scaledprimary.html

  * igt@kms_frontbuffer_tracking@fbc-suspend:
    - shard-kbl:          [DMESG-WARN][67] ([fdo#108566]) -> [PASS][68] +7 similar issues
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-kbl3/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-kbl4/igt@kms_frontbuffer_tracking@fbc-suspend.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-rte:
    - shard-iclb:         [FAIL][69] ([fdo#103167] / [fdo#110378]) -> [PASS][70]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb1/igt@kms_frontbuffer_tracking@fbcpsr-1p-rte.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb8/igt@kms_frontbuffer_tracking@fbcpsr-1p-rte.html

  * igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-blt:
    - {shard-tglb}:       [INCOMPLETE][71] -> [PASS][72] +2 similar issues
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-tglb8/igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-blt.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-tglb3/igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-blt.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-skl:          [INCOMPLETE][73] ([fdo#104108]) -> [PASS][74]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-skl8/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-skl5/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_psr2_su@page_flip:
    - shard-iclb:         [SKIP][75] ([fdo#109642] / [fdo#111068]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb4/igt@kms_psr2_su@page_flip.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb2/igt@kms_psr2_su@page_flip.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [SKIP][77] ([fdo#109441]) -> [PASS][78] +3 similar issues
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb8/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html

  * igt@kms_psr@suspend:
    - {shard-tglb}:       [INCOMPLETE][79] ([fdo#111832] / [fdo#111850]) -> [PASS][80] +1 similar issue
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-tglb4/igt@kms_psr@suspend.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-tglb6/igt@kms_psr@suspend.html

  * igt@kms_vblank@pipe-b-ts-continuation-dpms-suspend:
    - {shard-tglb}:       [INCOMPLETE][81] ([fdo#111850]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-tglb8/igt@kms_vblank@pipe-b-ts-continuation-dpms-suspend.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-tglb7/igt@kms_vblank@pipe-b-ts-continuation-dpms-suspend.html

  
#### Warnings ####

  * igt@gem_mocs_settings@mocs-reset-bsd2:
    - shard-iclb:         [SKIP][83] ([fdo#109276]) -> [FAIL][84] ([fdo#111330]) +2 similar issues
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7187/shard-iclb8/igt@gem_mocs_settings@mocs-reset-bsd2.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/shard-iclb2/igt@gem_mocs_settings@mocs-reset-bsd2.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103183]: https://bugs.freedesktop.org/show_bug.cgi?id=103183
  [fdo#103540]: https://bugs.freedesktop.org/show_bug.cgi?id=103540
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#106107]: https://bugs.freedesktop.org/show_bug.cgi?id=106107
  [fdo#106885]: https://bugs.freedesktop.org/show_bug.cgi?id=106885
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
  [fdo#110378]: https://bugs.freedesktop.org/show_bug.cgi?id=110378
  [fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
  [fdo#111325]: https://bugs.freedesktop.org/show_bug.cgi?id=111325
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#111646]: https://bugs.freedesktop.org/show_bug.cgi?id=111646
  [fdo#111671]: https://bugs.freedesktop.org/show_bug.cgi?id=111671
  [fdo#111703]: https://bugs.freedesktop.org/show_bug.cgi?id=111703
  [fdo#111735]: https://bugs.freedesktop.org/show_bug.cgi?id=111735
  [fdo#111747]: https://bugs.freedesktop.org/show_bug.cgi?id=111747
  [fdo#111780]: https://bugs.freedesktop.org/show_bug.cgi?id=111780
  [fdo#111781]: https://bugs.freedesktop.org/show_bug.cgi?id=111781
  [fdo#111832]: https://bugs.freedesktop.org/show_bug.cgi?id=111832
  [fdo#111850]: https://bugs.freedesktop.org/show_bug.cgi?id=111850
  [fdo#111870]: https://bugs.freedesktop.org/show_bug.cgi?id=111870
  [fdo#112032]: https://bugs.freedesktop.org/show_bug.cgi?id=112032
  [fdo#112035]: https://bugs.freedesktop.org/show_bug.cgi?id=112035
  [fdo#112037]: https://bugs.freedesktop.org/show_bug.cgi?id=112037
  [fdo#112068]: https://bugs.freedesktop.org/show_bug.cgi?id=112068
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080


Participating hosts (11 -> 11)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7187 -> Patchwork_14988

  CI-20190529: 20190529
  CI_DRM_7187: 9df5aeba240a65ea80008020d3027484bc6055b3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5241: 17b87c378fa155390b13a43f141371fd899d567b @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14988: 8dc38aae2b531b03b88737c35d9247eaaad0f3e9 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14988/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [CI 4/7] drm/i915/selftests: add write-dword test for LMEM
  2019-10-24  9:19 [CI 1/7] " Chris Wilson
@ 2019-10-24  9:19 ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-10-24  9:19 UTC (permalink / raw)
  To: intel-gfx

From: Matthew Auld <matthew.auld@intel.com>

Simple test that writes dwords across an object, using various engines in
a randomized order, and checks from the CPU that the writes land.
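
A minimal userspace sketch of the same write-then-verify idea, for
illustration only: write a value into one dword of every page of a
buffer, then read each page back and compare. Plain malloc'd memory
stands in for the LMEM object, the GPU write and the io-mapping
readback, so none of this is i915 API; it only mirrors the shape of
the checking loop.

  /* Standalone sketch (not i915 code); compile with any C compiler. */
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define PAGE_SZ 4096u

  static int check_dword(const uint8_t *buf, size_t size, uint32_t dword,
                         uint32_t val)
  {
          size_t n;

          /* Read the chosen dword back from every page and compare. */
          for (n = 0; n < size / PAGE_SZ; n++) {
                  uint32_t read_val;

                  memcpy(&read_val,
                         buf + n * PAGE_SZ + dword * sizeof(uint32_t),
                         sizeof(read_val));
                  if (read_val != val) {
                          fprintf(stderr, "page %zu dword %u: %u != %u\n",
                                  n, dword, read_val, val);
                          return -1;
                  }
          }

          return 0;
  }

  int main(void)
  {
          size_t size = 8 * PAGE_SZ, n;
          uint8_t *buf = calloc(1, size);
          uint32_t val = 0xdeadbeef;
          uint32_t dword = 13;    /* any dword index within a page */

          if (!buf)
                  return 1;

          /* Stand-in for the GPU write: store val at that dword of each page. */
          for (n = 0; n < size / PAGE_SZ; n++)
                  memcpy(buf + n * PAGE_SZ + dword * sizeof(uint32_t),
                         &val, sizeof(val));

          /* Stand-in for the CPU check over the io-mapping. */
          if (check_dword(buf, size, dword, val))
                  return 1;

          free(buf);
          return 0;
  }

In the patch itself the write goes through the GPU via igt_gpu_fill_dw()
and the readback uses the object's io-mapping, one page at a time.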

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../drm/i915/selftests/intel_memory_region.c  | 166 ++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 292489371842..8e14ef3e1e32 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -11,9 +11,11 @@
 #include "mock_gem_device.h"
 #include "mock_region.h"
 
+#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
 #include "selftests/igt_flush_test.h"
@@ -256,6 +258,125 @@ static int igt_mock_contiguous(void *arg)
 	return err;
 }
 
+static int igt_gpu_write_dw(struct intel_context *ce,
+			    struct i915_vma *vma,
+			    u32 dword,
+			    u32 value)
+{
+	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+			       vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+	unsigned long n;
+	int err;
+
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, false);
+	i915_gem_object_unlock(obj);
+	if (err)
+		return err;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
+
+	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+		u32 __iomem *base;
+		u32 read_val;
+
+		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+		read_val = ioread32(base + dword);
+		io_mapping_unmap_atomic(base);
+		if (read_val != val) {
+			pr_err("n=%lu base[%u]=%u, val=%u\n",
+			       n, dword, read_val, val);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_pages(obj);
+	return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+			 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = ctx->i915;
+	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+	struct i915_gem_engines *engines;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	unsigned int count;
+	struct i915_vma *vma;
+	int *order;
+	int i, n;
+	int err = 0;
+
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+	n = 0;
+	count = 0;
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		count++;
+		if (!intel_engine_can_store_dword(ce->engine))
+			continue;
+
+		n++;
+	}
+	i915_gem_context_unlock_engines(ctx);
+	if (!n)
+		return 0;
+
+	order = i915_random_order(count * count, &prng);
+	if (!order)
+		return -ENOMEM;
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_free;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto out_free;
+
+	i = 0;
+	engines = i915_gem_context_lock_engines(ctx);
+	do {
+		u32 rng = prandom_u32_state(&prng);
+		u32 dword = offset_in_page(rng) / 4;
+
+		ce = engines->engines[order[i] % engines->num_engines];
+		i = (i + 1) % (count * count);
+		if (!ce || !intel_engine_can_store_dword(ce->engine))
+			continue;
+
+		err = igt_gpu_write_dw(ce, vma, dword, rng);
+		if (err)
+			break;
+
+		err = igt_cpu_check(obj, dword, rng);
+		if (err)
+			break;
+	} while (!__igt_timeout(end_time, NULL));
+	i915_gem_context_unlock_engines(ctx);
+
+out_free:
+	kfree(order);
+
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
 static int igt_lmem_create(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -277,6 +398,50 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_lmem_write_gpu(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	struct i915_gem_context *ctx;
+	struct drm_file *file;
+	I915_RND_STATE(prng);
+	u32 sz;
+	int err;
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	ctx = live_context(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_file;
+	}
+
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+	obj = i915_gem_object_create_lmem(i915, sz, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_file;
+	}
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
+
+	err = igt_gpu_write(ctx, obj);
+	if (err)
+		pr_err("igt_gpu_write failed(%d)\n", err);
+
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+out_file:
+	mock_file_free(i915, file);
+	return err;
+}
+
 static int igt_lmem_write_cpu(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -421,6 +586,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_lmem_create),
 		SUBTEST(igt_lmem_write_cpu),
+		SUBTEST(igt_lmem_write_gpu),
 	};
 
 	if (!HAS_LMEM(i915)) {
-- 
2.24.0.rc0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2019-10-27 11:26 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-25 15:37 [CI 1/7] drm/i915: support creating LMEM objects Chris Wilson
2019-10-25 15:37 ` [Intel-gfx] " Chris Wilson
2019-10-25 15:37 ` [CI 2/7] drm/i915: setup io-mapping for LMEM Chris Wilson
2019-10-25 15:37   ` [Intel-gfx] " Chris Wilson
2019-10-25 15:37 ` [CI 3/7] drm/i915/lmem: support kernel mapping Chris Wilson
2019-10-25 15:37   ` [Intel-gfx] " Chris Wilson
2019-10-25 15:37 ` [CI 4/7] drm/i915/selftests: add write-dword test for LMEM Chris Wilson
2019-10-25 15:37   ` [Intel-gfx] " Chris Wilson
2019-10-25 15:37 ` [CI 5/7] drm/i915/selftests: extend coverage to include LMEM huge-pages Chris Wilson
2019-10-25 15:37   ` [Intel-gfx] " Chris Wilson
2019-10-25 15:37 ` [CI 6/7] drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests Chris Wilson
2019-10-25 15:37   ` [Intel-gfx] " Chris Wilson
2019-10-25 15:37 ` [CI 7/7] drm/i915/selftests: add sanity selftest for huge-GTT-pages Chris Wilson
2019-10-25 15:37   ` [Intel-gfx] " Chris Wilson
2019-10-25 20:13 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [CI,1/7] drm/i915: support creating LMEM objects Patchwork
2019-10-25 20:13   ` [Intel-gfx] " Patchwork
2019-10-25 20:16 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-10-25 20:16   ` [Intel-gfx] " Patchwork
2019-10-25 20:32 ` ✓ Fi.CI.BAT: success " Patchwork
2019-10-25 20:32   ` [Intel-gfx] " Patchwork
2019-10-27 11:26 ` ✓ Fi.CI.IGT: " Patchwork
2019-10-27 11:26   ` [Intel-gfx] " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2019-10-24  9:19 [CI 1/7] " Chris Wilson
2019-10-24  9:19 ` [CI 4/7] drm/i915/selftests: add write-dword test for LMEM Chris Wilson
