* [PATCH v5 1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
@ 2019-05-28 19:57 Matthew Auld
  2019-05-28 19:57 ` [PATCH v5 2/2] drm/i915: add in-kernel blitter client Matthew Auld
                   ` (4 more replies)
  0 siblings, 5 replies; 10+ messages in thread
From: Matthew Auld @ 2019-05-28 19:57 UTC (permalink / raw)
  To: intel-gfx

Some steps in gen6_alloc_va_range require the HW to be awake, so ideally
we should be grabbing the wakeref ourselves and not relying on the
caller already holding it for us.
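
As a minimal sketch of the resulting pattern (the diff below is the real
change; do_alloc_steps() is just a hypothetical placeholder for the
HW-touching parts of the allocation):

	static int example_va_range(struct i915_address_space *vm)
	{
		intel_wakeref_t wakeref;
		int err;

		/* keep the device awake across the HW-touching steps */
		wakeref = intel_runtime_pm_get(vm->i915);

		err = do_alloc_steps(vm); /* hypothetical placeholder */

		/* drop our wakeref so runtime suspend can kick in again */
		intel_runtime_pm_put(vm->i915, wakeref);

		return err;
	}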

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7496cce0d798..9f7b136219dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1746,10 +1746,13 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
 	struct i915_page_table *pt;
+	intel_wakeref_t wakeref;
 	u64 from = start;
 	unsigned int pde;
 	bool flush = false;
 
+	wakeref = intel_runtime_pm_get(vm->i915);
+
 	gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
 		const unsigned int count = gen6_pte_count(start, length);
 
@@ -1775,12 +1778,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 
 	if (flush) {
 		mark_tlbs_dirty(&ppgtt->base);
-		gen6_ggtt_invalidate(ppgtt->base.vm.i915);
+		gen6_ggtt_invalidate(vm->i915);
 	}
 
+	intel_runtime_pm_put(vm->i915, wakeref);
+
 	return 0;
 
 unwind_out:
+	intel_runtime_pm_put(vm->i915, wakeref);
 	gen6_ppgtt_clear_range(vm, from, start - from);
 	return -ENOMEM;
 }
-- 
2.20.1


* [PATCH v5 2/2] drm/i915: add in-kernel blitter client
  2019-05-28 19:57 [PATCH v5 1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Matthew Auld
@ 2019-05-28 19:57 ` Matthew Auld
  2019-05-28 20:45   ` Changqing Tang
                     ` (2 more replies)
  2019-05-28 20:44 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Patchwork
                   ` (3 subsequent siblings)
  4 siblings, 3 replies; 10+ messages in thread
From: Matthew Auld @ 2019-05-28 19:57 UTC (permalink / raw)
  To: intel-gfx; +Cc: CQ Tang

The plan is to use the blitter engine for async object clearing when
using local memory, but before we can move the worker to get_pages() we
have to first tame some more of our struct_mutex usage. With this in
mind we should be able to upstream the object clearing as some
selftests, which should serve as a guinea pig for the ongoing locking
rework and upcoming async get_pages() framework.
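
For reference, the intended calling convention, condensed from the
selftests below (error handling trimmed; only functions added by this
patch or already in the driver are used):

	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	int err;

	/* schedule the async fill; the fence is installed in obj->resv */
	err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
					       &obj->mm.page_sizes, 0);
	if (err)
		return err;

	/* wait for the blitter before touching the pages from the CPU */
	return i915_gem_object_wait(obj,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				    MAX_SCHEDULE_TIMEOUT);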

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: CQ Tang <cq.tang@intel.com>
---
 drivers/gpu/drm/i915/Makefile                 |   2 +
 .../gpu/drm/i915/gem/i915_gem_client_blt.c    | 306 ++++++++++++++++++
 .../gpu/drm/i915/gem/i915_gem_client_blt.h    |  21 ++
 .../gpu/drm/i915/gem/i915_gem_object_blt.c    | 109 +++++++
 .../gpu/drm/i915/gem/i915_gem_object_blt.h    |  24 ++
 .../i915/gem/selftests/i915_gem_client_blt.c  | 127 ++++++++
 .../i915/gem/selftests/i915_gem_object_blt.c  | 111 +++++++
 drivers/gpu/drm/i915/gt/intel_gpu_commands.h  |   1 +
 .../drm/i915/selftests/i915_live_selftests.h  |   2 +
 9 files changed, 703 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
 create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
 create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 83588e9840f8..a7850bbffbe0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -90,6 +90,7 @@ obj-y += gem/
 gem-y += \
 	gem/i915_gem_busy.o \
 	gem/i915_gem_clflush.o \
+	gem/i915_gem_client_blt.o \
 	gem/i915_gem_context.o \
 	gem/i915_gem_dmabuf.o \
 	gem/i915_gem_domain.o \
@@ -97,6 +98,7 @@ gem-y += \
 	gem/i915_gem_fence.o \
 	gem/i915_gem_internal.o \
 	gem/i915_gem_object.o \
+	gem/i915_gem_object_blt.o \
 	gem/i915_gem_mman.o \
 	gem/i915_gem_pages.o \
 	gem/i915_gem_phys.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
new file mode 100644
index 000000000000..2d8cf29a5796
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+#include "i915_gem_client_blt.h"
+
+#include "i915_gem_object_blt.h"
+#include "intel_drv.h"
+
+struct i915_sleeve {
+	struct i915_vma *vma;
+	struct drm_i915_gem_object *obj;
+	struct sg_table *pages;
+	struct i915_page_sizes page_sizes;
+};
+
+static int vma_set_pages(struct i915_vma *vma)
+{
+	struct i915_sleeve *sleeve = vma->private;
+
+	vma->pages = sleeve->pages;
+	vma->page_sizes = sleeve->page_sizes;
+
+	return 0;
+}
+
+static void vma_clear_pages(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!vma->pages);
+	vma->pages = NULL;
+}
+
+static int vma_bind(struct i915_vma *vma,
+		    enum i915_cache_level cache_level,
+		    u32 flags)
+{
+	return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+}
+
+static void vma_unbind(struct i915_vma *vma)
+{
+	vma->vm->vma_ops.unbind_vma(vma);
+}
+
+static const struct i915_vma_ops proxy_vma_ops = {
+	.set_pages = vma_set_pages,
+	.clear_pages = vma_clear_pages,
+	.bind_vma = vma_bind,
+	.unbind_vma = vma_unbind,
+};
+
+static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
+					 struct drm_i915_gem_object *obj,
+					 struct sg_table *pages,
+					 struct i915_page_sizes *page_sizes)
+{
+	struct i915_sleeve *sleeve;
+	struct i915_vma *vma;
+	int err;
+
+	sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
+	if (!sleeve)
+		return ERR_PTR(-ENOMEM);
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_free;
+	}
+
+	vma->private = sleeve;
+	vma->ops = &proxy_vma_ops;
+
+	sleeve->vma = vma;
+	sleeve->obj = i915_gem_object_get(obj);
+	sleeve->pages = pages;
+	sleeve->page_sizes = *page_sizes;
+
+	return sleeve;
+
+err_free:
+	kfree(sleeve);
+	return ERR_PTR(err);
+}
+
+static void destroy_sleeve(struct i915_sleeve *sleeve)
+{
+	i915_gem_object_put(sleeve->obj);
+	kfree(sleeve);
+}
+
+struct clear_pages_work {
+	struct dma_fence dma;
+	struct dma_fence_cb cb;
+	struct i915_sw_fence wait;
+	struct work_struct work;
+	struct irq_work irq_work;
+	struct i915_sleeve *sleeve;
+	struct intel_context *ce;
+	u32 value;
+};
+
+static const char *clear_pages_work_driver_name(struct dma_fence *fence)
+{
+	return DRIVER_NAME;
+}
+
+static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
+{
+	return "clear";
+}
+
+static void clear_pages_work_release(struct dma_fence *fence)
+{
+	struct clear_pages_work *w = container_of(fence, typeof(*w), dma);
+
+	destroy_sleeve(w->sleeve);
+
+	i915_sw_fence_fini(&w->wait);
+
+	BUILD_BUG_ON(offsetof(typeof(*w), dma));
+	dma_fence_free(&w->dma);
+}
+
+static const struct dma_fence_ops clear_pages_work_ops = {
+	.get_driver_name = clear_pages_work_driver_name,
+	.get_timeline_name = clear_pages_work_timeline_name,
+	.release = clear_pages_work_release,
+};
+
+static void clear_pages_signal_irq_worker(struct irq_work *work)
+{
+	struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);
+
+	dma_fence_signal(&w->dma);
+	dma_fence_put(&w->dma);
+}
+
+static void clear_pages_dma_fence_cb(struct dma_fence *fence,
+				     struct dma_fence_cb *cb)
+{
+	struct clear_pages_work *w = container_of(cb, typeof(*w), cb);
+
+	if (fence->error)
+		dma_fence_set_error(&w->dma, fence->error);
+
+	/*
+	 * Push the signalling of the fence into yet another worker to avoid
+	 * the nightmare locking around the fence spinlock.
+	 */
+	irq_work_queue(&w->irq_work);
+}
+
+static void clear_pages_worker(struct work_struct *work)
+{
+	struct clear_pages_work *w = container_of(work, typeof(*w), work);
+	struct drm_i915_private *i915 = w->ce->gem_context->i915;
+	struct drm_i915_gem_object *obj = w->sleeve->obj;
+	struct i915_vma *vma = w->sleeve->vma;
+	struct i915_request *rq;
+	int err = w->dma.error;
+
+	if (unlikely(err))
+		goto out_signal;
+
+	if (obj->cache_dirty) {
+		obj->write_domain = 0;
+		if (i915_gem_object_has_struct_page(obj))
+			drm_clflush_sg(w->sleeve->pages);
+		obj->cache_dirty = false;
+	}
+
+	/* XXX: we need to kill this */
+	mutex_lock(&i915->drm.struct_mutex);
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out_unlock;
+
+	rq = i915_request_create(w->ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_unpin;
+	}
+
+	/* There's no way the fence has signalled */
+	if (dma_fence_add_callback(&rq->fence, &w->cb,
+				   clear_pages_dma_fence_cb))
+		GEM_BUG_ON(1);
+
+	if (w->ce->engine->emit_init_breadcrumb) {
+		err = w->ce->engine->emit_init_breadcrumb(rq);
+		if (unlikely(err))
+			goto out_request;
+	}
+
+	err = intel_emit_vma_fill_blt(rq, vma, w->value);
+	if (unlikely(err))
+		goto out_request;
+
+	/* XXX: more feverish nightmares await */
+	i915_vma_lock(vma);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
+out_request:
+	if (unlikely(err)) {
+		i915_request_skip(rq, err);
+		err = 0;
+	}
+
+	i915_request_add(rq);
+out_unpin:
+	i915_vma_unpin(vma);
+out_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+out_signal:
+	if (unlikely(err)) {
+		dma_fence_set_error(&w->dma, err);
+		dma_fence_signal(&w->dma);
+		dma_fence_put(&w->dma);
+	}
+}
+
+static int __i915_sw_fence_call
+clear_pages_work_notify(struct i915_sw_fence *fence,
+			enum i915_sw_fence_notify state)
+{
+	struct clear_pages_work *w = container_of(fence, typeof(*w), wait);
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		schedule_work(&w->work);
+		break;
+
+	case FENCE_FREE:
+		dma_fence_put(&w->dma);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static DEFINE_SPINLOCK(fence_lock);
+
+/* XXX: better name please */
+int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
+				     struct intel_context *ce,
+				     struct sg_table *pages,
+				     struct i915_page_sizes *page_sizes,
+				     u32 value)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_gem_context *ctx = ce->gem_context;
+	struct i915_address_space *vm;
+	struct clear_pages_work *work;
+	struct i915_sleeve *sleeve;
+	int err;
+
+	vm = ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+
+	sleeve = create_sleeve(vm, obj, pages, page_sizes);
+	if (IS_ERR(sleeve))
+		return PTR_ERR(sleeve);
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (!work) {
+		destroy_sleeve(sleeve);
+		return -ENOMEM;
+	}
+
+	work->value = value;
+	work->sleeve = sleeve;
+	work->ce = ce;
+
+	INIT_WORK(&work->work, clear_pages_worker);
+
+	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
+
+	dma_fence_init(&work->dma,
+		       &clear_pages_work_ops,
+		       &fence_lock,
+		       i915->mm.unordered_timeline,
+		       0);
+	i915_sw_fence_init(&work->wait, clear_pages_work_notify);
+
+	i915_gem_object_lock(obj);
+	err = i915_sw_fence_await_reservation(&work->wait,
+					      obj->resv, NULL,
+					      true, I915_FENCE_TIMEOUT,
+					      I915_FENCE_GFP);
+	if (err < 0) {
+		dma_fence_set_error(&work->dma, err);
+	} else {
+		reservation_object_add_excl_fence(obj->resv, &work->dma);
+		err = 0;
+	}
+	i915_gem_object_unlock(obj);
+
+	dma_fence_get(&work->dma);
+	i915_sw_fence_commit(&work->wait);
+
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_client_blt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
new file mode 100644
index 000000000000..3dbd28c22ff5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+#ifndef __I915_GEM_CLIENT_BLT_H__
+#define __I915_GEM_CLIENT_BLT_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct i915_page_sizes;
+struct intel_context;
+struct sg_table;
+
+int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
+				     struct intel_context *ce,
+				     struct sg_table *pages,
+				     struct i915_page_sizes *page_sizes,
+				     u32 value);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
new file mode 100644
index 000000000000..84324b755de6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_gem_object_blt.h"
+
+#include "i915_gem_clflush.h"
+#include "intel_drv.h"
+
+int intel_emit_vma_fill_blt(struct i915_request *rq,
+			    struct i915_vma *vma,
+			    u32 value)
+{
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 8);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	if (INTEL_GEN(rq->i915) >= 8) {
+		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7-2);
+		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+		*cs++ = 0;
+		*cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+		*cs++ = lower_32_bits(vma->node.start);
+		*cs++ = upper_32_bits(vma->node.start);
+		*cs++ = value;
+		*cs++ = MI_NOOP;
+	} else {
+		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6-2);
+		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+		*cs++ = 0;
+		*cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+		*cs++ = vma->node.start;
+		*cs++ = value;
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+	}
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
+			     struct intel_context *ce,
+			     u32 value)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_gem_context *ctx = ce->gem_context;
+	struct i915_address_space *vm;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	int err;
+
+	/* XXX: ce->vm please */
+	vm = ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (unlikely(err))
+		return err;
+
+	if (obj->cache_dirty & ~obj->cache_coherent) {
+		i915_gem_object_lock(obj);
+		i915_gem_clflush_object(obj, 0);
+		i915_gem_object_unlock(obj);
+	}
+
+	rq = i915_request_create(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_unpin;
+	}
+
+	err = i915_request_await_object(rq, obj, true);
+	if (unlikely(err))
+		goto out_request;
+
+	if (ce->engine->emit_init_breadcrumb) {
+		err = ce->engine->emit_init_breadcrumb(rq);
+		if (unlikely(err))
+			goto out_request;
+	}
+
+	err = intel_emit_vma_fill_blt(rq, vma, value);
+	if (unlikely(err))
+		goto out_request;
+
+	i915_vma_lock(vma);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
+out_request:
+	if (unlikely(err))
+		i915_request_skip(rq, err);
+
+	i915_request_add(rq);
+out_unpin:
+	i915_vma_unpin(vma);
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_object_blt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
new file mode 100644
index 000000000000..7ec7de6ac0c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_BLT_H__
+#define __I915_GEM_OBJECT_BLT_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct intel_context;
+struct i915_request;
+struct i915_vma;
+
+int intel_emit_vma_fill_blt(struct i915_request *rq,
+			    struct i915_vma *vma,
+			    u32 value);
+
+int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
+			     struct intel_context *ce,
+			     u32 value);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
new file mode 100644
index 000000000000..b650d8656d92
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+#include "mock_context.h"
+
+static int igt_client_fill(void *arg)
+{
+	struct intel_context *ce = arg;
+	struct drm_i915_private *i915 = ce->gem_context->i915;
+	struct drm_i915_gem_object *obj;
+	struct rnd_state prng;
+	IGT_TIMEOUT(end);
+	u32 *vaddr;
+	int err = 0;
+
+	prandom_seed_state(&prng, i915_selftest.random_seed);
+
+	do {
+		u32 sz = prandom_u32_state(&prng) % SZ_32M;
+		u32 val = prandom_u32_state(&prng);
+		u32 i;
+
+		sz = round_up(sz, PAGE_SIZE);
+
+		pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+
+		obj = i915_gem_object_create_internal(i915, sz);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_flush;
+		}
+
+		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto err_put;
+		}
+
+		/*
+		 * XXX: The goal is to move this to get_pages, so try to dirty the
+		 * CPU cache first to check that we do the required clflush
+		 * before scheduling the blt for !llc platforms. This matches
+		 * some version of reality where at get_pages the pages
+		 * themselves may not yet be coherent with the GPU (swap-in). If
+		 * we are missing the flush then we should see the stale cache
+		 * values after we do the set_to_cpu_domain and pick it up as a
+		 * test failure.
+		 */
+		memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+
+		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+			obj->cache_dirty = true;
+
+		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
+						       &obj->mm.page_sizes,
+						       val);
+		if (err)
+			goto err_unpin;
+
+		/*
+		 * XXX: For now do the wait without the object resv lock to
+		 * ensure we don't deadlock.
+		 */
+		err = i915_gem_object_wait(obj,
+					   I915_WAIT_INTERRUPTIBLE |
+					   I915_WAIT_ALL,
+					   MAX_SCHEDULE_TIMEOUT);
+		if (err)
+			goto err_unpin;
+
+		i915_gem_object_lock(obj);
+		err = i915_gem_object_set_to_cpu_domain(obj, false);
+		i915_gem_object_unlock(obj);
+		if (err)
+			goto err_unpin;
+
+		for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+			if (vaddr[i] != val) {
+				pr_err("vaddr[%u]=%x, expected=%x\n", i,
+				       vaddr[i], val);
+				err = -EINVAL;
+				goto err_unpin;
+			}
+		}
+
+		i915_gem_object_unpin_map(obj);
+		i915_gem_object_put(obj);
+	} while (!time_after(jiffies, end));
+
+	goto err_flush;
+
+err_unpin:
+	i915_gem_object_unpin_map(obj);
+err_put:
+	i915_gem_object_put(obj);
+err_flush:
+	mutex_lock(&i915->drm.struct_mutex);
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
+int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_client_fill),
+	};
+
+	if (i915_terminally_wedged(i915))
+		return 0;
+
+	if (!HAS_ENGINE(i915, BCS0))
+		return 0;
+
+	return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
new file mode 100644
index 000000000000..717521c8eb0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+#include "mock_context.h"
+
+static int igt_fill_blt(void *arg)
+{
+	struct intel_context *ce = arg;
+	struct drm_i915_private *i915 = ce->gem_context->i915;
+	struct drm_i915_gem_object *obj;
+	struct rnd_state prng;
+	IGT_TIMEOUT(end);
+	u32 *vaddr;
+	int err = 0;
+
+	prandom_seed_state(&prng, i915_selftest.random_seed);
+
+	do {
+		u32 sz = prandom_u32_state(&prng) % SZ_32M;
+		u32 val = prandom_u32_state(&prng);
+		u32 i;
+
+		sz = round_up(sz, PAGE_SIZE);
+
+		pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+
+		obj = i915_gem_object_create_internal(i915, sz);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_flush;
+		}
+
+		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto err_put;
+		}
+
+		/*
+		 * Make sure the potentially async clflush does its job, if
+		 * required.
+		 */
+		memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+
+		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+			obj->cache_dirty = true;
+
+
+		mutex_lock(&i915->drm.struct_mutex);
+		err = i915_gem_object_fill_blt(obj, ce, val);
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (err)
+			goto err_unpin;
+
+		i915_gem_object_lock(obj);
+		err = i915_gem_object_set_to_cpu_domain(obj, false);
+		i915_gem_object_unlock(obj);
+		if (err)
+			goto err_unpin;
+
+		for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+			if (vaddr[i] != val) {
+				pr_err("vaddr[%u]=%x, expected=%x\n", i,
+				       vaddr[i], val);
+				err = -EINVAL;
+				goto err_unpin;
+			}
+		}
+
+		i915_gem_object_unpin_map(obj);
+		i915_gem_object_put(obj);
+	} while (!time_after(jiffies, end));
+
+	goto err_flush;
+
+err_unpin:
+	i915_gem_object_unpin_map(obj);
+err_put:
+	i915_gem_object_put(obj);
+err_flush:
+	mutex_lock(&i915->drm.struct_mutex);
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
+int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_fill_blt),
+	};
+
+	if (i915_terminally_wedged(i915))
+		return 0;
+
+	if (!HAS_ENGINE(i915, BCS0))
+		return 0;
+
+	return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index a34ece53a771..7e95827b0726 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -180,6 +180,7 @@
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
 
 #define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
+#define XY_COLOR_BLT_CMD		(2<<29 | 0x50<<22)
 #define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 9bda36a598b3..d5dc4427d664 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -25,6 +25,8 @@ selftest(gem, i915_gem_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(contexts, i915_gem_context_live_selftests)
+selftest(blt, i915_gem_object_blt_live_selftests)
+selftest(client, i915_gem_client_blt_live_selftests)
 selftest(reset, intel_reset_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
 selftest(execlists, intel_execlists_live_selftests)
-- 
2.20.1


* ✗ Fi.CI.CHECKPATCH: warning for series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
  2019-05-28 19:57 [PATCH v5 1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Matthew Auld
  2019-05-28 19:57 ` [PATCH v5 2/2] drm/i915: add in-kernel blitter client Matthew Auld
@ 2019-05-28 20:44 ` Patchwork
  2019-05-28 20:46 ` ✗ Fi.CI.SPARSE: " Patchwork
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2019-05-28 20:44 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
URL   : https://patchwork.freedesktop.org/series/61275/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
1cddd0a982f8 drm/i915/gtt: grab wakeref in gen6_alloc_va_range
2dea8c8a1c42 drm/i915: add in-kernel blitter client
-:37: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#37: 
new file mode 100644

-:402: CHECK:SPACING: spaces preferred around that '-' (ctx:VxV)
#402: FILE: drivers/gpu/drm/i915/gem/i915_gem_object_blt.c:22:
+		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7-2);
 		                                              ^

-:411: CHECK:SPACING: spaces preferred around that '-' (ctx:VxV)
#411: FILE: drivers/gpu/drm/i915/gem/i915_gem_object_blt.c:31:
+		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6-2);
 		                                              ^

-:543: WARNING:LINE_SPACING: Missing a blank line after declarations
#543: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c:18:
+	struct rnd_state prng;
+	IGT_TIMEOUT(end);

-:676: WARNING:LINE_SPACING: Missing a blank line after declarations
#676: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c:18:
+	struct rnd_state prng;
+	IGT_TIMEOUT(end);

-:712: CHECK:LINE_SPACING: Please don't use multiple blank lines
#712: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c:54:
+
+

-:778: CHECK:SPACING: spaces preferred around that '<<' (ctx:VxV)
#778: FILE: drivers/gpu/drm/i915/gt/intel_gpu_commands.h:183:
+#define XY_COLOR_BLT_CMD		(2<<29 | 0x50<<22)
                         		  ^

-:778: CHECK:SPACING: spaces preferred around that '<<' (ctx:VxV)
#778: FILE: drivers/gpu/drm/i915/gt/intel_gpu_commands.h:183:
+#define XY_COLOR_BLT_CMD		(2<<29 | 0x50<<22)
                         		             ^

total: 0 errors, 3 warnings, 5 checks, 727 lines checked


* Re: [PATCH v5 2/2] drm/i915: add in-kernel blitter client
  2019-05-28 19:57 ` [PATCH v5 2/2] drm/i915: add in-kernel blitter client Matthew Auld
@ 2019-05-28 20:45   ` Changqing Tang
  2019-05-28 20:52   ` Chris Wilson
  2019-05-29  9:26   ` kbuild test robot
  2 siblings, 0 replies; 10+ messages in thread
From: Changqing Tang @ 2019-05-28 20:45 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx

On Tue, 2019-05-28 at 20:57 +0100, Matthew Auld wrote:
> The plan is to use the blitter engine for async object clearing when
> using local memory, but before we can move the worker to get_pages()
> we
> have to first tame some more of our struct_mutex usage. With this in
> mind we should be able to upstream the object clearing as some
> selftests, which should serve as a guinea pig for the ongoing locking
> rework and upcoming async get_pages() framework.

Matt,
  I looked at this patch, and I am wondering whether we can make the
schedule/worker interface more generic so that we can run various async
tasks through it, such as clearing object pages, swapping object pages,
and migrating object pages. Even get_pages() itself could run
asynchronously.
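
A purely hypothetical sketch of what I mean (names invented here, nothing
like this exists in the series): keep the dma_fence/sw_fence/worker
plumbing from clear_pages_work, but make the op-specific body a callback
so that clear/swap/migrate can share the machinery.

	struct i915_async_op {
		struct dma_fence dma;
		struct i915_sw_fence wait;
		struct work_struct work;
		struct irq_work irq_work;

		/* hypothetical: op-specific body, run from the worker */
		int (*run)(struct i915_async_op *op);
	};
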
  Thoughts?

--CQ

> [snip]


* ✗ Fi.CI.SPARSE: warning for series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
  2019-05-28 19:57 [PATCH v5 1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Matthew Auld
  2019-05-28 19:57 ` [PATCH v5 2/2] drm/i915: add in-kernel blitter client Matthew Auld
  2019-05-28 20:44 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Patchwork
@ 2019-05-28 20:46 ` Patchwork
  2019-05-28 21:00 ` ✓ Fi.CI.BAT: success " Patchwork
  2019-05-29  8:01 ` ✓ Fi.CI.IGT: " Patchwork
  4 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2019-05-28 20:46 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
URL   : https://patchwork.freedesktop.org/series/61275/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915/gtt: grab wakeref in gen6_alloc_va_range
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1753:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1753:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1756:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1756:9: warning: expression using sizeof(void)

Commit: drm/i915: add in-kernel blitter client
+./include/linux/reservation.h:220:20: warning: dereference of noderef expression
+./include/linux/reservation.h:220:20: warning: dereference of noderef expression
+./include/linux/reservation.h:220:20: warning: dereference of noderef expression
+./include/linux/reservation.h:220:20: warning: dereference of noderef expression
+./include/linux/reservation.h:220:20: warning: dereference of noderef expression
+./include/linux/reservation.h:220:20: warning: dereference of noderef expression
+./include/linux/reservation.h:220:45: warning: dereference of noderef expression
+./include/linux/reservation.h:220:45: warning: dereference of noderef expression
+./include/linux/reservation.h:220:45: warning: dereference of noderef expression
+./include/linux/reservation.h:220:45: warning: dereference of noderef expression
+./include/linux/reservation.h:220:45: warning: dereference of noderef expression
+./include/linux/reservation.h:220:45: warning: dereference of noderef expression
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)


* Re: [PATCH v5 2/2] drm/i915: add in-kernel blitter client
  2019-05-28 19:57 ` [PATCH v5 2/2] drm/i915: add in-kernel blitter client Matthew Auld
  2019-05-28 20:45   ` Changqing Tang
@ 2019-05-28 20:52   ` Chris Wilson
  2019-05-28 22:32     ` Chris Wilson
  2019-05-29  9:26   ` kbuild test robot
  2 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2019-05-28 20:52 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx; +Cc: CQ Tang

Quoting Matthew Auld (2019-05-28 20:57:24)
> The plan is to use the blitter engine for async object clearing when
> using local memory, but before we can move the worker to get_pages() we
> have to first tame some more of our struct_mutex usage. With this in
> mind we should be able to upstream the object clearing as some
> selftests, which should serve as a guinea pig for the ongoing locking
> rework and upcoming async get_pages() framework.
> 
> [snip]
> +       .get_timeline_name = clear_pages_work_timeline_name,
> +       .release = clear_pages_work_release,
> +};
> +
> +static void clear_pages_signal_irq_worker(struct irq_work *work)
> +{
> +       struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);
> +
> +       dma_fence_signal(&w->dma);
> +       dma_fence_put(&w->dma);
> +}
> +
> +static void clear_pages_dma_fence_cb(struct dma_fence *fence,
> +                                    struct dma_fence_cb *cb)
> +{
> +       struct clear_pages_work *w = container_of(cb, typeof(*w), cb);
> +
> +       if (fence->error)
> +               dma_fence_set_error(&w->dma, fence->error);
> +
> +       /*
> +        * Push the signalling of the fence into yet another worker to avoid
> +        * the nightmare locking around the fence spinlock.
> +        */
> +       irq_work_queue(&w->irq_work);
> +}
> +
> +static void clear_pages_worker(struct work_struct *work)
> +{
> +       struct clear_pages_work *w = container_of(work, typeof(*w), work);
> +       struct drm_i915_private *i915 = w->ce->gem_context->i915;
> +       struct drm_i915_gem_object *obj = w->sleeve->obj;
> +       struct i915_vma *vma = w->sleeve->vma;
> +       struct i915_request *rq;
> +       int err = w->dma.error;
> +
> +       if (unlikely(err))
> +               goto out_signal;
> +
> +       if (obj->cache_dirty) {
> +               obj->write_domain = 0;
> +               if (i915_gem_object_has_struct_page(obj))
> +                       drm_clflush_sg(w->sleeve->pages);
> +               obj->cache_dirty = false;
> +       }
> +
> +       /* XXX: we need to kill this */
> +       mutex_lock(&i915->drm.struct_mutex);
> +       err = i915_vma_pin(vma, 0, 0, PIN_USER);

Getting closer, day by day.

> +       if (unlikely(err))
> +               goto out_unlock;
> +
> +       rq = i915_request_create(w->ce);
> +       if (IS_ERR(rq)) {
> +               err = PTR_ERR(rq);
> +               goto out_unpin;
> +       }
> +
> +       /* There's no way the fence has signalled */
> +       if (dma_fence_add_callback(&rq->fence, &w->cb,
> +                                  clear_pages_dma_fence_cb))
> +               GEM_BUG_ON(1);
> +
> +       if (w->ce->engine->emit_init_breadcrumb) {
> +               err = w->ce->engine->emit_init_breadcrumb(rq);
> +               if (unlikely(err))
> +                       goto out_request;
> +       }
> +
> +       err = intel_emit_vma_fill_blt(rq, vma, w->value);
> +       if (unlikely(err))
> +               goto out_request;
> +
> +       /* XXX: more feverish nightmares await */
> +       i915_vma_lock(vma);
> +       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
> +       i915_vma_unlock(vma);

Right. That is actually going to be quite tricky. Good to have it
spelled out. How well we detect the lock inversion if you remove the
other XXX is a good question.

In general I think it is better to do the move_to_active before emitting
the commands that may write into the vma -- it doesn't matter in this
case because you use i915_request_skip, but it keeps on triggering
alarm bells :)
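
For illustration, a minimal sketch of that reordering (an editorial
sketch against the worker flow quoted above, not the code that landed):
track the vma against the request before emitting the commands that
write into it:

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (unlikely(err))
		goto out_request;

	/* Emit the write only once the vma is tracked by the request */
	err = intel_emit_vma_fill_blt(rq, vma, w->value);
	if (unlikely(err))
		goto out_request;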

> +out_request:
> +       if (unlikely(err)) {
> +               i915_request_skip(rq, err);
> +               err = 0;
> +       }
> +
> +       i915_request_add(rq);
> +out_unpin:
> +       i915_vma_unpin(vma);
> +out_unlock:
> +       mutex_unlock(&i915->drm.struct_mutex);
> +out_signal:
> +       if (unlikely(err)) {
> +               dma_fence_set_error(&w->dma, err);
> +               dma_fence_signal(&w->dma);
> +               dma_fence_put(&w->dma);
> +       }
> +}
> +
> +static int __i915_sw_fence_call
> +clear_pages_work_notify(struct i915_sw_fence *fence,
> +                       enum i915_sw_fence_notify state)
> +{
> +       struct clear_pages_work *w = container_of(fence, typeof(*w), wait);
> +
> +       switch (state) {
> +       case FENCE_COMPLETE:
> +               schedule_work(&w->work);
> +               break;
> +
> +       case FENCE_FREE:
> +               dma_fence_put(&w->dma);
> +               break;
> +       }
> +
> +       return NOTIFY_DONE;
> +}
> +
> +static DEFINE_SPINLOCK(fence_lock);
> +
> +/* XXX: better name please */
> +int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
> +                                    struct intel_context *ce,
> +                                    struct sg_table *pages,
> +                                    struct i915_page_sizes *page_sizes,
> +                                    u32 value)
> +{
> +       struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +       struct i915_gem_context *ctx = ce->gem_context;
> +       struct i915_address_space *vm;
> +       struct clear_pages_work *work;
> +       struct i915_sleeve *sleeve;
> +       int err;
> +
> +       vm = ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
> +
> +       sleeve = create_sleeve(vm, obj, pages, page_sizes);
> +       if (IS_ERR(sleeve))
> +               return PTR_ERR(sleeve);
> +
> +       work = kmalloc(sizeof(*work), GFP_KERNEL);
> +       if (!work) {
> +               destroy_sleeve(sleeve);
> +               return -ENOMEM;
> +       }
> +
> +       work->value = value;
> +       work->sleeve = sleeve;
> +       work->ce = ce;
> +
> +       INIT_WORK(&work->work, clear_pages_worker);
> +
> +       init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
> +
> +       dma_fence_init(&work->dma,
> +                      &clear_pages_work_ops,
> +                      &fence_lock,
> +                      i915->mm.unordered_timeline,
> +                      0);
> +       i915_sw_fence_init(&work->wait, clear_pages_work_notify);
> +
> +       i915_gem_object_lock(obj);
> +       err = i915_sw_fence_await_reservation(&work->wait,
> +                                             obj->resv, NULL,
> +                                             true, I915_FENCE_TIMEOUT,
> +                                             I915_FENCE_GFP);
> +       if (err < 0) {
> +               dma_fence_set_error(&work->dma, err);
> +       } else {
> +               reservation_object_add_excl_fence(obj->resv, &work->dma);
> +               err = 0;
> +       }
> +       i915_gem_object_unlock(obj);
> +
> +       dma_fence_get(&work->dma);
> +       i915_sw_fence_commit(&work->wait);
> +
> +       return err;
> +}
> +
> +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> +#include "selftests/i915_gem_client_blt.c"
> +#endif
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
> new file mode 100644
> index 000000000000..3dbd28c22ff5
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
> @@ -0,0 +1,21 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2019 Intel Corporation
> + */
> +#ifndef __I915_GEM_CLIENT_BLT_H__
> +#define __I915_GEM_CLIENT_BLT_H__
> +
> +#include <linux/types.h>
> +
> +struct drm_i915_gem_object;
> +struct i915_page_sizes;
> +struct intel_context;
> +struct sg_table;
> +
> +int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
> +                                    struct intel_context *ce,
> +                                    struct sg_table *pages,
> +                                    struct i915_page_sizes *page_sizes,
> +                                    u32 value);
> +
> +#endif
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
> new file mode 100644
> index 000000000000..84324b755de6
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
> @@ -0,0 +1,109 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "i915_gem_object_blt.h"
> +
> +#include "i915_gem_clflush.h"
> +#include "intel_drv.h"
> +
> +int intel_emit_vma_fill_blt(struct i915_request *rq,
> +                           struct i915_vma *vma,
> +                           u32 value)
> +{
> +       u32 *cs;
> +
> +       cs = intel_ring_begin(rq, 8);
> +       if (IS_ERR(cs))
> +               return PTR_ERR(cs);
> +
> +       if (INTEL_GEN(rq->i915) >= 8) {
> +               *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7-2);
> +               *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
> +               *cs++ = 0;
> +               *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
> +               *cs++ = lower_32_bits(vma->node.start);
> +               *cs++ = upper_32_bits(vma->node.start);
> +               *cs++ = value;
> +               *cs++ = MI_NOOP;
> +       } else {
> +               *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6-2);
> +               *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
> +               *cs++ = 0;
> +               *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
> +               *cs++ = vma->node.start;
> +               *cs++ = value;
> +               *cs++ = MI_NOOP;
> +               *cs++ = MI_NOOP;
> +       }
> +
> +       intel_ring_advance(rq, cs);
> +
> +       return 0;
> +}
> +
> +int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
> +                            struct intel_context *ce,
> +                            u32 value)
> +{
> +       struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +       struct i915_gem_context *ctx = ce->gem_context;
> +       struct i915_address_space *vm;
> +       struct i915_request *rq;
> +       struct i915_vma *vma;
> +       int err;
> +
> +       /* XXX: ce->vm please */
> +       vm = ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
> +
> +       vma = i915_vma_instance(obj, vm, NULL);
> +       if (IS_ERR(vma))
> +               return PTR_ERR(vma);
> +
> +       err = i915_vma_pin(vma, 0, 0, PIN_USER);
> +       if (unlikely(err))
> +               return err;
> +
> +       if (obj->cache_dirty & ~obj->cache_coherent) {
> +               i915_gem_object_lock(obj);
> +               i915_gem_clflush_object(obj, 0);
> +               i915_gem_object_unlock(obj);
> +       }
> +
> +       rq = i915_request_create(ce);
> +       if (IS_ERR(rq)) {
> +               err = PTR_ERR(rq);
> +               goto out_unpin;
> +       }
> +
> +       err = i915_request_await_object(rq, obj, true);
> +       if (unlikely(err))
> +               goto out_request;
> +
> +       if (ce->engine->emit_init_breadcrumb) {
> +               err = ce->engine->emit_init_breadcrumb(rq);
> +               if (unlikely(err))
> +                       goto out_request;
> +       }
> +
> +       err = intel_emit_vma_fill_blt(rq, vma, value);
> +       if (unlikely(err))
> +               goto out_request;
> +
> +       i915_vma_lock(vma);
> +       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
> +       i915_vma_unlock(vma);
> +out_request:
> +       if (unlikely(err))
> +               i915_request_skip(rq, err);
> +
> +       i915_request_add(rq);
> +out_unpin:
> +       i915_vma_unpin(vma);
> +       return err;
> +}
> +
> +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> +#include "selftests/i915_gem_object_blt.c"
> +#endif
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
> new file mode 100644
> index 000000000000..7ec7de6ac0c0
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
> @@ -0,0 +1,24 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef __I915_GEM_OBJECT_BLT_H__
> +#define __I915_GEM_OBJECT_BLT_H__
> +
> +#include <linux/types.h>
> +
> +struct drm_i915_gem_object;
> +struct intel_context;
> +struct i915_request;
> +struct i915_vma;
> +
> +int intel_emit_vma_fill_blt(struct i915_request *rq,
> +                           struct i915_vma *vma,
> +                           u32 value);
> +
> +int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
> +                            struct intel_context *ce,
> +                            u32 value);
> +
> +#endif
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
> new file mode 100644
> index 000000000000..b650d8656d92
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
> @@ -0,0 +1,127 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "../i915_selftest.h"
> +
> +#include "selftests/igt_flush_test.h"
> +#include "selftests/mock_drm.h"
> +#include "mock_context.h"
> +
> +static int igt_client_fill(void *arg)
> +{
> +       struct intel_context *ce = arg;
> +       struct drm_i915_private *i915 = ce->gem_context->i915;
> +       struct drm_i915_gem_object *obj;
> +       struct rnd_state prng;
> +       IGT_TIMEOUT(end);
> +       u32 *vaddr;
> +       int err = 0;
> +
> +       prandom_seed_state(&prng, i915_selftest.random_seed);
> +
> +       do {
> +               u32 sz = prandom_u32_state(&prng) % SZ_32M;
> +               u32 val = prandom_u32_state(&prng);
> +               u32 i;
> +
> +               sz = round_up(sz, PAGE_SIZE);
> +
> +               pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
> +
> +               obj = i915_gem_object_create_internal(i915, sz);
> +               if (IS_ERR(obj)) {
> +                       err = PTR_ERR(obj);
> +                       goto err_flush;
> +               }
> +
> +               vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
> +               if (IS_ERR(vaddr)) {
> +                       err = PTR_ERR(vaddr);
> +                       goto err_put;
> +               }
> +
> +               /*
> +                * XXX: The goal is to move this to get_pages, so try to dirty the
> +                * CPU cache first to check that we do the required clflush
> +                * before scheduling the blt for !llc platforms. This matches
> +                * some version of reality where at get_pages the pages
> +                * themselves may not yet be coherent with the GPU (swap-in). If
> +                * we are missing the flush then we should see the stale cache
> +                * values after we do the set_to_cpu_domain and pick it up as a
> +                * test failure.
> +                */
> +               memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
> +
> +               if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
> +                       obj->cache_dirty = true;
> +
> +               err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
> +                                                      &obj->mm.page_sizes,
> +                                                      val);
> +               if (err)
> +                       goto err_unpin;
> +
> +               /*
> +                * XXX: For now do the wait without the object resv lock to
> +                * ensure we don't deadlock.
> +                */
> +               err = i915_gem_object_wait(obj,
> +                                          I915_WAIT_INTERRUPTIBLE |
> +                                          I915_WAIT_ALL,
> +                                          MAX_SCHEDULE_TIMEOUT);
> +               if (err)
> +                       goto err_unpin;
> +
> +               i915_gem_object_lock(obj);
> +               err = i915_gem_object_set_to_cpu_domain(obj, false);
> +               i915_gem_object_unlock(obj);
> +               if (err)
> +                       goto err_unpin;
> +
> +               for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
> +                       if (vaddr[i] != val) {
> +                               pr_err("vaddr[%u]=%x, expected=%x\n", i,
> +                                      vaddr[i], val);
> +                               err = -EINVAL;
> +                               goto err_unpin;
> +                       }
> +               }
> +
> +               i915_gem_object_unpin_map(obj);
> +               i915_gem_object_put(obj);
> +       } while (!time_after(jiffies, end));
> +
> +       goto err_flush;
> +
> +err_unpin:
> +       i915_gem_object_unpin_map(obj);
> +err_put:
> +       i915_gem_object_put(obj);
> +err_flush:
> +       mutex_lock(&i915->drm.struct_mutex);
> +       if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +               err = -EIO;
> +       mutex_unlock(&i915->drm.struct_mutex);
> +
> +       if (err == -ENOMEM)
> +               err = 0;
> +
> +       return err;
> +}
> +
> +int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
> +{
> +       static const struct i915_subtest tests[] = {
> +               SUBTEST(igt_client_fill),
> +       };
> +
> +       if (i915_terminally_wedged(i915))
> +               return 0;
> +
> +       if (!HAS_ENGINE(i915, BCS0))
> +               return 0;
> +
> +       return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
> +}
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
> new file mode 100644
> index 000000000000..717521c8eb0a
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
> @@ -0,0 +1,111 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "../i915_selftest.h"
> +
> +#include "selftests/igt_flush_test.h"
> +#include "selftests/mock_drm.h"
> +#include "mock_context.h"
> +
> +static int igt_fill_blt(void *arg)
> +{
> +       struct intel_context *ce = arg;
> +       struct drm_i915_private *i915 = ce->gem_context->i915;
> +       struct drm_i915_gem_object *obj;
> +       struct rnd_state prng;
> +       IGT_TIMEOUT(end);
> +       u32 *vaddr;
> +       int err = 0;
> +
> +       prandom_seed_state(&prng, i915_selftest.random_seed);
> +
> +       do {
> +               u32 sz = prandom_u32_state(&prng) % SZ_32M;
> +               u32 val = prandom_u32_state(&prng);
> +               u32 i;
> +
> +               sz = round_up(sz, PAGE_SIZE);
> +
> +               pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
> +
> +               obj = i915_gem_object_create_internal(i915, sz);
> +               if (IS_ERR(obj)) {
> +                       err = PTR_ERR(obj);
> +                       goto err_flush;
> +               }
> +
> +               vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
> +               if (IS_ERR(vaddr)) {
> +                       err = PTR_ERR(vaddr);
> +                       goto err_put;
> +               }
> +
> +               /*
> +                * Make sure the potentially async clflush does its job, if
> +                * required.
> +                */
> +               memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
> +
> +               if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
> +                       obj->cache_dirty = true;
> +
> +               mutex_lock(&i915->drm.struct_mutex);
> +               err = i915_gem_object_fill_blt(obj, ce, val);
> +               mutex_unlock(&i915->drm.struct_mutex);
> +               if (err)
> +                       goto err_unpin;
> +
> +               i915_gem_object_lock(obj);
> +               err = i915_gem_object_set_to_cpu_domain(obj, false);
> +               i915_gem_object_unlock(obj);
> +               if (err)
> +                       goto err_unpin;
> +
> +               for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
> +                       if (vaddr[i] != val) {
> +                               pr_err("vaddr[%u]=%x, expected=%x\n", i,
> +                                      vaddr[i], val);
> +                               err = -EINVAL;
> +                               goto err_unpin;
> +                       }
> +               }
> +
> +               i915_gem_object_unpin_map(obj);
> +               i915_gem_object_put(obj);
> +       } while (!time_after(jiffies, end));
> +
> +       goto err_flush;
> +
> +err_unpin:
> +       i915_gem_object_unpin_map(obj);
> +err_put:
> +       i915_gem_object_put(obj);
> +err_flush:
> +       mutex_lock(&i915->drm.struct_mutex);
> +       if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +               err = -EIO;
> +       mutex_unlock(&i915->drm.struct_mutex);
> +
> +       if (err == -ENOMEM)
> +               err = 0;
> +
> +       return err;
> +}
> +
> +int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
> +{
> +       static const struct i915_subtest tests[] = {
> +               SUBTEST(igt_fill_blt),
> +       };
> +
> +       if (i915_terminally_wedged(i915))
> +               return 0;
> +
> +       if (!HAS_ENGINE(i915, BCS0))
> +               return 0;
> +
> +       return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
> +}
> diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
> index a34ece53a771..7e95827b0726 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
> @@ -180,6 +180,7 @@
>  #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
>  
>  #define COLOR_BLT_CMD                  (2<<29 | 0x40<<22 | (5-2))
> +#define XY_COLOR_BLT_CMD               (2<<29 | 0x50<<22)
>  #define SRC_COPY_BLT_CMD               ((2<<29)|(0x43<<22)|4)
>  #define XY_SRC_COPY_BLT_CMD            ((2<<29)|(0x53<<22)|6)
>  #define XY_MONO_SRC_COPY_IMM_BLT       ((2<<29)|(0x71<<22)|5)
> diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> index 9bda36a598b3..d5dc4427d664 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> @@ -25,6 +25,8 @@ selftest(gem, i915_gem_live_selftests)
>  selftest(evict, i915_gem_evict_live_selftests)
>  selftest(hugepages, i915_gem_huge_page_live_selftests)
>  selftest(contexts, i915_gem_context_live_selftests)
> +selftest(blt, i915_gem_object_blt_live_selftests)
> +selftest(client, i915_gem_client_blt_live_selftests)
>  selftest(reset, intel_reset_live_selftests)
>  selftest(hangcheck, intel_hangcheck_live_selftests)
>  selftest(execlists, intel_execlists_live_selftests)

Tons of XXX to be worked on, but that is the point of the patch: to
start spelling out the work that needs to be done for this approach to
even work. And there are plenty more layers where we need to add tests
as we remove the lock impediments.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

* ✓ Fi.CI.BAT: success for series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
  2019-05-28 19:57 [PATCH v5 1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Matthew Auld
                   ` (2 preceding siblings ...)
  2019-05-28 20:46 ` ✗ Fi.CI.SPARSE: " Patchwork
@ 2019-05-28 21:00 ` Patchwork
  2019-05-29  8:01 ` ✓ Fi.CI.IGT: " Patchwork
  4 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2019-05-28 21:00 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
URL   : https://patchwork.freedesktop.org/series/61275/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6159 -> Patchwork_13118
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/

New tests
---------

  New tests have been introduced between CI_DRM_6159 and Patchwork_13118:

### New IGT tests (2) ###

  * igt@i915_selftest@live_blt:
    - Statuses : 41 pass(s)
    - Exec time: [0.41, 1.93] s

  * igt@i915_selftest@live_client:
    - Statuses : 41 pass(s)
    - Exec time: [0.39, 2.00] s

  

Known issues
------------

  Here are the changes found in Patchwork_13118 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_pm_rpm@module-reload:
    - fi-skl-6770hq:      [PASS][1] -> [FAIL][2] ([fdo#108511])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/fi-skl-6770hq/igt@i915_pm_rpm@module-reload.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/fi-skl-6770hq/igt@i915_pm_rpm@module-reload.html

  
  [fdo#108511]: https://bugs.freedesktop.org/show_bug.cgi?id=108511


Participating hosts (48 -> 41)
------------------------------

  Missing    (7): fi-kbl-soraka fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_6159 -> Patchwork_13118

  CI_DRM_6159: f280d33ae895624a247d4431ece09b5088e6f021 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5021: 2d64cb6808075b0d0696a89d2ce290220e6eff8e @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13118: 2dea8c8a1c423b32a8d08b188c6878cacd434cc4 @ git://anongit.freedesktop.org/gfx-ci/linux


== Kernel 32bit build ==

Warning: Kernel 32bit buildtest failed:
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/build_32bit.log

  CALL    scripts/checksyscalls.sh
  CALL    scripts/atomic/check-atomics.sh
  CHK     include/generated/compile.h
  AR      drivers/gpu/drm/i915/built-in.a
  CC [M]  drivers/gpu/drm/i915/header_test_i915_active_types.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_debugfs.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_drv.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_irq.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_params.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_priolist_types.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_reg.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_scheduler_types.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_timeline_types.o
  CC [M]  drivers/gpu/drm/i915/header_test_i915_utils.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_acpi.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_atomic.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_atomic_plane.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_audio.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_bios.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_cdclk.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_color.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_combo_phy.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_connector.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_crt.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_csr.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_ddi.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dp.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dp_aux_backlight.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dp_link_training.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dp_mst.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dpio_phy.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dpll_mgr.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_drv.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dsi.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dsi_dcs_backlight.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dvo.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_dvo_dev.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_fbc.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_fbdev.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_fifo_underrun.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_frontbuffer.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_gmbus.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_hdcp.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_hdmi.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_hotplug.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_lpe_audio.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_lspcon.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_lvds.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_overlay.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_panel.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_pipe_crc.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_pm.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_psr.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_quirks.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_runtime_pm.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_sdvo.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_sideband.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_sprite.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_tv.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_uncore.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_vdsc.o
  CC [M]  drivers/gpu/drm/i915/header_test_intel_wakeref.o
  CC [M]  drivers/gpu/drm/i915/gem/i915_gem_client_blt.o
In file included from drivers/gpu/drm/i915/gem/i915_gem_client_blt.c:305:0:
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c:6:10: fatal error: ../i915_selftest.h: No such file or directory
 #include "../i915_selftest.h"
          ^~~~~~~~~~~~~~~~~~~~
compilation terminated.
scripts/Makefile.build:278: recipe for target 'drivers/gpu/drm/i915/gem/i915_gem_client_blt.o' failed
make[4]: *** [drivers/gpu/drm/i915/gem/i915_gem_client_blt.o] Error 1
scripts/Makefile.build:489: recipe for target 'drivers/gpu/drm/i915' failed
make[3]: *** [drivers/gpu/drm/i915] Error 2
scripts/Makefile.build:489: recipe for target 'drivers/gpu/drm' failed
make[2]: *** [drivers/gpu/drm] Error 2
scripts/Makefile.build:489: recipe for target 'drivers/gpu' failed
make[1]: *** [drivers/gpu] Error 2
Makefile:1071: recipe for target 'drivers' failed
make: *** [drivers] Error 2


== Linux commits ==

2dea8c8a1c42 drm/i915: add in-kernel blitter client
1cddd0a982f8 drm/i915/gtt: grab wakeref in gen6_alloc_va_range

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/

* Re: [PATCH v5 2/2] drm/i915: add in-kernel blitter client
  2019-05-28 20:52   ` Chris Wilson
@ 2019-05-28 22:32     ` Chris Wilson
  0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2019-05-28 22:32 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx; +Cc: CQ Tang

Quoting Chris Wilson (2019-05-28 21:52:04)
> Quoting Matthew Auld (2019-05-28 20:57:24)
> Tons of XXX to be worked on, but that is the point of the patch: to
> start spelling out the work that needs to be done for this approach to
> even work. And there's plenty more layers we need to add tests as we
> remove the lock impediments.

We can also add, under CONFIG_BROKEN, tests that we expect to work
eventually but currently die due to lockdep (or other reasons).
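
For illustration, a sketch of how such a test could be gated
(igt_client_fill_locked is a hypothetical name here, following the
subtest table style used in the patch):

	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
	#if IS_ENABLED(CONFIG_BROKEN)
		/* Expected to pass eventually; dies under lockdep today. */
		SUBTEST(igt_client_fill_locked),
	#endif
	};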
-Chris

* ✓ Fi.CI.IGT: success for series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
  2019-05-28 19:57 [PATCH v5 1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Matthew Auld
                   ` (3 preceding siblings ...)
  2019-05-28 21:00 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-05-29  8:01 ` Patchwork
  4 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2019-05-29  8:01 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range
URL   : https://patchwork.freedesktop.org/series/61275/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6159_full -> Patchwork_13118_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

New tests
---------

  New tests have been introduced between CI_DRM_6159_full and Patchwork_13118_full:

### New IGT tests (2) ###

  * igt@i915_selftest@live_blt:
    - Statuses : 6 pass(s)
    - Exec time: [1.39, 2.86] s

  * igt@i915_selftest@live_client:
    - Statuses : 6 pass(s)
    - Exec time: [1.39, 2.92] s

  

Known issues
------------

  Here are the changes found in Patchwork_13118_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-kbl:          [PASS][1] -> [INCOMPLETE][2] ([fdo#103665])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-kbl1/igt@gem_ctx_isolation@rcs0-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-kbl2/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@i915_suspend@fence-restore-untiled:
    - shard-apl:          [PASS][3] -> [DMESG-WARN][4] ([fdo#108566]) +6 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-apl2/igt@i915_suspend@fence-restore-untiled.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-apl6/igt@i915_suspend@fence-restore-untiled.html

  * igt@kms_cursor_crc@pipe-a-cursor-suspend:
    - shard-skl:          [PASS][5] -> [INCOMPLETE][6] ([fdo#110741])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-skl7/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-skl7/igt@kms_cursor_crc@pipe-a-cursor-suspend.html

  * igt@kms_flip@plain-flip-ts-check:
    - shard-glk:          [PASS][7] -> [FAIL][8] ([fdo#100368])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-glk3/igt@kms_flip@plain-flip-ts-check.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-glk8/igt@kms_flip@plain-flip-ts-check.html

  * igt@kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-plflip-blt:
    - shard-hsw:          [PASS][9] -> [SKIP][10] ([fdo#109271]) +33 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-hsw1/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-plflip-blt.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-hsw1/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-plflip-blt.html

  * igt@kms_frontbuffer_tracking@fbc-suspend:
    - shard-kbl:          [PASS][11] -> [DMESG-WARN][12] ([fdo#108566])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-kbl2/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-kbl7/igt@kms_frontbuffer_tracking@fbc-suspend.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [PASS][13] -> [FAIL][14] ([fdo#108145] / [fdo#110403])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-skl4/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-skl8/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_setmode@basic:
    - shard-apl:          [PASS][15] -> [FAIL][16] ([fdo#99912])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-apl4/igt@kms_setmode@basic.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-apl2/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-c-ts-continuation-suspend:
    - shard-skl:          [PASS][17] -> [INCOMPLETE][18] ([fdo#104108])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-skl10/igt@kms_vblank@pipe-c-ts-continuation-suspend.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-skl8/igt@kms_vblank@pipe-c-ts-continuation-suspend.html

  
#### Possible fixes ####

  * igt@gem_eio@in-flight-suspend:
    - shard-skl:          [INCOMPLETE][19] ([fdo#104108]) -> [PASS][20] +1 similar issue
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-skl3/igt@gem_eio@in-flight-suspend.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-skl3/igt@gem_eio@in-flight-suspend.html

  * igt@i915_pm_rpm@debugfs-read:
    - shard-skl:          [INCOMPLETE][21] ([fdo#107807]) -> [PASS][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-skl5/igt@i915_pm_rpm@debugfs-read.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-skl5/igt@i915_pm_rpm@debugfs-read.html

  * igt@i915_suspend@debugfs-reader:
    - shard-apl:          [DMESG-WARN][23] ([fdo#108566]) -> [PASS][24] +4 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-apl3/igt@i915_suspend@debugfs-reader.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-apl3/igt@i915_suspend@debugfs-reader.html

  * igt@kms_flip@2x-flip-vs-blocking-wf-vblank:
    - shard-hsw:          [SKIP][25] ([fdo#109271]) -> [PASS][26] +2 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-hsw1/igt@kms_flip@2x-flip-vs-blocking-wf-vblank.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-hsw4/igt@kms_flip@2x-flip-vs-blocking-wf-vblank.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [FAIL][27] ([fdo#105363]) -> [PASS][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/shard-skl6/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/shard-skl10/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  
  [fdo#100368]: https://bugs.freedesktop.org/show_bug.cgi?id=100368
  [fdo#103665]: https://bugs.freedesktop.org/show_bug.cgi?id=103665
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#107807]: https://bugs.freedesktop.org/show_bug.cgi?id=107807
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#110403]: https://bugs.freedesktop.org/show_bug.cgi?id=110403
  [fdo#110741]: https://bugs.freedesktop.org/show_bug.cgi?id=110741
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912


Participating hosts (9 -> 9)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * Linux: CI_DRM_6159 -> Patchwork_13118

  CI_DRM_6159: f280d33ae895624a247d4431ece09b5088e6f021 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5021: 2d64cb6808075b0d0696a89d2ce290220e6eff8e @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13118: 2dea8c8a1c423b32a8d08b188c6878cacd434cc4 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13118/

* Re: [PATCH v5 2/2] drm/i915: add in-kernel blitter client
  2019-05-28 19:57 ` [PATCH v5 2/2] drm/i915: add in-kernel blitter client Matthew Auld
  2019-05-28 20:45   ` Changqing Tang
  2019-05-28 20:52   ` Chris Wilson
@ 2019-05-29  9:26   ` kbuild test robot
  2 siblings, 0 replies; 10+ messages in thread
From: kbuild test robot @ 2019-05-29  9:26 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx, kbuild-all, CQ Tang

Hi Matthew,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on drm-intel/for-linux-next]
[cannot apply to v5.2-rc2 next-20190528]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Matthew-Auld/drm-i915-gtt-grab-wakeref-in-gen6_alloc_va_range/20190529-132242
base:   git://anongit.freedesktop.org/drm-intel for-linux-next
config: i386-randconfig-n018201921 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from drivers/gpu/drm/i915/gem/i915_gem_client_blt.c:305:0:
>> drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c:6:10: fatal error: ../i915_selftest.h: No such file or directory
    #include "../i915_selftest.h"
             ^~~~~~~~~~~~~~~~~~~~
   compilation terminated.
--
   In file included from drivers/gpu/drm/i915/gem/i915_gem_object_blt.c:108:0:
>> drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c:6:10: fatal error: ../i915_selftest.h: No such file or directory
    #include "../i915_selftest.h"
             ^~~~~~~~~~~~~~~~~~~~
   compilation terminated.

vim +6 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c

   > 6	#include "../i915_selftest.h"
     7	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 34737 bytes --]


end of thread

Thread overview: 10+ messages
2019-05-28 19:57 [PATCH v5 1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Matthew Auld
2019-05-28 19:57 ` [PATCH v5 2/2] drm/i915: add in-kernel blitter client Matthew Auld
2019-05-28 20:45   ` Changqing Tang
2019-05-28 20:52   ` Chris Wilson
2019-05-28 22:32     ` Chris Wilson
2019-05-29  9:26   ` kbuild test robot
2019-05-28 20:44 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [v5,1/2] drm/i915/gtt: grab wakeref in gen6_alloc_va_range Patchwork
2019-05-28 20:46 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-05-28 21:00 ` ✓ Fi.CI.BAT: success " Patchwork
2019-05-29  8:01 ` ✓ Fi.CI.IGT: " Patchwork
