* [PATCH 0/2] Add i915_gem_object_vmap
@ 2016-02-17 23:18 yu.dai
  2016-02-17 23:18 ` [PATCH 1/2] drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space yu.dai
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: yu.dai @ 2016-02-17 23:18 UTC (permalink / raw)
  To: intel-gfx; +Cc: daniel.vetter

From: Alex Dai <yu.dai@intel.com>

There are several places in the driver where a GEM object is mapped to kernel
virtual space. Add a common function, i915_gem_object_vmap, to do the vmap
work for these use cases.
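
A rough sketch of the intended usage (assuming the caller has already
gathered and pinned the object's pages, and vunmaps after use; 'obj' and
'npages' here are placeholder locals):

	/* sketch only; obj is a pinned GEM object, npages its page count */
	void *addr = i915_gem_object_vmap(obj, 0, npages);
	if (addr == NULL)
		return -ENOMEM;
	/* ... access the object contents through addr ... */
	vunmap(addr);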

Alex Dai (2):
  drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space
  drm/i915/guc: Simplify code by keeping vmap of guc_client object

 drivers/gpu/drm/i915/i915_cmd_parser.c     | 28 +---------------
 drivers/gpu/drm/i915/i915_drv.h            |  3 ++
 drivers/gpu/drm/i915/i915_gem.c            | 44 +++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 15 ++-------
 drivers/gpu/drm/i915/i915_guc_submission.c | 53 +++++++++---------------------
 drivers/gpu/drm/i915/intel_guc.h           |  3 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 24 ++------------
 7 files changed, 70 insertions(+), 100 deletions(-)

-- 
2.5.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* [PATCH 1/2] drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space
  2016-02-17 23:18 [PATCH 0/2] Add i915_gem_object_vmap yu.dai
@ 2016-02-17 23:18 ` yu.dai
  2016-02-18 10:49   ` Tvrtko Ursulin
  2016-02-17 23:18 ` [PATCH 2/2] drm/i915/guc: Simplify code by keeping vmap of guc_client object yu.dai
  2016-02-19 14:22 ` ✗ Fi.CI.BAT: failure for Add i915_gem_object_vmap Patchwork
  2 siblings, 1 reply; 6+ messages in thread
From: yu.dai @ 2016-02-17 23:18 UTC (permalink / raw)
  To: intel-gfx; +Cc: daniel.vetter

From: Alex Dai <yu.dai@intel.com>

There are several places inside the driver where a GEM object is mapped to
kernel virtual space. The mapping is done either for the whole object or for
a certain page range of it.

This patch introduces a function, i915_gem_object_vmap, to do this job.

Signed-off-by: Alex Dai <yu.dai@intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c  | 28 +--------------------
 drivers/gpu/drm/i915/i915_drv.h         |  3 +++
 drivers/gpu/drm/i915/i915_gem.c         | 44 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 15 ++---------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 24 ++----------------
 5 files changed, 52 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894..915e8c1 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -863,37 +863,11 @@ find_reg(const struct drm_i915_reg_descriptor *table,
 static u32 *vmap_batch(struct drm_i915_gem_object *obj,
 		       unsigned start, unsigned len)
 {
-	int i;
-	void *addr = NULL;
-	struct sg_page_iter sg_iter;
 	int first_page = start >> PAGE_SHIFT;
 	int last_page = (len + start + 4095) >> PAGE_SHIFT;
 	int npages = last_page - first_page;
-	struct page **pages;
-
-	pages = drm_malloc_ab(npages, sizeof(*pages));
-	if (pages == NULL) {
-		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
-		goto finish;
-	}
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
-		pages[i++] = sg_page_iter_page(&sg_iter);
-		if (i == npages)
-			break;
-	}
-
-	addr = vmap(pages, i, 0, PAGE_KERNEL);
-	if (addr == NULL) {
-		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
-		goto finish;
-	}
 
-finish:
-	if (pages)
-		drm_free_large(pages);
-	return (u32*)addr;
+	return (u32*)i915_gem_object_vmap(obj, first_page, npages);
 }
 
 /* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6644c2e..5b00a6a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2899,6 +2899,9 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data(
 		struct drm_device *dev, const void *data, size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
+void *i915_gem_object_vmap(struct drm_i915_gem_object *obj,
+			   unsigned int first,
+			   unsigned int npages);
 
 /* Flags used by pin/bind&friends. */
 #define PIN_MAPPABLE	(1<<0)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f68f346..a6f465b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5356,3 +5356,47 @@ fail:
 	drm_gem_object_unreference(&obj->base);
 	return ERR_PTR(ret);
 }
+
+/**
+ * i915_gem_object_vmap - map a GEM obj into kernel virtual space
+ * @obj: the GEM obj to be mapped
+ * @first: index of the first page where mapping starts
+ * @npages: how many pages to be mapped, starting from first page
+ *
+ * Map a given page range of GEM obj into kernel virtual space. The caller must
+ * make sure the associated pages are gathered and pinned before calling this
+ * function. vunmap should be called after use.
+ *
+ * Returns NULL on failure.
+ */
+void *i915_gem_object_vmap(struct drm_i915_gem_object *obj,
+			   unsigned int first,
+			   unsigned int npages)
+{
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+	void *addr;
+	int i;
+
+	if (first + npages > obj->pages->nents) {
+		DRM_DEBUG_DRIVER("Invalid page count\n");
+		return NULL;
+	}
+
+	pages = drm_malloc_ab(npages, sizeof(*pages));
+	if (pages == NULL) {
+		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+		return NULL;
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first)
+		pages[i++] = sg_page_iter_page(&sg_iter);
+
+	addr = vmap(pages, npages, 0, PAGE_KERNEL);
+	if (addr == NULL)
+		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+	drm_free_large(pages);
+
+	return addr;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 1f3eef6..d269957 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -110,9 +110,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	struct sg_page_iter sg_iter;
-	struct page **pages;
-	int ret, i;
+	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -131,16 +129,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	ret = -ENOMEM;
 
-	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-	if (pages == NULL)
-		goto err_unpin;
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-		pages[i++] = sg_page_iter_page(&sg_iter);
-
-	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
-	drm_free_large(pages);
+	obj->dma_buf_vmapping = i915_gem_object_vmap(obj, 0, obj->pages->nents);
 
 	if (!obj->dma_buf_vmapping)
 		goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a..fe6d14a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2064,27 +2064,6 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	struct page **pages;
-	void *addr;
-	int i;
-
-	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-	if (pages == NULL)
-		return NULL;
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-		pages[i++] = sg_page_iter_page(&sg_iter);
-
-	addr = vmap(pages, i, 0, PAGE_KERNEL);
-	drm_free_large(pages);
-
-	return addr;
-}
-
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf)
 {
@@ -2103,7 +2082,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 			return ret;
 		}
 
-		ringbuf->virtual_start = vmap_obj(obj);
+		ringbuf->virtual_start =
+			i915_gem_object_vmap(obj, 0, obj->pages->nents);
 		if (ringbuf->virtual_start == NULL) {
 			i915_gem_object_ggtt_unpin(obj);
 			return -ENOMEM;
-- 
2.5.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* [PATCH 2/2] drm/i915/guc: Simplify code by keeping vmap of guc_client object
  2016-02-17 23:18 [PATCH 0/2] Add i915_gem_object_vmap yu.dai
  2016-02-17 23:18 ` [PATCH 1/2] drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space yu.dai
@ 2016-02-17 23:18 ` yu.dai
  2016-02-18 11:12   ` Tvrtko Ursulin
  2016-02-19 14:22 ` ✗ Fi.CI.BAT: failure for Add i915_gem_object_vmap Patchwork
  2 siblings, 1 reply; 6+ messages in thread
From: yu.dai @ 2016-02-17 23:18 UTC (permalink / raw)
  To: intel-gfx; +Cc: daniel.vetter

From: Alex Dai <yu.dai@intel.com>

The GuC client object is always pinned during its life cycle. We cache
the vmap of the client object, which includes the guc_process_desc, the
doorbell and the work queue. By doing so, we can simplify the code where
the driver communicates with the GuC.

As a result, this patch removes the kmap_atomic in wq_check_space, where
usleep_range could be called while the kmap_atomic mapping is held. This
fixes the issue below.

[   34.098798] BUG: scheduling while atomic: gem_close_race/1941/0x00000002
[   34.098822] Modules linked in: hid_generic usbhid i915 asix usbnet libphy mii i2c_algo_bit drm_kms_helper cfbfillrect syscopyarea cfbimgblt sysfillrect sysimgblt fb_sys_fops cfbcopyarea drm coretemp i2c_hid hid video pinctrl_sunrisepoint pinctrl_intel acpi_pad nls_iso8859_1 e1000e ptp psmouse pps_core ahci libahci
[   34.098824] CPU: 0 PID: 1941 Comm: gem_close_race Tainted: G     U          4.4.0-160121+ #123
[   34.098824] Hardware name: Intel Corporation Skylake Client platform/Skylake AIO DDR3L RVP10, BIOS SKLSE2R1.R00.X100.B01.1509220551 09/22/2015
[   34.098825]  0000000000013e40 ffff880166c27a78 ffffffff81280d02 ffff880172c13e40
[   34.098826]  ffff880166c27a88 ffffffff810c203a ffff880166c27ac8 ffffffff814ec808
[   34.098827]  ffff88016b7c6000 ffff880166c28000 00000000000f4240 0000000000000001
[   34.098827] Call Trace:
[   34.098831]  [<ffffffff81280d02>] dump_stack+0x4b/0x79
[   34.098833]  [<ffffffff810c203a>] __schedule_bug+0x41/0x4f
[   34.098834]  [<ffffffff814ec808>] __schedule+0x5a8/0x690
[   34.098835]  [<ffffffff814ec927>] schedule+0x37/0x80
[   34.098836]  [<ffffffff814ef3fd>] schedule_hrtimeout_range_clock+0xad/0x130
[   34.098837]  [<ffffffff81090be0>] ? hrtimer_init+0x10/0x10
[   34.098838]  [<ffffffff814ef3f1>] ? schedule_hrtimeout_range_clock+0xa1/0x130
[   34.098839]  [<ffffffff814ef48e>] schedule_hrtimeout_range+0xe/0x10
[   34.098840]  [<ffffffff814eef9b>] usleep_range+0x3b/0x40
[   34.098853]  [<ffffffffa01ec109>] i915_guc_wq_check_space+0x119/0x210 [i915]
[   34.098861]  [<ffffffffa01da47c>] intel_logical_ring_alloc_request_extras+0x5c/0x70 [i915]
[   34.098869]  [<ffffffffa01cdbf1>] i915_gem_request_alloc+0x91/0x170 [i915]
[   34.098875]  [<ffffffffa01c1c07>] i915_gem_do_execbuffer.isra.25+0xbc7/0x12a0 [i915]
[   34.098882]  [<ffffffffa01cb785>] ? i915_gem_object_get_pages_gtt+0x225/0x3c0 [i915]
[   34.098889]  [<ffffffffa01d1fb6>] ? i915_gem_pwrite_ioctl+0xd6/0x9f0 [i915]
[   34.098895]  [<ffffffffa01c2e68>] i915_gem_execbuffer2+0xa8/0x250 [i915]
[   34.098900]  [<ffffffffa00f65d8>] drm_ioctl+0x258/0x4f0 [drm]
[   34.098906]  [<ffffffffa01c2dc0>] ? i915_gem_execbuffer+0x340/0x340 [i915]
[   34.098908]  [<ffffffff8111590d>] do_vfs_ioctl+0x2cd/0x4a0
[   34.098909]  [<ffffffff8111eac2>] ? __fget+0x72/0xb0
[   34.098910]  [<ffffffff81115b1c>] SyS_ioctl+0x3c/0x70
[   34.098911]  [<ffffffff814effd7>] entry_SYSCALL_64_fastpath+0x12/0x6a
[   34.100208] ------------[ cut here ]------------

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93847
Cc: Dave Gordon <david.s.gordon@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Alex Dai <yu.dai@intel.com>
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 53 +++++++++---------------------
 drivers/gpu/drm/i915/intel_guc.h           |  3 +-
 2 files changed, 18 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d7543ef..3d6a1f7 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -195,11 +195,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 	struct guc_process_desc *desc;
 	union guc_doorbell_qw db_cmp, db_exc, db_ret;
 	union guc_doorbell_qw *db;
-	void *base;
 	int attempt = 2, ret = -EAGAIN;
 
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-	desc = base + gc->proc_desc_offset;
+	desc = gc->client_base + gc->proc_desc_offset;
 
 	/* Update the tail so it is visible to GuC */
 	desc->tail = gc->wq_tail;
@@ -215,7 +213,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 		db_exc.cookie = 1;
 
 	/* pointer of current doorbell cacheline */
-	db = base + gc->doorbell_offset;
+	db = gc->client_base + gc->doorbell_offset;
 
 	while (attempt--) {
 		/* lets ring the doorbell */
@@ -244,10 +242,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 			db_exc.cookie = 1;
 	}
 
-	/* Finally, update the cached copy of the GuC's WQ head */
-	gc->wq_head = desc->head;
-
-	kunmap_atomic(base);
 	return ret;
 }
 
@@ -341,10 +335,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 			       struct i915_guc_client *client)
 {
 	struct guc_process_desc *desc;
-	void *base;
 
-	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-	desc = base + client->proc_desc_offset;
+	desc = client->client_base + client->proc_desc_offset;
 
 	memset(desc, 0, sizeof(*desc));
 
@@ -361,8 +353,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 	desc->wq_size_bytes = client->wq_size;
 	desc->wq_status = WQ_STATUS_ACTIVE;
 	desc->priority = client->priority;
-
-	kunmap_atomic(base);
 }
 
 /*
@@ -474,25 +464,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
-	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;
 
 	if (!gc)
 		return 0;
 
-	/* Quickly return if wq space is available since last time we cache the
-	 * head position. */
-	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
-		return 0;
-
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-	desc = base + gc->proc_desc_offset;
+	desc = gc->client_base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		gc->wq_head = desc->head;
-
-		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
+		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
 			ret = 0;
 			break;
 		}
@@ -501,24 +482,23 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
 			usleep_range(1000, 2000);
 	};
 
-	kunmap_atomic(base);
-
 	return ret;
 }
 
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
 				  struct drm_i915_gem_request *rq)
 {
+	struct guc_process_desc *desc;
 	struct guc_wq_item *wqi;
-	void *base;
-	u32 tail, wq_len, wq_off, space;
+	u32 tail, wq_len, wqi_off, space;
 
-	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	desc = gc->client_base + gc->proc_desc_offset;
+	space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
 	if (WARN_ON(space < sizeof(struct guc_wq_item)))
 		return -ENOSPC; /* shouldn't happen */
 
 	/* postincrement WQ tail for next time */
-	wq_off = gc->wq_tail;
+	wqi_off = gc->wq_tail;
 	gc->wq_tail += sizeof(struct guc_wq_item);
 	gc->wq_tail &= gc->wq_size - 1;
 
@@ -530,13 +510,10 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	 * workqueue buffer dw by dw.
 	 */
 	WARN_ON(sizeof(struct guc_wq_item) != 16);
-	WARN_ON(wq_off & 3);
+	WARN_ON(wqi_off & 3);
 
 	/* wq starts from the page after doorbell / process_desc */
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
-			(wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
-	wq_off &= PAGE_SIZE - 1;
-	wqi = (struct guc_wq_item *)((char *)base + wq_off);
+	wqi = gc->client_base + gc->wq_offset + wqi_off;
 
 	/* len does not include the header */
 	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
@@ -553,8 +530,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
 	wqi->fence_id = 0; /*XXX: what fence to be here */
 
-	kunmap_atomic(base);
-
 	return 0;
 }
 
@@ -675,6 +650,8 @@ static void guc_client_free(struct drm_device *dev,
 	 * Be sure to drop any locks
 	 */
 
+	vunmap(client->client_base);
+
 	gem_release_guc_obj(client->client_obj);
 
 	if (client->ctx_index != GUC_INVALID_CTX_ID) {
@@ -727,6 +704,8 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 	if (!obj)
 		goto err;
 
+	client->client_base = i915_gem_object_vmap(obj, 0, obj->pages->nents);
+
 	client->client_obj = obj;
 	client->wq_offset = GUC_DB_SIZE;
 	client->wq_size = GUC_WQ_SIZE;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 73002e9..9f08bd7 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -34,6 +34,8 @@ struct i915_guc_client {
 	uint32_t priority;
 	uint32_t ctx_index;
 
+	void *client_base;
+
 	uint32_t proc_desc_offset;
 	uint32_t doorbell_offset;
 	uint32_t cookie;
@@ -43,7 +45,6 @@ struct i915_guc_client {
 	uint32_t wq_offset;
 	uint32_t wq_size;
 	uint32_t wq_tail;
-	uint32_t wq_head;
 
 	/* GuC submission statistics & status */
 	uint64_t submissions[GUC_MAX_ENGINES_NUM];
-- 
2.5.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* Re: [PATCH 1/2] drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space
  2016-02-17 23:18 ` [PATCH 1/2] drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space yu.dai
@ 2016-02-18 10:49   ` Tvrtko Ursulin
  0 siblings, 0 replies; 6+ messages in thread
From: Tvrtko Ursulin @ 2016-02-18 10:49 UTC (permalink / raw)
  To: yu.dai, intel-gfx; +Cc: daniel.vetter


Hi,

On 17/02/16 23:18, yu.dai@intel.com wrote:
> From: Alex Dai <yu.dai@intel.com>
>
> There are several places inside the driver where a GEM object is mapped to
> kernel virtual space. The mapping is done either for the whole object or for
> a certain page range of it.
>
> This patch introduces a function, i915_gem_object_vmap, to do this job.
>
> Signed-off-by: Alex Dai <yu.dai@intel.com>
> Cc: Dave Gordon <david.s.gordon@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_cmd_parser.c  | 28 +--------------------
>   drivers/gpu/drm/i915/i915_drv.h         |  3 +++
>   drivers/gpu/drm/i915/i915_gem.c         | 44 +++++++++++++++++++++++++++++++++
>   drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 15 ++---------
>   drivers/gpu/drm/i915/intel_ringbuffer.c | 24 ++----------------
>   5 files changed, 52 insertions(+), 62 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
> index 814d894..915e8c1 100644
> --- a/drivers/gpu/drm/i915/i915_cmd_parser.c
> +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
> @@ -863,37 +863,11 @@ find_reg(const struct drm_i915_reg_descriptor *table,
>   static u32 *vmap_batch(struct drm_i915_gem_object *obj,
>   		       unsigned start, unsigned len)
>   {
> -	int i;
> -	void *addr = NULL;
> -	struct sg_page_iter sg_iter;
>   	int first_page = start >> PAGE_SHIFT;
>   	int last_page = (len + start + 4095) >> PAGE_SHIFT;
>   	int npages = last_page - first_page;
> -	struct page **pages;
> -
> -	pages = drm_malloc_ab(npages, sizeof(*pages));
> -	if (pages == NULL) {
> -		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
> -		goto finish;
> -	}
> -
> -	i = 0;
> -	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
> -		pages[i++] = sg_page_iter_page(&sg_iter);
> -		if (i == npages)
> -			break;
> -	}
> -
> -	addr = vmap(pages, i, 0, PAGE_KERNEL);
> -	if (addr == NULL) {
> -		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
> -		goto finish;
> -	}
>
> -finish:
> -	if (pages)
> -		drm_free_large(pages);
> -	return (u32*)addr;
> +	return (u32*)i915_gem_object_vmap(obj, first_page, npages);
>   }
>
>   /* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 6644c2e..5b00a6a 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2899,6 +2899,9 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data(
>   		struct drm_device *dev, const void *data, size_t size);
>   void i915_gem_free_object(struct drm_gem_object *obj);
>   void i915_gem_vma_destroy(struct i915_vma *vma);
> +void *i915_gem_object_vmap(struct drm_i915_gem_object *obj,
> +			   unsigned int first,
> +			   unsigned int npages);
>
>   /* Flags used by pin/bind&friends. */
>   #define PIN_MAPPABLE	(1<<0)
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index f68f346..a6f465b 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -5356,3 +5356,47 @@ fail:
>   	drm_gem_object_unreference(&obj->base);
>   	return ERR_PTR(ret);
>   }
> +
> +/**
> + * i915_gem_object_vmap - map a GEM obj into kernel virtual space
> + * @obj: the GEM obj to be mapped
> + * @first: index of the first page where mapping starts
> + * @npages: how many pages to be mapped, starting from first page
> + *
> + * Map a given page range of GEM obj into kernel virtual space. The caller must
> + * make sure the associated pages are gathered and pinned before calling this
> + * function. vunmap should be called after use.
> + *
> + * Returns NULL on failure.
> + */
> +void *i915_gem_object_vmap(struct drm_i915_gem_object *obj,
> +			   unsigned int first,
> +			   unsigned int npages)
> +{
> +	struct sg_page_iter sg_iter;
> +	struct page **pages;
> +	void *addr;
> +	int i;
> +
> +	if (first + npages > obj->pages->nents) {
> +		DRM_DEBUG_DRIVER("Invalid page count\n");
> +		return NULL;
> +	}
> +
> +	pages = drm_malloc_ab(npages, sizeof(*pages));
> +	if (pages == NULL) {
> +		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
> +		return NULL;
> +	}
> +
> +	i = 0;
> +	for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first)
> +		pages[i++] = sg_page_iter_page(&sg_iter);

I think there is a gotcha with for_each_sg_page where nents has to be
larger than the offset for it to return anything. In other words, it
will account all skipped pages against npages, even before it gets to
the desired offset.

Looking at the implementation, I even think nents is not equivalent to
npages when page coalescing is active, which this helper should handle.

So I think you would need to do your own checking against npages and 
just pass in the maximum nents for the object and the offset.

Basically, just add "if (i == npages) break;" like the vmap_batch
implementation was doing.
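
Something along these lines (just an untested sketch, reusing the names
already in your patch):

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first) {
		pages[i++] = sg_page_iter_page(&sg_iter);
		if (i == npages)
			break;
	}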

> +
> +	addr = vmap(pages, npages, 0, PAGE_KERNEL);
> +	if (addr == NULL)
> +		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
> +	drm_free_large(pages);
> +
> +	return addr;
> +}
> diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> index 1f3eef6..d269957 100644
> --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> @@ -110,9 +110,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
>   {
>   	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
>   	struct drm_device *dev = obj->base.dev;
> -	struct sg_page_iter sg_iter;
> -	struct page **pages;
> -	int ret, i;
> +	int ret;
>
>   	ret = i915_mutex_lock_interruptible(dev);
>   	if (ret)
> @@ -131,16 +129,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
>
>   	ret = -ENOMEM;
>
> -	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
> -	if (pages == NULL)
> -		goto err_unpin;
> -
> -	i = 0;
> -	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
> -		pages[i++] = sg_page_iter_page(&sg_iter);
> -
> -	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
> -	drm_free_large(pages);
> +	obj->dma_buf_vmapping = i915_gem_object_vmap(obj, 0, obj->pages->nents);

This is also not correct: obj->pages->nents can be smaller than
obj->base.size >> PAGE_SHIFT when coalescing is used. So I think you
need to pass in the latter here.
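
I.e. something like this (untested):

	obj->dma_buf_vmapping =
		i915_gem_object_vmap(obj, 0, obj->base.size >> PAGE_SHIFT);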

>
>   	if (!obj->dma_buf_vmapping)
>   		goto err_unpin;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 45ce45a..fe6d14a 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2064,27 +2064,6 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>   	i915_gem_object_ggtt_unpin(ringbuf->obj);
>   }
>
> -static u32 *vmap_obj(struct drm_i915_gem_object *obj)
> -{
> -	struct sg_page_iter sg_iter;
> -	struct page **pages;
> -	void *addr;
> -	int i;
> -
> -	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
> -	if (pages == NULL)
> -		return NULL;
> -
> -	i = 0;
> -	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
> -		pages[i++] = sg_page_iter_page(&sg_iter);
> -
> -	addr = vmap(pages, i, 0, PAGE_KERNEL);
> -	drm_free_large(pages);
> -
> -	return addr;
> -}
> -
>   int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>   				     struct intel_ringbuffer *ringbuf)
>   {
> @@ -2103,7 +2082,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>   			return ret;
>   		}
>
> -		ringbuf->virtual_start = vmap_obj(obj);
> +		ringbuf->virtual_start =
> +			i915_gem_object_vmap(obj, 0, obj->pages->nents);
>   		if (ringbuf->virtual_start == NULL) {
>   			i915_gem_object_ggtt_unpin(obj);
>   			return -ENOMEM;
>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* Re: [PATCH 2/2] drm/i915/guc: Simplify code by keeping vmap of guc_client object
  2016-02-17 23:18 ` [PATCH 2/2] drm/i915/guc: Simplify code by keeping vmap of guc_client object yu.dai
@ 2016-02-18 11:12   ` Tvrtko Ursulin
  0 siblings, 0 replies; 6+ messages in thread
From: Tvrtko Ursulin @ 2016-02-18 11:12 UTC (permalink / raw)
  To: yu.dai, intel-gfx; +Cc: daniel.vetter


Hi,

On 17/02/16 23:18, yu.dai@intel.com wrote:
> From: Alex Dai <yu.dai@intel.com>
>
> The GuC client object is always pinned during its life cycle. We cache
> the vmap of the client object, which includes the guc_process_desc, the
> doorbell and the work queue. By doing so, we can simplify the code where
> the driver communicates with the GuC.
>
> As a result, this patch removes the kmap_atomic in wq_check_space, where
> usleep_range could be called while the kmap_atomic mapping is held. This
> fixes the issue below.
>
> [   34.098798] BUG: scheduling while atomic: gem_close_race/1941/0x00000002
> [   34.098822] Modules linked in: hid_generic usbhid i915 asix usbnet libphy mii i2c_algo_bit drm_kms_helper cfbfillrect syscopyarea cfbimgblt sysfillrect sysimgblt fb_sys_fops cfbcopyarea drm coretemp i2c_hid hid video pinctrl_sunrisepoint pinctrl_intel acpi_pad nls_iso8859_1 e1000e ptp psmouse pps_core ahci libahci
> [   34.098824] CPU: 0 PID: 1941 Comm: gem_close_race Tainted: G     U          4.4.0-160121+ #123
> [   34.098824] Hardware name: Intel Corporation Skylake Client platform/Skylake AIO DDR3L RVP10, BIOS SKLSE2R1.R00.X100.B01.1509220551 09/22/2015
> [   34.098825]  0000000000013e40 ffff880166c27a78 ffffffff81280d02 ffff880172c13e40
> [   34.098826]  ffff880166c27a88 ffffffff810c203a ffff880166c27ac8 ffffffff814ec808
> [   34.098827]  ffff88016b7c6000 ffff880166c28000 00000000000f4240 0000000000000001
> [   34.098827] Call Trace:
> [   34.098831]  [<ffffffff81280d02>] dump_stack+0x4b/0x79
> [   34.098833]  [<ffffffff810c203a>] __schedule_bug+0x41/0x4f
> [   34.098834]  [<ffffffff814ec808>] __schedule+0x5a8/0x690
> [   34.098835]  [<ffffffff814ec927>] schedule+0x37/0x80
> [   34.098836]  [<ffffffff814ef3fd>] schedule_hrtimeout_range_clock+0xad/0x130
> [   34.098837]  [<ffffffff81090be0>] ? hrtimer_init+0x10/0x10
> [   34.098838]  [<ffffffff814ef3f1>] ? schedule_hrtimeout_range_clock+0xa1/0x130
> [   34.098839]  [<ffffffff814ef48e>] schedule_hrtimeout_range+0xe/0x10
> [   34.098840]  [<ffffffff814eef9b>] usleep_range+0x3b/0x40
> [   34.098853]  [<ffffffffa01ec109>] i915_guc_wq_check_space+0x119/0x210 [i915]
> [   34.098861]  [<ffffffffa01da47c>] intel_logical_ring_alloc_request_extras+0x5c/0x70 [i915]
> [   34.098869]  [<ffffffffa01cdbf1>] i915_gem_request_alloc+0x91/0x170 [i915]
> [   34.098875]  [<ffffffffa01c1c07>] i915_gem_do_execbuffer.isra.25+0xbc7/0x12a0 [i915]
> [   34.098882]  [<ffffffffa01cb785>] ? i915_gem_object_get_pages_gtt+0x225/0x3c0 [i915]
> [   34.098889]  [<ffffffffa01d1fb6>] ? i915_gem_pwrite_ioctl+0xd6/0x9f0 [i915]
> [   34.098895]  [<ffffffffa01c2e68>] i915_gem_execbuffer2+0xa8/0x250 [i915]
> [   34.098900]  [<ffffffffa00f65d8>] drm_ioctl+0x258/0x4f0 [drm]
> [   34.098906]  [<ffffffffa01c2dc0>] ? i915_gem_execbuffer+0x340/0x340 [i915]
> [   34.098908]  [<ffffffff8111590d>] do_vfs_ioctl+0x2cd/0x4a0
> [   34.098909]  [<ffffffff8111eac2>] ? __fget+0x72/0xb0
> [   34.098910]  [<ffffffff81115b1c>] SyS_ioctl+0x3c/0x70
> [   34.098911]  [<ffffffff814effd7>] entry_SYSCALL_64_fastpath+0x12/0x6a
> [   34.100208] ------------[ cut here ]------------
>
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93847
> Cc: Dave Gordon <david.s.gordon@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Signed-off-by: Alex Dai <yu.dai@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_guc_submission.c | 53 +++++++++---------------------
>   drivers/gpu/drm/i915/intel_guc.h           |  3 +-
>   2 files changed, 18 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index d7543ef..3d6a1f7 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -195,11 +195,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   	struct guc_process_desc *desc;
>   	union guc_doorbell_qw db_cmp, db_exc, db_ret;
>   	union guc_doorbell_qw *db;
> -	void *base;
>   	int attempt = 2, ret = -EAGAIN;
>
> -	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
> -	desc = base + gc->proc_desc_offset;
> +	desc = gc->client_base + gc->proc_desc_offset;
>
>   	/* Update the tail so it is visible to GuC */
>   	desc->tail = gc->wq_tail;
> @@ -215,7 +213,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   		db_exc.cookie = 1;
>
>   	/* pointer of current doorbell cacheline */
> -	db = base + gc->doorbell_offset;
> +	db = gc->client_base + gc->doorbell_offset;
>
>   	while (attempt--) {
>   		/* lets ring the doorbell */
> @@ -244,10 +242,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   			db_exc.cookie = 1;
>   	}
>
> -	/* Finally, update the cached copy of the GuC's WQ head */
> -	gc->wq_head = desc->head;
> -
> -	kunmap_atomic(base);
>   	return ret;
>   }
>
> @@ -341,10 +335,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
>   			       struct i915_guc_client *client)
>   {
>   	struct guc_process_desc *desc;
> -	void *base;
>
> -	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
> -	desc = base + client->proc_desc_offset;
> +	desc = client->client_base + client->proc_desc_offset;
>
>   	memset(desc, 0, sizeof(*desc));
>
> @@ -361,8 +353,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
>   	desc->wq_size_bytes = client->wq_size;
>   	desc->wq_status = WQ_STATUS_ACTIVE;
>   	desc->priority = client->priority;
> -
> -	kunmap_atomic(base);
>   }
>
>   /*
> @@ -474,25 +464,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
>   int i915_guc_wq_check_space(struct i915_guc_client *gc)
>   {
>   	struct guc_process_desc *desc;
> -	void *base;
>   	u32 size = sizeof(struct guc_wq_item);
>   	int ret = -ETIMEDOUT, timeout_counter = 200;
>
>   	if (!gc)
>   		return 0;
>
> -	/* Quickly return if wq space is available since last time we cache the
> -	 * head position. */
> -	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
> -		return 0;
> -
> -	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
> -	desc = base + gc->proc_desc_offset;
> +	desc = gc->client_base + gc->proc_desc_offset;
>
>   	while (timeout_counter-- > 0) {
> -		gc->wq_head = desc->head;
> -
> -		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
> +		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
>   			ret = 0;
>   			break;
>   		}
> @@ -501,24 +482,23 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
>   			usleep_range(1000, 2000);
>   	};
>
> -	kunmap_atomic(base);
> -
>   	return ret;
>   }
>
>   static int guc_add_workqueue_item(struct i915_guc_client *gc,
>   				  struct drm_i915_gem_request *rq)
>   {
> +	struct guc_process_desc *desc;
>   	struct guc_wq_item *wqi;
> -	void *base;
> -	u32 tail, wq_len, wq_off, space;
> +	u32 tail, wq_len, wqi_off, space;
>
> -	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
> +	desc = gc->client_base + gc->proc_desc_offset;
> +	space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
>   	if (WARN_ON(space < sizeof(struct guc_wq_item)))
>   		return -ENOSPC; /* shouldn't happen */
>
>   	/* postincrement WQ tail for next time */
> -	wq_off = gc->wq_tail;
> +	wqi_off = gc->wq_tail;
>   	gc->wq_tail += sizeof(struct guc_wq_item);
>   	gc->wq_tail &= gc->wq_size - 1;
>
> @@ -530,13 +510,10 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
>   	 * workqueue buffer dw by dw.
>   	 */
>   	WARN_ON(sizeof(struct guc_wq_item) != 16);
> -	WARN_ON(wq_off & 3);
> +	WARN_ON(wqi_off & 3);
>
>   	/* wq starts from the page after doorbell / process_desc */
> -	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
> -			(wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
> -	wq_off &= PAGE_SIZE - 1;
> -	wqi = (struct guc_wq_item *)((char *)base + wq_off);
> +	wqi = gc->client_base + gc->wq_offset + wqi_off;
>
>   	/* len does not include the header */
>   	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
> @@ -553,8 +530,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
>   	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
>   	wqi->fence_id = 0; /*XXX: what fence to be here */
>
> -	kunmap_atomic(base);
> -
>   	return 0;
>   }
>
> @@ -675,6 +650,8 @@ static void guc_client_free(struct drm_device *dev,
>   	 * Be sure to drop any locks
>   	 */
>
> +	vunmap(client->client_base);
> +
>   	gem_release_guc_obj(client->client_obj);
>
>   	if (client->ctx_index != GUC_INVALID_CTX_ID) {
> @@ -727,6 +704,8 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
>   	if (!obj)
>   		goto err;
>
> +	client->client_base = i915_gem_object_vmap(obj, 0, obj->pages->nents);

As per the comments on the previous patch, obj->pages->nents needs to be
obj->base.size >> PAGE_SHIFT.

And also error checking?
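
Perhaps something along these lines (untested sketch, using the existing
err label in guc_client_alloc):

	client->client_base =
		i915_gem_object_vmap(obj, 0, obj->base.size >> PAGE_SHIFT);
	if (!client->client_base)
		goto err;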

> +
>   	client->client_obj = obj;
>   	client->wq_offset = GUC_DB_SIZE;
>   	client->wq_size = GUC_WQ_SIZE;
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 73002e9..9f08bd7 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -34,6 +34,8 @@ struct i915_guc_client {
>   	uint32_t priority;
>   	uint32_t ctx_index;
>
> +	void *client_base;
> +
>   	uint32_t proc_desc_offset;
>   	uint32_t doorbell_offset;
>   	uint32_t cookie;
> @@ -43,7 +45,6 @@ struct i915_guc_client {
>   	uint32_t wq_offset;
>   	uint32_t wq_size;
>   	uint32_t wq_tail;
> -	uint32_t wq_head;
>
>   	/* GuC submission statistics & status */
>   	uint64_t submissions[GUC_MAX_ENGINES_NUM];
>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* ✗ Fi.CI.BAT: failure for Add i915_gem_object_vmap
  2016-02-17 23:18 [PATCH 0/2] Add i915_gem_object_vmap yu.dai
  2016-02-17 23:18 ` [PATCH 1/2] drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space yu.dai
  2016-02-17 23:18 ` [PATCH 2/2] drm/i915/guc: Simplify code by keeping vmap of guc_client object yu.dai
@ 2016-02-19 14:22 ` Patchwork
  2 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2016-02-19 14:22 UTC (permalink / raw)
  To: yu.dai; +Cc: intel-gfx

== Summary ==

Series 3554v1 Add i915_gem_object_vmap
http://patchwork.freedesktop.org/api/1.0/series/3554/revisions/1/mbox/

Test gem_ctx_param_basic:
        Subgroup basic-default:
                pass       -> DMESG-WARN (bdw-nuci7)
        Subgroup invalid-param-set:
                pass       -> DMESG-WARN (bdw-nuci7)
Test gem_ringfill:
        Subgroup basic-default-forked:
                pass       -> INCOMPLETE (bdw-ultra)
        Subgroup basic-default-interruptible:
                pass       -> INCOMPLETE (bdw-nuci7)
Test kms_addfb_basic:
        Subgroup basic-x-tiled:
                pass       -> DMESG-WARN (bdw-nuci7)
        Subgroup bo-too-small:
                pass       -> DMESG-WARN (bdw-ultra)
        Subgroup too-wide:
                pass       -> DMESG-WARN (bdw-nuci7)
Test kms_flip:
        Subgroup basic-flip-vs-modeset:
                pass       -> DMESG-WARN (ilk-hp8440p) UNSTABLE
Test kms_pipe_crc_basic:
        Subgroup nonblocking-crc-pipe-b-frame-sequence:
                pass       -> DMESG-WARN (bdw-ultra)
        Subgroup read-crc-pipe-b-frame-sequence:
                pass       -> INCOMPLETE (ilk-hp8440p)
        Subgroup suspend-read-crc-pipe-a:
                dmesg-warn -> PASS       (skl-i5k-2) UNSTABLE
        Subgroup suspend-read-crc-pipe-b:
                pass       -> DMESG-WARN (skl-i5k-2) UNSTABLE
Test pm_rpm:
        Subgroup basic-rte:
                dmesg-warn -> PASS       (byt-nuc) UNSTABLE

bdw-nuci7        total:5    pass:0    dwarn:4   dfail:0   fail:0   skip:0  
bdw-ultra        total:4    pass:0    dwarn:2   dfail:0   fail:0   skip:1  
bsw-nuc-2        total:167  pass:135  dwarn:2   dfail:0   fail:0   skip:30 
byt-nuc          total:167  pass:142  dwarn:0   dfail:0   fail:0   skip:25 
hsw-gt2          total:167  pass:156  dwarn:0   dfail:0   fail:1   skip:10 
ilk-hp8440p      total:66   pass:42   dwarn:1   dfail:0   fail:1   skip:21 
ivb-t430s        total:167  pass:152  dwarn:0   dfail:0   fail:1   skip:14 
skl-i5k-2        total:167  pass:150  dwarn:1   dfail:0   fail:0   skip:16 
snb-dellxps      total:167  pass:144  dwarn:0   dfail:0   fail:1   skip:22 

Results at /archive/results/CI_IGT_test/Patchwork_1430/

631053ba117c294f0cdfe704718a040b4406a240 drm-intel-nightly: 2016y-02m-17d-21h-05m-38s UTC integration manifest
d8d12a8e90e879111de5e89ac1fecce5fee0078b drm/i915/guc: Simplify code by keeping vmap of guc_client object
9e775f7f4ea7924154774b9a9b850b70ed4a4a7c drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
