* [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs
@ 2016-06-18 15:20 Chris Wilson
  2016-06-18 15:20 ` [PATCH 2/3] drm/vgem: Fix mmapping Chris Wilson
                   ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: Chris Wilson @ 2016-06-18 15:20 UTC (permalink / raw)
  To: dri-devel

Rendering operations to the dma-buf are tracked implicitly via the
reservation_object (dmabuf->resv). The dmabuf sync ioctl allows
userspace to wait upon outstanding rendering and prepare the object for
CPU access (provided by the prime dma_buf_ops.begin_cpu_access). Fill
this out for the generic drm_gem_prime by waiting on outstanding
rendering via dmabuf->resv. (This offers an alternative to using poll
that is consistent with other drivers that may need to do more work to
prepare the object for access by the CPU.)
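
Userspace reaches begin_cpu_access through the dma-buf sync ioctl; a
minimal sketch of that path (UAPI from linux/dma-buf.h, error handling
elided, dmabuf_fd assumed already exported by some driver):

  #include <linux/dma-buf.h>
  #include <sys/ioctl.h>

  struct dma_buf_sync sync = {
          /* CPU write access; the kernel maps this to DMA_TO_DEVICE */
          .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
  };

  /* blocks until rendering tracked in dmabuf->resv has completed */
  ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);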

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/drm_prime.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 780589b420a4..479ff7cc3634 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -28,6 +28,7 @@
 
 #include <linux/export.h>
 #include <linux/dma-buf.h>
+#include <linux/reservation.h>
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
@@ -288,6 +289,22 @@ static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 	return dev->driver->gem_prime_mmap(obj, vma);
 }
 
+static int drm_gem_begin_cpu_access(struct dma_buf *dma_buf,
+				    enum dma_data_direction direction)
+{
+	bool write = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_TO_DEVICE);
+	struct reservation_object *resv = dma_buf->resv;
+	long ret;
+
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 	.attach = drm_gem_map_attach,
 	.detach = drm_gem_map_detach,
@@ -301,6 +318,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
+	.begin_cpu_access = drm_gem_begin_cpu_access,
 };
 
 /**
-- 
2.8.1

* [PATCH 2/3] drm/vgem: Fix mmapping
  2016-06-18 15:20 [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs Chris Wilson
@ 2016-06-18 15:20 ` Chris Wilson
  2016-06-21 21:04   ` Chris Wilson
  2016-06-18 15:20 ` [PATCH 3/3] drm/vgem: Enable dmabuf interface for export Chris Wilson
  2016-06-18 22:37 ` [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs Daniel Vetter
  2 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2016-06-18 15:20 UTC (permalink / raw)
  To: dri-devel

The vGEM mmap code has bitrotted slightly and now immediately hits a
BUG(). Since vGEM was last updated, the core has grown new GEM
facilities that provide the common functions, so let's use those here.
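
For reference, the mmap path being repaired, as userspace drives it (a
hedged sketch using the dumb-buffer UAPI from drm_mode.h; handle and
size assumed created beforehand, error handling elided):

  struct drm_mode_map_dumb map = { .handle = handle };

  ioctl(vgem_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
             vgem_fd, map.offset);
  /* faulting on ptr now pulls pages from the GEM object's shmem file */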

Testcase: igt/vgem_basic/mmap
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/vgem/vgem_drv.c | 163 +++++++++++++++-------------------------
 drivers/gpu/drm/vgem/vgem_drv.h |   6 --
 2 files changed, 61 insertions(+), 108 deletions(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 1b4cc8b27080..4747b7f98e7a 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -42,81 +42,39 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
-	drm_gem_put_pages(&obj->base, obj->pages, false, false);
-	obj->pages = NULL;
-}
-
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
 	drm_gem_free_mmap_offset(obj);
-
-	if (vgem_obj->use_dma_buf && obj->dma_buf) {
-		dma_buf_put(obj->dma_buf);
-		obj->dma_buf = NULL;
-	}
-
 	drm_gem_object_release(obj);
-
-	if (vgem_obj->pages)
-		vgem_gem_put_pages(vgem_obj);
-
-	vgem_obj->pages = NULL;
-
 	kfree(vgem_obj);
 }
 
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
-	struct page **pages;
-
-	if (obj->pages || obj->use_dma_buf)
-		return 0;
-
-	pages = drm_gem_get_pages(&obj->base);
-	if (IS_ERR(pages)) {
-		return PTR_ERR(pages);
-	}
-
-	obj->pages = pages;
-
-	return 0;
-}
-
 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
-	loff_t num_pages;
-	pgoff_t page_offset;
-	int ret;
-
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-		PAGE_SHIFT;
-
-	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
-	if (page_offset > num_pages)
-		return VM_FAULT_SIGBUS;
-
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
-			     obj->pages[page_offset]);
-	switch (ret) {
-	case 0:
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	case -EBUSY:
-		return VM_FAULT_RETRY;
-	case -EFAULT:
-	case -EINVAL:
-		return VM_FAULT_SIGBUS;
-	default:
-		WARN_ON(1);
-		return VM_FAULT_SIGBUS;
+	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	struct page *page;
+
+	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
+	if (!IS_ERR(page)) {
+		vmf->page = page;
+		return VM_FAULT_LOCKED;
+	} else switch (PTR_ERR(page)) {
+		case -ENOSPC:
+		case -ENOMEM:
+			return VM_FAULT_OOM;
+		case -EBUSY:
+			return VM_FAULT_RETRY;
+		case -EFAULT:
+		case -EINVAL:
+			return VM_FAULT_SIGBUS;
+		default:
+			WARN_ON_ONCE(PTR_ERR(page));
+			return VM_FAULT_SIGBUS;
 	}
 }
 
@@ -134,57 +92,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 					      unsigned long size)
 {
 	struct drm_vgem_gem_object *obj;
-	struct drm_gem_object *gem_object;
-	int err;
-
-	size = roundup(size, PAGE_SIZE);
+	int ret;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
-	gem_object = &obj->base;
-
-	err = drm_gem_object_init(dev, gem_object, size);
-	if (err)
-		goto out;
-
-	err = vgem_gem_get_pages(obj);
-	if (err)
-		goto out;
-
-	err = drm_gem_handle_create(file, gem_object, handle);
-	if (err)
-		goto handle_out;
+	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+	if (ret)
+		goto err_free;
 
-	drm_gem_object_unreference_unlocked(gem_object);
+	ret = drm_gem_handle_create(file, &obj->base, handle);
+	drm_gem_object_unreference_unlocked(&obj->base);
+	if (ret)
+		goto err;
 
-	return gem_object;
+	return &obj->base;
 
-handle_out:
-	drm_gem_object_release(gem_object);
-out:
+err_free:
 	kfree(obj);
-	return ERR_PTR(err);
+err:
+	return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 				struct drm_mode_create_dumb *args)
 {
 	struct drm_gem_object *gem_object;
-	uint64_t size;
-	uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+	u64 pitch, size;
 
+	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 	size = args->height * pitch;
 	if (size == 0)
 		return -EINVAL;
 
 	gem_object = vgem_gem_create(dev, file, &args->handle, size);
-
-	if (IS_ERR(gem_object)) {
-		DRM_DEBUG_DRIVER("object creation failed\n");
+	if (IS_ERR(gem_object))
 		return PTR_ERR(gem_object);
-	}
 
 	args->size = gem_object->size;
 	args->pitch = pitch;
@@ -194,26 +138,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	return 0;
 }
 
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
-		      uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+			     uint32_t handle, uint64_t *offset)
 {
-	int ret = 0;
 	struct drm_gem_object *obj;
+	int ret;
 
 	obj = drm_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
+	if (!obj->filp) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
 	ret = drm_gem_create_mmap_offset(obj);
 	if (ret)
 		goto unref;
 
-	BUG_ON(!obj->filp);
-
-	obj->filp->private_data = obj;
-
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
-
 unref:
 	drm_gem_object_unreference_unlocked(obj);
 
@@ -223,10 +167,26 @@ unref:
 static struct drm_ioctl_desc vgem_ioctls[] = {
 };
 
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long flags = vma->vm_flags;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	/* Keep the WC mapping set up by drm_gem_mmap(), but our pages
+	 * are ordinary and not special.
+	 */
+	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+	return 0;
+}
+
 static const struct file_operations vgem_driver_fops = {
 	.owner		= THIS_MODULE,
 	.open		= drm_open,
-	.mmap		= drm_gem_mmap,
+	.mmap		= vgem_mmap,
 	.poll		= drm_poll,
 	.read		= drm_read,
 	.unlocked_ioctl = drm_ioctl,
@@ -248,7 +208,7 @@ static struct drm_driver vgem_driver = {
 	.minor	= DRIVER_MINOR,
 };
 
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
 
 static int __init vgem_init(void)
 {
@@ -263,7 +223,6 @@ static int __init vgem_init(void)
 	drm_dev_set_unique(vgem_device, "vgem");
 
 	ret  = drm_dev_register(vgem_device, 0);
-
 	if (ret)
 		goto out_unref;
 
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index e9f92f7ee275..988cbaae7588 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -35,12 +35,6 @@
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
-	struct page **pages;
-	bool use_dma_buf;
 };
 
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
-
 #endif
-- 
2.8.1

* [PATCH 3/3] drm/vgem: Enable dmabuf interface for export
  2016-06-18 15:20 [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs Chris Wilson
  2016-06-18 15:20 ` [PATCH 2/3] drm/vgem: Fix mmapping Chris Wilson
@ 2016-06-18 15:20 ` Chris Wilson
  2016-06-19  6:18   ` [PATCH v2] " Chris Wilson
  2016-06-18 22:37 ` [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs Daniel Vetter
  2 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2016-06-18 15:20 UTC (permalink / raw)
  To: dri-devel

Enable the standard GEM dma-buf interface provided by the DRM core, but
only for exporting the VGEM object. This allows passing around the VGEM
objects created from the dumb interface and using them as sources
elsewhere. Creating a VGEM object for a foreign handle is not supported.
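
A hedged sketch of the export flow this enables (dumb-buffer UAPI plus
libdrm's drmPrimeHandleToFD(); error handling elided):

  struct drm_mode_create_dumb create = {
          .width = 1024, .height = 768, .bpp = 32, /* example geometry */
  };
  int dmabuf_fd;

  ioctl(vgem_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
  /* export the vgem BO as a dma-buf for another driver to import */
  drmPrimeHandleToFD(vgem_fd, create.handle, DRM_CLOEXEC, &dmabuf_fd);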

Testcase: igt/vgem_basic/dmabuf-mmap
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/vgem/vgem_drv.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 4747b7f98e7a..27f9a7625a87 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -193,14 +193,41 @@ static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->filp)
+		return -ENODEV;
+
+	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	if (ret)
+		return ret;
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->filp);
+
+	return 0;
+}
+
 static struct drm_driver vgem_driver = {
-	.driver_features		= DRIVER_GEM,
+	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
 	.fops				= &vgem_driver_fops,
+
 	.dumb_create			= vgem_gem_dumb_create,
 	.dumb_map_offset		= vgem_gem_dumb_map,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_mmap = vgem_prime_mmap,
+
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
 	.date	= DRIVER_DATE,
-- 
2.8.1

* Re: [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs
  2016-06-18 15:20 [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs Chris Wilson
  2016-06-18 15:20 ` [PATCH 2/3] drm/vgem: Fix mmapping Chris Wilson
  2016-06-18 15:20 ` [PATCH 3/3] drm/vgem: Enable dmabuf interface for export Chris Wilson
@ 2016-06-18 22:37 ` Daniel Vetter
  2 siblings, 0 replies; 12+ messages in thread
From: Daniel Vetter @ 2016-06-18 22:37 UTC (permalink / raw)
  To: Chris Wilson; +Cc: dri-devel

On Sat, Jun 18, 2016 at 04:20:47PM +0100, Chris Wilson wrote:
> Rendering operations to the dma-buf are tracked implicitly via the
> reservation_object (dmabuf->resv). The dmabuf sync ioctl allows
> userspace to wait upon outstanding rendering and prepare the object for
> CPU access (provided by the prime dma_buf_ops.begin_cpu_access). Fill
> this out for the generic drm_gem_prime by waiting on outstanding
> rendering via dmabuf->resv. (This offers an alternative to using poll
> that is consistent with other drivers that may need to do more work to
> prepare the object for access by the CPU.)
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/drm_prime.c | 18 ++++++++++++++++++
>  1 file changed, 18 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
> index 780589b420a4..479ff7cc3634 100644
> --- a/drivers/gpu/drm/drm_prime.c
> +++ b/drivers/gpu/drm/drm_prime.c
> @@ -28,6 +28,7 @@
>  
>  #include <linux/export.h>
>  #include <linux/dma-buf.h>
> +#include <linux/reservation.h>
>  #include <drm/drmP.h>
>  #include <drm/drm_gem.h>
>  
> @@ -288,6 +289,22 @@ static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
>  	return dev->driver->gem_prime_mmap(obj, vma);
>  }
>  
> +static int drm_gem_begin_cpu_access(struct dma_buf *dma_buf,
> +				    enum dma_data_direction direction)
> +{
> +	bool write = (direction == DMA_BIDIRECTIONAL ||
> +		      direction == DMA_TO_DEVICE);
> +	struct reservation_object *resv = dma_buf->resv;
> +	long ret;
> +
> +	ret = reservation_object_wait_timeout_rcu(resv, write, true,
> +						  MAX_SCHEDULE_TIMEOUT);
> +	if (ret < 0)
> +		return ret;
> +
> +	return 0;
> +}

Maybe we even want this in the dma-buf layer as default function if
nothing else is provided? After all this one here is entirely generic, and
uses neither gem nor even drm_prime knowledge. Either way:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
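
For reference, that fallback could look something like this inside
dma_buf_begin_cpu_access() (a rough, untested sketch against the same
reservation API, not part of this series):

	if (!dmabuf->ops->begin_cpu_access) {
		bool write = (direction == DMA_BIDIRECTIONAL ||
			      direction == DMA_TO_DEVICE);
		long lret;

		lret = reservation_object_wait_timeout_rcu(dmabuf->resv,
							   write, true,
							   MAX_SCHEDULE_TIMEOUT);
		return lret < 0 ? lret : 0;
	}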

> +
>  static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
>  	.attach = drm_gem_map_attach,
>  	.detach = drm_gem_map_detach,
> @@ -301,6 +318,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
>  	.mmap = drm_gem_dmabuf_mmap,
>  	.vmap = drm_gem_dmabuf_vmap,
>  	.vunmap = drm_gem_dmabuf_vunmap,
> +	.begin_cpu_access = drm_gem_begin_cpu_access,
>  };
>  
>  /**
> -- 
> 2.8.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

* [PATCH v2] drm/vgem: Enable dmabuf interface for export
  2016-06-18 15:20 ` [PATCH 3/3] drm/vgem: Enable dmabuf interface for export Chris Wilson
@ 2016-06-19  6:18   ` Chris Wilson
  2016-06-20 20:04     ` [PATCH v3 1/3] dmabuf Chris Wilson
  2016-06-20 20:06     ` [PATCH v3] drm/vgem: Enable dmabuf interface for export Chris Wilson
  0 siblings, 2 replies; 12+ messages in thread
From: Chris Wilson @ 2016-06-19  6:18 UTC (permalink / raw)
  To: dri-devel

Enable the standard GEM dma-buf interface provided by the DRM core, but
only for exporting the VGEM object. This allows passing around the VGEM
objects created from the dumb interface and using them as sources
elsewhere. Creating a VGEM object for a foreign handle is not supported.
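
Relative to v1, this also wires up get_sg_table, vmap and vunmap, so an
importer can, for instance, mmap the exported fd directly (a hedged
sketch; dmabuf_fd and size assumed known, error handling elided):

  /* routed via the dma-buf fops -> .gem_prime_mmap -> the shmem pages */
  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
             dmabuf_fd, 0);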

Testcase: igt/vgem_basic/dmabuf-mmap
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/vgem/vgem_drv.c | 101 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 100 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 4747b7f98e7a..32e2f51ed55f 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -193,14 +193,113 @@ static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct address_space *mapping = file_inode(obj->filp)->i_mapping;
+	long n_pages = obj->size >> PAGE_SHIFT, i;
+	struct sg_table *st;
+	struct scatterlist *sg;
+
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	if (sg_alloc_table(st, n_pages, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
+	for (i = 0; i < n_pages; i++) {
+		struct page *page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+			sg_mark_end(sg);
+			goto err_unwind;
+		}
+
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+		sg = sg_next(sg);
+	}
+
+	return st;
+
+err_unwind:
+	for (sg = st->sgl; sg && sg_page(sg); sg = sg_next(sg))
+		put_page(sg_page(sg));
+	sg_free_table(st);
+	kfree(st);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct address_space *mapping = file_inode(obj->filp)->i_mapping;
+	long n_pages = obj->size >> PAGE_SHIFT, i;
+	struct page **pages;
+	void *addr = NULL;
+
+	pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+	if (!pages)
+		return NULL;
+
+	for (i = 0; i < n_pages; i++) {
+		pages[i] = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(pages[i]))
+			goto out;
+	}
+
+	addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
+out:
+	while (i--)
+		put_page(pages[i]);
+	drm_free_large(pages);
+
+	return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->filp)
+		return -ENODEV;
+
+	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	if (ret)
+		return ret;
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->filp);
+
+	return 0;
+}
+
 static struct drm_driver vgem_driver = {
-	.driver_features		= DRIVER_GEM,
+	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
 	.fops				= &vgem_driver_fops,
+
 	.dumb_create			= vgem_gem_dumb_create,
 	.dumb_map_offset		= vgem_gem_dumb_map,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
+	.gem_prime_vmap = vgem_prime_vmap,
+	.gem_prime_vunmap = vgem_prime_vunmap,
+	.gem_prime_mmap = vgem_prime_mmap,
+
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
 	.date	= DRIVER_DATE,
-- 
2.8.1

* [PATCH v3 1/3] dmabuf
  2016-06-19  6:18   ` [PATCH v2] " Chris Wilson
@ 2016-06-20 20:04     ` Chris Wilson
  2016-06-20 20:04       ` [PATCH v3 2/3] drm: Prevent NULL deref in drm_name_info() Chris Wilson
                         ` (2 more replies)
  2016-06-20 20:06     ` [PATCH v3] drm/vgem: Enable dmabuf interface for export Chris Wilson
  1 sibling, 3 replies; 12+ messages in thread
From: Chris Wilson @ 2016-06-20 20:04 UTC (permalink / raw)
  To: dri-devel

---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 49e7ff9840bd..c3f177231f6a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -332,6 +332,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
 	obj->base.import_attach = attach;
+	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain = 0;
 
 	return &obj->base;
 
-- 
2.8.1

* [PATCH v3 2/3] drm: Prevent NULL deref in drm_name_info()
  2016-06-20 20:04     ` [PATCH v3 1/3] dmabuf Chris Wilson
@ 2016-06-20 20:04       ` Chris Wilson
  2016-06-20 20:04       ` [PATCH v3 3/3] meh Chris Wilson
  2016-06-20 20:06       ` [PATCH v3 1/3] dmabuf Chris Wilson
  2 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2016-06-20 20:04 UTC (permalink / raw)
  To: dri-devel; +Cc: Daniel Vetter

If a driver does not have a parent, or never sets the unique name for
itself, then we may proceed to chase a NULL dereference through
debugfs/.../name.
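
With the rewrite below, the file only prints the fields that actually
exist; for a bare device such as vgem (no parent device, no master) the
output would be something like

  vgem unique=vgem

while fully populated drivers keep their dev= and master= fields.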

Testcase: igt/vgem_basic/debugfs
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/drm_info.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 0090d5987801..e2d2543d5bd0 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -51,17 +51,16 @@ int drm_name_info(struct seq_file *m, void *data)
 	struct drm_minor *minor = node->minor;
 	struct drm_device *dev = minor->dev;
 	struct drm_master *master = minor->master;
-	if (!master)
-		return 0;
-
-	if (master->unique) {
-		seq_printf(m, "%s %s %s\n",
-			   dev->driver->name,
-			   dev_name(dev->dev), master->unique);
-	} else {
-		seq_printf(m, "%s %s\n",
-			   dev->driver->name, dev_name(dev->dev));
-	}
+
+	seq_printf(m, "%s", dev->driver->name);
+	if (dev->dev)
+		seq_printf(m, " dev=%s", dev_name(dev->dev));
+	if (master && master->unique)
+		seq_printf(m, " master=%s", master->unique);
+	if (dev->unique)
+		seq_printf(m, " unique=%s", dev->unique);
+	seq_printf(m, "\n");
+
 	return 0;
 }
 
-- 
2.8.1

* [PATCH v3 3/3] meh
  2016-06-20 20:04     ` [PATCH v3 1/3] dmabuf Chris Wilson
  2016-06-20 20:04       ` [PATCH v3 2/3] drm: Prevent NULL deref in drm_name_info() Chris Wilson
@ 2016-06-20 20:04       ` Chris Wilson
  2016-06-20 20:06       ` [PATCH v3 1/3] dmabuf Chris Wilson
  2 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2016-06-20 20:04 UTC (permalink / raw)
  To: dri-devel

---
 drivers/gpu/drm/i915/i915_debugfs.c        | 162 +---------------
 drivers/gpu/drm/i915/i915_drv.h            |  84 ++++++---
 drivers/gpu/drm/i915/i915_gem.c            | 144 +++------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   3 +-
 drivers/gpu/drm/i915/i915_gem_request.c    | 287 +++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_request.h    |   5 +
 drivers/gpu/drm/i915/i915_gpu_error.c      |  60 +-----
 drivers/gpu/drm/i915/intel_lrc.c           |   4 +-
 drivers/gpu/drm/i915/intel_pm.c            |   3 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   4 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  14 +-
 11 files changed, 251 insertions(+), 519 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6454e61f8ac3..3fe4f73916b5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -366,28 +366,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 			   stats.unbound); \
 } while (0)
 
-static void print_batch_pool_stats(struct seq_file *m,
-				   struct drm_i915_private *dev_priv)
-{
-	struct drm_i915_gem_object *obj;
-	struct file_stats stats;
-	struct intel_engine_cs *engine;
-	int j;
-
-	memset(&stats, 0, sizeof(stats));
-
-	for_each_engine(engine, dev_priv) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				per_file_stats(0, obj, &stats);
-		}
-	}
-
-	print_file_stats(m, "[k]batch pool", stats);
-}
-
 static int per_file_ctx_stats(int id, void *ptr, void *data)
 {
 	struct i915_gem_context *ctx = ptr;
@@ -545,7 +523,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
 
 	seq_putc(m, '\n');
-	print_batch_pool_stats(m, dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	mutex_lock(&dev->filelist_mutex);
@@ -655,10 +632,9 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			if (work->flip_queued_req) {
 				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
 
-				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+				seq_printf(m, "Flip queued on %s at seqno %x, current breadcrumb %x, completed? %d\n",
 					   engine->name,
 					   i915_gem_request_get_seqno(work->flip_queued_req),
-					   dev_priv->next_seqno,
 					   intel_engine_get_seqno(engine),
 					   i915_gem_request_completed(work->flip_queued_req));
 			} else
@@ -688,99 +664,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	int total = 0;
-	int ret, j;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	for_each_engine(engine, dev_priv) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			int count;
-
-			count = 0;
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				count++;
-			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   engine->name, j, count);
-
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link) {
-				seq_puts(m, "   ");
-				describe_obj(m, obj);
-				seq_putc(m, '\n');
-			}
-
-			total += count;
-		}
-	}
-
-	seq_printf(m, "total: %d\n", total);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static int i915_gem_request_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine;
-	struct drm_i915_gem_request *req;
-	int ret, any;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	any = 0;
-	for_each_engine(engine, dev_priv) {
-		int count;
-
-		count = 0;
-		list_for_each_entry(req, &engine->request_list, link)
-			count++;
-		if (count == 0)
-			continue;
-
-		seq_printf(m, "%s requests: %d\n", engine->name, count);
-		list_for_each_entry(req, &engine->request_list, link) {
-			struct pid *pid = req->ctx->pid;
-			struct task_struct *task;
-
-			rcu_read_lock();
-			task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
-			seq_printf(m, "    %x @ %d: %s [%d]\n",
-				   req->fence.seqno,
-				   (int) (jiffies - req->emitted_jiffies),
-				   task ? task->comm : "<unknown>",
-				   task ? task->pid : -1);
-			rcu_read_unlock();
-		}
-
-		any++;
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	if (any == 0)
-		seq_puts(m, "No requests\n");
-
-	return 0;
-}
-
 static void i915_ring_seqno_info(struct seq_file *m,
 				 struct intel_engine_cs *engine)
 {
@@ -1150,43 +1033,6 @@ static const struct file_operations i915_error_state_fops = {
 	.release = i915_error_state_release,
 };
 
-static int
-i915_next_seqno_get(void *data, u64 *val)
-{
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	*val = dev_priv->next_seqno;
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
-	struct drm_device *dev = data;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_set_seqno(dev, val);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
-			i915_next_seqno_get, i915_next_seqno_set,
-			"0x%llx\n");
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -2349,8 +2195,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	struct drm_file *file;
 
 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
-	seq_printf(m, "GPU busy? %s [%x]\n",
-		   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
 		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2688,7 +2532,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 	if (!HAS_RUNTIME_PM(dev_priv))
 		seq_puts(m, "Runtime power management not supported\n");
 
-	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
 	seq_printf(m, "IRQs disabled: %s\n",
 		   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
@@ -5269,7 +5112,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_stolen", i915_gem_stolen_list_info },
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
-	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
@@ -5277,7 +5119,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
-	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
 	{"i915_guc_info", i915_guc_info, 0},
 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
@@ -5329,7 +5170,6 @@ static const struct i915_debugfs_files {
 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
 	{"i915_error_state", &i915_error_state_fops},
-	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f88133e5d725..ba5007653f00 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -856,6 +856,58 @@ struct i915_ctx_hang_stats {
 	bool banned;
 };
 
+struct i915_vma;
+
+struct i915_timeline {
+	u32 id;
+	u32 next_seqno;
+
+	struct mutex mutex;
+	struct list_head link;
+	struct drm_i915_private *i915;
+
+	struct i915_timeline_engine {
+		u32 mask;
+
+		struct i915_timeline *timeline;
+		struct list_head requests;
+
+		u32 last_submitted_seqno;
+
+		/*
+		 * A pool of objects to use as shadow copies of client batch
+		 * buffers when the command parser is enabled. Prevents the
+		 * client from modifying the batch contents after software
+		 * parsing.
+		 */
+		struct i915_gem_batch_pool batch_pool;
+	} engine[I915_NUM_ENGINES];
+
+	struct {
+		struct i915_vma *vma;
+		uint32_t *map;
+	} hws;
+
+	/**
+	 * Is the GPU currently considered idle, or busy executing
+	 * userspace requests? Whilst idle, we allow runtime power
+	 * management to power down the hardware and display clocks.
+	 * In order to reduce the effect on performance, there
+	 * is a slight delay before we do so.
+	 */
+	unsigned active_engines;
+
+	/**
+	 * We leave the user IRQ off as much as possible,
+	 * but this means that requests will finish and never
+	 * be retired once the system goes idle. Set a timer to
+	 * fire periodically while the ring is running. When it
+	 * fires, go retire requests.
+	 */
+	struct delayed_work retire_work;
+};
+
+
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
 
@@ -1778,8 +1830,8 @@ struct drm_i915_private {
 	struct pci_dev *bridge_dev;
 	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
+	struct i915_timeline kernel_timeline;
 	struct i915_vma *semaphore_vma;
-	uint32_t next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
@@ -2030,33 +2082,9 @@ struct drm_i915_private {
 		int (*init_engines)(struct drm_device *dev);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
-		/**
-		 * Is the GPU currently considered idle, or busy executing
-		 * userspace requests? Whilst idle, we allow runtime power
-		 * management to power down the hardware and display clocks.
-		 * In order to reduce the effect on performance, there
-		 * is a slight delay before we do so.
-		 */
-		unsigned active_engines;
-		bool awake;
+		struct list_head timelines;
 
-		/**
-		 * We leave the user IRQ off as much as possible,
-		 * but this means that requests will finish and never
-		 * be retired once the system goes idle. Set a timer to
-		 * fire periodically while the ring is running. When it
-		 * fires, go retire requests.
-		 */
-		struct delayed_work retire_work;
-
-		/**
-		 * When we detect an idle GPU, we want to turn on
-		 * powersaving features. So once we see that there
-		 * are no more requests outstanding and no more
-		 * arrive within a small period of time, we fire
-		 * off the idle_work.
-		 */
-		struct delayed_work idle_work;
+		atomic_t active;
 	} gt;
 
 	/* perform PHY state sanity checks? */
@@ -3207,7 +3235,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine);
+i915_gem_find_active_request(struct i915_timeline_engine *te);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fc46ee10e5cd..4b4a8e5833a8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2410,7 +2410,7 @@ static void i915_set_reset_status(struct i915_gem_context *ctx,
 }
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine)
+i915_gem_find_active_request(struct i915_timeline_engine *te)
 {
 	struct drm_i915_gem_request *request;
 
@@ -2422,7 +2422,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
-	list_for_each_entry(request, &engine->request_list, link) {
+	list_for_each_entry(request, &te->requests, link) {
 		if (i915_gem_request_completed(request))
 			continue;
 
@@ -2432,35 +2432,37 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
+static void i915_gem_reset_timeline_status(struct i915_timeline *timeline)
 {
-	struct drm_i915_gem_request *request;
+	int i;
 
-	request = i915_gem_find_active_request(engine);
-	if (request == NULL)
-		return;
+	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+		struct i915_timeline_engine *te = &timeline->engine[i];
+		struct drm_i915_gem_request *request;
+
+		if (list_empty(&te->requests))
+			continue;
+
+		request = i915_gem_find_active_request(te);
+		if (request) {
+			i915_set_reset_status(request->ctx,
+					      i915_gem_request_started(request));
+			list_for_each_entry_continue(request, &te->requests, link)
+				i915_set_reset_status(request->ctx, false);
+		}
 
-	i915_set_reset_status(request->ctx,
-			      i915_gem_request_started(request));
-	list_for_each_entry_continue(request, &engine->request_list, link)
-		i915_set_reset_status(request->ctx, false);
+		request = list_last_entry(&te->requests,
+					  struct drm_i915_gem_request,
+					  link);
+		i915_gem_request_retire_upto(request);
+		//intel_engine_init_seqno(te, te->last_submitted_seqno);
+	}
 }
 
 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_request *request;
 	struct intel_ring *ring;
 
-	request = i915_gem_active_peek(&engine->last_request,
-				       &engine->i915->dev->struct_mutex);
-
-	/* Mark all pending requests as complete so that any concurrent
-	 * (lockless) lookup doesn't try and wait upon the request as we
-	 * reset it.
-	 */
-	if (request)
-		intel_engine_init_seqno(engine, request->fence.seqno);
-
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
 	 * are the ones that keep the context and ringbuffer backing objects
@@ -2478,17 +2480,6 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 					NULL);
 	}
 
-	/*
-	 * We must free the requests after all the corresponding objects have
-	 * been moved off active lists. Which is the same order as the normal
-	 * retire_requests function does. This is important if object hold
-	 * implicit references on things like e.g. ppgtt address spaces through
-	 * the request.
-	 */
-	if (request)
-		i915_gem_request_retire_upto(request);
-	GEM_BUG_ON(intel_engine_is_active(engine));
-
 	/* Having flushed all requests from all queues, we know that all
 	 * ringbuffers must now be empty. However, since we do not reclaim
 	 * all space when retiring the request (to prevent HEADs colliding
@@ -2506,14 +2497,15 @@ void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
+	struct i915_timeline *timeline;
 
 	/*
 	 * Before we free the objects from the requests, we need to inspect
 	 * them for finding the guilty party. As the requests only borrow
 	 * their reference to the objects, the inspection must be done first.
 	 */
-	for_each_engine(engine, dev_priv)
-		i915_gem_reset_engine_status(engine);
+	list_for_each_entry(timeline, &dev_priv->gt.timelines, link)
+		i915_gem_reset_timeline_status(timeline);
 
 	for_each_engine(engine, dev_priv)
 		i915_gem_reset_engine_cleanup(engine);
@@ -2523,57 +2515,6 @@ void i915_gem_reset(struct drm_device *dev)
 	i915_gem_restore_fences(dev);
 }
 
-static void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.retire_work.work);
-	struct drm_device *dev = dev_priv->dev;
-
-	/* Come back later if the device is busy... */
-	if (mutex_trylock(&dev->struct_mutex)) {
-		i915_gem_retire_requests(dev_priv);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	/* Keep the retire handler running until we are finally idle.
-	 * We do not need to do this test under locking as in the worst-case
-	 * we queue the retire worker once too often.
-	 */
-	if (READ_ONCE(dev_priv->gt.awake))
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.retire_work,
-				   round_jiffies_up_relative(HZ));
-}
-
-static void
-i915_gem_idle_work_handler(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.idle_work.work);
-	struct drm_device *dev = dev_priv->dev;
-	struct intel_engine_cs *engine;
-
-	if (!READ_ONCE(dev_priv->gt.awake))
-		return;
-
-	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->gt.active_engines)
-		goto out;
-
-	for_each_engine(engine, dev_priv)
-		i915_gem_batch_pool_fini(&engine->batch_pool);
-
-	GEM_BUG_ON(!dev_priv->gt.awake);
-	dev_priv->gt.awake = false;
-
-	if (INTEL_INFO(dev_priv)->gen >= 6)
-		gen6_rps_idle(dev_priv);
-	intel_runtime_pm_put(dev_priv);
-out:
-	mutex_unlock(&dev->struct_mutex);
-}
-
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem);
@@ -4128,22 +4069,10 @@ i915_gem_suspend(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
-	cancel_delayed_work_sync(&dev_priv->gt.idle_work);
+	//cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+	//flush_delayed_work(&dev_priv->gt.idle_work);
 	flush_work(&dev_priv->mm.free_work);
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
-
-	/* Assert that we sucessfully flushed all the work and
-	 * reset the GPU back to its idle, low power state.
-	 */
-	if (dev_priv->gt.awake) {
-		if (INTEL_INFO(dev_priv)->gen >= 6)
-			gen6_rps_idle(dev_priv);
-		intel_runtime_pm_put(dev_priv);
-		dev_priv->gt.awake = false;
-	}
-
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
@@ -4383,13 +4312,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
 		dev_priv->gt.cleanup_engine(engine);
 }
 
-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
-	/* Early initialisation so that core GEM works during engine setup */
-	INIT_LIST_HEAD(&engine->request_list);
-}
-
 void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
@@ -4425,7 +4347,6 @@ void
 i915_gem_load_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
 
 	dev_priv->objects =
 		kmem_cache_create("i915_gem_object",
@@ -4451,12 +4372,7 @@ i915_gem_load_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	for (i = 0; i < I915_NUM_ENGINES; i++)
-		init_engine_lists(&dev_priv->engine[i]);
-	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
-			  i915_gem_retire_work_handler);
-	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
-			  i915_gem_idle_work_handler);
+	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eff9c81c8f3..4fdf995b9808 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -60,6 +60,7 @@ struct i915_execbuffer {
 	struct drm_i915_gem_exec_object2 *exec;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
+	struct i915_timeline_engine *timeline;
 	struct i915_address_space *vm;
 	struct i915_vma *batch_vma;
 	struct drm_i915_gem_request *request;
@@ -1560,7 +1561,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 	struct i915_vma *vma;
 	int ret;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
+	shadow_batch_obj = i915_gem_batch_pool_get(&eb->timeline->batch_pool,
 						   PAGE_ALIGN(eb->args->batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return ERR_CAST(shadow_batch_obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 43b5d4fa480a..5e6b9116e098 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -26,6 +26,125 @@
 
 #include "i915_drv.h"
 
+static void i915_gem_request_retire(struct drm_i915_gem_request *request);
+
+static bool
+i915_timeline_engine_retire(struct i915_timeline_engine *te)
+{
+	struct drm_i915_gem_request *request, *next;
+
+	list_for_each_entry_safe(request, next, &te->requests, link) {
+		if (!i915_gem_request_completed(request))
+			return false;
+
+		i915_gem_request_retire(request);
+	}
+	return true;
+}
+
+static bool i915_timeline_retire_requests(struct i915_timeline *tl)
+{
+	int i;
+
+	if (READ_ONCE(tl->active_engines) == 0)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+		if (i915_timeline_engine_retire(&tl->engine[i]))
+			tl->active_engines &= ~(1 << i);
+	}
+
+	if (tl->active_engines == 0 &&
+	    atomic_dec_and_test(&tl->i915->gt.active)) {
+		if (INTEL_GEN(tl->i915) >= 6)
+			gen6_rps_idle(tl->i915);
+		intel_runtime_pm_put(tl->i915);
+	}
+
+	return true;
+}
+
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+{
+	struct i915_timeline *timeline;
+
+	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+	list_for_each_entry(timeline, &dev_priv->gt.timelines, link)
+		i915_timeline_retire_requests(timeline);
+}
+
+static void
+i915_timeline_retire_work(struct work_struct *work)
+{
+	struct i915_timeline *tl =
+		container_of(work, typeof(*tl), retire_work.work);
+
+	/* Keep the retire handler running until we are finally idle.
+	 * We do not need to do this test under locking as in the worst-case
+	 * we queue the retire worker once too often.
+	 */
+	if (i915_timeline_retire_requests(tl))
+		queue_delayed_work(tl->i915->wq, &tl->retire_work,
+				   round_jiffies_up_relative(HZ));
+}
+
+static int i915_timeline_init_seqno(struct i915_timeline *tl, u32 seqno)
+{
+	struct intel_engine_cs *engine;
+	int ret;
+
+	if (tl->id)
+		return 0;
+
+	/* Carefully retire all requests without writing to the rings */
+	for_each_engine(engine, tl->i915) {
+		ret = intel_engine_idle(engine);
+		if (ret)
+			return ret;
+	}
+	i915_timeline_retire_requests(tl);
+
+	/* Finally reset hw state */
+	for_each_engine(engine, tl->i915)
+		intel_engine_init_seqno(engine, seqno);
+
+	return 0;
+}
+
+static int
+i915_timeline_get_seqno(struct i915_timeline *tl, u32 *seqno)
+{
+	/* reserve 0 for non-seqno */
+	if (unlikely(tl->next_seqno == 0)) {
+		int ret = i915_timeline_init_seqno(tl, 0);
+		if (ret)
+			return ret;
+
+		tl->next_seqno = 2;
+	}
+
+	/* Each request uses a start / stop sequence */
+	GEM_BUG_ON(tl->next_seqno & 1);
+	*seqno = tl->next_seqno;
+	tl->next_seqno += 2;
+	return 0;
+}
+
+void i915_timeline_init(struct drm_i915_private *i915,
+			struct i915_timeline *tl)
+{
+	int i;
+
+	tl->i915 = i915;
+	list_add(&tl->link, &i915->gt.timelines);
+	tl->next_seqno = 2;
+
+	for (i = 0; i < ARRAY_SIZE(tl->engine); i++)
+		INIT_LIST_HEAD(&tl->engine[i].requests);
+
+	INIT_DELAYED_WORK(&tl->retire_work, i915_timeline_retire_work);
+}
+
 static inline struct drm_i915_gem_request *
 to_i915_request(struct fence *fence)
 {
@@ -183,16 +302,14 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 
 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct i915_timeline_engine *te = req->timeline;
 	struct drm_i915_gem_request *tmp;
 
 	lockdep_assert_held(&req->i915->dev->struct_mutex);
 	GEM_BUG_ON(list_empty(&req->link));
 
 	do {
-		tmp = list_first_entry(&engine->request_list,
-				       typeof(*tmp), link);
-
+		tmp = list_first_entry(&te->requests, typeof(*tmp), link);
 		i915_gem_request_retire(tmp);
 	} while (tmp != req);
 }
@@ -214,71 +331,6 @@ static int i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
 	return 0;
 }
 
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
-{
-	struct intel_engine_cs *engine;
-	int ret;
-
-	/* Carefully retire all requests without writing to the rings */
-	for_each_engine(engine, dev_priv) {
-		ret = intel_engine_idle(engine);
-		if (ret)
-			return ret;
-	}
-	i915_gem_retire_requests(dev_priv);
-
-	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
-		while (intel_kick_waiters(dev_priv) ||
-		       intel_kick_signalers(dev_priv))
-			yield();
-	}
-
-	/* Finally reset hw state */
-	for_each_engine(engine, dev_priv)
-		intel_engine_init_seqno(engine, seqno);
-
-	return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	seqno = (seqno + 1) & ~1;
-	if (seqno == 0)
-		return -EINVAL;
-
-	/* HWS page needs to be set less than what we
-	 * will inject to ring
-	 */
-	ret = i915_gem_init_seqno(dev_priv, seqno - 2);
-	if (ret)
-		return ret;
-
-	dev_priv->next_seqno = seqno;
-	return 0;
-}
-
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
-{
-	/* reserve 0 for non-seqno */
-	if (unlikely(dev_priv->next_seqno == 0)) {
-		int ret = i915_gem_init_seqno(dev_priv, 0);
-		if (ret)
-			return ret;
-
-		dev_priv->next_seqno = 2;
-	}
-
-	/* Each request uses a start / stop sequence */
-	GEM_BUG_ON(dev_priv->next_seqno & 1);
-	*seqno = dev_priv->next_seqno;
-	dev_priv->next_seqno += 2;
-	return 0;
-}
-
 static void __kfence_call submit_notify(struct kfence *fence)
 {
 	struct drm_i915_gem_request *request =
@@ -311,6 +363,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req, *prev;
+	struct i915_timeline *tl = &dev_priv->kernel_timeline;
+	struct i915_timeline_engine *te = &tl->engine[engine->id];
 	u32 seqno;
 	int ret;
 
@@ -322,9 +376,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (!list_empty(&engine->request_list)) {
-		req = list_first_entry(&engine->request_list,
-				       typeof(*req), link);
+	if (!list_empty(&te->requests)) {
+		req = list_first_entry(&te->requests, typeof(*req), link);
 		if (i915_gem_request_completed(req))
 			i915_gem_request_retire(req);
 	}
@@ -333,7 +386,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (req == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	ret = i915_gem_get_seqno(dev_priv, &seqno);
+	ret = i915_timeline_get_seqno(tl, &seqno);
 	if (ret)
 		goto err;
 
@@ -349,6 +402,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->i915 = dev_priv;
 	req->file_priv = NULL;
 	req->engine = engine;
+	req->timeline = te;
 	req->signaling.wait.tsk = NULL;
 	req->reset_counter = reset_counter;
 	req->ctx = ctx;
@@ -398,24 +452,28 @@ err:
 	return ERR_PTR(ret);
 }
 
-static void i915_gem_mark_busy(struct drm_i915_private *dev_priv,
-			       const struct intel_engine_cs *engine)
+static void i915_gem_mark_busy(struct drm_i915_gem_request *req)
 {
-	dev_priv->gt.active_engines |= intel_engine_flag(engine);
-	if (dev_priv->gt.awake)
-		return;
+	struct i915_timeline *timeline = req->timeline->timeline;
 
-	intel_runtime_pm_get_noresume(dev_priv);
-	dev_priv->gt.awake = true;
+	if (timeline->active_engines & req->timeline->mask)
+		return;
 
-	intel_enable_gt_powersave(dev_priv);
-	i915_update_gfx_val(dev_priv);
-	if (INTEL_INFO(dev_priv)->gen >= 6)
-		gen6_rps_busy(dev_priv);
+	if (timeline->active_engines == 0) {
+		queue_delayed_work(req->i915->wq,
+				   &timeline->retire_work,
+				   round_jiffies_up_relative(HZ));
+
+		if (atomic_inc_return(&req->i915->gt.active) == 1) {
+			intel_runtime_pm_get_noresume(req->i915);
+			intel_enable_gt_powersave(req->i915);
+			i915_update_gfx_val(req->i915);
+			if (INTEL_GEN(req->i915) >= 6)
+				gen6_rps_busy(req->i915);
+		}
+	}
 
-	queue_delayed_work(dev_priv->wq,
-			   &dev_priv->gt.retire_work,
-			   round_jiffies_up_relative(HZ));
+	timeline->active_engines |= req->timeline->mask;
 }
 
 static void i915_gem_request_cancel(struct drm_i915_gem_request *request)
@@ -485,7 +543,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 */
 	request->emitted_jiffies = jiffies;
 	i915_gem_active_set(&engine->last_request, request);
-	list_add_tail(&request->link, &engine->request_list);
+	list_add_tail(&request->link, &request->timeline->requests);
 
 	/* Record the position of the start of the breadcrumb so that
 	 * should we detect the updated seqno part-way through the
@@ -505,7 +563,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 		  "for adding the request (%d bytes)\n",
 		  reserved_tail, ret);
 
-	i915_gem_mark_busy(request->i915, engine);
+	i915_gem_mark_busy(request);
 
 	kfence_signal(&request->submit);
 	kfence_put(&request->submit);
@@ -638,7 +696,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	 * forcing the clocks too high for the whole system, we only allow
 	 * each client to waitboost once in a busy period.
 	 */
-	if (!IS_ERR(rps) && INTEL_INFO(req->i915)->gen >= 6)
+	if (!IS_ERR(rps) && INTEL_GEN(req->i915) >= 6)
 		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
 
 	/* Optimistic spin for the next ~jiffie before touching IRQs */
@@ -731,7 +789,6 @@ complete:
 
 static int wait_for_space(struct intel_ring *ring, int bytes)
 {
-	struct intel_engine_cs *engine = ring->engine;
 	struct drm_i915_gem_request *target;
 	int ret;
 
@@ -739,17 +796,9 @@ static int wait_for_space(struct intel_ring *ring, int bytes)
 	if (ring->space >= bytes)
 		return 0;
 
-	list_for_each_entry(target, &engine->request_list, link) {
+	list_for_each_entry(target, &ring->timeline->requests, link) {
 		unsigned space;
 
-		/*
-		 * The request queue is per-engine, so can contain requests
-		 * from multiple ring. Here, we must ignore any that
-		 * aren't from the ring we're considering.
-		 */
-		if (target->ring != ring)
-			continue;
-
 		/* Would completion of this request free enough space? */
 		space = __intel_ring_space(target->postfix,
 					   ring->tail, ring->size);
@@ -757,7 +806,7 @@ static int wait_for_space(struct intel_ring *ring, int bytes)
 			break;
 	}
 
-	if (WARN_ON(&target->link == &engine->request_list))
+	if (WARN_ON(&target->link == &ring->timeline->requests))
 		return -ENOSPC;
 
 	ret = __i915_wait_request(target, true, NULL, NULL);
@@ -862,39 +911,3 @@ int i915_gem_request_align(struct drm_i915_gem_request *req)
 	memset(out, 0, bytes);
 	return 0;
 }
-
-static bool i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
-{
-	struct drm_i915_gem_request *request, *next;
-
-	list_for_each_entry_safe(request, next, &engine->request_list, link) {
-		if (!i915_gem_request_completed(request))
-			return false;
-
-		i915_gem_request_retire(request);
-	}
-
-	return true;
-}
-
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
-
-	if (dev_priv->gt.active_engines == 0)
-		return;
-
-	GEM_BUG_ON(!dev_priv->gt.awake);
-
-	for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines) {
-		if (i915_gem_retire_requests_ring(engine))
-			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-	}
-
-	if (dev_priv->gt.active_engines == 0)
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.idle_work,
-				   msecs_to_jiffies(100));
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 5bf350183883..4933a9c7886b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -40,6 +40,9 @@ struct intel_signal_node {
 	struct intel_wait wait;
 };
 
+struct i915_timeline;
+struct i915_timeline_engine;
+
 /**
  * Request queue structure.
  *
@@ -75,6 +78,8 @@ struct drm_i915_gem_request {
 	struct intel_ring *ring;
 	struct intel_signal_node signaling;
 
+	struct i915_timeline_engine *timeline;
+
 	unsigned reset_counter;
 	struct kfence submit;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 05a5fb328c13..b7dfcc74223e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -978,61 +978,6 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void engine_record_requests(struct intel_engine_cs *engine,
-				   struct drm_i915_gem_request *first,
-				   struct drm_i915_error_ring *ering)
-{
-	struct drm_i915_gem_request *request;
-	int count;
-
-	count = 0;
-	request = first;
-	list_for_each_entry_from(request, &engine->request_list, link)
-		count++;
-
-	ering->requests = kcalloc(count,
-				  sizeof(*ering->requests),
-				  GFP_ATOMIC);
-	if (ering->requests == NULL)
-		return;
-	ering->num_requests = count;
-
-	count = 0;
-	request = first;
-	list_for_each_entry_from(request, &engine->request_list, link) {
-		struct drm_i915_error_request *erq;
-
-		if (count >= ering->num_requests) {
-			/*
-			 * If the ring request list was changed in
-			 * between the point where the error request
-			 * list was created and dimensioned and this
-			 * point then just exit early to avoid crashes.
-			 *
-			 * We don't need to communicate that the
-			 * request list changed state during error
-			 * state capture and that the error state is
-			 * slightly incorrect as a consequence since we
-			 * are typically only interested in the request
-			 * list state at the point of error state
-			 * capture, not in any changes happening during
-			 * the capture.
-			 */
-			break;
-		}
-
-		erq = &ering->requests[count++];
-		erq->seqno = request->fence.seqno;
-		erq->jiffies = request->emitted_jiffies;
-		erq->head = request->head;
-		erq->tail = request->tail;
-
-		rcu_read_lock();
-		erq->pid = request->ctx ? pid_nr(request->ctx->pid) : 0;
-		rcu_read_unlock();
-	}
-}
-
 static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				  struct drm_i915_error_state *error)
 {
@@ -1054,7 +999,8 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		i915_record_ring_state(dev_priv, error, engine, ering);
 		engine_record_waiters(engine, ering);
 
-		request = i915_gem_find_active_request(engine);
+		//request = i915_gem_find_active_request(engine);
+		request = NULL;
 		if (request) {
 			struct i915_address_space *vm;
 			struct intel_ring *ring;
@@ -1102,8 +1048,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			ering->cpu_ring_tail = ring->tail;
 			ering->ringbuffer =
 				i915_error_object_create(dev_priv, ring->vma);
-
-			engine_record_requests(engine, request, ering);
 		}
 
 		ering->hws_page =
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 54895b16c764..0bfa0654b0a4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1563,7 +1563,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 
 	i915_cmd_parser_fini_ring(engine);
 	i915_gem_render_state_fini(engine);
-	i915_gem_batch_pool_fini(&engine->batch_pool);
+	//i915_gem_batch_pool_fini(&engine->batch_pool);
 
 	intel_engine_fini_breadcrumbs(engine);
 
@@ -1708,7 +1708,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 	logical_ring_default_irqs(engine, info->irq_shift);
 
 	intel_engine_init_requests(engine);
-	i915_gem_batch_pool_init(engine, &engine->batch_pool);
+	//i915_gem_batch_pool_init(engine, &engine->batch_pool);
 	i915_cmd_parser_init_ring(engine);
 
 	return engine;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6f4e298a1aa1..17ff67ba8e45 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4883,8 +4883,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 	/* This is intentionally racy! We peek at the state here, then
 	 * validate inside the RPS worker.
 	 */
-	if (!(dev_priv->gt.awake &&
-	      dev_priv->rps.enabled &&
+	if (!(dev_priv->rps.enabled &&
 	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
 		return;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8fcbd59d4f36..329c7c9b971d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2039,7 +2039,7 @@ static int intel_init_engine(struct drm_device *dev,
 	engine->fence_context = fence_context_alloc(1);
 	INIT_LIST_HEAD(&engine->execlist_queue);
 	INIT_LIST_HEAD(&engine->buffers);
-	i915_gem_batch_pool_init(engine, &engine->batch_pool);
+	//i915_gem_batch_pool_init(engine, &engine->batch_pool);
 	memset(engine->semaphore.sync_seqno, 0,
 	       sizeof(engine->semaphore.sync_seqno));
 
@@ -2138,7 +2138,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 
 	i915_gem_render_state_fini(engine);
 	i915_cmd_parser_fini_ring(engine);
-	i915_gem_batch_pool_fini(&engine->batch_pool);
 	intel_engine_fini_breadcrumbs(engine);
 
 	intel_ring_context_unpin(dev_priv->kernel_context, engine);
@@ -2155,7 +2154,6 @@ intel_engine_retire(struct i915_gem_active *active,
 void intel_engine_init_requests(struct intel_engine_cs *engine)
 {
 	init_request_active(&engine->last_request, intel_engine_retire);
-	INIT_LIST_HEAD(&engine->request_list);
 }
 
 int intel_engine_idle(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bdb4f15024d1..29e156064e21 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -77,6 +77,7 @@ struct intel_ring {
 	void *vaddr;
 
 	struct intel_engine_cs *engine;
+	struct i915_timeline_engine *timeline;
 	struct list_head link;
 
 	u32 head;
@@ -175,13 +176,6 @@ struct intel_engine_cs {
 		bool rpm_wakelock;
 	} breadcrumbs;
 
-	/*
-	 * A pool of objects to use as shadow copies of client batch buffers
-	 * when the command parser is enabled. Prevents the client from
-	 * modifying the batch contents after software parsing.
-	 */
-	struct i915_gem_batch_pool batch_pool;
-
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
 
@@ -287,12 +281,6 @@ struct intel_engine_cs {
 	bool preempt_wa;
 	u32 ctx_desc_template;
 
-	/**
-	 * List of breadcrumbs associated with GPU requests currently
-	 * outstanding.
-	 */
-	struct list_head request_list;
-
 	/* An RCU guarded pointer to the last request. No reference is
 	 * held to the request, users must carefully acquire a reference to
 	 * the request using i915_gem_active_get_request_rcu(), or hold the
-- 
2.8.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 12+ messages in thread
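
The wait_for_space() change above leans on __intel_ring_space(), whose body
is not part of this excerpt. A minimal sketch of the usual circular-buffer
arithmetic, with illustrative names (the in-tree helper also subtracts a
small reserve so a completely full ring is never ambiguous with an empty
one):

	/* Bytes that become available once the consumer reaches 'head':
	 * 'tail' is where the CPU writes, 'head' is where the GPU will
	 * have read up to.  If head has wrapped behind tail, the free
	 * span wraps around the end of the ring.
	 */
	static int ring_space(int head, int tail, int size)
	{
		int space = head - tail;

		if (space <= 0)
			space += size;	/* account for the wrap */

		return space;
	}

wait_for_space() walks the timeline's requests oldest-first and waits on the
first request whose completion (head advancing to its postfix) would free at
least the number of bytes asked for.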

* Re: [PATCH v3 1/3] dmabuf
  2016-06-20 20:04     ` [PATCH v3 1/3] dmabuf Chris Wilson
  2016-06-20 20:04       ` [PATCH v3 2/3] drm: Prevent NULL deref in drm_name_info() Chris Wilson
  2016-06-20 20:04       ` [PATCH v3 3/3] meh Chris Wilson
@ 2016-06-20 20:06       ` Chris Wilson
  2 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2016-06-20 20:06 UTC (permalink / raw)
  To: dri-devel

On Mon, Jun 20, 2016 at 09:04:32PM +0100, Chris Wilson wrote:

Eek, apologies. This was meant to be git-send-email -v3 but got
mangled into -3 instead.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v3] drm/vgem: Enable dmabuf interface for export
  2016-06-19  6:18   ` [PATCH v2] " Chris Wilson
  2016-06-20 20:04     ` [PATCH v3 1/3] dmabuf Chris Wilson
@ 2016-06-20 20:06     ` Chris Wilson
  2016-06-27 17:57       ` Zach Reizner
  1 sibling, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2016-06-20 20:06 UTC (permalink / raw)
  To: dri-devel; +Cc: Zach Reizner

Enable the standard GEM dma-buf interface provided by the DRM core, but
only for exporting the VGEM object. This allows passing around the VGEM
objects created from the dumb interface and using them as sources
elsewhere. Creating a VGEM object for a foreign handle is not supported.
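
As a rough illustration of the flow this enables (names are illustrative and
error handling is trimmed; this is not part of the patch), userspace can
create a dumb vgem buffer and hand it to another driver as a dma-buf:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>

	/* Create a dumb BO on an opened vgem device and export it as a
	 * dma-buf file descriptor; the export path lands in
	 * drm_gem_prime_handle_to_fd() wired up below.
	 */
	static int vgem_export(int vgem_fd, uint32_t width, uint32_t height,
			       int *dmabuf_fd)
	{
		struct drm_mode_create_dumb create;
		int ret;

		memset(&create, 0, sizeof(create));
		create.width = width;
		create.height = height;
		create.bpp = 32;

		ret = drmIoctl(vgem_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
		if (ret)
			return ret;

		return drmPrimeHandleToFD(vgem_fd, create.handle,
					  DRM_CLOEXEC, dmabuf_fd);
	}

The returned fd can then be imported into a real GPU driver (e.g. via
drmPrimeFDToHandle()), which is the kind of round-trip the testcases below
exercise.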

v2: With additional completeness.
v3: Need to flush the CPU cache upon exporting the dma-addresses.

Testcase: igt/vgem_basic/dmabuf-*
Testcase: igt/prime_vgem
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Zach Reizner <zachr@google.com>
---
 drivers/gpu/drm/vgem/vgem_drv.c | 112 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 111 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index e1a697d0662f..db48e837992d 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -193,14 +193,124 @@ static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
+static void __put_pages(struct page **pages, long n_pages)
+{
+	while (n_pages--)
+		put_page(pages[n_pages]);
+	drm_free_large(pages);
+}
+
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+
+	/* Flush the object from the CPU cache so that importers
+	 * can rely on coherent indirect access via the
+	 * exported dma-address.
+	 */
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	drm_clflush_pages(pages, n_pages);
+	__put_pages(pages, n_pages);
+
+	return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct sg_table *st;
+	struct page **pages;
+	int ret;
+
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto err;
+	}
+
+	ret = sg_alloc_table_from_pages(st, pages, n_pages,
+					0, obj->size, GFP_KERNEL);
+	__put_pages(pages, n_pages);
+	if (ret)
+		goto err;
+
+	return st;
+
+err:
+	kfree(st);
+	return ERR_PTR(ret);
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+	void *addr;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return NULL;
+
+	addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
+	__put_pages(pages, n_pages);
+
+	return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->filp)
+		return -ENODEV;
+
+	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	if (ret)
+		return ret;
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->filp);
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	return 0;
+}
+
 static struct drm_driver vgem_driver = {
-	.driver_features		= DRIVER_GEM,
+	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
 	.fops				= &vgem_driver_fops,
+
 	.dumb_create			= vgem_gem_dumb_create,
 	.dumb_map_offset		= vgem_gem_dumb_map,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_pin = vgem_prime_pin,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
+	.gem_prime_vmap = vgem_prime_vmap,
+	.gem_prime_vunmap = vgem_prime_vunmap,
+	.gem_prime_mmap = vgem_prime_mmap,
+
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
 	.date	= DRIVER_DATE,
-- 
2.8.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 12+ messages in thread
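
For completeness, CPU access to such an exported buffer is meant to be
bracketed with the dma-buf sync ioctl, which is what exercises the
begin_cpu_access plumbing added in patch 1/3 of this series. A sketch under
the assumption that dmabuf_fd and size come from an export like the one
above (error handling trimmed):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/dma-buf.h>

	/* mmap the dma-buf and write to it, telling the kernel when CPU
	 * access starts and ends so it can wait out pending rendering.
	 */
	static int cpu_fill(int dmabuf_fd, size_t size)
	{
		struct dma_buf_sync sync;
		void *ptr;

		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   dmabuf_fd, 0);
		if (ptr == MAP_FAILED)
			return -1;

		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == 0) {
			memset(ptr, 0xc5, size);	/* the actual CPU access */

			sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
			ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
		}

		munmap(ptr, size);
		return 0;
	}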

* Re: [PATCH 2/3] drm/vgem: Fix mmaping
  2016-06-18 15:20 ` [PATCH 2/3] drm/vgem: Fix mmaping Chris Wilson
@ 2016-06-21 21:04   ` Chris Wilson
  0 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2016-06-21 21:04 UTC (permalink / raw)
  To: dri-devel

On Sat, Jun 18, 2016 at 04:20:48PM +0100, Chris Wilson wrote:
> The vGEM mmap code has bitrotted slightly and now immediately BUGs.
> Since vGEM was last updated, there are new core GEM facilities to
> provide more common functions, so let's use those here.
 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=96603
> Testcase: igt/vgem_basic/mmap
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Humberto Israel Perez Rodriguez <humberto.i.perez.rodriguez@intel.com>
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3] drm/vgem: Enable dmabuf interface for export
  2016-06-20 20:06     ` [PATCH v3] drm/vgem: Enable dmabuf interface for export Chris Wilson
@ 2016-06-27 17:57       ` Zach Reizner
  0 siblings, 0 replies; 12+ messages in thread
From: Zach Reizner @ 2016-06-27 17:57 UTC (permalink / raw)
  To: Chris Wilson, dri-devel


On Mon, Jun 20, 2016 at 1:07 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:

> Enable the standard GEM dma-buf interface provided by the DRM core, but
> only for exporting the VGEM object. This allows passing around the VGEM
> objects created from the dumb interface and using them as sources
> elsewhere. Creating a VGEM object for a foreign handle is not supported.
>
> v2: With additional completeness.
> v3: Need to flush the CPU cache upon exporting the dma-addresses.
>
> Testcase: igt/vgem_basic/dmabuf-*
> Testcase: igt/prime_vgem
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Zach Reizner <zachr@google.com>
> ---
>  drivers/gpu/drm/vgem/vgem_drv.c | 112 +++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 111 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index e1a697d0662f..db48e837992d 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -193,14 +193,124 @@ static const struct file_operations vgem_driver_fops = {
>         .release        = drm_release,
>  };
>
> +static void __put_pages(struct page **pages, long n_pages)
> +{
> +       while (n_pages--)
> +               put_page(pages[n_pages]);
> +       drm_free_large(pages);
> +}
> +
> +static int vgem_prime_pin(struct drm_gem_object *obj)
> +{
> +       long n_pages = obj->size >> PAGE_SHIFT;
> +       struct page **pages;
> +
> +       /* Flush the object from the CPU cache so that importers
> +        * can rely on coherent indirect access via the
> +        * exported dma-address.
> +        */
> +       pages = drm_gem_get_pages(obj);
> +       if (IS_ERR(pages))
> +               return PTR_ERR(pages);
> +
> +       drm_clflush_pages(pages, n_pages);
> +       __put_pages(pages, n_pages);
> +
> +       return 0;
> +}
> +
> +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> +{
> +       long n_pages = obj->size >> PAGE_SHIFT;
> +       struct sg_table *st;
> +       struct page **pages;
> +       int ret;
> +
> +       st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> +       if (st == NULL)
> +               return ERR_PTR(-ENOMEM);
> +
> +       pages = drm_gem_get_pages(obj);
> +       if (IS_ERR(pages)) {
> +               ret = PTR_ERR(pages);
> +               goto err;
> +       }
> +
> +       ret = sg_alloc_table_from_pages(st, pages, n_pages,
> +                                       0, obj->size, GFP_KERNEL);
> +       __put_pages(pages, n_pages);
> +       if (ret)
> +               goto err;
> +
> +       return st;
> +
> +err:
> +       kfree(st);
> +       return ERR_PTR(ret);
> +}
> +
> +static void *vgem_prime_vmap(struct drm_gem_object *obj)
> +{
> +       long n_pages = obj->size >> PAGE_SHIFT;
> +       struct page **pages;
> +       void *addr;
> +
> +       pages = drm_gem_get_pages(obj);
> +       if (IS_ERR(pages))
> +               return NULL;
> +
> +       addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
> +       __put_pages(pages, n_pages);
> +
> +       return addr;
> +}
> +
> +static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
> +{
> +       vunmap(vaddr);
> +}
> +
> +static int vgem_prime_mmap(struct drm_gem_object *obj,
> +                          struct vm_area_struct *vma)
> +{
> +       int ret;
> +
> +       if (obj->size < vma->vm_end - vma->vm_start)
> +               return -EINVAL;
> +
> +       if (!obj->filp)
> +               return -ENODEV;
> +
> +       ret = obj->filp->f_op->mmap(obj->filp, vma);
> +       if (ret)
> +               return ret;
> +
> +       fput(vma->vm_file);
> +       vma->vm_file = get_file(obj->filp);
> +       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
> +       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> +
> +       return 0;
> +}
> +
>  static struct drm_driver vgem_driver = {
> -       .driver_features                = DRIVER_GEM,
> +       .driver_features                = DRIVER_GEM | DRIVER_PRIME,
>         .gem_free_object_unlocked       = vgem_gem_free_object,
>         .gem_vm_ops                     = &vgem_gem_vm_ops,
>         .ioctls                         = vgem_ioctls,
>         .fops                           = &vgem_driver_fops,
> +
>         .dumb_create                    = vgem_gem_dumb_create,
>         .dumb_map_offset                = vgem_gem_dumb_map,
> +
> +       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> +       .gem_prime_pin = vgem_prime_pin,
> +       .gem_prime_export = drm_gem_prime_export,
> +       .gem_prime_get_sg_table = vgem_prime_get_sg_table,
> +       .gem_prime_vmap = vgem_prime_vmap,
> +       .gem_prime_vunmap = vgem_prime_vunmap,
> +       .gem_prime_mmap = vgem_prime_mmap,
> +
>         .name   = DRIVER_NAME,
>         .desc   = DRIVER_DESC,
>         .date   = DRIVER_DATE,
> --
> 2.8.1
>
>
Acked-by: Zach Reizner <zachr@google.com>


_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2016-06-27 17:57 UTC | newest]

Thread overview: 12+ messages
2016-06-18 15:20 [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs Chris Wilson
2016-06-18 15:20 ` [PATCH 2/3] drm/vgem: Fix mmaping Chris Wilson
2016-06-21 21:04   ` Chris Wilson
2016-06-18 15:20 ` [PATCH 3/3] drm/vgem: Enable dmabuf interface for export Chris Wilson
2016-06-19  6:18   ` [PATCH v2] " Chris Wilson
2016-06-20 20:04     ` [PATCH v3 1/3] dmabuf Chris Wilson
2016-06-20 20:04       ` [PATCH v3 2/3] drm: Prevent NULL deref in drm_name_info() Chris Wilson
2016-06-20 20:04       ` [PATCH v3 3/3] meh Chris Wilson
2016-06-20 20:06       ` [PATCH v3 1/3] dmabuf Chris Wilson
2016-06-20 20:06     ` [PATCH v3] drm/vgem: Enable dmabuf interface for export Chris Wilson
2016-06-27 17:57       ` Zach Reizner
2016-06-18 22:37 ` [PATCH 1/3] drm: Wait on the reservation object when sync'ing dmabufs Daniel Vetter
