* [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
@ 2016-07-12 12:46 Chris Wilson
  2016-07-12 16:13 ` ✗ Ro.CI.BAT: failure for " Patchwork
                   ` (2 more replies)
  0 siblings, 3 replies; 20+ messages in thread
From: Chris Wilson @ 2016-07-12 12:46 UTC (permalink / raw)
  To: dri-devel; +Cc: Daniel Vetter, intel-gfx, Zach Reizner, Gustavo Padovan

vGEM buffers are useful for passing data between software clients and
hardware renderers. By allowing the user to create and attach fences to
the exported vGEM buffers (on the dma-buf), the user can implement a
deferred renderer and queue hardware operations like flipping and then
signal the buffer readiness (i.e. this allows the user to schedule
operations out-of-order, but have them complete in-order).

This also makes it much easier to write tightly controlled testcases for
dma-buf fencing and signaling between hardware drivers.

v2: Don't pretend the fences exist in an ordered timeline, but allocate
a separate fence-context for each fence so that the fences are
unordered.

Testcase: igt/vgem_basic/dmabuf-fence
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Zach Reizner <zachr@google.com>
Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Zach Reizner <zachr@google.com>
---
 drivers/gpu/drm/vgem/Makefile     |   2 +-
 drivers/gpu/drm/vgem/vgem_drv.c   |  34 +++++++
 drivers/gpu/drm/vgem/vgem_drv.h   |  16 +++
 drivers/gpu/drm/vgem/vgem_fence.c | 207 ++++++++++++++++++++++++++++++++++++++
 include/uapi/drm/vgem_drm.h       |  62 ++++++++++++
 5 files changed, 320 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/vgem/vgem_fence.c
 create mode 100644 include/uapi/drm/vgem_drm.h

diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index 3f4c7b842028..bfcdea1330e6 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
 
 obj-$(CONFIG_DRM_VGEM)	+= vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 29c2aab3c1a7..c15bafb06665 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
 	.close = drm_gem_vm_close,
 };
 
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct vgem_file *vfile;
+	int ret;
+
+	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+	if (!vfile)
+		return -ENOMEM;
+
+	file->driver_priv = vfile;
+
+	ret = vgem_fence_open(vfile);
+	if (ret) {
+		kfree(vfile);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct vgem_file *vfile = file->driver_priv;
+
+	vgem_fence_close(vfile);
+	kfree(vfile);
+}
+
 /* ioctls */
 
 static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -164,6 +192,8 @@ unref:
 }
 
 static struct drm_ioctl_desc vgem_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
 
 static struct drm_driver vgem_driver = {
 	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
+	.open				= vgem_open,
+	.preclose			= vgem_preclose,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
+	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
 	.fops				= &vgem_driver_fops,
 
 	.dumb_create			= vgem_gem_dumb_create,
@@ -328,5 +361,6 @@ module_init(vgem_init);
 module_exit(vgem_exit);
 
 MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 988cbaae7588..1f8798ad329c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -32,9 +32,25 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+	struct idr fence_idr;
+	struct mutex fence_mutex;
+};
+
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
 };
 
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
+
 #endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644
index 000000000000..63095331c446
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+struct vgem_fence {
+	struct fence base;
+	struct spinlock lock;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+	return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+	return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+	return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+	snprintf(str, size, "%u", 0);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+					  int size)
+{
+	snprintf(str, size, "%u", 0);
+}
+
+const struct fence_ops vgem_fence_ops = {
+	.get_driver_name = vgem_fence_get_driver_name,
+	.get_timeline_name = vgem_fence_get_timeline_name,
+	.enable_signaling = vgem_fence_enable_signaling,
+	.signaled = vgem_fence_signaled,
+	.wait = fence_default_wait,
+	.fence_value_str = vgem_fence_value_str,
+	.timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile)
+{
+	struct vgem_fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	spin_lock_init(&fence->lock);
+	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+		   fence_context_alloc(1), 0);
+
+	return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+			 struct drm_gem_object *obj)
+{
+	struct dma_buf *dmabuf;
+
+	if (obj->dma_buf)
+		return 0;
+
+	dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	obj->dma_buf = dmabuf;
+	drm_gem_object_reference(obj);
+	return 0;
+}
+
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file)
+{
+	struct drm_vgem_fence_attach *arg = data;
+	struct vgem_file *vfile = file->driver_priv;
+	struct reservation_object *resv;
+	struct drm_gem_object *obj;
+	struct fence *fence;
+	int ret;
+
+	if (arg->flags & ~VGEM_FENCE_WRITE)
+		return -EINVAL;
+
+	if (arg->pad)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(file, arg->handle);
+	if (!obj)
+		return -ENOENT;
+
+	ret = attach_dmabuf(dev, obj);
+	if (ret)
+		goto out;
+
+	fence = vgem_fence_create(vfile);
+	if (!fence) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = 0;
+	resv = obj->dma_buf->resv;
+	mutex_lock(&resv->lock.base);
+	if (arg->flags & VGEM_FENCE_WRITE)
+		reservation_object_add_excl_fence(resv, fence);
+	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+		reservation_object_add_shared_fence(resv, fence);
+	mutex_unlock(&resv->lock.base);
+
+	if (ret == 0) {
+		mutex_lock(&vfile->fence_mutex);
+		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+		mutex_unlock(&vfile->fence_mutex);
+		if (ret > 0) {
+			arg->out_fence = ret;
+			ret = 0;
+		}
+	}
+	if (ret) {
+		fence_signal(fence);
+		fence_put(fence);
+	}
+out:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file)
+{
+	struct vgem_file *vfile = file->driver_priv;
+	struct drm_vgem_fence_signal *arg = data;
+	struct fence *fence;
+
+	if (arg->flags)
+		return -EINVAL;
+
+	mutex_lock(&vfile->fence_mutex);
+	fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+	mutex_unlock(&vfile->fence_mutex);
+	if (!fence)
+		return -ENOENT;
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
+	fence_signal(fence);
+	fence_put(fence);
+	return 0;
+}
+
+int vgem_fence_open(struct vgem_file *vfile)
+{
+	mutex_init(&vfile->fence_mutex);
+	idr_init(&vfile->fence_idr);
+
+	return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+	fence_signal(p);
+	fence_put(p);
+	return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+	idr_destroy(&vfile->fence_idr);
+}
diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
new file mode 100644
index 000000000000..352d2fae8de9
--- /dev/null
+++ b/include/uapi/drm/vgem_drm.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_VGEM_DRM_H_
+#define _UAPI_VGEM_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+#define DRM_VGEM_FENCE_ATTACH	0x1
+#define DRM_VGEM_FENCE_SIGNAL	0x2
+
+#define DRM_IOCTL_VGEM_FENCE_ATTACH	DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
+#define DRM_IOCTL_VGEM_FENCE_SIGNAL	DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
+
+struct drm_vgem_fence_attach {
+	__u32 handle;
+	__u32 flags;
+#define VGEM_FENCE_WRITE 0x1
+	__u32 out_fence;
+	__u32 pad;
+};
+
+struct drm_vgem_fence_signal {
+	__u32 fence;
+	__u32 flags;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_VGEM_DRM_H_ */
-- 
2.8.1
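
For illustration, a minimal userspace sketch of driving the two new ioctls
(not part of the patch; the vgem fd and GEM handle are assumed to come from
the usual drmOpen/dumb-create path, the new uapi header is assumed to be
installed, and error handling is omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vgem_drm.h>

static int queue_and_signal(int vgem_fd, uint32_t handle)
{
	struct drm_vgem_fence_attach attach;
	struct drm_vgem_fence_signal signal;

	memset(&attach, 0, sizeof(attach));
	attach.handle = handle;			/* exported vGEM buffer */
	attach.flags = VGEM_FENCE_WRITE;	/* exclusive (write) fence */
	if (ioctl(vgem_fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -1;

	/* ... queue deferred hardware operations against the dma-buf ... */

	memset(&signal, 0, sizeof(signal));
	signal.fence = attach.out_fence;	/* token returned by ATTACH */
	return ioctl(vgem_fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
}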


* ✗ Ro.CI.BAT: failure for drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-12 12:46 [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) Chris Wilson
@ 2016-07-12 16:13 ` Patchwork
  2016-07-13 18:57 ` [PATCH v2] " Chris Wilson
  2016-07-13 20:29 ` Gustavo Padovan
  2 siblings, 0 replies; 20+ messages in thread
From: Patchwork @ 2016-07-12 16:13 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
URL   : https://patchwork.freedesktop.org/series/9757/
State : failure

== Summary ==

Series 9757v1 drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
http://patchwork.freedesktop.org/api/1.0/series/9757/revisions/1/mbox

Test gem_sync:
        Subgroup basic-store-each:
                pass       -> DMESG-FAIL (ro-bdw-i7-5600u)
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-a:
                dmesg-warn -> SKIP       (ro-bdw-i7-5557U)

fi-kbl-qkkr      total:237  pass:174  dwarn:27  dfail:1   fail:7   skip:28 
fi-skl-i5-6260u  total:237  pass:218  dwarn:0   dfail:0   fail:7   skip:12 
fi-skl-i7-6700k  total:237  pass:204  dwarn:0   dfail:0   fail:7   skip:26 
fi-snb-i7-2600   total:237  pass:190  dwarn:0   dfail:0   fail:7   skip:40 
ro-bdw-i5-5250u  total:237  pass:213  dwarn:1   dfail:0   fail:7   skip:16 
ro-bdw-i7-5557U  total:237  pass:213  dwarn:1   dfail:0   fail:7   skip:16 
ro-bdw-i7-5600u  total:237  pass:198  dwarn:0   dfail:1   fail:7   skip:31 
ro-bsw-n3050     total:217  pass:172  dwarn:0   dfail:0   fail:2   skip:42 
ro-byt-n2820     total:237  pass:191  dwarn:0   dfail:0   fail:8   skip:38 
ro-hsw-i3-4010u  total:237  pass:206  dwarn:0   dfail:0   fail:7   skip:24 
ro-hsw-i7-4770r  total:237  pass:206  dwarn:0   dfail:0   fail:7   skip:24 
ro-ilk-i7-620lm  total:237  pass:166  dwarn:0   dfail:0   fail:8   skip:63 
ro-ilk1-i5-650   total:232  pass:166  dwarn:0   dfail:0   fail:8   skip:58 
ro-ivb-i7-3770   total:237  pass:197  dwarn:0   dfail:0   fail:7   skip:33 
ro-skl3-i5-6260u total:237  pass:217  dwarn:1   dfail:0   fail:7   skip:12 
ro-snb-i7-2620M  total:237  pass:188  dwarn:0   dfail:0   fail:8   skip:41 

Results at /archive/results/CI_IGT_test/RO_Patchwork_1474/

9561f5c drm-intel-nightly: 2016y-07m-12d-15h-14m-43s UTC integration manifest
befd5bc drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)


* Re: [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-12 12:46 [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) Chris Wilson
  2016-07-12 16:13 ` ✗ Ro.CI.BAT: failure for " Patchwork
@ 2016-07-13 18:57 ` Chris Wilson
  2016-07-13 20:29 ` Gustavo Padovan
  2 siblings, 0 replies; 20+ messages in thread
From: Chris Wilson @ 2016-07-13 18:57 UTC (permalink / raw)
  To: dri-devel; +Cc: Zach Reizner, intel-gfx, Gustavo Padovan, Daniel Vetter

On Tue, Jul 12, 2016 at 01:46:21PM +0100, Chris Wilson wrote:
> vGEM buffers are useful for passing data between software clients and
> hardware renders. By allowing the user to create and attach fences to
> the exported vGEM buffers (on the dma-buf), the user can implement a
> deferred renderer and queue hardware operations like flipping and then
> signal the buffer readiness (i.e. this allows the user to schedule
> operations out-of-order, but have them complete in-order).
> 
> This also makes it much easier to write tightly controlled testcases for
> dma-buf fencing and signaling between hardware drivers.
> 
> v2: Don't pretend the fences exist in an ordered timeline, but allocate
> a separate fence-context for each fence so that the fences are
> unordered.
> 
> Testcase: igt/vgem_basic/dmabuf-fence
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Zach Reizner <zachr@google.com>
> Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Acked-by: Zach Reizner <zachr@google.com>

For the purely selfish reason that this enables more testing for i915,
poke?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-12 12:46 [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) Chris Wilson
  2016-07-12 16:13 ` ✗ Ro.CI.BAT: failure for " Patchwork
  2016-07-13 18:57 ` [PATCH v2] " Chris Wilson
@ 2016-07-13 20:29 ` Gustavo Padovan
  2016-07-13 20:46   ` Chris Wilson
  2016-07-14  7:04   ` [PATCH v3] " Chris Wilson
  2 siblings, 2 replies; 20+ messages in thread
From: Gustavo Padovan @ 2016-07-13 20:29 UTC (permalink / raw)
  To: Chris Wilson
  Cc: Gustavo Padovan, Daniel Vetter, intel-gfx, Zach Reizner, dri-devel

2016-07-12 Chris Wilson <chris@chris-wilson.co.uk>:

> vGEM buffers are useful for passing data between software clients and
> hardware renders. By allowing the user to create and attach fences to
> the exported vGEM buffers (on the dma-buf), the user can implement a
> deferred renderer and queue hardware operations like flipping and then
> signal the buffer readiness (i.e. this allows the user to schedule
> operations out-of-order, but have them complete in-order).
> 
> This also makes it much easier to write tightly controlled testcases for
> dma-buf fencing and signaling between hardware drivers.
> 
> v2: Don't pretend the fences exist in an ordered timeline, but allocate
> a separate fence-context for each fence so that the fences are
> unordered.
> 
> Testcase: igt/vgem_basic/dmabuf-fence
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Zach Reizner <zachr@google.com>
> Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Acked-by: Zach Reizner <zachr@google.com>
> ---
>  drivers/gpu/drm/vgem/Makefile     |   2 +-
>  drivers/gpu/drm/vgem/vgem_drv.c   |  34 +++++++
>  drivers/gpu/drm/vgem/vgem_drv.h   |  16 +++
>  drivers/gpu/drm/vgem/vgem_fence.c | 207 ++++++++++++++++++++++++++++++++++++++
>  include/uapi/drm/vgem_drm.h       |  62 ++++++++++++
>  5 files changed, 320 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/gpu/drm/vgem/vgem_fence.c
>  create mode 100644 include/uapi/drm/vgem_drm.h
> 
> diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
> index 3f4c7b842028..bfcdea1330e6 100644
> --- a/drivers/gpu/drm/vgem/Makefile
> +++ b/drivers/gpu/drm/vgem/Makefile
> @@ -1,4 +1,4 @@
>  ccflags-y := -Iinclude/drm
> -vgem-y := vgem_drv.o
> +vgem-y := vgem_drv.o vgem_fence.o
>  
>  obj-$(CONFIG_DRM_VGEM)	+= vgem.o
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index 29c2aab3c1a7..c15bafb06665 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
>  	.close = drm_gem_vm_close,
>  };
>  
> +static int vgem_open(struct drm_device *dev, struct drm_file *file)
> +{
> +	struct vgem_file *vfile;
> +	int ret;
> +
> +	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
> +	if (!vfile)
> +		return -ENOMEM;
> +
> +	file->driver_priv = vfile;
> +
> +	ret = vgem_fence_open(vfile);
> +	if (ret) {
> +		kfree(vfile);
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
> +{
> +	struct vgem_file *vfile = file->driver_priv;
> +
> +	vgem_fence_close(vfile);
> +	kfree(vfile);
> +}
> +
>  /* ioctls */
>  
>  static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
> @@ -164,6 +192,8 @@ unref:
>  }
>  
>  static struct drm_ioctl_desc vgem_ioctls[] = {
> +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>  };
>  
>  static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
> @@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
>  
>  static struct drm_driver vgem_driver = {
>  	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
> +	.open				= vgem_open,
> +	.preclose			= vgem_preclose,
>  	.gem_free_object_unlocked	= vgem_gem_free_object,
>  	.gem_vm_ops			= &vgem_gem_vm_ops,
>  	.ioctls				= vgem_ioctls,
> +	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
>  	.fops				= &vgem_driver_fops,
>  
>  	.dumb_create			= vgem_gem_dumb_create,
> @@ -328,5 +361,6 @@ module_init(vgem_init);
>  module_exit(vgem_exit);
>  
>  MODULE_AUTHOR("Red Hat, Inc.");
> +MODULE_AUTHOR("Intel Corporation");
>  MODULE_DESCRIPTION(DRIVER_DESC);
>  MODULE_LICENSE("GPL and additional rights");
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
> index 988cbaae7588..1f8798ad329c 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.h
> +++ b/drivers/gpu/drm/vgem/vgem_drv.h
> @@ -32,9 +32,25 @@
>  #include <drm/drmP.h>
>  #include <drm/drm_gem.h>
>  
> +#include <uapi/drm/vgem_drm.h>
> +
> +struct vgem_file {
> +	struct idr fence_idr;
> +	struct mutex fence_mutex;
> +};
> +
>  #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
>  struct drm_vgem_gem_object {
>  	struct drm_gem_object base;
>  };
>  
> +int vgem_fence_open(struct vgem_file *file);
> +int vgem_fence_attach_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file);
> +int vgem_fence_signal_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file);
> +void vgem_fence_close(struct vgem_file *file);
> +
>  #endif
> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> new file mode 100644
> index 000000000000..63095331c446
> --- /dev/null
> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> @@ -0,0 +1,207 @@
> +/*
> + * Copyright 2016 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software")
> + * to deal in the software without restriction, including without limitation
> + * on the rights to use, copy, modify, merge, publish, distribute, sub
> + * license, and/or sell copies of the Software, and to permit persons to whom
> + * them Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
> + * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + */
> +
> +#include <linux/dma-buf.h>
> +#include <linux/reservation.h>
> +
> +#include "vgem_drv.h"
> +
> +struct vgem_fence {
> +	struct fence base;
> +	struct spinlock lock;
> +};
> +
> +static const char *vgem_fence_get_driver_name(struct fence *fence)
> +{
> +	return "vgem";
> +}
> +
> +static const char *vgem_fence_get_timeline_name(struct fence *fence)
> +{
> +	return "unbound";
> +}
> +
> +static bool vgem_fence_signaled(struct fence *fence)
> +{
> +	return false;

With one fence per context you can actually return whether the fence was
signalled or not. May be useful for debug.
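
A sketch of that idea (purely illustrative, and as the reply below notes,
the core already records this in FENCE_FLAG_SIGNALED_BIT):

static bool vgem_fence_signaled(struct fence *fence)
{
	/* report whether fence_signal() has already run on this fence */
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags);
}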

> +}
> +
> +static bool vgem_fence_enable_signaling(struct fence *fence)
> +{
> +	return true;
> +}
> +
> +static void vgem_fence_value_str(struct fence *fence, char *str, int size)
> +{
> +	snprintf(str, size, "%u", 0);
> +}
> +
> +static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
> +					  int size)
> +{
> +	snprintf(str, size, "%u", 0);
> +}
> +
> +const struct fence_ops vgem_fence_ops = {
> +	.get_driver_name = vgem_fence_get_driver_name,
> +	.get_timeline_name = vgem_fence_get_timeline_name,
> +	.enable_signaling = vgem_fence_enable_signaling,
> +	.signaled = vgem_fence_signaled,
> +	.wait = fence_default_wait,
> +	.fence_value_str = vgem_fence_value_str,
> +	.timeline_value_str = vgem_fence_timeline_value_str,
> +};
> +
> +static struct fence *vgem_fence_create(struct vgem_file *vfile)
> +{
> +	struct vgem_fence *fence;
> +
> +	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
> +	if (!fence)
> +		return NULL;
> +
> +	spin_lock_init(&fence->lock);
> +	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
> +		   fence_context_alloc(1), 0);

Usually 0 would mean that the fence is already signalled, so it would be
better to set it to 1.

	Gustavo

* Re: [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-13 20:29 ` Gustavo Padovan
@ 2016-07-13 20:46   ` Chris Wilson
  2016-07-14  7:04   ` [PATCH v3] " Chris Wilson
  1 sibling, 0 replies; 20+ messages in thread
From: Chris Wilson @ 2016-07-13 20:46 UTC (permalink / raw)
  To: Gustavo Padovan, dri-devel, Daniel Vetter, intel-gfx,
	Zach Reizner, Gustavo Padovan

On Wed, Jul 13, 2016 at 05:29:21PM -0300, Gustavo Padovan wrote:
> 2016-07-12 Chris Wilson <chris@chris-wilson.co.uk>:
> 
> > vGEM buffers are useful for passing data between software clients and
> > hardware renders. By allowing the user to create and attach fences to
> > the exported vGEM buffers (on the dma-buf), the user can implement a
> > deferred renderer and queue hardware operations like flipping and then
> > signal the buffer readiness (i.e. this allows the user to schedule
> > operations out-of-order, but have them complete in-order).
> > 
> > This also makes it much easier to write tightly controlled testcases for
> > dma-buf fencing and signaling between hardware drivers.
> > 
> > v2: Don't pretend the fences exist in an ordered timeline, but allocate
> > a separate fence-context for each fence so that the fences are
> > unordered.
> > 
> > Testcase: igt/vgem_basic/dmabuf-fence
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Sean Paul <seanpaul@chromium.org>
> > Cc: Zach Reizner <zachr@google.com>
> > Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > Acked-by: Zach Reizner <zachr@google.com>
> > ---
> >  drivers/gpu/drm/vgem/Makefile     |   2 +-
> >  drivers/gpu/drm/vgem/vgem_drv.c   |  34 +++++++
> >  drivers/gpu/drm/vgem/vgem_drv.h   |  16 +++
> >  drivers/gpu/drm/vgem/vgem_fence.c | 207 ++++++++++++++++++++++++++++++++++++++
> >  include/uapi/drm/vgem_drm.h       |  62 ++++++++++++
> >  5 files changed, 320 insertions(+), 1 deletion(-)
> >  create mode 100644 drivers/gpu/drm/vgem/vgem_fence.c
> >  create mode 100644 include/uapi/drm/vgem_drm.h
> > 
> > diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
> > index 3f4c7b842028..bfcdea1330e6 100644
> > --- a/drivers/gpu/drm/vgem/Makefile
> > +++ b/drivers/gpu/drm/vgem/Makefile
> > @@ -1,4 +1,4 @@
> >  ccflags-y := -Iinclude/drm
> > -vgem-y := vgem_drv.o
> > +vgem-y := vgem_drv.o vgem_fence.o
> >  
> >  obj-$(CONFIG_DRM_VGEM)	+= vgem.o
> > diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> > index 29c2aab3c1a7..c15bafb06665 100644
> > --- a/drivers/gpu/drm/vgem/vgem_drv.c
> > +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> > @@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
> >  	.close = drm_gem_vm_close,
> >  };
> >  
> > +static int vgem_open(struct drm_device *dev, struct drm_file *file)
> > +{
> > +	struct vgem_file *vfile;
> > +	int ret;
> > +
> > +	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
> > +	if (!vfile)
> > +		return -ENOMEM;
> > +
> > +	file->driver_priv = vfile;
> > +
> > +	ret = vgem_fence_open(vfile);
> > +	if (ret) {
> > +		kfree(vfile);
> > +		return ret;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
> > +{
> > +	struct vgem_file *vfile = file->driver_priv;
> > +
> > +	vgem_fence_close(vfile);
> > +	kfree(vfile);
> > +}
> > +
> >  /* ioctls */
> >  
> >  static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
> > @@ -164,6 +192,8 @@ unref:
> >  }
> >  
> >  static struct drm_ioctl_desc vgem_ioctls[] = {
> > +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> > +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> >  };
> >  
> >  static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
> > @@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
> >  
> >  static struct drm_driver vgem_driver = {
> >  	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
> > +	.open				= vgem_open,
> > +	.preclose			= vgem_preclose,
> >  	.gem_free_object_unlocked	= vgem_gem_free_object,
> >  	.gem_vm_ops			= &vgem_gem_vm_ops,
> >  	.ioctls				= vgem_ioctls,
> > +	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
> >  	.fops				= &vgem_driver_fops,
> >  
> >  	.dumb_create			= vgem_gem_dumb_create,
> > @@ -328,5 +361,6 @@ module_init(vgem_init);
> >  module_exit(vgem_exit);
> >  
> >  MODULE_AUTHOR("Red Hat, Inc.");
> > +MODULE_AUTHOR("Intel Corporation");
> >  MODULE_DESCRIPTION(DRIVER_DESC);
> >  MODULE_LICENSE("GPL and additional rights");
> > diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
> > index 988cbaae7588..1f8798ad329c 100644
> > --- a/drivers/gpu/drm/vgem/vgem_drv.h
> > +++ b/drivers/gpu/drm/vgem/vgem_drv.h
> > @@ -32,9 +32,25 @@
> >  #include <drm/drmP.h>
> >  #include <drm/drm_gem.h>
> >  
> > +#include <uapi/drm/vgem_drm.h>
> > +
> > +struct vgem_file {
> > +	struct idr fence_idr;
> > +	struct mutex fence_mutex;
> > +};
> > +
> >  #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
> >  struct drm_vgem_gem_object {
> >  	struct drm_gem_object base;
> >  };
> >  
> > +int vgem_fence_open(struct vgem_file *file);
> > +int vgem_fence_attach_ioctl(struct drm_device *dev,
> > +			    void *data,
> > +			    struct drm_file *file);
> > +int vgem_fence_signal_ioctl(struct drm_device *dev,
> > +			    void *data,
> > +			    struct drm_file *file);
> > +void vgem_fence_close(struct vgem_file *file);
> > +
> >  #endif
> > diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> > new file mode 100644
> > index 000000000000..63095331c446
> > --- /dev/null
> > +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> > @@ -0,0 +1,207 @@
> > +/*
> > + * Copyright 2016 Intel Corporation
> > + *
> > + * Permission is hereby granted, free of charge, to any person obtaining a
> > + * copy of this software and associated documentation files (the "Software")
> > + * to deal in the software without restriction, including without limitation
> > + * on the rights to use, copy, modify, merge, publish, distribute, sub
> > + * license, and/or sell copies of the Software, and to permit persons to whom
> > + * them Software is furnished to do so, subject to the following conditions:
> > + *
> > + * The above copyright notice and this permission notice (including the next
> > + * paragraph) shall be included in all copies or substantial portions of the
> > + * Software.
> > + *
> > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
> > + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
> > + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
> > + * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
> > + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> > + */
> > +
> > +#include <linux/dma-buf.h>
> > +#include <linux/reservation.h>
> > +
> > +#include "vgem_drv.h"
> > +
> > +struct vgem_fence {
> > +	struct fence base;
> > +	struct spinlock lock;
> > +};
> > +
> > +static const char *vgem_fence_get_driver_name(struct fence *fence)
> > +{
> > +	return "vgem";
> > +}
> > +
> > +static const char *vgem_fence_get_timeline_name(struct fence *fence)
> > +{
> > +	return "unbound";
> > +}
> > +
> > +static bool vgem_fence_signaled(struct fence *fence)
> > +{
> > +	return false;
> 
> With one fence per context you can actually return if the fence was
> signalled or not. May be useful for debug.

That's already stored inside the fence->flags FENCE_FLAG_SIGNALED_BIT.
If it is set (which it will be after we are signaled),
fence->ops->signaled() is not called. So we know that if this function
is ever called, the fence is unsignaled.
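
That is, the core helper short-circuits before ever reaching the callback;
roughly (paraphrased sketch of include/linux/fence.h from this era):

static inline bool fence_is_signaled(struct fence *fence)
{
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;	/* .signaled() is never consulted again */

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		fence_signal(fence);
		return true;
	}

	return false;
}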

> > +static struct fence *vgem_fence_create(struct vgem_file *vfile)
> > +{
> > +	struct vgem_fence *fence;
> > +
> > +	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
> > +	if (!fence)
> > +		return NULL;
> > +
> > +	spin_lock_init(&fence->lock);
> > +	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
> > +		   fence_context_alloc(1), 0);
> 
> Usually 0 would mean that the fence is already signalled, so I'd better
> set it to 1.

Hmm, yes, we can improve the debug output using that:

diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 63095331c446..b7da11419ad6 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -52,13 +52,13 @@ static bool vgem_fence_enable_signaling(struct fence *fence)
 
 static void vgem_fence_value_str(struct fence *fence, char *str, int size)
 {
-       snprintf(str, size, "%u", 0);
+       snprintf(str, size, "%u", fence->seqno);
 }
 
 static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
                                          int size)
 {
-       snprintf(str, size, "%u", 0);
+       snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
 }
 
 const struct fence_ops vgem_fence_ops = {
@@ -81,7 +81,7 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile)
 
        spin_lock_init(&fence->lock);
        fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
-                  fence_context_alloc(1), 0);
+                  fence_context_alloc(1), 1);
 
        return &fence->base;

-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-13 20:29 ` Gustavo Padovan
  2016-07-13 20:46   ` Chris Wilson
@ 2016-07-14  7:04   ` Chris Wilson
  2016-07-14  8:12     ` Daniel Vetter
                       ` (2 more replies)
  1 sibling, 3 replies; 20+ messages in thread
From: Chris Wilson @ 2016-07-14  7:04 UTC (permalink / raw)
  To: dri-devel; +Cc: Daniel Vetter, intel-gfx, Zach Reizner, Gustavo Padovan

vGEM buffers are useful for passing data between software clients and
hardware renderers. By allowing the user to create and attach fences to
the exported vGEM buffers (on the dma-buf), the user can implement a
deferred renderer and queue hardware operations like flipping and then
signal the buffer readiness (i.e. this allows the user to schedule
operations out-of-order, but have them complete in-order).

This also makes it much easier to write tightly controlled testcases for
dma-buf fencing and signaling between hardware drivers.

v2: Don't pretend the fences exist in an ordered timeline, but allocate
a separate fence-context for each fence so that the fences are
unordered.
v3: Make the debug output more interesting, and show the signaled status.

Testcase: igt/vgem_basic/dmabuf-fence
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Zach Reizner <zachr@google.com>
Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Zach Reizner <zachr@google.com>
---
 drivers/gpu/drm/vgem/Makefile     |   2 +-
 drivers/gpu/drm/vgem/vgem_drv.c   |  34 +++++++
 drivers/gpu/drm/vgem/vgem_drv.h   |  16 +++
 drivers/gpu/drm/vgem/vgem_fence.c | 207 ++++++++++++++++++++++++++++++++++++++
 include/uapi/drm/vgem_drm.h       |  62 ++++++++++++
 5 files changed, 320 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/vgem/vgem_fence.c
 create mode 100644 include/uapi/drm/vgem_drm.h

diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index 3f4c7b842028..bfcdea1330e6 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
 
 obj-$(CONFIG_DRM_VGEM)	+= vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 29c2aab3c1a7..c15bafb06665 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
 	.close = drm_gem_vm_close,
 };
 
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct vgem_file *vfile;
+	int ret;
+
+	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+	if (!vfile)
+		return -ENOMEM;
+
+	file->driver_priv = vfile;
+
+	ret = vgem_fence_open(vfile);
+	if (ret) {
+		kfree(vfile);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct vgem_file *vfile = file->driver_priv;
+
+	vgem_fence_close(vfile);
+	kfree(vfile);
+}
+
 /* ioctls */
 
 static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -164,6 +192,8 @@ unref:
 }
 
 static struct drm_ioctl_desc vgem_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
 
 static struct drm_driver vgem_driver = {
 	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
+	.open				= vgem_open,
+	.preclose			= vgem_preclose,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
+	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
 	.fops				= &vgem_driver_fops,
 
 	.dumb_create			= vgem_gem_dumb_create,
@@ -328,5 +361,6 @@ module_init(vgem_init);
 module_exit(vgem_exit);
 
 MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 988cbaae7588..1f8798ad329c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -32,9 +32,25 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+	struct idr fence_idr;
+	struct mutex fence_mutex;
+};
+
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
 };
 
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
+
 #endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644
index 000000000000..b7da11419ad6
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+struct vgem_fence {
+	struct fence base;
+	struct spinlock lock;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+	return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+	return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+	return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+	snprintf(str, size, "%u", fence->seqno);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+					  int size)
+{
+	snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+}
+
+const struct fence_ops vgem_fence_ops = {
+	.get_driver_name = vgem_fence_get_driver_name,
+	.get_timeline_name = vgem_fence_get_timeline_name,
+	.enable_signaling = vgem_fence_enable_signaling,
+	.signaled = vgem_fence_signaled,
+	.wait = fence_default_wait,
+	.fence_value_str = vgem_fence_value_str,
+	.timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile)
+{
+	struct vgem_fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	spin_lock_init(&fence->lock);
+	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+		   fence_context_alloc(1), 1);
+
+	return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+			 struct drm_gem_object *obj)
+{
+	struct dma_buf *dmabuf;
+
+	if (obj->dma_buf)
+		return 0;
+
+	dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	obj->dma_buf = dmabuf;
+	drm_gem_object_reference(obj);
+	return 0;
+}
+
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file)
+{
+	struct drm_vgem_fence_attach *arg = data;
+	struct vgem_file *vfile = file->driver_priv;
+	struct reservation_object *resv;
+	struct drm_gem_object *obj;
+	struct fence *fence;
+	int ret;
+
+	if (arg->flags & ~VGEM_FENCE_WRITE)
+		return -EINVAL;
+
+	if (arg->pad)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(file, arg->handle);
+	if (!obj)
+		return -ENOENT;
+
+	ret = attach_dmabuf(dev, obj);
+	if (ret)
+		goto out;
+
+	fence = vgem_fence_create(vfile);
+	if (!fence) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = 0;
+	resv = obj->dma_buf->resv;
+	mutex_lock(&resv->lock.base);
+	if (arg->flags & VGEM_FENCE_WRITE)
+		reservation_object_add_excl_fence(resv, fence);
+	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+		reservation_object_add_shared_fence(resv, fence);
+	mutex_unlock(&resv->lock.base);
+
+	if (ret == 0) {
+		mutex_lock(&vfile->fence_mutex);
+		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+		mutex_unlock(&vfile->fence_mutex);
+		if (ret > 0) {
+			arg->out_fence = ret;
+			ret = 0;
+		}
+	}
+	if (ret) {
+		fence_signal(fence);
+		fence_put(fence);
+	}
+out:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file)
+{
+	struct vgem_file *vfile = file->driver_priv;
+	struct drm_vgem_fence_signal *arg = data;
+	struct fence *fence;
+
+	if (arg->flags)
+		return -EINVAL;
+
+	mutex_lock(&vfile->fence_mutex);
+	fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+	mutex_unlock(&vfile->fence_mutex);
+	if (!fence)
+		return -ENOENT;
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
+	fence_signal(fence);
+	fence_put(fence);
+	return 0;
+}
+
+int vgem_fence_open(struct vgem_file *vfile)
+{
+	mutex_init(&vfile->fence_mutex);
+	idr_init(&vfile->fence_idr);
+
+	return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+	fence_signal(p);
+	fence_put(p);
+	return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+	idr_destroy(&vfile->fence_idr);
+}
diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
new file mode 100644
index 000000000000..352d2fae8de9
--- /dev/null
+++ b/include/uapi/drm/vgem_drm.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_VGEM_DRM_H_
+#define _UAPI_VGEM_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+#define DRM_VGEM_FENCE_ATTACH	0x1
+#define DRM_VGEM_FENCE_SIGNAL	0x2
+
+#define DRM_IOCTL_VGEM_FENCE_ATTACH	DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
+#define DRM_IOCTL_VGEM_FENCE_SIGNAL	DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
+
+struct drm_vgem_fence_attach {
+	__u32 handle;
+	__u32 flags;
+#define VGEM_FENCE_WRITE 0x1
+	__u32 out_fence;
+	__u32 pad;
+};
+
+struct drm_vgem_fence_signal {
+	__u32 fence;
+	__u32 flags;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_VGEM_DRM_H_ */
-- 
2.8.1


* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14  7:04   ` [PATCH v3] " Chris Wilson
@ 2016-07-14  8:12     ` Daniel Vetter
  2016-07-14  9:59       ` Chris Wilson
  2016-07-15  8:31     ` [PATCH v4] " Chris Wilson
  2016-07-15  9:15     ` ✗ Ro.CI.BAT: failure for drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) (rev2) Patchwork
  2 siblings, 1 reply; 20+ messages in thread
From: Daniel Vetter @ 2016-07-14  8:12 UTC (permalink / raw)
  To: Chris Wilson
  Cc: Daniel Vetter, intel-gfx, dri-devel, Zach Reizner, Gustavo Padovan

On Thu, Jul 14, 2016 at 08:04:19AM +0100, Chris Wilson wrote:
> vGEM buffers are useful for passing data between software clients and
> hardware renders. By allowing the user to create and attach fences to
> the exported vGEM buffers (on the dma-buf), the user can implement a
> deferred renderer and queue hardware operations like flipping and then
> signal the buffer readiness (i.e. this allows the user to schedule
> operations out-of-order, but have them complete in-order).
> 
> This also makes it much easier to write tightly controlled testcases for
> dma-buf fencing and signaling between hardware drivers.
> 
> v2: Don't pretend the fences exist in an ordered timeline, but allocate
> a separate fence-context for each fence so that the fences are
> unordered.
> v3: Make the debug output more interesting, and so the signaled status.
> 
> Testcase: igt/vgem_basic/dmabuf-fence
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Zach Reizner <zachr@google.com>
> Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Acked-by: Zach Reizner <zachr@google.com>

One thing I completely forgot: This allows userspace to hang kernel
drivers. i915 (and other gpu drivers) can recover using hangcheck, but
dumber drivers (v4l, if that ever happens) probably never expect such a
case. We've had a similar discussion with the userspace fences exposed in
sw_fence, and decided to move all those ioctls into debugfs. I think we
should do the same for this vgem-based debugging of implicit sync. Sorry
for realizing this so late.
-Daniel

> ---
>  drivers/gpu/drm/vgem/Makefile     |   2 +-
>  drivers/gpu/drm/vgem/vgem_drv.c   |  34 +++++++
>  drivers/gpu/drm/vgem/vgem_drv.h   |  16 +++
>  drivers/gpu/drm/vgem/vgem_fence.c | 207 ++++++++++++++++++++++++++++++++++++++
>  include/uapi/drm/vgem_drm.h       |  62 ++++++++++++
>  5 files changed, 320 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/gpu/drm/vgem/vgem_fence.c
>  create mode 100644 include/uapi/drm/vgem_drm.h
> 
> diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
> index 3f4c7b842028..bfcdea1330e6 100644
> --- a/drivers/gpu/drm/vgem/Makefile
> +++ b/drivers/gpu/drm/vgem/Makefile
> @@ -1,4 +1,4 @@
>  ccflags-y := -Iinclude/drm
> -vgem-y := vgem_drv.o
> +vgem-y := vgem_drv.o vgem_fence.o
>  
>  obj-$(CONFIG_DRM_VGEM)	+= vgem.o
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index 29c2aab3c1a7..c15bafb06665 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
>  	.close = drm_gem_vm_close,
>  };
>  
> +static int vgem_open(struct drm_device *dev, struct drm_file *file)
> +{
> +	struct vgem_file *vfile;
> +	int ret;
> +
> +	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
> +	if (!vfile)
> +		return -ENOMEM;
> +
> +	file->driver_priv = vfile;
> +
> +	ret = vgem_fence_open(vfile);
> +	if (ret) {
> +		kfree(vfile);
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
> +{
> +	struct vgem_file *vfile = file->driver_priv;
> +
> +	vgem_fence_close(vfile);
> +	kfree(vfile);
> +}
> +
>  /* ioctls */
>  
>  static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
> @@ -164,6 +192,8 @@ unref:
>  }
>  
>  static struct drm_ioctl_desc vgem_ioctls[] = {
> +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>  };
>  
>  static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
> @@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
>  
>  static struct drm_driver vgem_driver = {
>  	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
> +	.open				= vgem_open,
> +	.preclose			= vgem_preclose,
>  	.gem_free_object_unlocked	= vgem_gem_free_object,
>  	.gem_vm_ops			= &vgem_gem_vm_ops,
>  	.ioctls				= vgem_ioctls,
> +	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
>  	.fops				= &vgem_driver_fops,
>  
>  	.dumb_create			= vgem_gem_dumb_create,
> @@ -328,5 +361,6 @@ module_init(vgem_init);
>  module_exit(vgem_exit);
>  
>  MODULE_AUTHOR("Red Hat, Inc.");
> +MODULE_AUTHOR("Intel Corporation");
>  MODULE_DESCRIPTION(DRIVER_DESC);
>  MODULE_LICENSE("GPL and additional rights");
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
> index 988cbaae7588..1f8798ad329c 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.h
> +++ b/drivers/gpu/drm/vgem/vgem_drv.h
> @@ -32,9 +32,25 @@
>  #include <drm/drmP.h>
>  #include <drm/drm_gem.h>
>  
> +#include <uapi/drm/vgem_drm.h>
> +
> +struct vgem_file {
> +	struct idr fence_idr;
> +	struct mutex fence_mutex;
> +};
> +
>  #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
>  struct drm_vgem_gem_object {
>  	struct drm_gem_object base;
>  };
>  
> +int vgem_fence_open(struct vgem_file *file);
> +int vgem_fence_attach_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file);
> +int vgem_fence_signal_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file);
> +void vgem_fence_close(struct vgem_file *file);
> +
>  #endif
> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> new file mode 100644
> index 000000000000..b7da11419ad6
> --- /dev/null
> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> @@ -0,0 +1,207 @@
> +/*
> + * Copyright 2016 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software")
> + * to deal in the software without restriction, including without limitation
> + * on the rights to use, copy, modify, merge, publish, distribute, sub
> + * license, and/or sell copies of the Software, and to permit persons to whom
> + * them Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
> + * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + */
> +
> +#include <linux/dma-buf.h>
> +#include <linux/reservation.h>
> +
> +#include "vgem_drv.h"
> +
> +struct vgem_fence {
> +	struct fence base;
> +	struct spinlock lock;
> +};
> +
> +static const char *vgem_fence_get_driver_name(struct fence *fence)
> +{
> +	return "vgem";
> +}
> +
> +static const char *vgem_fence_get_timeline_name(struct fence *fence)
> +{
> +	return "unbound";
> +}
> +
> +static bool vgem_fence_signaled(struct fence *fence)
> +{
> +	return false;
> +}
> +
> +static bool vgem_fence_enable_signaling(struct fence *fence)
> +{
> +	return true;
> +}
> +
> +static void vgem_fence_value_str(struct fence *fence, char *str, int size)
> +{
> +	snprintf(str, size, "%u", fence->seqno);
> +}
> +
> +static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
> +					  int size)
> +{
> +	snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
> +}
> +
> +const struct fence_ops vgem_fence_ops = {
> +	.get_driver_name = vgem_fence_get_driver_name,
> +	.get_timeline_name = vgem_fence_get_timeline_name,
> +	.enable_signaling = vgem_fence_enable_signaling,
> +	.signaled = vgem_fence_signaled,
> +	.wait = fence_default_wait,
> +	.fence_value_str = vgem_fence_value_str,
> +	.timeline_value_str = vgem_fence_timeline_value_str,
> +};
> +
> +static struct fence *vgem_fence_create(struct vgem_file *vfile)
> +{
> +	struct vgem_fence *fence;
> +
> +	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
> +	if (!fence)
> +		return NULL;
> +
> +	spin_lock_init(&fence->lock);
> +	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
> +		   fence_context_alloc(1), 1);
> +
> +	return &fence->base;
> +}
> +
> +static int attach_dmabuf(struct drm_device *dev,
> +			 struct drm_gem_object *obj)
> +{
> +	struct dma_buf *dmabuf;
> +
> +	if (obj->dma_buf)
> +		return 0;
> +
> +	dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
> +	if (IS_ERR(dmabuf))
> +		return PTR_ERR(dmabuf);
> +
> +	obj->dma_buf = dmabuf;
> +	drm_gem_object_reference(obj);
> +	return 0;
> +}
> +
> +int vgem_fence_attach_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file)
> +{
> +	struct drm_vgem_fence_attach *arg = data;
> +	struct vgem_file *vfile = file->driver_priv;
> +	struct reservation_object *resv;
> +	struct drm_gem_object *obj;
> +	struct fence *fence;
> +	int ret;
> +
> +	if (arg->flags & ~VGEM_FENCE_WRITE)
> +		return -EINVAL;
> +
> +	if (arg->pad)
> +		return -EINVAL;
> +
> +	obj = drm_gem_object_lookup(file, arg->handle);
> +	if (!obj)
> +		return -ENOENT;
> +
> +	ret = attach_dmabuf(dev, obj);
> +	if (ret)
> +		goto out;
> +
> +	fence = vgem_fence_create(vfile);
> +	if (!fence) {
> +		ret = -ENOMEM;
> +		goto out;
> +	}
> +
> +	ret = 0;
> +	resv = obj->dma_buf->resv;
> +	mutex_lock(&resv->lock.base);
> +	if (arg->flags & VGEM_FENCE_WRITE)
> +		reservation_object_add_excl_fence(resv, fence);
> +	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
> +		reservation_object_add_shared_fence(resv, fence);
> +	mutex_unlock(&resv->lock.base);
> +
> +	if (ret == 0) {
> +		mutex_lock(&vfile->fence_mutex);
> +		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
> +		mutex_unlock(&vfile->fence_mutex);
> +		if (ret > 0) {
> +			arg->out_fence = ret;
> +			ret = 0;
> +		}
> +	}
> +	if (ret) {
> +		fence_signal(fence);
> +		fence_put(fence);
> +	}
> +out:
> +	drm_gem_object_unreference_unlocked(obj);
> +	return ret;
> +}
> +
> +int vgem_fence_signal_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file)
> +{
> +	struct vgem_file *vfile = file->driver_priv;
> +	struct drm_vgem_fence_signal *arg = data;
> +	struct fence *fence;
> +
> +	if (arg->flags)
> +		return -EINVAL;
> +
> +	mutex_lock(&vfile->fence_mutex);
> +	fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
> +	mutex_unlock(&vfile->fence_mutex);
> +	if (!fence)
> +		return -ENOENT;
> +	if (IS_ERR(fence))
> +		return PTR_ERR(fence);
> +
> +	fence_signal(fence);
> +	fence_put(fence);
> +	return 0;
> +}
> +
> +int vgem_fence_open(struct vgem_file *vfile)
> +{
> +	mutex_init(&vfile->fence_mutex);
> +	idr_init(&vfile->fence_idr);
> +
> +	return 0;
> +}
> +
> +static int __vgem_fence_idr_fini(int id, void *p, void *data)
> +{
> +	fence_signal(p);
> +	fence_put(p);
> +	return 0;
> +}
> +
> +void vgem_fence_close(struct vgem_file *vfile)
> +{
> +	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
> +	idr_destroy(&vfile->fence_idr);
> +}
> diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
> new file mode 100644
> index 000000000000..352d2fae8de9
> --- /dev/null
> +++ b/include/uapi/drm/vgem_drm.h
> @@ -0,0 +1,62 @@
> +/*
> + * Copyright 2016 Intel Corporation
> + * All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the
> + * "Software"), to deal in the Software without restriction, including
> + * without limitation the rights to use, copy, modify, merge, publish,
> + * distribute, sub license, and/or sell copies of the Software, and to
> + * permit persons to whom the Software is furnished to do so, subject to
> + * the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the
> + * next paragraph) shall be included in all copies or substantial portions
> + * of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
> + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
> + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
> + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
> + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
> + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#ifndef _UAPI_VGEM_DRM_H_
> +#define _UAPI_VGEM_DRM_H_
> +
> +#include "drm.h"
> +
> +#if defined(__cplusplus)
> +extern "C" {
> +#endif
> +
> +/* Please note that modifications to all structs defined here are
> + * subject to backwards-compatibility constraints.
> + */
> +#define DRM_VGEM_FENCE_ATTACH	0x1
> +#define DRM_VGEM_FENCE_SIGNAL	0x2
> +
> +#define DRM_IOCTL_VGEM_FENCE_ATTACH	DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
> +#define DRM_IOCTL_VGEM_FENCE_SIGNAL	DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
> +
> +struct drm_vgem_fence_attach {
> +	__u32 handle;
> +	__u32 flags;
> +#define VGEM_FENCE_WRITE 0x1
> +	__u32 out_fence;
> +	__u32 pad;
> +};
> +
> +struct drm_vgem_fence_signal {
> +	__u32 fence;
> +	__u32 flags;
> +};
> +
> +#if defined(__cplusplus)
> +}
> +#endif
> +
> +#endif /* _UAPI_VGEM_DRM_H_ */
> -- 
> 2.8.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14  8:12     ` Daniel Vetter
@ 2016-07-14  9:59       ` Chris Wilson
  2016-07-14 10:11         ` Chris Wilson
  0 siblings, 1 reply; 20+ messages in thread
From: Chris Wilson @ 2016-07-14  9:59 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: Daniel Vetter, intel-gfx, dri-devel, Zach Reizner, Gustavo Padovan

On Thu, Jul 14, 2016 at 10:12:17AM +0200, Daniel Vetter wrote:
> On Thu, Jul 14, 2016 at 08:04:19AM +0100, Chris Wilson wrote:
> > vGEM buffers are useful for passing data between software clients and
> > hardware renders. By allowing the user to create and attach fences to
> > the exported vGEM buffers (on the dma-buf), the user can implement a
> > deferred renderer and queue hardware operations like flipping and then
> > signal the buffer readiness (i.e. this allows the user to schedule
> > operations out-of-order, but have them complete in-order).
> > 
> > This also makes it much easier to write tightly controlled testcases for
> > dma-buf fencing and signaling between hardware drivers.
> > 
> > v2: Don't pretend the fences exist in an ordered timeline, but allocate
> > a separate fence-context for each fence so that the fences are
> > unordered.
> > v3: Make the debug output more interesting, and so the signaled status.
> > 
> > Testcase: igt/vgem_basic/dmabuf-fence
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Sean Paul <seanpaul@chromium.org>
> > Cc: Zach Reizner <zachr@google.com>
> > Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > Acked-by: Zach Reizner <zachr@google.com>
> 
> One thing I completely forgotten: This allows userspace to hang kernel
> drivers. i915 (and other gpu drivers) can recover using hangcheck, but
> dumber drivers (v4l, if that ever happens) probably never except such a
> case. We've had a similar discusion with the userspace fences exposed in
> sw_fence, and decided to move all those ioctl into debugfs. I think we
> should do the same for this vgem-based debugging of implicit sync. Sorry
> for realizing this this late.

One of the very tests I make is to ensure that we recover from such a
hang. I don't see the difference between this and any of the other ways
userspace can shoot itself (and others) in the foot.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14  9:59       ` Chris Wilson
@ 2016-07-14 10:11         ` Chris Wilson
  2016-07-14 12:15           ` [Intel-gfx] " Chris Wilson
  2016-07-14 12:40           ` Daniel Vetter
  0 siblings, 2 replies; 20+ messages in thread
From: Chris Wilson @ 2016-07-14 10:11 UTC (permalink / raw)
  To: Daniel Vetter, dri-devel, intel-gfx, Sean Paul, Zach Reizner,
	Gustavo Padovan, Daniel Vetter

On Thu, Jul 14, 2016 at 10:59:04AM +0100, Chris Wilson wrote:
> On Thu, Jul 14, 2016 at 10:12:17AM +0200, Daniel Vetter wrote:
> > On Thu, Jul 14, 2016 at 08:04:19AM +0100, Chris Wilson wrote:
> > > vGEM buffers are useful for passing data between software clients and
> > > hardware renders. By allowing the user to create and attach fences to
> > > the exported vGEM buffers (on the dma-buf), the user can implement a
> > > deferred renderer and queue hardware operations like flipping and then
> > > signal the buffer readiness (i.e. this allows the user to schedule
> > > operations out-of-order, but have them complete in-order).
> > > 
> > > This also makes it much easier to write tightly controlled testcases for
> > > dma-buf fencing and signaling between hardware drivers.
> > > 
> > > v2: Don't pretend the fences exist in an ordered timeline, but allocate
> > > a separate fence-context for each fence so that the fences are
> > > unordered.
> > > v3: Make the debug output more interesting, and so the signaled status.
> > > 
> > > Testcase: igt/vgem_basic/dmabuf-fence
> > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > Cc: Sean Paul <seanpaul@chromium.org>
> > > Cc: Zach Reizner <zachr@google.com>
> > > Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> > > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > > Acked-by: Zach Reizner <zachr@google.com>
> > 
> > One thing I completely forgotten: This allows userspace to hang kernel
> > drivers. i915 (and other gpu drivers) can recover using hangcheck, but
> > dumber drivers (v4l, if that ever happens) probably never except such a
> > case. We've had a similar discusion with the userspace fences exposed in
> > sw_fence, and decided to move all those ioctl into debugfs. I think we
> > should do the same for this vgem-based debugging of implicit sync. Sorry
> > for realizing this this late.
> 
> One of the very tests I make is to ensure that we recover from such a
> hang. I don't see the difference between this any of the other ways
> userspace can shoot itself (and others) in the foot.

So one solution would be to make vgem fences automatically time out (with
a flag for root to override for the sake of testing hang detection).
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 10:11         ` Chris Wilson
@ 2016-07-14 12:15           ` Chris Wilson
  2016-07-14 12:40           ` Daniel Vetter
  1 sibling, 0 replies; 20+ messages in thread
From: Chris Wilson @ 2016-07-14 12:15 UTC (permalink / raw)
  To: Daniel Vetter, dri-devel, intel-gfx, Sean Paul, Zach Reizner,
	Gustavo Padovan, Daniel Vetter

On Thu, Jul 14, 2016 at 11:11:02AM +0100, Chris Wilson wrote:
> So one solution would be to make vgem fences automatically timeout (with
> a flag for root to override for the sake of testing hang detection).

diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index b7da11419ad6..17c63c9a8ea0 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,6 +28,7 @@
 struct vgem_fence {
        struct fence base;
        struct spinlock lock;
+       struct timer_list timer;
 };
 
 static const char *vgem_fence_get_driver_name(struct fence *fence)
@@ -50,6 +51,14 @@ static bool vgem_fence_enable_signaling(struct fence *fence)
        return true;
 }
 
+static void vgem_fence_release(struct fence *base)
+{
+       struct vgem_fence *fence = container_of(base, typeof(*fence), base);
+
+       del_timer_sync(&fence->timer);
+       fence_free(&fence->base);
+}
+
 static void vgem_fence_value_str(struct fence *fence, char *str, int size)
 {
        snprintf(str, size, "%u", fence->seqno);
@@ -67,11 +76,21 @@ const struct fence_ops vgem_fence_ops = {
        .enable_signaling = vgem_fence_enable_signaling,
        .signaled = vgem_fence_signaled,
        .wait = fence_default_wait,
+       .release = vgem_fence_release,
+
        .fence_value_str = vgem_fence_value_str,
        .timeline_value_str = vgem_fence_timeline_value_str,
 };
 
-static struct fence *vgem_fence_create(struct vgem_file *vfile)
+static void vgem_fence_timeout(unsigned long data)
+{
+       struct vgem_fence *fence = (struct vgem_fence *)data;
+
+       fence_signal(&fence->base);
+}
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile,
+                                      unsigned int flags)
 {
        struct vgem_fence *fence;
 
@@ -83,6 +102,12 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile)
        fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
                   fence_context_alloc(1), 1);
 
+       setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+
+       /* We force the fence to expire within 10s to prevent driver hangs */
+       if (!(flags & VGEM_FENCE_NOTIMEOUT))
+               mod_timer(&fence->timer, jiffies + 10*HZ);
+
        return &fence->base;
 }
 
@@ -114,9 +139,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
        struct fence *fence;
        int ret;
 
-       if (arg->flags & ~VGEM_FENCE_WRITE)
+       if (arg->flags & ~(VGEM_FENCE_WRITE | VGEM_FENCE_NOTIMEOUT))
                return -EINVAL;
 
+       if (arg->flags & VGEM_FENCE_NOTIMEOUT && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        if (arg->pad)
                return -EINVAL;
 
@@ -128,7 +156,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
        if (ret)
                goto out;
 
-       fence = vgem_fence_create(vfile);
+       fence = vgem_fence_create(vfile, arg->flags);
        if (!fence) {
                ret = -ENOMEM;
                goto out;
diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
index 352d2fae8de9..55fd08750773 100644
--- a/include/uapi/drm/vgem_drm.h
+++ b/include/uapi/drm/vgem_drm.h
@@ -45,7 +45,8 @@ extern "C" {
 struct drm_vgem_fence_attach {
        __u32 handle;
        __u32 flags;
-#define VGEM_FENCE_WRITE 0x1
+#define VGEM_FENCE_WRITE       0x1
+#define VGEM_FENCE_NOTIMEOUT   0x2
        __u32 out_fence;
        __u32 pad;
 };


-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 10:11         ` Chris Wilson
  2016-07-14 12:15           ` [Intel-gfx] " Chris Wilson
@ 2016-07-14 12:40           ` Daniel Vetter
  2016-07-14 13:23             ` Chris Wilson
  1 sibling, 1 reply; 20+ messages in thread
From: Daniel Vetter @ 2016-07-14 12:40 UTC (permalink / raw)
  To: Chris Wilson, Daniel Vetter, dri-devel, intel-gfx, Sean Paul,
	Zach Reizner, Gustavo Padovan, Daniel Vetter

On Thu, Jul 14, 2016 at 11:11:02AM +0100, Chris Wilson wrote:
> On Thu, Jul 14, 2016 at 10:59:04AM +0100, Chris Wilson wrote:
> > On Thu, Jul 14, 2016 at 10:12:17AM +0200, Daniel Vetter wrote:
> > > On Thu, Jul 14, 2016 at 08:04:19AM +0100, Chris Wilson wrote:
> > > > vGEM buffers are useful for passing data between software clients and
> > > > hardware renders. By allowing the user to create and attach fences to
> > > > the exported vGEM buffers (on the dma-buf), the user can implement a
> > > > deferred renderer and queue hardware operations like flipping and then
> > > > signal the buffer readiness (i.e. this allows the user to schedule
> > > > operations out-of-order, but have them complete in-order).
> > > > 
> > > > This also makes it much easier to write tightly controlled testcases for
> > > > dma-buf fencing and signaling between hardware drivers.
> > > > 
> > > > v2: Don't pretend the fences exist in an ordered timeline, but allocate
> > > > a separate fence-context for each fence so that the fences are
> > > > unordered.
> > > > v3: Make the debug output more interesting, and so the signaled status.
> > > > 
> > > > Testcase: igt/vgem_basic/dmabuf-fence
> > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > Cc: Sean Paul <seanpaul@chromium.org>
> > > > Cc: Zach Reizner <zachr@google.com>
> > > > Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> > > > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > > > Acked-by: Zach Reizner <zachr@google.com>
> > > 
> > > One thing I completely forgotten: This allows userspace to hang kernel
> > > drivers. i915 (and other gpu drivers) can recover using hangcheck, but
> > > dumber drivers (v4l, if that ever happens) probably never except such a
> > > case. We've had a similar discusion with the userspace fences exposed in
> > > sw_fence, and decided to move all those ioctl into debugfs. I think we
> > > should do the same for this vgem-based debugging of implicit sync. Sorry
> > > for realizing this this late.
> > 
> > One of the very tests I make is to ensure that we recover from such a
> > hang. I don't see the difference between this any of the other ways
> > userspace can shoot itself (and others) in the foot.
> 
> So one solution would be to make vgem fences automatically timeout (with
> a flag for root to override for the sake of testing hang detection).

The problem is other drivers. E.g. right now atomic helpers assume that
fences will signal, and can't recover if they don't. This is why drivers
where things might fail must have some recovery (hangcheck, timeout) to
make sure dma_fences always signal.

Imo not even root should be allowed to break this, since it could put
drivers into a non-recoverable state. I think this must be restricted to
something known-unsafe-don't-enable-on-production like debugfs.

Other solutions which I don't like:
- Everyone needs to be able to recover. Given how much effort it is to
  just keep i915 hangcheck in working order, I think that's totally
  illusory to assume, at least once world+dog (atomic, v4l, ...) all
  consume/produce fences, including subsystems where the usual
  assumption is that async ops complete.

- Really long timeouts are allowed for root in vgem. Could lead to even
  more fun in testing i915 hangchecks I think, so I don't like that much
  either.

I think the best option is to just do the same as we've done for sw_fence,
and move it to debugfs. We could reuse the debugfs sw_fence interface to
create them (gives us more control as a bonus), and just have an ioctl to
attach fences to vgem (which could be unprivileged).
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 12:40           ` Daniel Vetter
@ 2016-07-14 13:23             ` Chris Wilson
  2016-07-14 13:39               ` Chris Wilson
  2016-07-14 14:33               ` Daniel Vetter
  0 siblings, 2 replies; 20+ messages in thread
From: Chris Wilson @ 2016-07-14 13:23 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: Daniel Vetter, intel-gfx, dri-devel, Zach Reizner, Gustavo Padovan

On Thu, Jul 14, 2016 at 02:40:59PM +0200, Daniel Vetter wrote:
> On Thu, Jul 14, 2016 at 11:11:02AM +0100, Chris Wilson wrote:
> > On Thu, Jul 14, 2016 at 10:59:04AM +0100, Chris Wilson wrote:
> > > On Thu, Jul 14, 2016 at 10:12:17AM +0200, Daniel Vetter wrote:
> > > > On Thu, Jul 14, 2016 at 08:04:19AM +0100, Chris Wilson wrote:
> > > > > vGEM buffers are useful for passing data between software clients and
> > > > > hardware renders. By allowing the user to create and attach fences to
> > > > > the exported vGEM buffers (on the dma-buf), the user can implement a
> > > > > deferred renderer and queue hardware operations like flipping and then
> > > > > signal the buffer readiness (i.e. this allows the user to schedule
> > > > > operations out-of-order, but have them complete in-order).
> > > > > 
> > > > > This also makes it much easier to write tightly controlled testcases for
> > > > > dma-buf fencing and signaling between hardware drivers.
> > > > > 
> > > > > v2: Don't pretend the fences exist in an ordered timeline, but allocate
> > > > > a separate fence-context for each fence so that the fences are
> > > > > unordered.
> > > > > v3: Make the debug output more interesting, and so the signaled status.
> > > > > 
> > > > > Testcase: igt/vgem_basic/dmabuf-fence
> > > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > > Cc: Sean Paul <seanpaul@chromium.org>
> > > > > Cc: Zach Reizner <zachr@google.com>
> > > > > Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> > > > > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > > > > Acked-by: Zach Reizner <zachr@google.com>
> > > > 
> > > > One thing I completely forgotten: This allows userspace to hang kernel
> > > > drivers. i915 (and other gpu drivers) can recover using hangcheck, but
> > > > dumber drivers (v4l, if that ever happens) probably never except such a
> > > > case. We've had a similar discusion with the userspace fences exposed in
> > > > sw_fence, and decided to move all those ioctl into debugfs. I think we
> > > > should do the same for this vgem-based debugging of implicit sync. Sorry
> > > > for realizing this this late.
> > > 
> > > One of the very tests I make is to ensure that we recover from such a
> > > hang. I don't see the difference between this any of the other ways
> > > userspace can shoot itself (and others) in the foot.
> > 
> > So one solution would be to make vgem fences automatically timeout (with
> > a flag for root to override for the sake of testing hang detection).
> 
> The problem is other drivers. E.g. right now atomic helpers assume that
> fences will signal, and can't recover if they don't. This is why drivers
> where things might fail must have some recovery (hangcheck, timeout) to
> make sure dma_fences always signal.

Urm, all the atomic helpers should cope with failures. The waits on
dma-buf should happen before any hardware is modified, and so cancellation
is trivial. Anyone using a foreign fence (or even a native one) must cope
with the fact that it may not meet some deadline.

They have to. Anyone sharing an i915 dma-buf is susceptible to all kinds
of (unprivileged) fun.
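
(Purely as an illustration of what "coping" can look like, and not code
from any patch in this thread: a consumer could bound its wait on the
dma-buf's reservation object before programming hardware. The
reservation_object_wait_timeout_rcu() helper is the existing API of this
era; the function name and the 2s budget below are made up for the
sketch.)

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

/* Illustrative sketch: never trust the producer to signal in time. */
static int consumer_wait_dmabuf(struct dma_buf *dmabuf)
{
	long ret;

	/* Wait for exclusive + shared fences, interruptibly, for at most 2s */
	ret = reservation_object_wait_timeout_rcu(dmabuf->resv,
						  true, true,
						  msecs_to_jiffies(2000));
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* fence never signaled: skip the HW op */

	return 0;			/* safe to touch the hardware */
}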
 
> Imo not even root should be allowed to break this, since it could put
> drivers into a non-recoverable state. I think this must be restricted to
> something known-unsafe-don't-enable-on-production like debugfs.

Providing fences is extremely useful, even for software buffers. (For
the sake of argument, just imagine an asynchronous multithreaded llvmpipe
wanting to support client fences for deferred rendering.) The only
question in my mind is how much cotton wool to use.

> Other solutions which I don't like:
> - Everyone needs to be able to recover. Given how much effort it is to
>   just keep i915 hangcheck in working order I think that's totally
>   illusionary to assume. At least once world+dog (atomic, v4l, ...) all
>   consume/produce fences, subsystems where the usual assumption holds that
>   async ops complete.
> 
> - Really long timeouts are allowed for root in vgem. Could lead to even
>   more fun in testing i915 hangchecks I think, so don't like that much
>   either.

The whole point is in testing our handling before we become susceptible
to real-world failures - because as you point out, not everyone
guarantees that a fence will be signaled. I can't just pass around i915
dma-bufs, simply because we may unwind them and in the process completely
curtail being able to test a foreign fence that hangs.

> I think the best option is to just do the same as we've done for sw_fence,
> and move it to debugfs. We could reuse the debugfs sw_fence interface to
> create them (gives us more control as a bonus), and just have an ioctl to
> attach fences to vgem (which could be unpriviledged).

The biggest reason I had against going the sw_sync-only route was that
vgem should provide unprivileged fences, and that through the bookkeeping
in vgem we can keep them safe and ensure that we don't leak random
buffers or fences. (And I need a source of foreign dma-bufs with implicit
fence tracking with which I can try to break the driver.)
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 13:23             ` Chris Wilson
@ 2016-07-14 13:39               ` Chris Wilson
  2016-07-14 14:36                 ` [Intel-gfx] " Daniel Vetter
  2016-07-14 14:33               ` Daniel Vetter
  1 sibling, 1 reply; 20+ messages in thread
From: Chris Wilson @ 2016-07-14 13:39 UTC (permalink / raw)
  To: Daniel Vetter, dri-devel, intel-gfx, Sean Paul, Zach Reizner,
	Gustavo Padovan, Daniel Vetter

On Thu, Jul 14, 2016 at 02:23:04PM +0100, Chris Wilson wrote:
> The biggest reason I had against going the sw_sync only route was that
> vgem should provide unprivileged fences and that through the bookkeeping
> in vgem we can keep them safe, ensure that we don't leak random buffers
> or fences. (And I need a source of foriegn dma-buf with implicit fence
> tracking with which I can try and break the driver.)

And for testing, passing around content + fences is more useful than
passing fences alone.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 13:23             ` Chris Wilson
  2016-07-14 13:39               ` Chris Wilson
@ 2016-07-14 14:33               ` Daniel Vetter
  1 sibling, 0 replies; 20+ messages in thread
From: Daniel Vetter @ 2016-07-14 14:33 UTC (permalink / raw)
  To: Chris Wilson, Daniel Vetter, dri-devel, intel-gfx, Sean Paul,
	Zach Reizner, Gustavo Padovan, Daniel Vetter

On Thu, Jul 14, 2016 at 02:23:04PM +0100, Chris Wilson wrote:
> On Thu, Jul 14, 2016 at 02:40:59PM +0200, Daniel Vetter wrote:
> > On Thu, Jul 14, 2016 at 11:11:02AM +0100, Chris Wilson wrote:
> > > On Thu, Jul 14, 2016 at 10:59:04AM +0100, Chris Wilson wrote:
> > > > On Thu, Jul 14, 2016 at 10:12:17AM +0200, Daniel Vetter wrote:
> > > > > On Thu, Jul 14, 2016 at 08:04:19AM +0100, Chris Wilson wrote:
> > > > > > vGEM buffers are useful for passing data between software clients and
> > > > > > hardware renders. By allowing the user to create and attach fences to
> > > > > > the exported vGEM buffers (on the dma-buf), the user can implement a
> > > > > > deferred renderer and queue hardware operations like flipping and then
> > > > > > signal the buffer readiness (i.e. this allows the user to schedule
> > > > > > operations out-of-order, but have them complete in-order).
> > > > > > 
> > > > > > This also makes it much easier to write tightly controlled testcases for
> > > > > > dma-buf fencing and signaling between hardware drivers.
> > > > > > 
> > > > > > v2: Don't pretend the fences exist in an ordered timeline, but allocate
> > > > > > a separate fence-context for each fence so that the fences are
> > > > > > unordered.
> > > > > > v3: Make the debug output more interesting, and so the signaled status.
> > > > > > 
> > > > > > Testcase: igt/vgem_basic/dmabuf-fence
> > > > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > > > Cc: Sean Paul <seanpaul@chromium.org>
> > > > > > Cc: Zach Reizner <zachr@google.com>
> > > > > > Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> > > > > > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > > > > > Acked-by: Zach Reizner <zachr@google.com>
> > > > > 
> > > > > One thing I completely forgotten: This allows userspace to hang kernel
> > > > > drivers. i915 (and other gpu drivers) can recover using hangcheck, but
> > > > > dumber drivers (v4l, if that ever happens) probably never except such a
> > > > > case. We've had a similar discusion with the userspace fences exposed in
> > > > > sw_fence, and decided to move all those ioctl into debugfs. I think we
> > > > > should do the same for this vgem-based debugging of implicit sync. Sorry
> > > > > for realizing this this late.
> > > > 
> > > > One of the very tests I make is to ensure that we recover from such a
> > > > hang. I don't see the difference between this any of the other ways
> > > > userspace can shoot itself (and others) in the foot.
> > > 
> > > So one solution would be to make vgem fences automatically timeout (with
> > > a flag for root to override for the sake of testing hang detection).
> > 
> > The problem is other drivers. E.g. right now atomic helpers assume that
> > fences will signal, and can't recover if they don't. This is why drivers
> > where things might fail must have some recovery (hangcheck, timeout) to
> > make sure dma_fences always signal.
> 
> Urm, all the atomic helpers should work with fails. The waits on dma-buf
> should be before any hardware is modified and so cancellation is trivial.
> Anyone using a foriegn fence (or even native) must cope that it may not
> meet some deadline.
> 
> They have to. Anyone sharing a i915 dma-buf is susceptible to all kinds
> of (unprivileged) fun.
>  
> > Imo not even root should be allowed to break this, since it could put
> > drivers into a non-recoverable state. I think this must be restricted to
> > something known-unsafe-don't-enable-on-production like debugfs.
> 
> Providing fences is extremely useful, even for software buffers. (For
> the sake of argument, just imagine an asynchronous multithreaded llvmpipe
> wanting to support client fences for deferred rendering.) The only
> question in my mind is how much cotton wool to use.
> 
> > Other solutions which I don't like:
> > - Everyone needs to be able to recover. Given how much effort it is to
> >   just keep i915 hangcheck in working order I think that's totally
> >   illusionary to assume. At least once world+dog (atomic, v4l, ...) all
> >   consume/produce fences, subsystems where the usual assumption holds that
> >   async ops complete.
> > 
> > - Really long timeouts are allowed for root in vgem. Could lead to even
> >   more fun in testing i915 hangchecks I think, so don't like that much
> >   either.
> 
> The whole point is in testing our handling before we become suspectible
> to real world fail - because as you point out, not everyone guarantees
> that a fence will be signaled. I can't simply pass around i915 dma-buf
> simply because we may unwind them and in the process completely curtail
> being able to test a foriegn fence that hangs.

I think that's where we differ in opinion: right now we do have the
guarantee that every fence gets signalled in finite time. For drivers
where that is not inherently guaranteed, there must be a hangcheck to
force the completion.

The only exception thus far is the debugfs-only sw_fence interface.
-Daniel

> 
> > I think the best option is to just do the same as we've done for sw_fence,
> > and move it to debugfs. We could reuse the debugfs sw_fence interface to
> > create them (gives us more control as a bonus), and just have an ioctl to
> > attach fences to vgem (which could be unpriviledged).
> 
> The biggest reason I had against going the sw_sync only route was that
> vgem should provide unprivileged fences and that through the bookkeeping
> in vgem we can keep them safe, ensure that we don't leak random buffers
> or fences. (And I need a source of foriegn dma-buf with implicit fence
> tracking with which I can try and break the driver.)
> -Chris
> 
> -- 
> Chris Wilson, Intel Open Source Technology Centre

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 13:39               ` Chris Wilson
@ 2016-07-14 14:36                 ` Daniel Vetter
  2016-07-14 15:24                   ` Chris Wilson
  0 siblings, 1 reply; 20+ messages in thread
From: Daniel Vetter @ 2016-07-14 14:36 UTC (permalink / raw)
  To: Chris Wilson, Daniel Vetter, dri-devel, intel-gfx, Sean Paul,
	Zach Reizner, Gustavo Padovan, Daniel Vetter

On Thu, Jul 14, 2016 at 02:39:54PM +0100, Chris Wilson wrote:
> On Thu, Jul 14, 2016 at 02:23:04PM +0100, Chris Wilson wrote:
> > The biggest reason I had against going the sw_sync only route was that
> > vgem should provide unprivileged fences and that through the bookkeeping
> > in vgem we can keep them safe, ensure that we don't leak random buffers
> > or fences. (And I need a source of foriegn dma-buf with implicit fence
> > tracking with which I can try and break the driver.)
> 
> And for testing passing around content + fences is more useful than
> passing fences alone.

Yup, agreed. But having fences free-standing isn't a real issue, since
they're refcounted and the userspace parts (sync_file) will get cleaned
up at process exit at the latest. I'm not advocating for any behaviour
change at all, just for hiding these things in debugfs.

Or maybe we could add a special (tainting) module option to vgem.ko which
enables this interface? That would be even less work, could easily be
integrated into igt (just set that knob at runtime, done), and with a
stern enough warning in dmesg plus tainting, the point should be clear.
Of course that switch would be off by default. Thoughts?
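
(For illustration only, a minimal sketch of what such a knob might look
like; the "unsafe_fences" name and the helper below are made up, only
module_param_named()/add_taint() are the stock kernel facilities:)

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical off-by-default knob gating the vgem fence ioctls. */
static bool vgem_unsafe_fences;
module_param_named(unsafe_fences, vgem_unsafe_fences, bool, 0400);
MODULE_PARM_DESC(unsafe_fences,
		 "Allow userspace fences that may never signal (testing only)");

static int vgem_fence_check_unsafe(void)
{
	if (!vgem_unsafe_fences)
		return -EPERM;

	/* Leave a visible mark for any subsequent bug report */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	return 0;
}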
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 14:36                 ` [Intel-gfx] " Daniel Vetter
@ 2016-07-14 15:24                   ` Chris Wilson
  2016-07-15  7:08                     ` [Intel-gfx] " Daniel Vetter
  0 siblings, 1 reply; 20+ messages in thread
From: Chris Wilson @ 2016-07-14 15:24 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: Daniel Vetter, intel-gfx, dri-devel, Zach Reizner, Gustavo Padovan

On Thu, Jul 14, 2016 at 04:36:37PM +0200, Daniel Vetter wrote:
> On Thu, Jul 14, 2016 at 02:39:54PM +0100, Chris Wilson wrote:
> > On Thu, Jul 14, 2016 at 02:23:04PM +0100, Chris Wilson wrote:
> > > The biggest reason I had against going the sw_sync only route was that
> > > vgem should provide unprivileged fences and that through the bookkeeping
> > > in vgem we can keep them safe, ensure that we don't leak random buffers
> > > or fences. (And I need a source of foriegn dma-buf with implicit fence
> > > tracking with which I can try and break the driver.)
> > 
> > And for testing passing around content + fences is more useful than
> > passing fences alone.
> 
> Yup, agreed. But having fences free-standing isn't a real issue since
> their refcounted and the userspace parts (sync_file) will get cleaned up
> on process exit latest. Ḯ'm not advocating for any behaviour change at
> all, just for hiding these things in debugfs.

It's just a choice of API. We could equally hide it behind a separate
config flag.

First question: are we happy that there is a legitimate use case for
fences on vgem?

If so, what enforced timeout on the fence should we use?

(I think that this ioctl API is correct; I don't foresee sw_sync being
viable for unprivileged use.)

Then we can restrict this patch to add the safe interface, enable a bunch
more tests and get on with discussing how to break the kernel "safely"!
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH v3] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14 15:24                   ` Chris Wilson
@ 2016-07-15  7:08                     ` Daniel Vetter
  0 siblings, 0 replies; 20+ messages in thread
From: Daniel Vetter @ 2016-07-15  7:08 UTC (permalink / raw)
  To: Chris Wilson, Daniel Vetter, dri-devel, intel-gfx, Sean Paul,
	Zach Reizner, Gustavo Padovan, Daniel Vetter

On Thu, Jul 14, 2016 at 04:24:41PM +0100, Chris Wilson wrote:
> On Thu, Jul 14, 2016 at 04:36:37PM +0200, Daniel Vetter wrote:
> > On Thu, Jul 14, 2016 at 02:39:54PM +0100, Chris Wilson wrote:
> > > On Thu, Jul 14, 2016 at 02:23:04PM +0100, Chris Wilson wrote:
> > > > The biggest reason I had against going the sw_sync only route was that
> > > > vgem should provide unprivileged fences and that through the bookkeeping
> > > > in vgem we can keep them safe, ensure that we don't leak random buffers
> > > > or fences. (And I need a source of foriegn dma-buf with implicit fence
> > > > tracking with which I can try and break the driver.)
> > > 
> > > And for testing passing around content + fences is more useful than
> > > passing fences alone.
> > 
> > Yup, agreed. But having fences free-standing isn't a real issue since
> > their refcounted and the userspace parts (sync_file) will get cleaned up
> > on process exit latest. Ḯ'm not advocating for any behaviour change at
> > all, just for hiding these things in debugfs.
> 
> It's just a choice of api. We could equally hide it behind a separate
> config flag.
> 
> First question, are we happy that there is a legitimate usecase for fences
> on vgem?
> 
> If so, what enforced timeout on the fence should we use?
> 
> (I think that this ioctl api is correct, I don't forsee sw_sync being
> viable for unprivileged use.)
> 
> Then we can restrict this patch to add the safe interface, enable a bunch
> more tests and get on with discussing how to break the kernel "safely"!

I think the interface is sound. We could probably bikeshed the timeout
forever, but 10s is still reasonable imo.
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH v4] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-14  7:04   ` [PATCH v3] " Chris Wilson
  2016-07-14  8:12     ` Daniel Vetter
@ 2016-07-15  8:31     ` Chris Wilson
  2016-07-18  6:34       ` Daniel Vetter
  2016-07-15  9:15     ` ✗ Ro.CI.BAT: failure for drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) (rev2) Patchwork
  2 siblings, 1 reply; 20+ messages in thread
From: Chris Wilson @ 2016-07-15  8:31 UTC (permalink / raw)
  To: dri-devel; +Cc: Daniel Vetter, intel-gfx, Zach Reizner, Gustavo Padovan

vGEM buffers are useful for passing data between software clients and
hardware renders. By allowing the user to create and attach fences to
the exported vGEM buffers (on the dma-buf), the user can implement a
deferred renderer and queue hardware operations like flipping and then
signal the buffer readiness (i.e. this allows the user to schedule
operations out-of-order, but have them complete in-order).

This also makes it much easier to write tightly controlled testcases for
dma-buf fencing and signaling between hardware drivers.

v2: Don't pretend the fences exist in an ordered timeline, but allocate
a separate fence-context for each fence so that the fences are
unordered.
v3: Make the debug output more interesting, and show the signaled status.
v4: Automatically signal the fence to prevent userspace from
indefinitely hanging drivers.

Testcase: igt/vgem_basic/dmabuf-fence
Testcase: igt/vgem_slow/nohang
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Zach Reizner <zachr@google.com>
Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Zach Reizner <zachr@google.com>
---
 drivers/gpu/drm/vgem/Makefile     |   2 +-
 drivers/gpu/drm/vgem/vgem_drv.c   |  34 +++++
 drivers/gpu/drm/vgem/vgem_drv.h   |  16 +++
 drivers/gpu/drm/vgem/vgem_fence.c | 283 ++++++++++++++++++++++++++++++++++++++
 include/uapi/drm/vgem_drm.h       |  62 +++++++++
 5 files changed, 396 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/vgem/vgem_fence.c
 create mode 100644 include/uapi/drm/vgem_drm.h

diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index 3f4c7b842028..bfcdea1330e6 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
 
 obj-$(CONFIG_DRM_VGEM)	+= vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 29c2aab3c1a7..c15bafb06665 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
 	.close = drm_gem_vm_close,
 };
 
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct vgem_file *vfile;
+	int ret;
+
+	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+	if (!vfile)
+		return -ENOMEM;
+
+	file->driver_priv = vfile;
+
+	ret = vgem_fence_open(vfile);
+	if (ret) {
+		kfree(vfile);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct vgem_file *vfile = file->driver_priv;
+
+	vgem_fence_close(vfile);
+	kfree(vfile);
+}
+
 /* ioctls */
 
 static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -164,6 +192,8 @@ unref:
 }
 
 static struct drm_ioctl_desc vgem_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
 
 static struct drm_driver vgem_driver = {
 	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
+	.open				= vgem_open,
+	.preclose			= vgem_preclose,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
+	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
 	.fops				= &vgem_driver_fops,
 
 	.dumb_create			= vgem_gem_dumb_create,
@@ -328,5 +361,6 @@ module_init(vgem_init);
 module_exit(vgem_exit);
 
 MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 988cbaae7588..1f8798ad329c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -32,9 +32,25 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+	struct idr fence_idr;
+	struct mutex fence_mutex;
+};
+
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
 };
 
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
+
 #endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644
index 000000000000..e77b52208699
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software")
+ * to deal in the software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * them Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+#define VGEM_FENCE_TIMEOUT (10*HZ)
+
+struct vgem_fence {
+	struct fence base;
+	struct spinlock lock;
+	struct timer_list timer;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+	return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+	return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+	return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static void vgem_fence_release(struct fence *base)
+{
+	struct vgem_fence *fence = container_of(base, typeof(*fence), base);
+
+	del_timer_sync(&fence->timer);
+	fence_free(&fence->base);
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+	snprintf(str, size, "%u", fence->seqno);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+					  int size)
+{
+	snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+}
+
+const struct fence_ops vgem_fence_ops = {
+	.get_driver_name = vgem_fence_get_driver_name,
+	.get_timeline_name = vgem_fence_get_timeline_name,
+	.enable_signaling = vgem_fence_enable_signaling,
+	.signaled = vgem_fence_signaled,
+	.wait = fence_default_wait,
+	.release = vgem_fence_release,
+
+	.fence_value_str = vgem_fence_value_str,
+	.timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static void vgem_fence_timeout(unsigned long data)
+{
+	struct vgem_fence *fence = (struct vgem_fence *)data;
+
+	fence_signal(&fence->base);
+}
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile,
+				       unsigned int flags)
+{
+	struct vgem_fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	spin_lock_init(&fence->lock);
+	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+		   fence_context_alloc(1), 1);
+
+	setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+
+	/* We force the fence to expire within 10s to prevent driver hangs */
+	mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
+
+	return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+			 struct drm_gem_object *obj)
+{
+	struct dma_buf *dmabuf;
+
+	if (obj->dma_buf)
+		return 0;
+
+	dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	obj->dma_buf = dmabuf;
+	drm_gem_object_reference(obj);
+	return 0;
+}
+
+/*
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
+ *
+ * Create and attach a fence to the vGEM handle. This fence is then exposed
+ * via the dma-buf reservation object and visible to consumers of the exported
+ * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
+ * vGEM buffer is being written to by the client and is exposed as an exclusive
+ * fence, otherwise the fence indicates the client is currently reading from the
+ * buffer and all future writes should wait for the client to signal its
+ * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
+ * an exclusive fence when adding a read, or any fence when adding a write),
+ * -EBUSY is reported. Serialisation between operations should be handled
+ * by waiting upon the dma-buf.
+ *
+ * This returns the handle for the new fence that must be signaled within 10
+ * seconds (or otherwise it will automatically expire). See
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
+ *
+ * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
+ */
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file)
+{
+	struct drm_vgem_fence_attach *arg = data;
+	struct vgem_file *vfile = file->driver_priv;
+	struct reservation_object *resv;
+	struct drm_gem_object *obj;
+	struct fence *fence;
+	int ret;
+
+	if (arg->flags & ~VGEM_FENCE_WRITE)
+		return -EINVAL;
+
+	if (arg->pad)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(file, arg->handle);
+	if (!obj)
+		return -ENOENT;
+
+	ret = attach_dmabuf(dev, obj);
+	if (ret)
+		goto err;
+
+	fence = vgem_fence_create(vfile, arg->flags);
+	if (!fence) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Check for a conflicting fence */
+	resv = obj->dma_buf->resv;
+	if (!reservation_object_test_signaled_rcu(resv,
+						  arg->flags & VGEM_FENCE_WRITE)) {
+		ret = -EBUSY;
+		goto err_fence;
+	}
+
+	/* Expose the fence via the dma-buf */
+	ret = 0;
+	mutex_lock(&resv->lock.base);
+	if (arg->flags & VGEM_FENCE_WRITE)
+		reservation_object_add_excl_fence(resv, fence);
+	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+		reservation_object_add_shared_fence(resv, fence);
+	mutex_unlock(&resv->lock.base);
+
+	/* Record the fence in our idr for later signaling */
+	if (ret == 0) {
+		mutex_lock(&vfile->fence_mutex);
+		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+		mutex_unlock(&vfile->fence_mutex);
+		if (ret > 0) {
+			arg->out_fence = ret;
+			ret = 0;
+		}
+	}
+err_fence:
+	if (ret) {
+		fence_signal(fence);
+		fence_put(fence);
+	}
+err:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+/*
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
+ *
+ * Signal and consume a fence earlier attached to a vGEM handle using
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
+ *
+ * All fences must be signaled within 10s of attachment or otherwise they
+ * will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
+ *
+ * Signaling a fence indicates to all consumers of the dma-buf that the
+ * client has completed the operation associated with the fence, and that the
+ * buffer is then ready for consumption.
+ *
+ * If the fence does not exist (or has already been signaled by the client),
+ * vgem_fence_signal_ioctl returns -ENOENT.
+ */
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+			    void *data,
+			    struct drm_file *file)
+{
+	struct vgem_file *vfile = file->driver_priv;
+	struct drm_vgem_fence_signal *arg = data;
+	struct fence *fence;
+	int ret = 0;
+
+	if (arg->flags)
+		return -EINVAL;
+
+	mutex_lock(&vfile->fence_mutex);
+	fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+	mutex_unlock(&vfile->fence_mutex);
+	if (!fence)
+		return -ENOENT;
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
+	if (fence_is_signaled(fence))
+		ret = -ETIMEDOUT;
+
+	fence_signal(fence);
+	fence_put(fence);
+	return ret;
+}
+
+int vgem_fence_open(struct vgem_file *vfile)
+{
+	mutex_init(&vfile->fence_mutex);
+	idr_init(&vfile->fence_idr);
+
+	return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+	fence_signal(p);
+	fence_put(p);
+	return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+	idr_destroy(&vfile->fence_idr);
+}
diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
new file mode 100644
index 000000000000..bf66f5db6da8
--- /dev/null
+++ b/include/uapi/drm/vgem_drm.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_VGEM_DRM_H_
+#define _UAPI_VGEM_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+#define DRM_VGEM_FENCE_ATTACH	0x1
+#define DRM_VGEM_FENCE_SIGNAL	0x2
+
+#define DRM_IOCTL_VGEM_FENCE_ATTACH	DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
+#define DRM_IOCTL_VGEM_FENCE_SIGNAL	DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
+
+struct drm_vgem_fence_attach {
+	__u32 handle;
+	__u32 flags;
+#define VGEM_FENCE_WRITE	0x1
+	__u32 out_fence;
+	__u32 pad;
+};
+
+struct drm_vgem_fence_signal {
+	__u32 fence;
+	__u32 flags;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_VGEM_DRM_H_ */
-- 
2.8.1
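
(For completeness, a hedged userspace sketch of the two new ioctls; it
assumes fd is an already-opened and authenticated vgem DRM node, that the
uapi headers above are installed on the include path, error handling is
minimal, and vgem_fence_roundtrip() is just an illustrative name - the
igt/vgem_basic tests remain the authoritative examples:)

#include <sys/ioctl.h>

#include <drm/drm.h>
#include <drm/drm_mode.h>
#include "vgem_drm.h"

static int vgem_fence_roundtrip(int fd)
{
	/* Create a vgem dumb buffer to act as the shared backing store */
	struct drm_mode_create_dumb create = {
		.width = 64, .height = 64, .bpp = 32,
	};
	struct drm_vgem_fence_attach attach = { .flags = VGEM_FENCE_WRITE };
	struct drm_vgem_fence_signal signal = { 0 };

	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return -1;

	/* Attach an exclusive (write) fence to the buffer's dma-buf */
	attach.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -1;

	/* ... fill the buffer here; must complete within the 10s timeout ... */

	/* Tell all consumers the buffer is ready */
	signal.fence = attach.out_fence;
	return ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
}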

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* ✗ Ro.CI.BAT: failure for drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) (rev2)
  2016-07-14  7:04   ` [PATCH v3] " Chris Wilson
  2016-07-14  8:12     ` Daniel Vetter
  2016-07-15  8:31     ` [PATCH v4] " Chris Wilson
@ 2016-07-15  9:15     ` Patchwork
  2 siblings, 0 replies; 20+ messages in thread
From: Patchwork @ 2016-07-15  9:15 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) (rev2)
URL   : https://patchwork.freedesktop.org/series/9757/
State : failure

== Summary ==

Series 9757v2 drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
http://patchwork.freedesktop.org/api/1.0/series/9757/revisions/2/mbox

Test drv_module_reload_basic:
                skip       -> PASS       (ro-ivb-i7-3770)
Test gem_exec_suspend:
        Subgroup basic-s3:
                dmesg-warn -> PASS       (ro-bdw-i7-5557U)
Test gem_sync:
        Subgroup basic-store-each:
                pass       -> DMESG-FAIL (ro-bdw-i7-5600u)
Test kms_cursor_legacy:
        Subgroup basic-flip-vs-cursor:
                dmesg-warn -> PASS       (ro-byt-n2820)
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-a:
                pass       -> INCOMPLETE (fi-skl-i7-6700k)
Test prime_vgem:
        Subgroup basic-fence-flip:
                skip       -> FAIL       (ro-bdw-i7-5557U)
                skip       -> FAIL       (ro-ilk1-i5-650)
                skip       -> FAIL       (ro-hsw-i3-4010u)
                skip       -> FAIL       (fi-skl-i5-6260u)
                skip       -> FAIL       (ro-skl3-i5-6260u)
                skip       -> FAIL       (ro-bdw-i5-5250u)
                skip       -> FAIL       (ro-hsw-i7-4770r)
                skip       -> FAIL       (fi-snb-i7-2600)
                skip       -> FAIL       (ro-ivb-i7-3770)
                skip       -> FAIL       (ro-byt-n2820)
        Subgroup basic-fence-wait-default:
                skip       -> FAIL       (ro-bdw-i7-5557U)
                skip       -> FAIL       (ro-ilk1-i5-650)
                skip       -> FAIL       (ro-hsw-i3-4010u)
                skip       -> FAIL       (fi-skl-i5-6260u)
                skip       -> FAIL       (ro-bdw-i7-5600u)
                skip       -> FAIL       (ro-snb-i7-2620M)
                skip       -> FAIL       (ro-bdw-i5-5250u)
                skip       -> FAIL       (ro-hsw-i7-4770r)
                skip       -> FAIL       (fi-snb-i7-2600)
                skip       -> FAIL       (ro-ivb-i7-3770)
                skip       -> FAIL       (ro-byt-n2820)
                skip       -> FAIL       (ro-skl3-i5-6260u)
Test vgem_basic:
        Subgroup debugfs:
                incomplete -> PASS       (ro-snb-i7-2620M)
        Subgroup dmabuf-fence:
                skip       -> FAIL       (ro-bdw-i7-5557U)
                skip       -> FAIL       (ro-ilk1-i5-650)
                skip       -> FAIL       (ro-hsw-i3-4010u)
                skip       -> FAIL       (fi-skl-i5-6260u)
                skip       -> FAIL       (ro-bdw-i7-5600u)
                skip       -> FAIL       (ro-snb-i7-2620M)
                skip       -> FAIL       (ro-bdw-i5-5250u)
                skip       -> FAIL       (ro-hsw-i7-4770r)
                skip       -> FAIL       (fi-snb-i7-2600)
                skip       -> FAIL       (ro-ivb-i7-3770)
                skip       -> FAIL       (ro-byt-n2820)
                skip       -> FAIL       (ro-skl3-i5-6260u)
        Subgroup dmabuf-fence-before:
                skip       -> FAIL       (ro-bdw-i7-5557U)
                skip       -> FAIL       (ro-ilk1-i5-650)
                skip       -> FAIL       (ro-hsw-i3-4010u)
                skip       -> FAIL       (fi-skl-i5-6260u)
                skip       -> FAIL       (ro-bdw-i7-5600u)
                skip       -> FAIL       (ro-snb-i7-2620M)
                skip       -> FAIL       (ro-bdw-i5-5250u)
                skip       -> FAIL       (ro-hsw-i7-4770r)
                skip       -> FAIL       (fi-snb-i7-2600)
                skip       -> FAIL       (ro-ivb-i7-3770)
                skip       -> FAIL       (ro-byt-n2820)
                skip       -> FAIL       (ro-skl3-i5-6260u)

fi-kbl-qkkr      total:241  pass:174  dwarn:28  dfail:1   fail:10  skip:28 
fi-skl-i5-6260u  total:241  pass:217  dwarn:0   dfail:0   fail:11  skip:13 
fi-skl-i7-6700k  total:195  pass:170  dwarn:0   dfail:0   fail:0   skip:24 
fi-snb-i7-2600   total:241  pass:190  dwarn:0   dfail:0   fail:11  skip:40 
ro-bdw-i5-5250u  total:241  pass:213  dwarn:4   dfail:0   fail:11  skip:13 
ro-bdw-i7-5557U  total:241  pass:214  dwarn:0   dfail:0   fail:11  skip:16 
ro-bdw-i7-5600u  total:241  pass:198  dwarn:0   dfail:1   fail:10  skip:32 
ro-byt-n2820     total:241  pass:191  dwarn:0   dfail:0   fail:12  skip:38 
ro-hsw-i3-4010u  total:241  pass:206  dwarn:0   dfail:0   fail:11  skip:24 
ro-hsw-i7-4770r  total:241  pass:206  dwarn:0   dfail:0   fail:11  skip:24 
ro-ilk-i7-620lm  total:241  pass:166  dwarn:0   dfail:0   fail:12  skip:63 
ro-ilk1-i5-650   total:236  pass:166  dwarn:0   dfail:0   fail:12  skip:58 
ro-ivb-i7-3770   total:241  pass:197  dwarn:0   dfail:0   fail:11  skip:33 
ro-skl3-i5-6260u total:241  pass:217  dwarn:1   dfail:0   fail:11  skip:12 
ro-snb-i7-2620M  total:241  pass:188  dwarn:0   dfail:0   fail:11  skip:42 

Results at /archive/results/CI_IGT_test/RO_Patchwork_1496/

c01b445 drm-intel-nightly: 2016y-07m-15d-07h-02m-07s UTC integration manifest
e95d532 drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v4] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl)
  2016-07-15  8:31     ` [PATCH v4] " Chris Wilson
@ 2016-07-18  6:34       ` Daniel Vetter
  0 siblings, 0 replies; 20+ messages in thread
From: Daniel Vetter @ 2016-07-18  6:34 UTC (permalink / raw)
  To: Chris Wilson
  Cc: Daniel Vetter, intel-gfx, dri-devel, Zach Reizner, Gustavo Padovan

On Fri, Jul 15, 2016 at 09:31:11AM +0100, Chris Wilson wrote:
> vGEM buffers are useful for passing data between software clients and
> hardware renders. By allowing the user to create and attach fences to
> the exported vGEM buffers (on the dma-buf), the user can implement a
> deferred renderer and queue hardware operations like flipping and then
> signal the buffer readiness (i.e. this allows the user to schedule
> operations out-of-order, but have them complete in-order).
> 
> This also makes it much easier to write tightly controlled testcases for
> dma-buf fencing and signaling between hardware drivers.
> 
> v2: Don't pretend the fences exist in an ordered timeline, but allocate
> a separate fence-context for each fence so that the fences are
> unordered.
> v3: Make the debug output more interesting, and show the signaled status.
> v4: Automatically signal the fence to prevent userspace from
> indefinitely hanging drivers.
> 
> Testcase: igt/vgem_basic/dmabuf-fence
> Testcase: igt/vgem_slow/nohang
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Zach Reizner <zachr@google.com>
> Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Acked-by: Zach Reizner <zachr@google.com>

Applied to drm-misc, thanks.
-Daniel

> ---
>  drivers/gpu/drm/vgem/Makefile     |   2 +-
>  drivers/gpu/drm/vgem/vgem_drv.c   |  34 +++++
>  drivers/gpu/drm/vgem/vgem_drv.h   |  16 +++
>  drivers/gpu/drm/vgem/vgem_fence.c | 283 ++++++++++++++++++++++++++++++++++++++
>  include/uapi/drm/vgem_drm.h       |  62 +++++++++
>  5 files changed, 396 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/gpu/drm/vgem/vgem_fence.c
>  create mode 100644 include/uapi/drm/vgem_drm.h
> 
> diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
> index 3f4c7b842028..bfcdea1330e6 100644
> --- a/drivers/gpu/drm/vgem/Makefile
> +++ b/drivers/gpu/drm/vgem/Makefile
> @@ -1,4 +1,4 @@
>  ccflags-y := -Iinclude/drm
> -vgem-y := vgem_drv.o
> +vgem-y := vgem_drv.o vgem_fence.o
>  
>  obj-$(CONFIG_DRM_VGEM)	+= vgem.o
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index 29c2aab3c1a7..c15bafb06665 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
>  	.close = drm_gem_vm_close,
>  };
>  
> +static int vgem_open(struct drm_device *dev, struct drm_file *file)
> +{
> +	struct vgem_file *vfile;
> +	int ret;
> +
> +	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
> +	if (!vfile)
> +		return -ENOMEM;
> +
> +	file->driver_priv = vfile;
> +
> +	ret = vgem_fence_open(vfile);
> +	if (ret) {
> +		kfree(vfile);
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
> +{
> +	struct vgem_file *vfile = file->driver_priv;
> +
> +	vgem_fence_close(vfile);
> +	kfree(vfile);
> +}
> +
>  /* ioctls */
>  
>  static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
> @@ -164,6 +192,8 @@ unref:
>  }
>  
>  static struct drm_ioctl_desc vgem_ioctls[] = {
> +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>  };
>  
>  static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
> @@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
>  
>  static struct drm_driver vgem_driver = {
>  	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
> +	.open				= vgem_open,
> +	.preclose			= vgem_preclose,
>  	.gem_free_object_unlocked	= vgem_gem_free_object,
>  	.gem_vm_ops			= &vgem_gem_vm_ops,
>  	.ioctls				= vgem_ioctls,
> +	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
>  	.fops				= &vgem_driver_fops,
>  
>  	.dumb_create			= vgem_gem_dumb_create,
> @@ -328,5 +361,6 @@ module_init(vgem_init);
>  module_exit(vgem_exit);
>  
>  MODULE_AUTHOR("Red Hat, Inc.");
> +MODULE_AUTHOR("Intel Corporation");
>  MODULE_DESCRIPTION(DRIVER_DESC);
>  MODULE_LICENSE("GPL and additional rights");
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
> index 988cbaae7588..1f8798ad329c 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.h
> +++ b/drivers/gpu/drm/vgem/vgem_drv.h
> @@ -32,9 +32,25 @@
>  #include <drm/drmP.h>
>  #include <drm/drm_gem.h>
>  
> +#include <uapi/drm/vgem_drm.h>
> +
> +struct vgem_file {
> +	struct idr fence_idr;
> +	struct mutex fence_mutex;
> +};
> +
>  #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
>  struct drm_vgem_gem_object {
>  	struct drm_gem_object base;
>  };
>  
> +int vgem_fence_open(struct vgem_file *file);
> +int vgem_fence_attach_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file);
> +int vgem_fence_signal_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file);
> +void vgem_fence_close(struct vgem_file *file);
> +
>  #endif
> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
> new file mode 100644
> index 000000000000..e77b52208699
> --- /dev/null
> +++ b/drivers/gpu/drm/vgem/vgem_fence.c
> @@ -0,0 +1,283 @@
> +/*
> + * Copyright 2016 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software")
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sub
> + * license, and/or sell copies of the Software, and to permit persons to whom
> + * the Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
> + * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + */
> +
> +#include <linux/dma-buf.h>
> +#include <linux/reservation.h>
> +
> +#include "vgem_drv.h"
> +
> +#define VGEM_FENCE_TIMEOUT (10*HZ)
> +
> +struct vgem_fence {
> +	struct fence base;
> +	struct spinlock lock;
> +	struct timer_list timer;
> +};
> +
> +static const char *vgem_fence_get_driver_name(struct fence *fence)
> +{
> +	return "vgem";
> +}
> +
> +static const char *vgem_fence_get_timeline_name(struct fence *fence)
> +{
> +	return "unbound";
> +}
> +
> +static bool vgem_fence_signaled(struct fence *fence)
> +{
> +	return false;
> +}
> +
> +static bool vgem_fence_enable_signaling(struct fence *fence)
> +{
> +	return true;
> +}
> +
> +static void vgem_fence_release(struct fence *base)
> +{
> +	struct vgem_fence *fence = container_of(base, typeof(*fence), base);
> +
> +	del_timer_sync(&fence->timer);
> +	fence_free(&fence->base);
> +}
> +
> +static void vgem_fence_value_str(struct fence *fence, char *str, int size)
> +{
> +	snprintf(str, size, "%u", fence->seqno);
> +}
> +
> +static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
> +					  int size)
> +{
> +	snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
> +}
> +
> +const struct fence_ops vgem_fence_ops = {
> +	.get_driver_name = vgem_fence_get_driver_name,
> +	.get_timeline_name = vgem_fence_get_timeline_name,
> +	.enable_signaling = vgem_fence_enable_signaling,
> +	.signaled = vgem_fence_signaled,
> +	.wait = fence_default_wait,
> +	.release = vgem_fence_release,
> +
> +	.fence_value_str = vgem_fence_value_str,
> +	.timeline_value_str = vgem_fence_timeline_value_str,
> +};
> +
> +static void vgem_fence_timeout(unsigned long data)
> +{
> +	struct vgem_fence *fence = (struct vgem_fence *)data;
> +
> +	fence_signal(&fence->base);
> +}
> +
> +static struct fence *vgem_fence_create(struct vgem_file *vfile,
> +				       unsigned int flags)
> +{
> +	struct vgem_fence *fence;
> +
> +	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
> +	if (!fence)
> +		return NULL;
> +
> +	spin_lock_init(&fence->lock);
> +	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
> +		   fence_context_alloc(1), 1);
> +
> +	setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
> +
> +	/* We force the fence to expire within 10s to prevent driver hangs */
> +	mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
> +
> +	return &fence->base;
> +}
> +
> +static int attach_dmabuf(struct drm_device *dev,
> +			 struct drm_gem_object *obj)
> +{
> +	struct dma_buf *dmabuf;
> +
> +	if (obj->dma_buf)
> +		return 0;
> +
> +	dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
> +	if (IS_ERR(dmabuf))
> +		return PTR_ERR(dmabuf);
> +
> +	obj->dma_buf = dmabuf;
> +	drm_gem_object_reference(obj);
> +	return 0;
> +}
> +
> +/*
> + * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
> + *
> + * Create and attach a fence to the vGEM handle. This fence is then exposed
> + * via the dma-buf reservation object and visible to consumers of the exported
> + * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
> + * vGEM buffer is being written to by the client and is exposed as an exclusive
> + * fence, otherwise the fence indicates the client is currently reading from the
> + * buffer and all future writes should wait for the client to signal its
> + * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
> + * an exclusive fence when adding a read, or any fence when adding a write),
> + * -EBUSY is reported. Serialisation between operations should be handled
> + * by waiting upon the dma-buf.
> + *
> + * This returns the handle for the new fence that must be signaled within 10
> + * seconds (or otherwise it will automatically expire). See
> + * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
> + *
> + * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
> + */
> +int vgem_fence_attach_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file)
> +{
> +	struct drm_vgem_fence_attach *arg = data;
> +	struct vgem_file *vfile = file->driver_priv;
> +	struct reservation_object *resv;
> +	struct drm_gem_object *obj;
> +	struct fence *fence;
> +	int ret;
> +
> +	if (arg->flags & ~VGEM_FENCE_WRITE)
> +		return -EINVAL;
> +
> +	if (arg->pad)
> +		return -EINVAL;
> +
> +	obj = drm_gem_object_lookup(file, arg->handle);
> +	if (!obj)
> +		return -ENOENT;
> +
> +	ret = attach_dmabuf(dev, obj);
> +	if (ret)
> +		goto err;
> +
> +	fence = vgem_fence_create(vfile, arg->flags);
> +	if (!fence) {
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	/* Check for a conflicting fence */
> +	resv = obj->dma_buf->resv;
> +	if (!reservation_object_test_signaled_rcu(resv,
> +						  arg->flags & VGEM_FENCE_WRITE)) {
> +		ret = -EBUSY;
> +		goto err_fence;
> +	}
> +
> +	/* Expose the fence via the dma-buf */
> +	ret = 0;
> +	mutex_lock(&resv->lock.base);
> +	if (arg->flags & VGEM_FENCE_WRITE)
> +		reservation_object_add_excl_fence(resv, fence);
> +	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
> +		reservation_object_add_shared_fence(resv, fence);
> +	mutex_unlock(&resv->lock.base);
> +
> +	/* Record the fence in our idr for later signaling */
> +	if (ret == 0) {
> +		mutex_lock(&vfile->fence_mutex);
> +		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
> +		mutex_unlock(&vfile->fence_mutex);
> +		if (ret > 0) {
> +			arg->out_fence = ret;
> +			ret = 0;
> +		}
> +	}
> +err_fence:
> +	if (ret) {
> +		fence_signal(fence);
> +		fence_put(fence);
> +	}
> +err:
> +	drm_gem_object_unreference_unlocked(obj);
> +	return ret;
> +}
> +
> +/*
> + * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
> + *
> + * Signal and consume a fence earlier attached to a vGEM handle using
> + * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
> + *
> + * All fences must be signaled within 10s of attachment or otherwise they
> + * will automatically expire (and a subsequent vgem_fence_signal_ioctl returns -ETIMEDOUT).
> + *
> + * Signaling a fence indicates to all consumers of the dma-buf that the
> + * client has completed the operation associated with the fence, and that the
> + * buffer is then ready for consumption.
> + *
> + * If the fence does not exist (or has already been signaled by the client),
> + * vgem_fence_signal_ioctl returns -ENOENT.
> + */
> +int vgem_fence_signal_ioctl(struct drm_device *dev,
> +			    void *data,
> +			    struct drm_file *file)
> +{
> +	struct vgem_file *vfile = file->driver_priv;
> +	struct drm_vgem_fence_signal *arg = data;
> +	struct fence *fence;
> +	int ret = 0;
> +
> +	if (arg->flags)
> +		return -EINVAL;
> +
> +	mutex_lock(&vfile->fence_mutex);
> +	fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
> +	mutex_unlock(&vfile->fence_mutex);
> +	if (!fence)
> +		return -ENOENT;
> +	if (IS_ERR(fence))
> +		return PTR_ERR(fence);
> +
> +	if (fence_is_signaled(fence))
> +		ret = -ETIMEDOUT;
> +
> +	fence_signal(fence);
> +	fence_put(fence);
> +	return ret;
> +}
> +
> +int vgem_fence_open(struct vgem_file *vfile)
> +{
> +	mutex_init(&vfile->fence_mutex);
> +	idr_init(&vfile->fence_idr);
> +
> +	return 0;
> +}
> +
> +static int __vgem_fence_idr_fini(int id, void *p, void *data)
> +{
> +	fence_signal(p);
> +	fence_put(p);
> +	return 0;
> +}
> +
> +void vgem_fence_close(struct vgem_file *vfile)
> +{
> +	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
> +	idr_destroy(&vfile->fence_idr);
> +}
> diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
> new file mode 100644
> index 000000000000..bf66f5db6da8
> --- /dev/null
> +++ b/include/uapi/drm/vgem_drm.h
> @@ -0,0 +1,62 @@
> +/*
> + * Copyright 2016 Intel Corporation
> + * All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the
> + * "Software"), to deal in the Software without restriction, including
> + * without limitation the rights to use, copy, modify, merge, publish,
> + * distribute, sub license, and/or sell copies of the Software, and to
> + * permit persons to whom the Software is furnished to do so, subject to
> + * the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the
> + * next paragraph) shall be included in all copies or substantial portions
> + * of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
> + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
> + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
> + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
> + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
> + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#ifndef _UAPI_VGEM_DRM_H_
> +#define _UAPI_VGEM_DRM_H_
> +
> +#include "drm.h"
> +
> +#if defined(__cplusplus)
> +extern "C" {
> +#endif
> +
> +/* Please note that modifications to all structs defined here are
> + * subject to backwards-compatibility constraints.
> + */
> +#define DRM_VGEM_FENCE_ATTACH	0x1
> +#define DRM_VGEM_FENCE_SIGNAL	0x2
> +
> +#define DRM_IOCTL_VGEM_FENCE_ATTACH	DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
> +#define DRM_IOCTL_VGEM_FENCE_SIGNAL	DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
> +
> +struct drm_vgem_fence_attach {
> +	__u32 handle;
> +	__u32 flags;
> +#define VGEM_FENCE_WRITE	0x1
> +	__u32 out_fence;
> +	__u32 pad;
> +};
> +
> +struct drm_vgem_fence_signal {
> +	__u32 fence;
> +	__u32 flags;
> +};
> +
> +#if defined(__cplusplus)
> +}
> +#endif
> +
> +#endif /* _UAPI_VGEM_DRM_H_ */
> -- 
> 2.8.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
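
The attach documentation quoted above stresses that the fence is exposed
through the dma-buf's reservation object, so an importing driver needs nothing
vGEM-specific to honour it. A rough, hypothetical sketch of that consumer side
(the helper name, timeout and return-code handling are illustrative, not taken
from the patch):

#include <linux/dma-buf.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

/*
 * Hypothetical importer-side wait, not part of the patch: block until all
 * fences on the imported dma-buf (including a vGEM sw fence attached by
 * userspace) have signalled, or give up after 10s.
 */
static int importer_wait_for_vgem(struct dma_buf *dmabuf)
{
	long ret;

	ret = reservation_object_wait_timeout_rcu(dmabuf->resv,
						   true, /* wait on shared (read) fences too */
						   true, /* interruptible */
						   msecs_to_jiffies(10000));
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */

	return ret ? 0 : -ETIMEDOUT;	/* 0 means the wait timed out */
}

With the auto-expiry added in v4, such a wait is bounded even if the vGEM
client never gets around to the signal ioctl.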

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2016-07-18  6:34 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-07-12 12:46 [PATCH v2] drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) Chris Wilson
2016-07-12 16:13 ` ✗ Ro.CI.BAT: failure for " Patchwork
2016-07-13 18:57 ` [PATCH v2] " Chris Wilson
2016-07-13 20:29 ` Gustavo Padovan
2016-07-13 20:46   ` Chris Wilson
2016-07-14  7:04   ` [PATCH v3] " Chris Wilson
2016-07-14  8:12     ` Daniel Vetter
2016-07-14  9:59       ` Chris Wilson
2016-07-14 10:11         ` Chris Wilson
2016-07-14 12:15           ` [Intel-gfx] " Chris Wilson
2016-07-14 12:40           ` Daniel Vetter
2016-07-14 13:23             ` Chris Wilson
2016-07-14 13:39               ` Chris Wilson
2016-07-14 14:36                 ` [Intel-gfx] " Daniel Vetter
2016-07-14 15:24                   ` Chris Wilson
2016-07-15  7:08                     ` [Intel-gfx] " Daniel Vetter
2016-07-14 14:33               ` Daniel Vetter
2016-07-15  8:31     ` [PATCH v4] " Chris Wilson
2016-07-18  6:34       ` Daniel Vetter
2016-07-15  9:15     ` ✗ Ro.CI.BAT: failure for drm/vgem: Attach sw fences to exported vGEM dma-buf (ioctl) (rev2) Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.