All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
@ 2017-09-12 20:42 Marek Olšák
  2017-09-12 20:42 ` [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file Marek Olšák
                   ` (2 more replies)
  0 siblings, 3 replies; 36+ messages in thread
From: Marek Olšák @ 2017-09-12 20:42 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Marek Olšák <marek.olsak@amd.com>

For amdgpu.

drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
helpers drm_syncobj_create and drm_syncobj_get_handle are added.

Signed-off-by: Marek Olšák <marek.olsak@amd.com>
---
 drivers/gpu/drm/drm_syncobj.c | 49 +++++++++++++++++++++++++++++++++++++++----
 include/drm/drm_syncobj.h     |  4 ++++
 2 files changed, 49 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0422b8c..0bb1741 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_syncobj_free);
 
-static int drm_syncobj_create(struct drm_file *file_private,
-			      u32 *handle, uint32_t flags)
+/**
+ * drm_syncobj_create - create a new syncobj
+ * @out_syncobj: returned syncobj
+ * @flags: DRM_SYNCOBJ_* flags
+ * @fence: if non-NULL, the syncobj will represent this fence
+ */
+int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
+		       struct dma_fence *fence)
 {
 	int ret;
 	struct drm_syncobj *syncobj;
@@ -284,6 +290,25 @@ static int drm_syncobj_create(struct drm_file *file_private,
 		}
 	}
 
+	if (fence)
+		drm_syncobj_replace_fence(syncobj, fence);
+
+	*out_syncobj = syncobj;
+	return 0;
+}
+EXPORT_SYMBOL(drm_syncobj_create);
+
+/**
+ * drm_syncobj_get_handle - get a handle from a syncobj
+ */
+int drm_syncobj_get_handle(struct drm_file *file_private,
+			   struct drm_syncobj *syncobj, u32 *handle)
+{
+	int ret;
+
+	/* take a reference to put in the idr */
+	drm_syncobj_get(syncobj);
+
 	idr_preload(GFP_KERNEL);
 	spin_lock(&file_private->syncobj_table_lock);
 	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
@@ -299,6 +324,22 @@ static int drm_syncobj_create(struct drm_file *file_private,
 	*handle = ret;
 	return 0;
 }
+EXPORT_SYMBOL(drm_syncobj_get_handle);
+
+static int drm_syncobj_create_as_handle(struct drm_file *file_private,
+					u32 *handle, uint32_t flags)
+{
+	int ret;
+	struct drm_syncobj *syncobj;
+
+	ret = drm_syncobj_create(&syncobj, flags, NULL);
+	if (ret)
+		return ret;
+
+	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
+	drm_syncobj_put(syncobj);
+	return ret;
+}
 
 static int drm_syncobj_destroy(struct drm_file *file_private,
 			       u32 handle)
@@ -522,8 +563,8 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
 	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
 		return -EINVAL;
 
-	return drm_syncobj_create(file_private,
-				  &args->handle, args->flags);
+	return drm_syncobj_create_as_handle(file_private,
+					    &args->handle, args->flags);
 }
 
 int
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index c00fee5..e7f0035 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -136,5 +136,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 			   u32 handle,
 			   struct dma_fence **fence);
 void drm_syncobj_free(struct kref *kref);
+int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
+		       struct dma_fence *fence);
+int drm_syncobj_get_handle(struct drm_file *file_private,
+			   struct drm_syncobj *syncobj, u32 *handle);
 
 #endif
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 2/3] drm/syncobj: add a new helper drm_syncobj_get_fd
       [not found] ` <1505248934-1819-1-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2017-09-12 20:42   ` Marek Olšák
  2017-09-29  2:44   ` [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Chunming Zhou
  1 sibling, 0 replies; 36+ messages in thread
From: Marek Olšák @ 2017-09-12 20:42 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Marek Olšák <marek.olsak@amd.com>

Signed-off-by: Marek Olšák <marek.olsak@amd.com>
---
 drivers/gpu/drm/drm_syncobj.c | 33 +++++++++++++++++++--------------
 include/drm/drm_syncobj.h     |  1 +
 2 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0bb1741..62adc7a 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -386,33 +386,38 @@ static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
 	return 0;
 }
 
-static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
-				    u32 handle, int *p_fd)
+int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
 {
-	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
 	int ret;
 	int fd;
 
-	if (!syncobj)
-		return -EINVAL;
-
 	fd = get_unused_fd_flags(O_CLOEXEC);
-	if (fd < 0) {
-		drm_syncobj_put(syncobj);
+	if (fd < 0)
 		return fd;
-	}
 
 	if (!syncobj->file) {
 		ret = drm_syncobj_alloc_file(syncobj);
-		if (ret)
-			goto out_put_fd;
+		if (ret) {
+			put_unused_fd(fd);
+			return ret;
+		}
 	}
 	fd_install(fd, syncobj->file);
-	drm_syncobj_put(syncobj);
 	*p_fd = fd;
 	return 0;
-out_put_fd:
-	put_unused_fd(fd);
+}
+EXPORT_SYMBOL(drm_syncobj_get_fd);
+
+static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
+				    u32 handle, int *p_fd)
+{
+	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
+	int ret;
+
+	if (!syncobj)
+		return -EINVAL;
+
+	ret = drm_syncobj_get_fd(syncobj, p_fd);
 	drm_syncobj_put(syncobj);
 	return ret;
 }
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index e7f0035..43e2f38 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -140,5 +140,6 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 		       struct dma_fence *fence);
 int drm_syncobj_get_handle(struct drm_file *file_private,
 			   struct drm_syncobj *syncobj, u32 *handle);
+int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd);
 
 #endif
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-12 20:42 [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Marek Olšák
@ 2017-09-12 20:42 ` Marek Olšák
       [not found]   ` <1505248934-1819-3-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2017-09-14  7:56 ` [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Emil Velikov
       [not found] ` <1505248934-1819-1-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2 siblings, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-12 20:42 UTC (permalink / raw)
  To: dri-devel, amd-gfx

From: Marek Olšák <marek.olsak@amd.com>

for being able to convert an amdgpu fence into one of the handles.
Mesa will use this.

Signed-off-by: Marek Olšák <marek.olsak@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
 include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
 5 files changed, 82 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b5c8b90..c15fa93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp);
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *filp);
 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7cb8a59..6dd719c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -25,6 +25,7 @@
  *    Jerome Glisse <glisse@freedesktop.org>
  */
 #include <linux/pagemap.h>
+#include <linux/sync_file.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_syncobj.h>
@@ -1311,6 +1312,66 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 	return fence;
 }
 
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	union drm_amdgpu_fence_to_handle *info = data;
+	struct dma_fence *fence;
+	struct drm_syncobj *syncobj;
+	struct sync_file *sync_file;
+	int fd, r;
+
+	if (amdgpu_kms_vram_lost(adev, fpriv))
+		return -ENODEV;
+
+	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
+	switch (info->in.what) {
+	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
+		r = drm_syncobj_create(&syncobj, 0, fence);
+		dma_fence_put(fence);
+		if (r)
+			return r;
+		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
+		drm_syncobj_put(syncobj);
+		return r;
+
+	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
+		r = drm_syncobj_create(&syncobj, 0, fence);
+		dma_fence_put(fence);
+		if (r)
+			return r;
+		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
+		drm_syncobj_put(syncobj);
+		return r;
+
+	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
+		fd = get_unused_fd_flags(O_CLOEXEC);
+		if (fd < 0) {
+			dma_fence_put(fence);
+			return fd;
+		}
+
+		sync_file = sync_file_create(fence);
+		dma_fence_put(fence);
+		if (!sync_file) {
+			put_unused_fd(fd);
+			return -ENOMEM;
+		}
+
+		fd_install(fd, sync_file->file);
+		info->out.handle = fd;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
 /**
  * amdgpu_cs_wait_all_fence - wait on all fences to signal
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index d01aca6..1e38411 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -70,9 +70,10 @@
  * - 3.18.0 - Export gpu always on cu bitmap
  * - 3.19.0 - Add support for UVD MJPEG decode
  * - 3.20.0 - Add support for local BOs
+ * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	20
+#define KMS_DRIVER_MINOR	21
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d31777b..b09d315 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	/* KMS */
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 9f5bd97..ec57cd0 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -53,6 +53,7 @@ extern "C" {
 #define DRM_AMDGPU_WAIT_FENCES		0x12
 #define DRM_AMDGPU_VM			0x13
 #define DRM_AMDGPU_FREESYNC	        0x14
+#define DRM_AMDGPU_FENCE_TO_HANDLE	0x15
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -69,6 +70,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
 #define DRM_IOCTL_AMDGPU_VM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
 #define DRM_IOCTL_AMDGPU_FREESYNC	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
+#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
 
 #define AMDGPU_GEM_DOMAIN_CPU		0x1
 #define AMDGPU_GEM_DOMAIN_GTT		0x2
@@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
 	__u32 handle;
 };
 
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2
+
+union drm_amdgpu_fence_to_handle {
+	struct {
+		struct drm_amdgpu_fence fence;
+		__u32 what;
+	} in;
+	struct {
+		__u32 handle;
+	} out;
+};
+
 struct drm_amdgpu_cs_chunk_data {
 	union {
 		struct drm_amdgpu_cs_chunk_ib		ib_data;
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]   ` <1505248934-1819-3-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2017-09-13  3:03     ` zhoucm1
  2017-09-13  6:57       ` Christian König
  2017-09-13 10:11       ` Marek Olšák
  2017-09-28 20:41     ` Marek Olšák
  1 sibling, 2 replies; 36+ messages in thread
From: zhoucm1 @ 2017-09-13  3:03 UTC (permalink / raw)
  To: Marek Olšák,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi Marek,

You're doing the same thing as me; see my "introduce syncfile as fence 
return" patch set, which makes things simpler. We just need to 
directly return a syncfile fd to the UMD at CS time; then the fence the 
UMD gets will always be a syncfile fd. The UMD doesn't need to construct 
ip_type/ip_instance/ctx_id/ring any more, and the fd can also be passed 
to dependencies and syncobjs as well.

Regards,
David Zhou
On 2017年09月13日 04:42, Marek Olšák wrote:
> From: Marek Olšák <marek.olsak@amd.com>
>
> for being able to convert an amdgpu fence into one of the handles.
> Mesa will use this.
>
> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 +++++++++++++++++++++++++++++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>   include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>   5 files changed, 82 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index b5c8b90..c15fa93 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
>   int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>   			struct drm_file *filp);
>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
> +				    struct drm_file *filp);
>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
>   int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>   				struct drm_file *filp);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 7cb8a59..6dd719c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -25,6 +25,7 @@
>    *    Jerome Glisse <glisse@freedesktop.org>
>    */
>   #include <linux/pagemap.h>
> +#include <linux/sync_file.h>
>   #include <drm/drmP.h>
>   #include <drm/amdgpu_drm.h>
>   #include <drm/drm_syncobj.h>
> @@ -1311,6 +1312,66 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>   	return fence;
>   }
>   
> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
> +				    struct drm_file *filp)
> +{
> +	struct amdgpu_device *adev = dev->dev_private;
> +	struct amdgpu_fpriv *fpriv = filp->driver_priv;
> +	union drm_amdgpu_fence_to_handle *info = data;
> +	struct dma_fence *fence;
> +	struct drm_syncobj *syncobj;
> +	struct sync_file *sync_file;
> +	int fd, r;
> +
> +	if (amdgpu_kms_vram_lost(adev, fpriv))
> +		return -ENODEV;
> +
> +	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
> +	if (IS_ERR(fence))
> +		return PTR_ERR(fence);
> +
> +	switch (info->in.what) {
> +	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
> +		r = drm_syncobj_create(&syncobj, 0, fence);
> +		dma_fence_put(fence);
> +		if (r)
> +			return r;
> +		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
> +		drm_syncobj_put(syncobj);
> +		return r;
> +
> +	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
> +		r = drm_syncobj_create(&syncobj, 0, fence);
> +		dma_fence_put(fence);
> +		if (r)
> +			return r;
> +		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
> +		drm_syncobj_put(syncobj);
> +		return r;
> +
> +	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
> +		fd = get_unused_fd_flags(O_CLOEXEC);
> +		if (fd < 0) {
> +			dma_fence_put(fence);
> +			return fd;
> +		}
> +
> +		sync_file = sync_file_create(fence);
> +		dma_fence_put(fence);
> +		if (!sync_file) {
> +			put_unused_fd(fd);
> +			return -ENOMEM;
> +		}
> +
> +		fd_install(fd, sync_file->file);
> +		info->out.handle = fd;
> +		return 0;
> +
> +	default:
> +		return -EINVAL;
> +	}
> +}
> +
>   /**
>    * amdgpu_cs_wait_all_fence - wait on all fences to signal
>    *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index d01aca6..1e38411 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -70,9 +70,10 @@
>    * - 3.18.0 - Export gpu always on cu bitmap
>    * - 3.19.0 - Add support for UVD MJPEG decode
>    * - 3.20.0 - Add support for local BOs
> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>    */
>   #define KMS_DRIVER_MAJOR	3
> -#define KMS_DRIVER_MINOR	20
> +#define KMS_DRIVER_MINOR	21
>   #define KMS_DRIVER_PATCHLEVEL	0
>   
>   int amdgpu_vram_limit = 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index d31777b..b09d315 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
>   	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>   	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>   	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>   	/* KMS */
>   	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>   	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 9f5bd97..ec57cd0 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -53,6 +53,7 @@ extern "C" {
>   #define DRM_AMDGPU_WAIT_FENCES		0x12
>   #define DRM_AMDGPU_VM			0x13
>   #define DRM_AMDGPU_FREESYNC	        0x14
> +#define DRM_AMDGPU_FENCE_TO_HANDLE	0x15
>   
>   #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>   #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
> @@ -69,6 +70,7 @@ extern "C" {
>   #define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>   #define DRM_IOCTL_AMDGPU_VM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
>   #define DRM_IOCTL_AMDGPU_FREESYNC	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
>   
>   #define AMDGPU_GEM_DOMAIN_CPU		0x1
>   #define AMDGPU_GEM_DOMAIN_GTT		0x2
> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>   	__u32 handle;
>   };
>   
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2
> +
> +union drm_amdgpu_fence_to_handle {
> +	struct {
> +		struct drm_amdgpu_fence fence;
> +		__u32 what;
> +	} in;
> +	struct {
> +		__u32 handle;
> +	} out;
> +};
> +
>   struct drm_amdgpu_cs_chunk_data {
>   	union {
>   		struct drm_amdgpu_cs_chunk_ib		ib_data;

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-13  3:03     ` zhoucm1
@ 2017-09-13  6:57       ` Christian König
       [not found]         ` <10bed18c-f41b-5570-c299-2be190767a20-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  2017-09-13 10:11       ` Marek Olšák
  1 sibling, 1 reply; 36+ messages in thread
From: Christian König @ 2017-09-13  6:57 UTC (permalink / raw)
  To: zhoucm1, Marek Olšák, dri-devel, amd-gfx

Hi guys,

Mareks IOCTL proposal looks really good to me.

Please note that we already have sync_obj support for the CS IOCTL in 
the 4.13 branch and this work is based on top of that.

> UMD don't need to construct ip_type/ip_instance/ctx_id/ring
Well, the UMD does want to construct ip_type/ip_instance/ctx_id/ring and 
use it, for the simple reason that it allows more flexibility than 
sync_obj/sync_file.

Thinking more about this I'm pretty sure we want to do something similar 
for VM map/unmap operations as well.

Regards,
Christian.

Am 13.09.2017 um 05:03 schrieb zhoucm1:
> Hi Marek,
>
> You're doing the same thing as me; see my "introduce syncfile as fence 
> return" patch set, which makes things simpler. We just need to 
> directly return a syncfile fd to the UMD at CS time; then the fence 
> the UMD gets will always be a syncfile fd. The UMD doesn't need to 
> construct ip_type/ip_instance/ctx_id/ring any more, and the fd can 
> also be passed to dependencies and syncobjs as well.
>
> Regards,
> David Zhou
> On 2017年09月13日 04:42, Marek Olšák wrote:
>> From: Marek Olšák <marek.olsak@amd.com>
>>
>> for being able to convert an amdgpu fence into one of the handles.
>> Mesa will use this.
>>
>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 
>> +++++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>>   include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>>   5 files changed, 82 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index b5c8b90..c15fa93 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, 
>> void *data,
>>   int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>>               struct drm_file *filp);
>>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct 
>> drm_file *filp);
>> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
>> +                    struct drm_file *filp);
>>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct 
>> drm_file *filp);
>>   int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>>                   struct drm_file *filp);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> index 7cb8a59..6dd719c 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> @@ -25,6 +25,7 @@
>>    *    Jerome Glisse <glisse@freedesktop.org>
>>    */
>>   #include <linux/pagemap.h>
>> +#include <linux/sync_file.h>
>>   #include <drm/drmP.h>
>>   #include <drm/amdgpu_drm.h>
>>   #include <drm/drm_syncobj.h>
>> @@ -1311,6 +1312,66 @@ static struct dma_fence 
>> *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>>       return fence;
>>   }
>>   +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void 
>> *data,
>> +                    struct drm_file *filp)
>> +{
>> +    struct amdgpu_device *adev = dev->dev_private;
>> +    struct amdgpu_fpriv *fpriv = filp->driver_priv;
>> +    union drm_amdgpu_fence_to_handle *info = data;
>> +    struct dma_fence *fence;
>> +    struct drm_syncobj *syncobj;
>> +    struct sync_file *sync_file;
>> +    int fd, r;
>> +
>> +    if (amdgpu_kms_vram_lost(adev, fpriv))
>> +        return -ENODEV;
>> +
>> +    fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
>> +    if (IS_ERR(fence))
>> +        return PTR_ERR(fence);
>> +
>> +    switch (info->in.what) {
>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>> +        dma_fence_put(fence);
>> +        if (r)
>> +            return r;
>> +        r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
>> +        drm_syncobj_put(syncobj);
>> +        return r;
>> +
>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>> +        dma_fence_put(fence);
>> +        if (r)
>> +            return r;
>> +        r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
>> +        drm_syncobj_put(syncobj);
>> +        return r;
>> +
>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
>> +        fd = get_unused_fd_flags(O_CLOEXEC);
>> +        if (fd < 0) {
>> +            dma_fence_put(fence);
>> +            return fd;
>> +        }
>> +
>> +        sync_file = sync_file_create(fence);
>> +        dma_fence_put(fence);
>> +        if (!sync_file) {
>> +            put_unused_fd(fd);
>> +            return -ENOMEM;
>> +        }
>> +
>> +        fd_install(fd, sync_file->file);
>> +        info->out.handle = fd;
>> +        return 0;
>> +
>> +    default:
>> +        return -EINVAL;
>> +    }
>> +}
>> +
>>   /**
>>    * amdgpu_cs_wait_all_fence - wait on all fences to signal
>>    *
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> index d01aca6..1e38411 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> @@ -70,9 +70,10 @@
>>    * - 3.18.0 - Export gpu always on cu bitmap
>>    * - 3.19.0 - Add support for UVD MJPEG decode
>>    * - 3.20.0 - Add support for local BOs
>> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>>    */
>>   #define KMS_DRIVER_MAJOR    3
>> -#define KMS_DRIVER_MINOR    20
>> +#define KMS_DRIVER_MINOR    21
>>   #define KMS_DRIVER_PATCHLEVEL    0
>>     int amdgpu_vram_limit = 0;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> index d31777b..b09d315 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] 
>> = {
>>       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, 
>> DRM_AUTH|DRM_RENDER_ALLOW),
>>       DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, 
>> DRM_AUTH|DRM_RENDER_ALLOW),
>>       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, 
>> DRM_AUTH|DRM_RENDER_ALLOW),
>> +    DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, 
>> amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>       /* KMS */
>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, 
>> DRM_AUTH|DRM_RENDER_ALLOW),
>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, 
>> amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>> diff --git a/include/uapi/drm/amdgpu_drm.h 
>> b/include/uapi/drm/amdgpu_drm.h
>> index 9f5bd97..ec57cd0 100644
>> --- a/include/uapi/drm/amdgpu_drm.h
>> +++ b/include/uapi/drm/amdgpu_drm.h
>> @@ -53,6 +53,7 @@ extern "C" {
>>   #define DRM_AMDGPU_WAIT_FENCES        0x12
>>   #define DRM_AMDGPU_VM            0x13
>>   #define DRM_AMDGPU_FREESYNC            0x14
>> +#define DRM_AMDGPU_FENCE_TO_HANDLE    0x15
>>     #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>>   #define DRM_IOCTL_AMDGPU_GEM_MMAP    DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
>> @@ -69,6 +70,7 @@ extern "C" {
>>   #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>>   #define DRM_IOCTL_AMDGPU_VM        DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_AMDGPU_VM, union drm_amdgpu_vm)
>>   #define DRM_IOCTL_AMDGPU_FREESYNC    DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
>> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
>>     #define AMDGPU_GEM_DOMAIN_CPU        0x1
>>   #define AMDGPU_GEM_DOMAIN_GTT        0x2
>> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>>       __u32 handle;
>>   };
>>   +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ    0
>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD    1
>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD    2
>> +
>> +union drm_amdgpu_fence_to_handle {
>> +    struct {
>> +        struct drm_amdgpu_fence fence;
>> +        __u32 what;
>> +    } in;
>> +    struct {
>> +        __u32 handle;
>> +    } out;
>> +};
>> +
>>   struct drm_amdgpu_cs_chunk_data {
>>       union {
>>           struct drm_amdgpu_cs_chunk_ib        ib_data;
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]         ` <10bed18c-f41b-5570-c299-2be190767a20-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-13  7:03           ` zhoucm1
  2017-09-13  7:09             ` Christian König
  0 siblings, 1 reply; 36+ messages in thread
From: zhoucm1 @ 2017-09-13  7:03 UTC (permalink / raw)
  To: Christian König, Marek Olšák,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

I am really very surprised that you prefer to expand sync_obj to get a
syncfile fd!!!

Why not directly generate a syncfile fd and use it? Which one is simpler
is obvious.

Regards,
David Zhou
On 2017年09月13日 14:57, Christian König wrote:
> Hi guys,
>
> Mareks IOCTL proposal looks really good to me.
>
> Please note that we already have sync_obj support for the CS IOCTL in 
> the 4.13 branch and this work is based on top of that.
>
>> UMD don't need to construct ip_type/ip_instance/ctx_id/ring
> Well the UMD does want to construct ip_type/ip_instance/ctx_id/ring 
> and use for the simple reason that it allows more flexibility than 
> sync_obj/sync_file.
>
> Thinking more about this I'm pretty sure we want to do something 
> similar for VM map/unmap operations as well.
>
> Regards,
> Christian.
>
> Am 13.09.2017 um 05:03 schrieb zhoucm1:
>> Hi Marek,
>>
>> You're doing same things with me, see my "introduce syncfile as fence 
>> reuturn" patch set, which makes things more simple, we just need to 
>> directly return syncfile fd to UMD when CS, then the fence UMD get 
>> will be always syncfile fd, UMD don't need to construct 
>> ip_type/ip_instance/ctx_id/ring any more, which also can pass to 
>> dependency and syncobj as well.
>>
>> Regards,
>> David Zhou
>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>> From: Marek Olšák <marek.olsak@amd.com>
>>>
>>> for being able to convert an amdgpu fence into one of the handles.
>>> Mesa will use this.
>>>
>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 
>>> +++++++++++++++++++++++++++++++++
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>>>   include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>>>   5 files changed, 82 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> index b5c8b90..c15fa93 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device 
>>> *dev, void *data,
>>>   int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>>>               struct drm_file *filp);
>>>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct 
>>> drm_file *filp);
>>> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void 
>>> *data,
>>> +                    struct drm_file *filp);
>>>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 
>>> struct drm_file *filp);
>>>   int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>>>                   struct drm_file *filp);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>> index 7cb8a59..6dd719c 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>> @@ -25,6 +25,7 @@
>>>    *    Jerome Glisse <glisse@freedesktop.org>
>>>    */
>>>   #include <linux/pagemap.h>
>>> +#include <linux/sync_file.h>
>>>   #include <drm/drmP.h>
>>>   #include <drm/amdgpu_drm.h>
>>>   #include <drm/drm_syncobj.h>
>>> @@ -1311,6 +1312,66 @@ static struct dma_fence 
>>> *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>>>       return fence;
>>>   }
>>>   +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void 
>>> *data,
>>> +                    struct drm_file *filp)
>>> +{
>>> +    struct amdgpu_device *adev = dev->dev_private;
>>> +    struct amdgpu_fpriv *fpriv = filp->driver_priv;
>>> +    union drm_amdgpu_fence_to_handle *info = data;
>>> +    struct dma_fence *fence;
>>> +    struct drm_syncobj *syncobj;
>>> +    struct sync_file *sync_file;
>>> +    int fd, r;
>>> +
>>> +    if (amdgpu_kms_vram_lost(adev, fpriv))
>>> +        return -ENODEV;
>>> +
>>> +    fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
>>> +    if (IS_ERR(fence))
>>> +        return PTR_ERR(fence);
>>> +
>>> +    switch (info->in.what) {
>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>> +        dma_fence_put(fence);
>>> +        if (r)
>>> +            return r;
>>> +        r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
>>> +        drm_syncobj_put(syncobj);
>>> +        return r;
>>> +
>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>> +        dma_fence_put(fence);
>>> +        if (r)
>>> +            return r;
>>> +        r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
>>> +        drm_syncobj_put(syncobj);
>>> +        return r;
>>> +
>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
>>> +        fd = get_unused_fd_flags(O_CLOEXEC);
>>> +        if (fd < 0) {
>>> +            dma_fence_put(fence);
>>> +            return fd;
>>> +        }
>>> +
>>> +        sync_file = sync_file_create(fence);
>>> +        dma_fence_put(fence);
>>> +        if (!sync_file) {
>>> +            put_unused_fd(fd);
>>> +            return -ENOMEM;
>>> +        }
>>> +
>>> +        fd_install(fd, sync_file->file);
>>> +        info->out.handle = fd;
>>> +        return 0;
>>> +
>>> +    default:
>>> +        return -EINVAL;
>>> +    }
>>> +}
>>> +
>>>   /**
>>>    * amdgpu_cs_wait_all_fence - wait on all fences to signal
>>>    *
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>> index d01aca6..1e38411 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>> @@ -70,9 +70,10 @@
>>>    * - 3.18.0 - Export gpu always on cu bitmap
>>>    * - 3.19.0 - Add support for UVD MJPEG decode
>>>    * - 3.20.0 - Add support for local BOs
>>> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>>>    */
>>>   #define KMS_DRIVER_MAJOR    3
>>> -#define KMS_DRIVER_MINOR    20
>>> +#define KMS_DRIVER_MINOR    21
>>>   #define KMS_DRIVER_PATCHLEVEL    0
>>>     int amdgpu_vram_limit = 0;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> index d31777b..b09d315 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc 
>>> amdgpu_ioctls_kms[] = {
>>>       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, 
>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>       DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, 
>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, 
>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>> +    DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, 
>>> amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>       /* KMS */
>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, 
>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, 
>>> amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>> diff --git a/include/uapi/drm/amdgpu_drm.h 
>>> b/include/uapi/drm/amdgpu_drm.h
>>> index 9f5bd97..ec57cd0 100644
>>> --- a/include/uapi/drm/amdgpu_drm.h
>>> +++ b/include/uapi/drm/amdgpu_drm.h
>>> @@ -53,6 +53,7 @@ extern "C" {
>>>   #define DRM_AMDGPU_WAIT_FENCES        0x12
>>>   #define DRM_AMDGPU_VM            0x13
>>>   #define DRM_AMDGPU_FREESYNC            0x14
>>> +#define DRM_AMDGPU_FENCE_TO_HANDLE    0x15
>>>     #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + 
>>> DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>>>   #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + 
>>> DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
>>> @@ -69,6 +70,7 @@ extern "C" {
>>>   #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + 
>>> DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>>>   #define DRM_IOCTL_AMDGPU_VM        DRM_IOWR(DRM_COMMAND_BASE + 
>>> DRM_AMDGPU_VM, union drm_amdgpu_vm)
>>>   #define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + 
>>> DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
>>> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE 
>>> + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
>>>     #define AMDGPU_GEM_DOMAIN_CPU        0x1
>>>   #define AMDGPU_GEM_DOMAIN_GTT        0x2
>>> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>>>       __u32 handle;
>>>   };
>>>   +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ    0
>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD    1
>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD    2
>>> +
>>> +union drm_amdgpu_fence_to_handle {
>>> +    struct {
>>> +        struct drm_amdgpu_fence fence;
>>> +        __u32 what;
>>> +    } in;
>>> +    struct {
>>> +        __u32 handle;
>>> +    } out;
>>> +};
>>> +
>>>   struct drm_amdgpu_cs_chunk_data {
>>>       union {
>>>           struct drm_amdgpu_cs_chunk_ib        ib_data;
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-13  7:03           ` zhoucm1
@ 2017-09-13  7:09             ` Christian König
       [not found]               ` <a6c564e8-fe75-0e05-d267-e6dc7255b4af-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Christian König @ 2017-09-13  7:09 UTC (permalink / raw)
  To: zhoucm1, Marek Olšák, dri-devel, amd-gfx

syncfile is the older implementation, sync_obj is the replacement for it.

I don't think we should implement syncfile in the CS any more, sync_obj 
is already done and can be converted to/from syncfiles.

Mareks IOCTL should cover the case when we need to create a syncfile or 
syncobj from a fence sequence number.

Regards,
Christian.

Am 13.09.2017 um 09:03 schrieb zhoucm1:
> I really very surprise that you prefer to expand sync_obj to get 
> syncfile fd!!!
>
> Why not directly generate syncfile fd and use it? Which one is simple 
> is so obvious.
>
> Regards,
> David Zhou
> On 2017年09月13日 14:57, Christian König wrote:
>> Hi guys,
>>
>> Mareks IOCTL proposal looks really good to me.
>>
>> Please note that we already have sync_obj support for the CS IOCTL in 
>> the 4.13 branch and this work is based on top of that.
>>
>>> UMD don't need to construct ip_type/ip_instance/ctx_id/ring
>> Well the UMD does want to construct ip_type/ip_instance/ctx_id/ring 
>> and use for the simple reason that it allows more flexibility than 
>> sync_obj/sync_file.
>>
>> Thinking more about this I'm pretty sure we want to do something 
>> similar for VM map/unmap operations as well.
>>
>> Regards,
>> Christian.
>>
>> Am 13.09.2017 um 05:03 schrieb zhoucm1:
>>> Hi Marek,
>>>
>>> You're doing same things with me, see my "introduce syncfile as 
>>> fence reuturn" patch set, which makes things more simple, we just 
>>> need to directly return syncfile fd to UMD when CS, then the fence 
>>> UMD get will be always syncfile fd, UMD don't need to construct 
>>> ip_type/ip_instance/ctx_id/ring any more, which also can pass to 
>>> dependency and syncobj as well.
>>>
>>> Regards,
>>> David Zhou
>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>> From: Marek Olšák <marek.olsak@amd.com>
>>>>
>>>> for being able to convert an amdgpu fence into one of the handles.
>>>> Mesa will use this.
>>>>
>>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 
>>>> +++++++++++++++++++++++++++++++++
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>>>>   include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>>>>   5 files changed, 82 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>> index b5c8b90..c15fa93 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device 
>>>> *dev, void *data,
>>>>   int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>>>>               struct drm_file *filp);
>>>>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct 
>>>> drm_file *filp);
>>>> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void 
>>>> *data,
>>>> +                    struct drm_file *filp);
>>>>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 
>>>> struct drm_file *filp);
>>>>   int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>>>>                   struct drm_file *filp);
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>> index 7cb8a59..6dd719c 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>> @@ -25,6 +25,7 @@
>>>>    *    Jerome Glisse <glisse@freedesktop.org>
>>>>    */
>>>>   #include <linux/pagemap.h>
>>>> +#include <linux/sync_file.h>
>>>>   #include <drm/drmP.h>
>>>>   #include <drm/amdgpu_drm.h>
>>>>   #include <drm/drm_syncobj.h>
>>>> @@ -1311,6 +1312,66 @@ static struct dma_fence 
>>>> *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>>>>       return fence;
>>>>   }
>>>>   +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void 
>>>> *data,
>>>> +                    struct drm_file *filp)
>>>> +{
>>>> +    struct amdgpu_device *adev = dev->dev_private;
>>>> +    struct amdgpu_fpriv *fpriv = filp->driver_priv;
>>>> +    union drm_amdgpu_fence_to_handle *info = data;
>>>> +    struct dma_fence *fence;
>>>> +    struct drm_syncobj *syncobj;
>>>> +    struct sync_file *sync_file;
>>>> +    int fd, r;
>>>> +
>>>> +    if (amdgpu_kms_vram_lost(adev, fpriv))
>>>> +        return -ENODEV;
>>>> +
>>>> +    fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
>>>> +    if (IS_ERR(fence))
>>>> +        return PTR_ERR(fence);
>>>> +
>>>> +    switch (info->in.what) {
>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>> +        dma_fence_put(fence);
>>>> +        if (r)
>>>> +            return r;
>>>> +        r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
>>>> +        drm_syncobj_put(syncobj);
>>>> +        return r;
>>>> +
>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>> +        dma_fence_put(fence);
>>>> +        if (r)
>>>> +            return r;
>>>> +        r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
>>>> +        drm_syncobj_put(syncobj);
>>>> +        return r;
>>>> +
>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
>>>> +        fd = get_unused_fd_flags(O_CLOEXEC);
>>>> +        if (fd < 0) {
>>>> +            dma_fence_put(fence);
>>>> +            return fd;
>>>> +        }
>>>> +
>>>> +        sync_file = sync_file_create(fence);
>>>> +        dma_fence_put(fence);
>>>> +        if (!sync_file) {
>>>> +            put_unused_fd(fd);
>>>> +            return -ENOMEM;
>>>> +        }
>>>> +
>>>> +        fd_install(fd, sync_file->file);
>>>> +        info->out.handle = fd;
>>>> +        return 0;
>>>> +
>>>> +    default:
>>>> +        return -EINVAL;
>>>> +    }
>>>> +}
>>>> +
>>>>   /**
>>>>    * amdgpu_cs_wait_all_fence - wait on all fences to signal
>>>>    *
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>> index d01aca6..1e38411 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>> @@ -70,9 +70,10 @@
>>>>    * - 3.18.0 - Export gpu always on cu bitmap
>>>>    * - 3.19.0 - Add support for UVD MJPEG decode
>>>>    * - 3.20.0 - Add support for local BOs
>>>> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>>>>    */
>>>>   #define KMS_DRIVER_MAJOR    3
>>>> -#define KMS_DRIVER_MINOR    20
>>>> +#define KMS_DRIVER_MINOR    21
>>>>   #define KMS_DRIVER_PATCHLEVEL    0
>>>>     int amdgpu_vram_limit = 0;
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>> index d31777b..b09d315 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc 
>>>> amdgpu_ioctls_kms[] = {
>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, 
>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, 
>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, 
>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>> +    DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, 
>>>> amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>>       /* KMS */
>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, 
>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, 
>>>> amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>> diff --git a/include/uapi/drm/amdgpu_drm.h 
>>>> b/include/uapi/drm/amdgpu_drm.h
>>>> index 9f5bd97..ec57cd0 100644
>>>> --- a/include/uapi/drm/amdgpu_drm.h
>>>> +++ b/include/uapi/drm/amdgpu_drm.h
>>>> @@ -53,6 +53,7 @@ extern "C" {
>>>>   #define DRM_AMDGPU_WAIT_FENCES        0x12
>>>>   #define DRM_AMDGPU_VM            0x13
>>>>   #define DRM_AMDGPU_FREESYNC            0x14
>>>> +#define DRM_AMDGPU_FENCE_TO_HANDLE    0x15
>>>>     #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>>>>   #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
>>>> @@ -69,6 +70,7 @@ extern "C" {
>>>>   #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>>>>   #define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_AMDGPU_VM, union drm_amdgpu_vm)
>>>>   #define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
>>>> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE 
>>>> + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
>>>>     #define AMDGPU_GEM_DOMAIN_CPU        0x1
>>>>   #define AMDGPU_GEM_DOMAIN_GTT        0x2
>>>> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>>>>       __u32 handle;
>>>>   };
>>>>   +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ    0
>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD    1
>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD    2
>>>> +
>>>> +union drm_amdgpu_fence_to_handle {
>>>> +    struct {
>>>> +        struct drm_amdgpu_fence fence;
>>>> +        __u32 what;
>>>> +    } in;
>>>> +    struct {
>>>> +        __u32 handle;
>>>> +    } out;
>>>> +};
>>>> +
>>>>   struct drm_amdgpu_cs_chunk_data {
>>>>       union {
>>>>           struct drm_amdgpu_cs_chunk_ib        ib_data;
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
>>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]               ` <a6c564e8-fe75-0e05-d267-e6dc7255b4af-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-13  7:39                 ` zhoucm1
  2017-09-13  8:07                   ` Christian König
  0 siblings, 1 reply; 36+ messages in thread
From: zhoucm1 @ 2017-09-13  7:39 UTC (permalink / raw)
  To: Christian König, Marek Olšák,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2017年09月13日 15:09, Christian König wrote:
> syncfile is the older implementation, sync_obj is the replacement for it.

Are you sure sync_obj is a replacement for syncfile? Can anyone clarify this?

sync_obj is mainly for semaphore usage, we can see sync_obj docs from 
Dave in drm_syncobj.c:
"/**
  * DOC: Overview
  *
  * DRM synchronisation objects (syncobj) are a persistent objects,
  * that contain an optional fence. The fence can be updated with a new
  * fence, or be NULL.
  *
  * syncobj's can be export to fd's and back, these fd's are opaque and
  * have no other use case, except passing the syncobj between processes.
  *
  * Their primary use-case is to implement Vulkan fences and semaphores.
  *
  * syncobj have a kref reference count, but also have an optional file.
  * The file is only created once the syncobj is exported.
  * The file takes a reference on the kref.
  */
"

>
> I don't think we should implement syncfile in the CS any more, 
> sync_obj is already done and can be converted to/from syncfiles.
>
> Mareks IOCTL should cover the case when we need to create a syncfile 
> or syncobj from a fence sequence number.

I know his conversion can do those things, but returning a syncfile fd
directly is a really simple method.

Regards,
David Zhou
>
> Regards,
> Christian.
>
> Am 13.09.2017 um 09:03 schrieb zhoucm1:
>> I really very surprise that you prefer to expand sync_obj to get 
>> syncfile fd!!!
>>
>> Why not directly generate syncfile fd and use it? Which one is simple 
>> is so obvious.
>>
>> Regards,
>> David Zhou
>> On 2017年09月13日 14:57, Christian König wrote:
>>> Hi guys,
>>>
>>> Mareks IOCTL proposal looks really good to me.
>>>
>>> Please note that we already have sync_obj support for the CS IOCTL 
>>> in the 4.13 branch and this work is based on top of that.
>>>
>>>> UMD don't need to construct ip_type/ip_instance/ctx_id/ring
>>> Well the UMD does want to construct ip_type/ip_instance/ctx_id/ring 
>>> and use for the simple reason that it allows more flexibility than 
>>> sync_obj/sync_file.
>>>
>>> Thinking more about this I'm pretty sure we want to do something 
>>> similar for VM map/unmap operations as well.
>>>
>>> Regards,
>>> Christian.
>>>
>>> Am 13.09.2017 um 05:03 schrieb zhoucm1:
>>>> Hi Marek,
>>>>
>>>> You're doing same things with me, see my "introduce syncfile as 
>>>> fence reuturn" patch set, which makes things more simple, we just 
>>>> need to directly return syncfile fd to UMD when CS, then the fence 
>>>> UMD get will be always syncfile fd, UMD don't need to construct 
>>>> ip_type/ip_instance/ctx_id/ring any more, which also can pass to 
>>>> dependency and syncobj as well.
>>>>
>>>> Regards,
>>>> David Zhou
>>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>>> From: Marek Olšák <marek.olsak@amd.com>
>>>>>
>>>>> for being able to convert an amdgpu fence into one of the handles.
>>>>> Mesa will use this.
>>>>>
>>>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>>>> ---
>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 
>>>>> +++++++++++++++++++++++++++++++++
>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>>>>>   include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>>>>>   5 files changed, 82 insertions(+), 1 deletion(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>> index b5c8b90..c15fa93 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device 
>>>>> *dev, void *data,
>>>>>   int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>>>>>               struct drm_file *filp);
>>>>>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct 
>>>>> drm_file *filp);
>>>>> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void 
>>>>> *data,
>>>>> +                    struct drm_file *filp);
>>>>>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 
>>>>> struct drm_file *filp);
>>>>>   int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>>>>>                   struct drm_file *filp);
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>> index 7cb8a59..6dd719c 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>> @@ -25,6 +25,7 @@
>>>>>    *    Jerome Glisse <glisse@freedesktop.org>
>>>>>    */
>>>>>   #include <linux/pagemap.h>
>>>>> +#include <linux/sync_file.h>
>>>>>   #include <drm/drmP.h>
>>>>>   #include <drm/amdgpu_drm.h>
>>>>>   #include <drm/drm_syncobj.h>
>>>>> @@ -1311,6 +1312,66 @@ static struct dma_fence 
>>>>> *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>>>>>       return fence;
>>>>>   }
>>>>>   +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, 
>>>>> void *data,
>>>>> +                    struct drm_file *filp)
>>>>> +{
>>>>> +    struct amdgpu_device *adev = dev->dev_private;
>>>>> +    struct amdgpu_fpriv *fpriv = filp->driver_priv;
>>>>> +    union drm_amdgpu_fence_to_handle *info = data;
>>>>> +    struct dma_fence *fence;
>>>>> +    struct drm_syncobj *syncobj;
>>>>> +    struct sync_file *sync_file;
>>>>> +    int fd, r;
>>>>> +
>>>>> +    if (amdgpu_kms_vram_lost(adev, fpriv))
>>>>> +        return -ENODEV;
>>>>> +
>>>>> +    fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
>>>>> +    if (IS_ERR(fence))
>>>>> +        return PTR_ERR(fence);
>>>>> +
>>>>> +    switch (info->in.what) {
>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
>>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>>> +        dma_fence_put(fence);
>>>>> +        if (r)
>>>>> +            return r;
>>>>> +        r = drm_syncobj_get_handle(filp, syncobj, 
>>>>> &info->out.handle);
>>>>> +        drm_syncobj_put(syncobj);
>>>>> +        return r;
>>>>> +
>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
>>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>>> +        dma_fence_put(fence);
>>>>> +        if (r)
>>>>> +            return r;
>>>>> +        r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
>>>>> +        drm_syncobj_put(syncobj);
>>>>> +        return r;
>>>>> +
>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
>>>>> +        fd = get_unused_fd_flags(O_CLOEXEC);
>>>>> +        if (fd < 0) {
>>>>> +            dma_fence_put(fence);
>>>>> +            return fd;
>>>>> +        }
>>>>> +
>>>>> +        sync_file = sync_file_create(fence);
>>>>> +        dma_fence_put(fence);
>>>>> +        if (!sync_file) {
>>>>> +            put_unused_fd(fd);
>>>>> +            return -ENOMEM;
>>>>> +        }
>>>>> +
>>>>> +        fd_install(fd, sync_file->file);
>>>>> +        info->out.handle = fd;
>>>>> +        return 0;
>>>>> +
>>>>> +    default:
>>>>> +        return -EINVAL;
>>>>> +    }
>>>>> +}
>>>>> +
>>>>>   /**
>>>>>    * amdgpu_cs_wait_all_fence - wait on all fences to signal
>>>>>    *
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>> index d01aca6..1e38411 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>> @@ -70,9 +70,10 @@
>>>>>    * - 3.18.0 - Export gpu always on cu bitmap
>>>>>    * - 3.19.0 - Add support for UVD MJPEG decode
>>>>>    * - 3.20.0 - Add support for local BOs
>>>>> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>>>>>    */
>>>>>   #define KMS_DRIVER_MAJOR    3
>>>>> -#define KMS_DRIVER_MINOR    20
>>>>> +#define KMS_DRIVER_MINOR    21
>>>>>   #define KMS_DRIVER_PATCHLEVEL    0
>>>>>     int amdgpu_vram_limit = 0;
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>> index d31777b..b09d315 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc 
>>>>> amdgpu_ioctls_kms[] = {
>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, 
>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, 
>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, 
>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>> +    DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, 
>>>>> amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>       /* KMS */
>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, 
>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, 
>>>>> amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>>> diff --git a/include/uapi/drm/amdgpu_drm.h 
>>>>> b/include/uapi/drm/amdgpu_drm.h
>>>>> index 9f5bd97..ec57cd0 100644
>>>>> --- a/include/uapi/drm/amdgpu_drm.h
>>>>> +++ b/include/uapi/drm/amdgpu_drm.h
>>>>> @@ -53,6 +53,7 @@ extern "C" {
>>>>>   #define DRM_AMDGPU_WAIT_FENCES        0x12
>>>>>   #define DRM_AMDGPU_VM            0x13
>>>>>   #define DRM_AMDGPU_FREESYNC            0x14
>>>>> +#define DRM_AMDGPU_FENCE_TO_HANDLE    0x15
>>>>>     #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE 
>>>>> + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>>>>>   #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + 
>>>>> DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
>>>>> @@ -69,6 +70,7 @@ extern "C" {
>>>>>   #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + 
>>>>> DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>>>>>   #define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + 
>>>>> DRM_AMDGPU_VM, union drm_amdgpu_vm)
>>>>>   #define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + 
>>>>> DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
>>>>> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE 
>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union 
>>>>> drm_amdgpu_fence_to_handle)
>>>>>     #define AMDGPU_GEM_DOMAIN_CPU        0x1
>>>>>   #define AMDGPU_GEM_DOMAIN_GTT        0x2
>>>>> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>>>>>       __u32 handle;
>>>>>   };
>>>>>   +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ    0
>>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD    1
>>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD    2
>>>>> +
>>>>> +union drm_amdgpu_fence_to_handle {
>>>>> +    struct {
>>>>> +        struct drm_amdgpu_fence fence;
>>>>> +        __u32 what;
>>>>> +    } in;
>>>>> +    struct {
>>>>> +        __u32 handle;
>>>>> +    } out;
>>>>> +};
>>>>> +
>>>>>   struct drm_amdgpu_cs_chunk_data {
>>>>>       union {
>>>>>           struct drm_amdgpu_cs_chunk_ib        ib_data;
>>>>
>>>> _______________________________________________
>>>> amd-gfx mailing list
>>>> amd-gfx@lists.freedesktop.org
>>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>>
>>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-13  7:39                 ` zhoucm1
@ 2017-09-13  8:07                   ` Christian König
       [not found]                     ` <58914d9d-f140-9cf3-f91b-78db7d913d12-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Christian König @ 2017-09-13  8:07 UTC (permalink / raw)
  To: zhoucm1, Marek Olšák, dri-devel, amd-gfx

Am 13.09.2017 um 09:39 schrieb zhoucm1:
>
>
> On 2017年09月13日 15:09, Christian König wrote:
>> syncfile is the older implementation, sync_obj is the replacement for 
>> it.
>
> Are you sure sync_obj is a replacement for syncfile? Anyone can 
> clarify it?
>
> sync_obj is mainly for semaphore usage, we can see sync_obj docs from 
> Dave in drm_syncobj.c:
> "/**
>  * DOC: Overview
>  *
>  * DRM synchronisation objects (syncobj) are a persistent objects,
>  * that contain an optional fence. The fence can be updated with a new
>  * fence, or be NULL.
>  *
>  * syncobj's can be export to fd's and back, these fd's are opaque and
>  * have no other use case, except passing the syncobj between processes.
>  *
>  * Their primary use-case is to implement Vulkan fences and semaphores.
>  *
>  * syncobj have a kref reference count, but also have an optional file.
>  * The file is only created once the syncobj is exported.
>  * The file takes a reference on the kref.
>  */
> "
>

Correct, but you can convert from sync_obj into syncfile and back using 
a standard DRM IOCTL. So when we support sync_obj we also support syncfile.

>>
>> I don't think we should implement syncfile in the CS any more, 
>> sync_obj is already done and can be converted to/from syncfiles.
>>
>> Mareks IOCTL should cover the case when we need to create a syncfile 
>> or syncobj from a fence sequence number.
>
> I know his convertion can do that things, but returning syncfile fd 
> directly is a really simple method.

Well we can use sync_obj for this as well, it doesn't make so much 
difference.

Point is we shouldn't return a syncfile for VM operations because that 
will always use an fd which isn't very desirable.

Regards,
Christian.

>
> Regards,
> David Zhou
>>
>> Regards,
>> Christian.
>>
>> Am 13.09.2017 um 09:03 schrieb zhoucm1:
>>> I really very surprise that you prefer to expand sync_obj to get 
>>> syncfile fd!!!
>>>
>>> Why not directly generate syncfile fd and use it? Which one is 
>>> simple is so obvious.
>>>
>>> Regards,
>>> David Zhou
>>> On 2017年09月13日 14:57, Christian König wrote:
>>>> Hi guys,
>>>>
>>>> Mareks IOCTL proposal looks really good to me.
>>>>
>>>> Please note that we already have sync_obj support for the CS IOCTL 
>>>> in the 4.13 branch and this work is based on top of that.
>>>>
>>>>> UMD don't need to construct ip_type/ip_instance/ctx_id/ring
>>>> Well the UMD does want to construct ip_type/ip_instance/ctx_id/ring 
>>>> and use for the simple reason that it allows more flexibility than 
>>>> sync_obj/sync_file.
>>>>
>>>> Thinking more about this I'm pretty sure we want to do something 
>>>> similar for VM map/unmap operations as well.
>>>>
>>>> Regards,
>>>> Christian.
>>>>
>>>> Am 13.09.2017 um 05:03 schrieb zhoucm1:
>>>>> Hi Marek,
>>>>>
>>>>> You're doing same things with me, see my "introduce syncfile as 
>>>>> fence reuturn" patch set, which makes things more simple, we just 
>>>>> need to directly return syncfile fd to UMD when CS, then the fence 
>>>>> UMD get will be always syncfile fd, UMD don't need to construct 
>>>>> ip_type/ip_instance/ctx_id/ring any more, which also can pass to 
>>>>> dependency and syncobj as well.
>>>>>
>>>>> Regards,
>>>>> David Zhou
>>>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>>>> From: Marek Olšák <marek.olsak@amd.com>
>>>>>>
>>>>>> for being able to convert an amdgpu fence into one of the handles.
>>>>>> Mesa will use this.
>>>>>>
>>>>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>>>>> ---
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 
>>>>>> +++++++++++++++++++++++++++++++++
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>>>>>>   include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>>>>>>   5 files changed, 82 insertions(+), 1 deletion(-)
>>>>>>
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>> index b5c8b90..c15fa93 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device 
>>>>>> *dev, void *data,
>>>>>>   int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>>>>>>               struct drm_file *filp);
>>>>>>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct 
>>>>>> drm_file *filp);
>>>>>> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void 
>>>>>> *data,
>>>>>> +                    struct drm_file *filp);
>>>>>>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 
>>>>>> struct drm_file *filp);
>>>>>>   int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void 
>>>>>> *data,
>>>>>>                   struct drm_file *filp);
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>>> index 7cb8a59..6dd719c 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>>> @@ -25,6 +25,7 @@
>>>>>>    *    Jerome Glisse <glisse@freedesktop.org>
>>>>>>    */
>>>>>>   #include <linux/pagemap.h>
>>>>>> +#include <linux/sync_file.h>
>>>>>>   #include <drm/drmP.h>
>>>>>>   #include <drm/amdgpu_drm.h>
>>>>>>   #include <drm/drm_syncobj.h>
>>>>>> @@ -1311,6 +1312,66 @@ static struct dma_fence 
>>>>>> *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>>>>>>       return fence;
>>>>>>   }
>>>>>>   +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, 
>>>>>> void *data,
>>>>>> +                    struct drm_file *filp)
>>>>>> +{
>>>>>> +    struct amdgpu_device *adev = dev->dev_private;
>>>>>> +    struct amdgpu_fpriv *fpriv = filp->driver_priv;
>>>>>> +    union drm_amdgpu_fence_to_handle *info = data;
>>>>>> +    struct dma_fence *fence;
>>>>>> +    struct drm_syncobj *syncobj;
>>>>>> +    struct sync_file *sync_file;
>>>>>> +    int fd, r;
>>>>>> +
>>>>>> +    if (amdgpu_kms_vram_lost(adev, fpriv))
>>>>>> +        return -ENODEV;
>>>>>> +
>>>>>> +    fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
>>>>>> +    if (IS_ERR(fence))
>>>>>> +        return PTR_ERR(fence);
>>>>>> +
>>>>>> +    switch (info->in.what) {
>>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
>>>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>>>> +        dma_fence_put(fence);
>>>>>> +        if (r)
>>>>>> +            return r;
>>>>>> +        r = drm_syncobj_get_handle(filp, syncobj, 
>>>>>> &info->out.handle);
>>>>>> +        drm_syncobj_put(syncobj);
>>>>>> +        return r;
>>>>>> +
>>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
>>>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>>>> +        dma_fence_put(fence);
>>>>>> +        if (r)
>>>>>> +            return r;
>>>>>> +        r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
>>>>>> +        drm_syncobj_put(syncobj);
>>>>>> +        return r;
>>>>>> +
>>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
>>>>>> +        fd = get_unused_fd_flags(O_CLOEXEC);
>>>>>> +        if (fd < 0) {
>>>>>> +            dma_fence_put(fence);
>>>>>> +            return fd;
>>>>>> +        }
>>>>>> +
>>>>>> +        sync_file = sync_file_create(fence);
>>>>>> +        dma_fence_put(fence);
>>>>>> +        if (!sync_file) {
>>>>>> +            put_unused_fd(fd);
>>>>>> +            return -ENOMEM;
>>>>>> +        }
>>>>>> +
>>>>>> +        fd_install(fd, sync_file->file);
>>>>>> +        info->out.handle = fd;
>>>>>> +        return 0;
>>>>>> +
>>>>>> +    default:
>>>>>> +        return -EINVAL;
>>>>>> +    }
>>>>>> +}
>>>>>> +
>>>>>>   /**
>>>>>>    * amdgpu_cs_wait_all_fence - wait on all fences to signal
>>>>>>    *
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>>> index d01aca6..1e38411 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>>> @@ -70,9 +70,10 @@
>>>>>>    * - 3.18.0 - Export gpu always on cu bitmap
>>>>>>    * - 3.19.0 - Add support for UVD MJPEG decode
>>>>>>    * - 3.20.0 - Add support for local BOs
>>>>>> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>>>>>>    */
>>>>>>   #define KMS_DRIVER_MAJOR    3
>>>>>> -#define KMS_DRIVER_MINOR    20
>>>>>> +#define KMS_DRIVER_MINOR    21
>>>>>>   #define KMS_DRIVER_PATCHLEVEL    0
>>>>>>     int amdgpu_vram_limit = 0;
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>>> index d31777b..b09d315 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>>> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc 
>>>>>> amdgpu_ioctls_kms[] = {
>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, 
>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, 
>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, 
>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>> +    DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, 
>>>>>> amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>       /* KMS */
>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, 
>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, 
>>>>>> amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>> diff --git a/include/uapi/drm/amdgpu_drm.h 
>>>>>> b/include/uapi/drm/amdgpu_drm.h
>>>>>> index 9f5bd97..ec57cd0 100644
>>>>>> --- a/include/uapi/drm/amdgpu_drm.h
>>>>>> +++ b/include/uapi/drm/amdgpu_drm.h
>>>>>> @@ -53,6 +53,7 @@ extern "C" {
>>>>>>   #define DRM_AMDGPU_WAIT_FENCES        0x12
>>>>>>   #define DRM_AMDGPU_VM            0x13
>>>>>>   #define DRM_AMDGPU_FREESYNC            0x14
>>>>>> +#define DRM_AMDGPU_FENCE_TO_HANDLE    0x15
>>>>>>     #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE 
>>>>>> + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>>>>>>   #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>> DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
>>>>>> @@ -69,6 +70,7 @@ extern "C" {
>>>>>>   #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE 
>>>>>> + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>>>>>>   #define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>> DRM_AMDGPU_VM, union drm_amdgpu_vm)
>>>>>>   #define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>> DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
>>>>>> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE 
>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union 
>>>>>> drm_amdgpu_fence_to_handle)
>>>>>>     #define AMDGPU_GEM_DOMAIN_CPU        0x1
>>>>>>   #define AMDGPU_GEM_DOMAIN_GTT        0x2
>>>>>> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>>>>>>       __u32 handle;
>>>>>>   };
>>>>>>   +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ    0
>>>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD    1
>>>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD    2
>>>>>> +
>>>>>> +union drm_amdgpu_fence_to_handle {
>>>>>> +    struct {
>>>>>> +        struct drm_amdgpu_fence fence;
>>>>>> +        __u32 what;
>>>>>> +    } in;
>>>>>> +    struct {
>>>>>> +        __u32 handle;
>>>>>> +    } out;
>>>>>> +};
>>>>>> +
>>>>>>   struct drm_amdgpu_cs_chunk_data {
>>>>>>       union {
>>>>>>           struct drm_amdgpu_cs_chunk_ib        ib_data;
>>>>>
>>>>> _______________________________________________
>>>>> amd-gfx mailing list
>>>>> amd-gfx@lists.freedesktop.org
>>>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>>>
>>>>
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
>>
>

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]                     ` <58914d9d-f140-9cf3-f91b-78db7d913d12-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-13  9:57                       ` zhoucm1
  0 siblings, 0 replies; 36+ messages in thread
From: zhoucm1 @ 2017-09-13  9:57 UTC (permalink / raw)
  To: Christian König, Marek Olšák,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2017年09月13日 16:07, Christian König wrote:
> Am 13.09.2017 um 09:39 schrieb zhoucm1:
>>
>>
>> On 2017年09月13日 15:09, Christian König wrote:
>>> syncfile is the older implementation, sync_obj is the replacement 
>>> for it.
>>
>> Are you sure sync_obj is a replacement for syncfile? Anyone can 
>> clarify it?
>>
>> sync_obj is mainly for semaphore usage, we can see sync_obj docs from 
>> Dave in drm_syncobj.c:
>> "/**
>>  * DOC: Overview
>>  *
>>  * DRM synchronisation objects (syncobj) are a persistent objects,
>>  * that contain an optional fence. The fence can be updated with a new
>>  * fence, or be NULL.
>>  *
>>  * syncobj's can be export to fd's and back, these fd's are opaque and
>>  * have no other use case, except passing the syncobj between processes.
>>  *
>>  * Their primary use-case is to implement Vulkan fences and semaphores.
>>  *
>>  * syncobj have a kref reference count, but also have an optional file.
>>  * The file is only created once the syncobj is exported.
>>  * The file takes a reference on the kref.
>>  */
>> "
>>
>
> Correct, but you can convert from sync_obj into syncfile and back 
> using a standard DRM IOCTL. So when we support sync_obj we also 
> support syncfile.
>
>>>
>>> I don't think we should implement syncfile in the CS any more, 
>>> sync_obj is already done and can be converted to/from syncfiles.
>>>
>>> Mareks IOCTL should cover the case when we need to create a syncfile 
>>> or syncobj from a fence sequence number.
>>
>> I know his conversion can do those things, but returning syncfile fd 
>> directly is a really simple method.
>
> Well we can use sync_obj for this as well, it doesn't make so much 
> difference.
>
> Point is we shouldn't return a syncfile for VM operations because that 
> will always use an fd which isn't very desirable.
Have you seen Android fence fd? they are all syncfile fd, when Marek 
enables EGL_ANDROID_native_fence_sync, they will also be syncfile fd 
used by Android.

Regards,
David Zhou

>
> Regards,
> Christian.
>
>>
>> Regards,
>> David Zhou
>>>
>>> Regards,
>>> Christian.
>>>
>>> Am 13.09.2017 um 09:03 schrieb zhoucm1:
>>>> I really very surprise that you prefer to expand sync_obj to get 
>>>> syncfile fd!!!
>>>>
>>>> Why not directly generate syncfile fd and use it? Which one is 
>>>> simple is so obvious.
>>>>
>>>> Regards,
>>>> David Zhou
>>>> On 2017年09月13日 14:57, Christian König wrote:
>>>>> Hi guys,
>>>>>
>>>>> Mareks IOCTL proposal looks really good to me.
>>>>>
>>>>> Please note that we already have sync_obj support for the CS IOCTL 
>>>>> in the 4.13 branch and this work is based on top of that.
>>>>>
>>>>>> UMD don't need to construct ip_type/ip_instance/ctx_id/ring
>>>>> Well the UMD does want to construct 
>>>>> ip_type/ip_instance/ctx_id/ring and use for the simple reason that 
>>>>> it allows more flexibility than sync_obj/sync_file.
>>>>>
>>>>> Thinking more about this I'm pretty sure we want to do something 
>>>>> similar for VM map/unmap operations as well.
>>>>>
>>>>> Regards,
>>>>> Christian.
>>>>>
>>>>> Am 13.09.2017 um 05:03 schrieb zhoucm1:
>>>>>> Hi Marek,
>>>>>>
>>>>>> You're doing same things with me, see my "introduce syncfile as 
>>>>>> fence reuturn" patch set, which makes things more simple, we just 
>>>>>> need to directly return syncfile fd to UMD when CS, then the 
>>>>>> fence UMD get will be always syncfile fd, UMD don't need to 
>>>>>> construct ip_type/ip_instance/ctx_id/ring any more, which also 
>>>>>> can pass to dependency and syncobj as well.
>>>>>>
>>>>>> Regards,
>>>>>> David Zhou
>>>>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>>>>> From: Marek Olšák <marek.olsak@amd.com>
>>>>>>>
>>>>>>> for being able to convert an amdgpu fence into one of the handles.
>>>>>>> Mesa will use this.
>>>>>>>
>>>>>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>>>>>> ---
>>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 
>>>>>>> +++++++++++++++++++++++++++++++++
>>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>>>>>>>   include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>>>>>>>   5 files changed, 82 insertions(+), 1 deletion(-)
>>>>>>>
>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>>> index b5c8b90..c15fa93 100644
>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>>> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device 
>>>>>>> *dev, void *data,
>>>>>>>   int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>>>>>>>               struct drm_file *filp);
>>>>>>>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct 
>>>>>>> drm_file *filp);
>>>>>>> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, 
>>>>>>> void *data,
>>>>>>> +                    struct drm_file *filp);
>>>>>>>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 
>>>>>>> struct drm_file *filp);
>>>>>>>   int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void 
>>>>>>> *data,
>>>>>>>                   struct drm_file *filp);
>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
>>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>>>> index 7cb8a59..6dd719c 100644
>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>>>>> @@ -25,6 +25,7 @@
>>>>>>>    *    Jerome Glisse <glisse@freedesktop.org>
>>>>>>>    */
>>>>>>>   #include <linux/pagemap.h>
>>>>>>> +#include <linux/sync_file.h>
>>>>>>>   #include <drm/drmP.h>
>>>>>>>   #include <drm/amdgpu_drm.h>
>>>>>>>   #include <drm/drm_syncobj.h>
>>>>>>> @@ -1311,6 +1312,66 @@ static struct dma_fence 
>>>>>>> *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>>>>>>>       return fence;
>>>>>>>   }
>>>>>>>   +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, 
>>>>>>> void *data,
>>>>>>> +                    struct drm_file *filp)
>>>>>>> +{
>>>>>>> +    struct amdgpu_device *adev = dev->dev_private;
>>>>>>> +    struct amdgpu_fpriv *fpriv = filp->driver_priv;
>>>>>>> +    union drm_amdgpu_fence_to_handle *info = data;
>>>>>>> +    struct dma_fence *fence;
>>>>>>> +    struct drm_syncobj *syncobj;
>>>>>>> +    struct sync_file *sync_file;
>>>>>>> +    int fd, r;
>>>>>>> +
>>>>>>> +    if (amdgpu_kms_vram_lost(adev, fpriv))
>>>>>>> +        return -ENODEV;
>>>>>>> +
>>>>>>> +    fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
>>>>>>> +    if (IS_ERR(fence))
>>>>>>> +        return PTR_ERR(fence);
>>>>>>> +
>>>>>>> +    switch (info->in.what) {
>>>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
>>>>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>>>>> +        dma_fence_put(fence);
>>>>>>> +        if (r)
>>>>>>> +            return r;
>>>>>>> +        r = drm_syncobj_get_handle(filp, syncobj, 
>>>>>>> &info->out.handle);
>>>>>>> +        drm_syncobj_put(syncobj);
>>>>>>> +        return r;
>>>>>>> +
>>>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
>>>>>>> +        r = drm_syncobj_create(&syncobj, 0, fence);
>>>>>>> +        dma_fence_put(fence);
>>>>>>> +        if (r)
>>>>>>> +            return r;
>>>>>>> +        r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
>>>>>>> +        drm_syncobj_put(syncobj);
>>>>>>> +        return r;
>>>>>>> +
>>>>>>> +    case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
>>>>>>> +        fd = get_unused_fd_flags(O_CLOEXEC);
>>>>>>> +        if (fd < 0) {
>>>>>>> +            dma_fence_put(fence);
>>>>>>> +            return fd;
>>>>>>> +        }
>>>>>>> +
>>>>>>> +        sync_file = sync_file_create(fence);
>>>>>>> +        dma_fence_put(fence);
>>>>>>> +        if (!sync_file) {
>>>>>>> +            put_unused_fd(fd);
>>>>>>> +            return -ENOMEM;
>>>>>>> +        }
>>>>>>> +
>>>>>>> +        fd_install(fd, sync_file->file);
>>>>>>> +        info->out.handle = fd;
>>>>>>> +        return 0;
>>>>>>> +
>>>>>>> +    default:
>>>>>>> +        return -EINVAL;
>>>>>>> +    }
>>>>>>> +}
>>>>>>> +
>>>>>>>   /**
>>>>>>>    * amdgpu_cs_wait_all_fence - wait on all fences to signal
>>>>>>>    *
>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
>>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>>>> index d01aca6..1e38411 100644
>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>>>>>>> @@ -70,9 +70,10 @@
>>>>>>>    * - 3.18.0 - Export gpu always on cu bitmap
>>>>>>>    * - 3.19.0 - Add support for UVD MJPEG decode
>>>>>>>    * - 3.20.0 - Add support for local BOs
>>>>>>> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>>>>>>>    */
>>>>>>>   #define KMS_DRIVER_MAJOR    3
>>>>>>> -#define KMS_DRIVER_MINOR    20
>>>>>>> +#define KMS_DRIVER_MINOR    21
>>>>>>>   #define KMS_DRIVER_PATCHLEVEL    0
>>>>>>>     int amdgpu_vram_limit = 0;
>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
>>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>>>> index d31777b..b09d315 100644
>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>>>>>> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc 
>>>>>>> amdgpu_ioctls_kms[] = {
>>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, 
>>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, 
>>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, 
>>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>> +    DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, 
>>>>>>> amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>>       /* KMS */
>>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, 
>>>>>>> DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>>       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, 
>>>>>>> amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>>>>>> diff --git a/include/uapi/drm/amdgpu_drm.h 
>>>>>>> b/include/uapi/drm/amdgpu_drm.h
>>>>>>> index 9f5bd97..ec57cd0 100644
>>>>>>> --- a/include/uapi/drm/amdgpu_drm.h
>>>>>>> +++ b/include/uapi/drm/amdgpu_drm.h
>>>>>>> @@ -53,6 +53,7 @@ extern "C" {
>>>>>>>   #define DRM_AMDGPU_WAIT_FENCES        0x12
>>>>>>>   #define DRM_AMDGPU_VM            0x13
>>>>>>>   #define DRM_AMDGPU_FREESYNC            0x14
>>>>>>> +#define DRM_AMDGPU_FENCE_TO_HANDLE    0x15
>>>>>>>     #define DRM_IOCTL_AMDGPU_GEM_CREATE 
>>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union 
>>>>>>> drm_amdgpu_gem_create)
>>>>>>>   #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>>> DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
>>>>>>> @@ -69,6 +70,7 @@ extern "C" {
>>>>>>>   #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE 
>>>>>>> + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>>>>>>>   #define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>>> DRM_AMDGPU_VM, union drm_amdgpu_vm)
>>>>>>>   #define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>>> DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
>>>>>>> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE 
>>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union 
>>>>>>> drm_amdgpu_fence_to_handle)
>>>>>>>     #define AMDGPU_GEM_DOMAIN_CPU        0x1
>>>>>>>   #define AMDGPU_GEM_DOMAIN_GTT        0x2
>>>>>>> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>>>>>>>       __u32 handle;
>>>>>>>   };
>>>>>>>   +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ    0
>>>>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD    1
>>>>>>> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD    2
>>>>>>> +
>>>>>>> +union drm_amdgpu_fence_to_handle {
>>>>>>> +    struct {
>>>>>>> +        struct drm_amdgpu_fence fence;
>>>>>>> +        __u32 what;
>>>>>>> +    } in;
>>>>>>> +    struct {
>>>>>>> +        __u32 handle;
>>>>>>> +    } out;
>>>>>>> +};
>>>>>>> +
>>>>>>>   struct drm_amdgpu_cs_chunk_data {
>>>>>>>       union {
>>>>>>>           struct drm_amdgpu_cs_chunk_ib ib_data;
>>>>>>
>>>>>> _______________________________________________
>>>>>> amd-gfx mailing list
>>>>>> amd-gfx@lists.freedesktop.org
>>>>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>>>>
>>>>>
>>>>
>>>> _______________________________________________
>>>> amd-gfx mailing list
>>>> amd-gfx@lists.freedesktop.org
>>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>>
>>>
>>
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-13  3:03     ` zhoucm1
  2017-09-13  6:57       ` Christian König
@ 2017-09-13 10:11       ` Marek Olšák
       [not found]         ` <CAAxE2A7aR15Y9C2r_V_+6fTp-kEuFKwDApQRCnx_rxdsDOKtmw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  1 sibling, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-13 10:11 UTC (permalink / raw)
  To: zhoucm1; +Cc: amd-gfx mailing list, dri-devel

On Wed, Sep 13, 2017 at 5:03 AM, zhoucm1 <david1.zhou@amd.com> wrote:
> Hi Marek,
>
> You're doing same things with me, see my "introduce syncfile as fence
> reuturn" patch set, which makes things more simple, we just need to directly
> return syncfile fd to UMD when CS, then the fence UMD get will be always
> syncfile fd, UMD don't need to construct ip_type/ip_instance/ctx_id/ring any
> more, which also can pass to dependency and syncobj as well.

For simpler Mesa code, Mesa won't get a sync file from the CS ioctl.

Marek
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]         ` <CAAxE2A7aR15Y9C2r_V_+6fTp-kEuFKwDApQRCnx_rxdsDOKtmw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-13 11:32           ` Zhou, David(ChunMing)
       [not found]             ` <MWHPR1201MB020689874960A24D32C4A39CB46E0-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Zhou, David(ChunMing) @ 2017-09-13 11:32 UTC (permalink / raw)
  To: Marek Olšák
  Cc: Zhou, David(ChunMing),
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 813 bytes --]

Could you describe how difficult to directly use CS syncfile fd in Mesa compared with converting CS seq to syncfile fd via several syncobj ioctls?

Regards,
David Zhou


发自坚果 Pro

Marek Olšák <maraeo@gmail.com> 于 2017年9月13日 下午6:11写道:

On Wed, Sep 13, 2017 at 5:03 AM, zhoucm1 <david1.zhou@amd.com> wrote:
> Hi Marek,
>
> You're doing same things with me, see my "introduce syncfile as fence
> reuturn" patch set, which makes things more simple, we just need to directly
> return syncfile fd to UMD when CS, then the fence UMD get will be always
> syncfile fd, UMD don't need to construct ip_type/ip_instance/ctx_id/ring any
> more, which also can pass to dependency and syncobj as well.

For simpler Mesa code, Mesa won't get a sync file from the CS ioctl.

Marek

[-- Attachment #1.2: Type: text/html, Size: 2237 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]             ` <MWHPR1201MB020689874960A24D32C4A39CB46E0-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
@ 2017-09-13 12:06               ` Marek Olšák
  2017-09-13 12:25                 ` Zhou, David(ChunMing)
  0 siblings, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-13 12:06 UTC (permalink / raw)
  To: Zhou, David(ChunMing)
  Cc: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, amd-gfx mailing list

On Wed, Sep 13, 2017 at 1:32 PM, Zhou, David(ChunMing)
<David1.Zhou@amd.com> wrote:
> Could you describe how difficult to directly use CS syncfile fd in Mesa
> compared with converting CS seq to syncfile fd via several syncobj ioctls?

It just simplifies things. Mesa primarily uses seq_no-based fences and
will continue to use them. We can't remove the seq_no fence code
because we have to keep Mesa compatible with older kernels.

The only possibilities are:
- Mesa gets both seq_no and sync_file from CS.
- Mesa only gets seq_no from CS.

I decided to take the simpler option. I don't know if there is a perf
difference between CS returning a sync_file and using a separate
ioctl, but it's probably insignificant since we already call 3 ioctls
per IB submission (BO list create+destroy, submit).

Marek

>
> Regards,
> David Zhou
>
> 发自坚果 Pro
>
> Marek Olšák <maraeo@gmail.com> 于 2017年9月13日 下午6:11写道:
>
> On Wed, Sep 13, 2017 at 5:03 AM, zhoucm1 <david1.zhou@amd.com> wrote:
>> Hi Marek,
>>
>> You're doing same things with me, see my "introduce syncfile as fence
>> reuturn" patch set, which makes things more simple, we just need to
>> directly
>> return syncfile fd to UMD when CS, then the fence UMD get will be always
>> syncfile fd, UMD don't need to construct ip_type/ip_instance/ctx_id/ring
>> any
>> more, which also can pass to dependency and syncobj as well.
>
> For simpler Mesa code, Mesa won't get a sync file from the CS ioctl.
>
> Marek
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-13 12:06               ` Marek Olšák
@ 2017-09-13 12:25                 ` Zhou, David(ChunMing)
  2017-09-13 13:04                   ` Christian König
  0 siblings, 1 reply; 36+ messages in thread
From: Zhou, David(ChunMing) @ 2017-09-13 12:25 UTC (permalink / raw)
  To: Marek Olšák; +Cc: dri-devel, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 1909 bytes --]

Yes, to be comptibility, I kept both seq_no and syncfile fd in the patch set, you can take a look, which really is simpler and effective way.

syncfile indeed be a good way to pass fence for user space, which already is proved in Android and is upstreamed.

Regards,
David Zhou


发自坚果 Pro

Marek Olšák <maraeo@gmail.com> 于 2017年9月13日 下午8:06写道:

On Wed, Sep 13, 2017 at 1:32 PM, Zhou, David(ChunMing)
<David1.Zhou@amd.com> wrote:
> Could you describe how difficult to directly use CS syncfile fd in Mesa
> compared with concerting CS seq to syncfile fd via several syncobj ioctls?

It just simplifies things. Mesa primarily uses seq_no-based fences and
will continue to use them. We can't remove the seq_no fence code
because we have to keep Mesa compatible with older kernels.

The only possibilities are:
- Mesa gets both seq_no and sync_file from CS.
- Mesa only gets seq_no from CS.

I decided to take the simpler option. I don't know if there is a perf
difference between CS returning a sync_file and using a separate
ioctl, but it's probably insignificant since we already call 3 ioctls
per IB submission (BO list create+destroy, submit).

Marek

>
> Regards,
> David Zhou
>
> 发自坚果 Pro
>
> Marek Olšák <maraeo@gmail.com> 于 2017年9月13日 下午6:11写道:
>
> On Wed, Sep 13, 2017 at 5:03 AM, zhoucm1 <david1.zhou@amd.com> wrote:
>> Hi Marek,
>>
>> You're doing same things with me, see my "introduce syncfile as fence
>> reuturn" patch set, which makes things more simple, we just need to
>> directly
>> return syncfile fd to UMD when CS, then the fence UMD get will be always
>> syncfile fd, UMD don't need to construct ip_type/ip_instance/ctx_id/ring
>> any
>> more, which also can pass to dependency and syncobj as well.
>
> For simpler Mesa code, Mesa won't get a sync file from the CS ioctl.
>
> Marek

[-- Attachment #1.2: Type: text/html, Size: 3665 bytes --]

[-- Attachment #2: Type: text/plain, Size: 160 bytes --]

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-13 12:25                 ` Zhou, David(ChunMing)
@ 2017-09-13 13:04                   ` Christian König
       [not found]                     ` <8927d6b6-f87c-00d9-57af-fdf4fb6dfcb8-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Christian König @ 2017-09-13 13:04 UTC (permalink / raw)
  To: Zhou, David(ChunMing), Marek Olšák; +Cc: amd-gfx mailing list, dri-devel


[-- Attachment #1.1: Type: text/plain, Size: 2683 bytes --]

> syncfile indeed be a good way to pass fence for user space, which already is proved in Android and is upstreamed.
Not really. syncfile needs a file descriptor for each handle it 
generates, that's quite a show stopper if you want to use it in general.

Android syncfile are meant to be used for inter process sharing, but as 
command submission sequence number they are not such a good fit.

Mareks approach looks really good to me and we should follow that 
direction further.

Regards,
Christian.

Am 13.09.2017 um 14:25 schrieb Zhou, David(ChunMing):
> Yes, to be comptibility, I kept both seq_no and syncfile fd in the patch set, you can take a look, which really is simpler and effective way.
>
> syncfile indeed be a good way to pass fence for user space, which already is proved in Android and is upstreamed.
>
> Regards,
> David Zhou
>
> 发自坚果 Pro
>
> Marek Olšák <maraeo@gmail.com> 于 2017年9月13日 下午8:06写道:
>
> On Wed, Sep 13, 2017 at 1:32 PM, Zhou, David(ChunMing)
> <David1.Zhou@amd.com> wrote:
> > Could you describe how difficult to directly use CS syncfile fd in Mesa
> > compared with concerting CS seq to syncfile fd via several syncobj 
> ioctls?
>
> It just simplifies things. Mesa primarily uses seq_no-based fences and
> will continue to use them. We can't remove the seq_no fence code
> because we have to keep Mesa compatible with older kernels.
>
> The only possibilities are:
> - Mesa gets both seq_no and sync_file from CS.
> - Mesa only gets seq_no from CS.
>
> I decided to take the simpler option. I don't know if there is a perf
> difference between CS returning a sync_file and using a separate
> ioctl, but it's probably insignificant since we already call 3 ioctls
> per IB submission (BO list create+destroy, submit).
>
> Marek
>
> >
> > Regards,
> > David Zhou
> >
> > 发自坚果 Pro
> >
> > Marek Olšák <maraeo@gmail.com> 于 2017年9月13日 下午6:11写道:
> >
> > On Wed, Sep 13, 2017 at 5:03 AM, zhoucm1 <david1.zhou@amd.com> wrote:
> >> Hi Marek,
> >>
> >> You're doing same things with me, see my "introduce syncfile as fence
> >> reuturn" patch set, which makes things more simple, we just need to
> >> directly
> >> return syncfile fd to UMD when CS, then the fence UMD get will be 
> always
> >> syncfile fd, UMD don't need to construct 
> ip_type/ip_instance/ctx_id/ring
> >> any
> >> more, which also can pass to dependency and syncobj as well.
> >
> > For simpler Mesa code, Mesa won't get a sync file from the CS ioctl.
> >
> > Marek
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx



[-- Attachment #1.2: Type: text/html, Size: 6056 bytes --]

[-- Attachment #2: Type: text/plain, Size: 160 bytes --]

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]                     ` <8927d6b6-f87c-00d9-57af-fdf4fb6dfcb8-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-13 13:46                       ` Zhou, David(ChunMing)
  2017-09-13 14:06                         ` Marek Olšák
  0 siblings, 1 reply; 36+ messages in thread
From: Zhou, David(ChunMing) @ 2017-09-13 13:46 UTC (permalink / raw)
  To: Christian König
  Cc: Zhou, David(ChunMing), Marek Olšák,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 3242 bytes --]

For android using mesa instance, egl draw will dequeue an android buffer, after egl draw, the buffer will back to android bufffer queue, but need append a syncfile fd. If getting syncfile fd for every egl draw always needs several syncobj ioctls, the io overhead isn't small. But if we directly return syncfile when egl draw CS,  isn't it better?


发自坚果 Pro

Christian König <deathsimple@vodafone.de> 于 2017年9月13日 下午9:04写道:

syncfile indeed be a good way to pass fence for user space, which already is proved in Android and is upstreamed.
Not really. syncfile needs a file descriptor for each handle it generates, that's quite a show stopper if you want to use it in general.

Android syncfile are meant to be used for inter process sharing, but as command submission sequence number they are not such a good fit.

Mareks approach looks really good to me and we should follow that direction further.

Regards,
Christian.

Am 13.09.2017 um 14:25 schrieb Zhou, David(ChunMing):
Yes, to be comptibility, I kept both seq_no and syncfile fd in the patch set, you can take a look, which really is simpler and effective way.

syncfile indeed be a good way to pass fence for user space, which already is proved in Android and is upstreamed.

Regards,
David Zhou


发自坚果 Pro

Marek Olšák <maraeo@gmail.com><mailto:maraeo@gmail.com> 于 2017年9月13日 下午8:06写道:

On Wed, Sep 13, 2017 at 1:32 PM, Zhou, David(ChunMing)
<David1.Zhou@amd.com><mailto:David1.Zhou@amd.com> wrote:
> Could you describe how difficult to directly use CS syncfile fd in Mesa
> compared with concerting CS seq to syncfile fd via several syncobj ioctls?

It just simplifies things. Mesa primarily uses seq_no-based fences and
will continue to use them. We can't remove the seq_no fence code
because we have to keep Mesa compatible with older kernels.

The only possibilities are:
- Mesa gets both seq_no and sync_file from CS.
- Mesa only gets seq_no from CS.

I decided to take the simpler option. I don't know if there is a perf
difference between CS returning a sync_file and using a separate
ioctl, but it's probably insignificant since we already call 3 ioctls
per IB submission (BO list create+destroy, submit).

Marek

>
> Regards,
> David Zhou
>
> 发自坚果 Pro
>
> Marek Olšák <maraeo@gmail.com><mailto:maraeo@gmail.com> 于 2017年9月13日 下午6:11写道:
>
> On Wed, Sep 13, 2017 at 5:03 AM, zhoucm1 <david1.zhou@amd.com><mailto:david1.zhou@amd.com> wrote:
>> Hi Marek,
>>
>> You're doing same things with me, see my "introduce syncfile as fence
>> reuturn" patch set, which makes things more simple, we just need to
>> directly
>> return syncfile fd to UMD when CS, then the fence UMD get will be always
>> syncfile fd, UMD don't need to construct ip_type/ip_instance/ctx_id/ring
>> any
>> more, which also can pass to dependency and syncobj as well.
>
> For simpler Mesa code, Mesa won't get a sync file from the CS ioctl.
>
> Marek



_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org<mailto:amd-gfx@lists.freedesktop.org>
https://lists.freedesktop.org/mailman/listinfo/amd-gfx



[-- Attachment #1.2: Type: text/html, Size: 6787 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-13 13:46                       ` Zhou, David(ChunMing)
@ 2017-09-13 14:06                         ` Marek Olšák
       [not found]                           ` <CAAxE2A72RAC61KOF10GTuged-pM4kpKGnnMJh-nmtF-xM4f9AQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-13 14:06 UTC (permalink / raw)
  To: Zhou, David(ChunMing); +Cc: dri-devel, amd-gfx mailing list

On Wed, Sep 13, 2017 at 3:46 PM, Zhou, David(ChunMing)
<David1.Zhou@amd.com> wrote:
> For android using mesa instance, egl draw will dequeue an android buffer,
> after egl draw, the buffer will back to android bufffer queue, but need
> append a syncfile fd. If getting syncfile fd for every egl draw always needs
> several syncobj ioctls, the io overhead isn't small. But if we directly
> return syncfile when egl draw CS,  isn't it better?

You have a good point. I'd be OK with either approach, or even with
having both options in the kernel.

Marek
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]                           ` <CAAxE2A72RAC61KOF10GTuged-pM4kpKGnnMJh-nmtF-xM4f9AQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-13 14:16                             ` Christian König
       [not found]                               ` <be64ba03-3aa9-6eaf-9267-c3ec51882c12-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Christian König @ 2017-09-13 14:16 UTC (permalink / raw)
  To: Marek Olšák, Zhou, David(ChunMing)
  Cc: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, amd-gfx mailing list

Am 13.09.2017 um 16:06 schrieb Marek Olšák:
> On Wed, Sep 13, 2017 at 3:46 PM, Zhou, David(ChunMing)
> <David1.Zhou@amd.com> wrote:
>> For android using mesa instance, egl draw will dequeue an android buffer,
>> after egl draw, the buffer will back to android bufffer queue, but need
>> append a syncfile fd. If getting syncfile fd for every egl draw always needs
>> several syncobj ioctls, the io overhead isn't small. But if we directly
>> return syncfile when egl draw CS,  isn't it better?
> You have a good point. I'd be OK with either approach, or even with
> having both options in the kernel.

I don't have a strong opinion for the CS IOCTL either. If it saves us an 
extra IOCTL when we directly return a syncfile fd then why not?

But we really shouldn't use syncfile for the VA IOCTL. That's nothing we 
want to share with other processes and the returned fence or sequence 
needs to be lightweight.

Ideally I would say it should be a sequence number, so that you can say 
max(seq1, seq2) and always have the latest.

The next best approach I think would be to use syncobj, cause it is 
simply rather easily to implement.

Christian.

>
> Marek


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]                               ` <be64ba03-3aa9-6eaf-9267-c3ec51882c12-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-14  1:52                                 ` Dave Airlie
       [not found]                                   ` <CAPM=9typSOcf3L5Y6acDChY6R5fKMQ44LdCvKp_YZbX7B1LWiw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Dave Airlie @ 2017-09-14  1:52 UTC (permalink / raw)
  To: Christian König
  Cc: Zhou, David(ChunMing),
	amd-gfx mailing list, dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	Marek Olšák

On 14 September 2017 at 00:16, Christian König <deathsimple@vodafone.de> wrote:
> Am 13.09.2017 um 16:06 schrieb Marek Olšák:
>>
>> On Wed, Sep 13, 2017 at 3:46 PM, Zhou, David(ChunMing)
>> <David1.Zhou@amd.com> wrote:
>>>
>>> For android using mesa instance, egl draw will dequeue an android buffer,
>>> after egl draw, the buffer will back to android bufffer queue, but need
>>> append a syncfile fd. If getting syncfile fd for every egl draw always
>>> needs
>>> several syncobj ioctls, the io overhead isn't small. But if we directly
>>> return syncfile when egl draw CS,  isn't it better?
>>
>> You have a good point. I'd be OK with either approach, or even with
>> having both options in the kernel.
>
>
> I don't have a strong opinion for the CS IOCTL either. If it saves us an
> extra IOCTL when we directly return a syncfile fd then why not?
>
> But we really shouldn't use syncfile for the VA IOCTL. That's nothing we
> want to share with other processes and the returned fence or sequence needs
> to be lightweight.
>
> Ideally I would say it should be a sequence number, so that you can say
> max(seq1, seq2) and always have the latest.
>
> The next best approach I think would be to use syncobj, cause it is simply
> rather easily to implement.

I'd go with not returning fd's by default, it's a bad use of a limited resource,
creating fd's should happen on giving the object to another process or API.

However having an optional chunk or flag to say this ioctl will return an
android sync fd if asked is fine with me, if is usually returns a syncobj.

Dave.
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]                                   ` <CAPM=9typSOcf3L5Y6acDChY6R5fKMQ44LdCvKp_YZbX7B1LWiw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-14  2:07                                     ` zhoucm1
       [not found]                                       ` <8a800613-07ce-b417-8a1e-0db5a2e31758-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: zhoucm1 @ 2017-09-14  2:07 UTC (permalink / raw)
  To: Dave Airlie, Christian König
  Cc: amd-gfx mailing list, dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	Marek Olšák



On 2017年09月14日 09:52, Dave Airlie wrote:
> On 14 September 2017 at 00:16, Christian König <deathsimple@vodafone.de> wrote:
>> Am 13.09.2017 um 16:06 schrieb Marek Olšák:
>>> On Wed, Sep 13, 2017 at 3:46 PM, Zhou, David(ChunMing)
>>> <David1.Zhou@amd.com> wrote:
>>>> For android using mesa instance, egl draw will dequeue an android buffer,
>>>> after egl draw, the buffer will back to android bufffer queue, but need
>>>> append a syncfile fd. If getting syncfile fd for every egl draw always
>>>> needs
>>>> several syncobj ioctls, the io overhead isn't small. But if we directly
>>>> return syncfile when egl draw CS,  isn't it better?
>>> You have a good point. I'd be OK with either approach, or even with
>>> having both options in the kernel.
>>
>> I don't have a strong opinion for the CS IOCTL either. If it saves us an
>> extra IOCTL when we directly return a syncfile fd then why not?
>>
>> But we really shouldn't use syncfile for the VA IOCTL. That's nothing we
>> want to share with other processes and the returned fence or sequence needs
>> to be lightweight.
>>
>> Ideally I would say it should be a sequence number, so that you can say
>> max(seq1, seq2) and always have the latest.
>>
>> The next best approach I think would be to use syncobj, cause it is simply
>> rather easily to implement.
> I'd go with not returning fd's by default, it's a bad use of a limited resource,
> creating fd's should happen on giving the object to another process or API.
>
> However having an optional chunk or flag to say this ioctl will return an
> android sync fd if asked is fine with me, if is usually returns a syncobj.
OK, that means we return fd only when UMD ask, right?
I will send V2.


Thanks all.
David Zhou
>
> Dave.

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]                                       ` <8a800613-07ce-b417-8a1e-0db5a2e31758-5C7GfCeVMHo@public.gmane.org>
@ 2017-09-14  2:26                                         ` zhoucm1
  2017-09-14  2:33                                           ` Dave Airlie
  0 siblings, 1 reply; 36+ messages in thread
From: zhoucm1 @ 2017-09-14  2:26 UTC (permalink / raw)
  To: Dave Airlie, Christian König
  Cc: amd-gfx mailing list, dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	Marek Olšák



On 2017年09月14日 10:07, zhoucm1 wrote:
>
>
> On 2017年09月14日 09:52, Dave Airlie wrote:
>> On 14 September 2017 at 00:16, Christian König 
>> <deathsimple@vodafone.de> wrote:
>>> Am 13.09.2017 um 16:06 schrieb Marek Olšák:
>>>> On Wed, Sep 13, 2017 at 3:46 PM, Zhou, David(ChunMing)
>>>> <David1.Zhou@amd.com> wrote:
>>>>> For android using mesa instance, egl draw will dequeue an android 
>>>>> buffer,
>>>>> after egl draw, the buffer will back to android bufffer queue, but 
>>>>> need
>>>>> append a syncfile fd. If getting syncfile fd for every egl draw 
>>>>> always
>>>>> needs
>>>>> several syncobj ioctls, the io overhead isn't small. But if we 
>>>>> directly
>>>>> return syncfile when egl draw CS,  isn't it better?
>>>> You have a good point. I'd be OK with either approach, or even with
>>>> having both options in the kernel.
>>>
>>> I don't have a strong opinion for the CS IOCTL either. If it saves 
>>> us an
>>> extra IOCTL when we directly return a syncfile fd then why not?
>>>
>>> But we really shouldn't use syncfile for the VA IOCTL. That's 
>>> nothing we
>>> want to share with other processes and the returned fence or 
>>> sequence needs
>>> to be lightweight.
>>>
>>> Ideally I would say it should be a sequence number, so that you can say
>>> max(seq1, seq2) and always have the latest.
>>>
>>> The next best approach I think would be to use syncobj, cause it is 
>>> simply
>>> rather easily to implement.
>> I'd go with not returning fd's by default, it's a bad use of a 
>> limited resource,
>> creating fd's should happen on giving the object to another process 
>> or API.
>>
>> However having an optional chunk or flag to say this ioctl will 
>> return an
>> android sync fd if asked is fine with me, if is usually returns a 
>> syncobj.
> OK, that means we return fd only when UMD ask, right?
> I will send V2.
But think a bit more, if not by default, that isn't meaningful, we can 
continue to use seq_no base fence and Marek's syncfile fd approach.

Regards,
David Zhou
>
>
> Thanks all.
> David Zhou
>>
>> Dave.
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-14  2:26                                         ` zhoucm1
@ 2017-09-14  2:33                                           ` Dave Airlie
  2017-09-14  2:34                                             ` zhoucm1
  0 siblings, 1 reply; 36+ messages in thread
From: Dave Airlie @ 2017-09-14  2:33 UTC (permalink / raw)
  To: zhoucm1; +Cc: amd-gfx mailing list, dri-devel

>
> But think a bit more, if not by default, that isn't meaningful, we can
> continue to use seq_no base fence and Marek's syncfile fd approach.
>

At least for radv I've no interest in every CS ioctl returning an fd
that I have to
close.

Dave.
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-14  2:33                                           ` Dave Airlie
@ 2017-09-14  2:34                                             ` zhoucm1
  0 siblings, 0 replies; 36+ messages in thread
From: zhoucm1 @ 2017-09-14  2:34 UTC (permalink / raw)
  To: Dave Airlie; +Cc: amd-gfx mailing list, dri-devel



On 2017年09月14日 10:33, Dave Airlie wrote:
>> But think a bit more, if not by default, that isn't meaningful, we can
>> continue to use seq_no base fence and Marek's syncfile fd approach.
>>
> At least for radv I've no interest in every CS ioctl returning an fd
> that I have to
> close.
OK, let's pend it now. It doesn't matter.

Regards,
David Zhou
>
> Dave.

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
  2017-09-12 20:42 [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Marek Olšák
  2017-09-12 20:42 ` [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file Marek Olšák
@ 2017-09-14  7:56 ` Emil Velikov
       [not found]   ` <CACvgo50uprnZZ_+wXLH59Knq2SUCZwoFsyYH6ydcNJQ9Eiaw3A-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
       [not found] ` <1505248934-1819-1-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2 siblings, 1 reply; 36+ messages in thread
From: Emil Velikov @ 2017-09-14  7:56 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx mailing list, ML dri-devel

Hi Marek,

On 12 September 2017 at 21:42, Marek Olšák <maraeo@gmail.com> wrote:

>  include/drm/drm_syncobj.h     |  4 ++++
Please sync the header as described in
https://cgit.freedesktop.org/mesa/drm/tree/include/drm/README#n72

Tl;DR: cd .../linux; make headers_install; cp ... .../drm/include/drm;
cd .../drm; git commit -sm ".... $branch $sha1..."

Thank you
Emil
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
       [not found]   ` <CACvgo50uprnZZ_+wXLH59Knq2SUCZwoFsyYH6ydcNJQ9Eiaw3A-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-14  8:01     ` Emil Velikov
       [not found]       ` <CACvgo51-pfQJgas_VhVv3gyp7DDAQfAtpb6pxNN7c54xDc46wQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Emil Velikov @ 2017-09-14  8:01 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx mailing list, ML dri-devel

On 14 September 2017 at 08:56, Emil Velikov <emil.l.velikov@gmail.com> wrote:
> Hi Marek,
>
> On 12 September 2017 at 21:42, Marek Olšák <maraeo@gmail.com> wrote:
>
>>  include/drm/drm_syncobj.h     |  4 ++++
> Please sync the header as described in
> https://cgit.freedesktop.org/mesa/drm/tree/include/drm/README#n72
>
> Tl;DR: cd .../linux; make headers_install; cp ... .../drm/include/drm;
> cd .../drm; git commit -sm ".... $branch $sha1..."
>
Seems like I've replied to the wrong patch - silly me.
This was meant for the libdrm ones - suggestion still applies though.

-Emil
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]   ` <1505248934-1819-3-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2017-09-13  3:03     ` zhoucm1
@ 2017-09-28 20:41     ` Marek Olšák
  2017-09-28 23:42       ` Dave Airlie
  1 sibling, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-28 20:41 UTC (permalink / raw)
  To: dri-devel, amd-gfx mailing list

Can I get Rb for this series?

Thanks,
Marek

On Tue, Sep 12, 2017 at 10:42 PM, Marek Olšák <maraeo@gmail.com> wrote:
> From: Marek Olšák <marek.olsak@amd.com>
>
> for being able to convert an amdgpu fence into one of the handles.
> Mesa will use this.
>
> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 61 +++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  1 +
>  include/uapi/drm/amdgpu_drm.h           | 16 +++++++++
>  5 files changed, 82 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index b5c8b90..c15fa93 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1308,6 +1308,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
>  int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>                         struct drm_file *filp);
>  int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
> +                                   struct drm_file *filp);
>  int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
>  int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>                                 struct drm_file *filp);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 7cb8a59..6dd719c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -25,6 +25,7 @@
>   *    Jerome Glisse <glisse@freedesktop.org>
>   */
>  #include <linux/pagemap.h>
> +#include <linux/sync_file.h>
>  #include <drm/drmP.h>
>  #include <drm/amdgpu_drm.h>
>  #include <drm/drm_syncobj.h>
> @@ -1311,6 +1312,66 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>         return fence;
>  }
>
> +int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
> +                                   struct drm_file *filp)
> +{
> +       struct amdgpu_device *adev = dev->dev_private;
> +       struct amdgpu_fpriv *fpriv = filp->driver_priv;
> +       union drm_amdgpu_fence_to_handle *info = data;
> +       struct dma_fence *fence;
> +       struct drm_syncobj *syncobj;
> +       struct sync_file *sync_file;
> +       int fd, r;
> +
> +       if (amdgpu_kms_vram_lost(adev, fpriv))
> +               return -ENODEV;
> +
> +       fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
> +       if (IS_ERR(fence))
> +               return PTR_ERR(fence);
> +
> +       switch (info->in.what) {
> +       case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
> +               r = drm_syncobj_create(&syncobj, 0, fence);
> +               dma_fence_put(fence);
> +               if (r)
> +                       return r;
> +               r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
> +               drm_syncobj_put(syncobj);
> +               return r;
> +
> +       case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
> +               r = drm_syncobj_create(&syncobj, 0, fence);
> +               dma_fence_put(fence);
> +               if (r)
> +                       return r;
> +               r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
> +               drm_syncobj_put(syncobj);
> +               return r;
> +
> +       case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
> +               fd = get_unused_fd_flags(O_CLOEXEC);
> +               if (fd < 0) {
> +                       dma_fence_put(fence);
> +                       return fd;
> +               }
> +
> +               sync_file = sync_file_create(fence);
> +               dma_fence_put(fence);
> +               if (!sync_file) {
> +                       put_unused_fd(fd);
> +                       return -ENOMEM;
> +               }
> +
> +               fd_install(fd, sync_file->file);
> +               info->out.handle = fd;
> +               return 0;
> +
> +       default:
> +               return -EINVAL;
> +       }
> +}
> +
>  /**
>   * amdgpu_cs_wait_all_fence - wait on all fences to signal
>   *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index d01aca6..1e38411 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -70,9 +70,10 @@
>   * - 3.18.0 - Export gpu always on cu bitmap
>   * - 3.19.0 - Add support for UVD MJPEG decode
>   * - 3.20.0 - Add support for local BOs
> + * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
>   */
>  #define KMS_DRIVER_MAJOR       3
> -#define KMS_DRIVER_MINOR       20
> +#define KMS_DRIVER_MINOR       21
>  #define KMS_DRIVER_PATCHLEVEL  0
>
>  int amdgpu_vram_limit = 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index d31777b..b09d315 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -1021,6 +1021,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
>         DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>         DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>         DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>         /* KMS */
>         DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>         DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 9f5bd97..ec57cd0 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -53,6 +53,7 @@ extern "C" {
>  #define DRM_AMDGPU_WAIT_FENCES         0x12
>  #define DRM_AMDGPU_VM                  0x13
>  #define DRM_AMDGPU_FREESYNC            0x14
> +#define DRM_AMDGPU_FENCE_TO_HANDLE     0x15
>
>  #define DRM_IOCTL_AMDGPU_GEM_CREATE    DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>  #define DRM_IOCTL_AMDGPU_GEM_MMAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
> @@ -69,6 +70,7 @@ extern "C" {
>  #define DRM_IOCTL_AMDGPU_WAIT_FENCES   DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>  #define DRM_IOCTL_AMDGPU_VM            DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
>  #define DRM_IOCTL_AMDGPU_FREESYNC      DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
> +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
>
>  #define AMDGPU_GEM_DOMAIN_CPU          0x1
>  #define AMDGPU_GEM_DOMAIN_GTT          0x2
> @@ -517,6 +519,20 @@ struct drm_amdgpu_cs_chunk_sem {
>         __u32 handle;
>  };
>
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ     0
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD  1
> +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD        2
> +
> +union drm_amdgpu_fence_to_handle {
> +       struct {
> +               struct drm_amdgpu_fence fence;
> +               __u32 what;
> +       } in;
> +       struct {
> +               __u32 handle;
> +       } out;
> +};
> +
>  struct drm_amdgpu_cs_chunk_data {
>         union {
>                 struct drm_amdgpu_cs_chunk_ib           ib_data;
> --
> 2.7.4
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
       [not found]       ` <CACvgo51-pfQJgas_VhVv3gyp7DDAQfAtpb6pxNN7c54xDc46wQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-28 22:06         ` Marek Olšák
  0 siblings, 0 replies; 36+ messages in thread
From: Marek Olšák @ 2017-09-28 22:06 UTC (permalink / raw)
  To: Emil Velikov; +Cc: amd-gfx mailing list, ML dri-devel

On Thu, Sep 14, 2017 at 10:01 AM, Emil Velikov <emil.l.velikov@gmail.com> wrote:
> On 14 September 2017 at 08:56, Emil Velikov <emil.l.velikov@gmail.com> wrote:
>> Hi Marek,
>>
>> On 12 September 2017 at 21:42, Marek Olšák <maraeo@gmail.com> wrote:
>>
>>>  include/drm/drm_syncobj.h     |  4 ++++
>> Please sync the header as described in
>> https://cgit.freedesktop.org/mesa/drm/tree/include/drm/README#n72
>>
>> Tl;DR: cd .../linux; make headers_install; cp ... .../drm/include/drm;
>> cd .../drm; git commit -sm ".... $branch $sha1..."
>>
> Seems like I've replied to the wrong patch - silly me.
> This was meant for the libdrm ones - suggestion still applies though.

I actually did that, but I'm on amd-staging-drm-next.

Marek
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
  2017-09-28 20:41     ` Marek Olšák
@ 2017-09-28 23:42       ` Dave Airlie
       [not found]         ` <CAPM=9ty8YhG16QSE8OQTDaAPQxT7mXE=cRn1OJXXznUtcBaRSA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Dave Airlie @ 2017-09-28 23:42 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx mailing list, dri-devel

On 29 September 2017 at 06:41, Marek Olšák <maraeo@gmail.com> wrote:
> Can I get Rb for this series?
>

For the series,

Reviewed-by: Dave Airlie <airlied@redhat.com>

Alex, please merge the two drm core precursor with patch 3.

Dave.
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
       [not found] ` <1505248934-1819-1-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2017-09-12 20:42   ` [PATCH 2/3] drm/syncobj: add a new helper drm_syncobj_get_fd Marek Olšák
@ 2017-09-29  2:44   ` Chunming Zhou
  2017-09-29 14:13     ` Marek Olšák
  1 sibling, 1 reply; 36+ messages in thread
From: Chunming Zhou @ 2017-09-29  2:44 UTC (permalink / raw)
  To: Marek Olšák,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2017年09月13日 04:42, Marek Olšák wrote:
> From: Marek Olšák <marek.olsak@amd.com>
>
> For amdgpu.
>
> drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
> helpers drm_syncobj_create and drm_syncobj_get_handle are added.
>
> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
> ---
>   drivers/gpu/drm/drm_syncobj.c | 49 +++++++++++++++++++++++++++++++++++++++----
>   include/drm/drm_syncobj.h     |  4 ++++
>   2 files changed, 49 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
> index 0422b8c..0bb1741 100644
> --- a/drivers/gpu/drm/drm_syncobj.c
> +++ b/drivers/gpu/drm/drm_syncobj.c
> @@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
>   }
>   EXPORT_SYMBOL(drm_syncobj_free);
>   
> -static int drm_syncobj_create(struct drm_file *file_private,
> -			      u32 *handle, uint32_t flags)
You can add a new parameter for passing dma fence, then in patch3, you 
can directly use it for AMDGPU_FENCE_TO HANDLE_GET_SYNCOBJ.

otherwise the set looks good to me.

Regards,
David Zhou

> +/**
> + * drm_syncobj_create - create a new syncobj
> + * @out_syncobj: returned syncobj
> + * @flags: DRM_SYNCOBJ_* flags
> + * @fence: if non-NULL, the syncobj will represent this fence
> + */
> +int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
> +		       struct dma_fence *fence)
>   {
>   	int ret;
>   	struct drm_syncobj *syncobj;
> @@ -284,6 +290,25 @@ static int drm_syncobj_create(struct drm_file *file_private,
>   		}
>   	}
>   
> +	if (fence)
> +		drm_syncobj_replace_fence(syncobj, fence);
> +
> +	*out_syncobj = syncobj;
> +	return 0;
> +}
> +EXPORT_SYMBOL(drm_syncobj_create);
> +
> +/**
> + * drm_syncobj_get_handle - get a handle from a syncobj
> + */
> +int drm_syncobj_get_handle(struct drm_file *file_private,
> +			   struct drm_syncobj *syncobj, u32 *handle)
> +{
> +	int ret;
> +
> +	/* take a reference to put in the idr */
> +	drm_syncobj_get(syncobj);
> +
>   	idr_preload(GFP_KERNEL);
>   	spin_lock(&file_private->syncobj_table_lock);
>   	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
> @@ -299,6 +324,22 @@ static int drm_syncobj_create(struct drm_file *file_private,
>   	*handle = ret;
>   	return 0;
>   }
> +EXPORT_SYMBOL(drm_syncobj_get_handle);
> +
> +static int drm_syncobj_create_as_handle(struct drm_file *file_private,
> +					u32 *handle, uint32_t flags)
> +{
> +	int ret;
> +	struct drm_syncobj *syncobj;
> +
> +	ret = drm_syncobj_create(&syncobj, flags, NULL);
> +	if (ret)
> +		return ret;
> +
> +	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
> +	drm_syncobj_put(syncobj);
> +	return ret;
> +}
>   
>   static int drm_syncobj_destroy(struct drm_file *file_private,
>   			       u32 handle)
> @@ -522,8 +563,8 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
>   	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
>   		return -EINVAL;
>   
> -	return drm_syncobj_create(file_private,
> -				  &args->handle, args->flags);
> +	return drm_syncobj_create_as_handle(file_private,
> +					    &args->handle, args->flags);
>   }
>   
>   int
> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
> index c00fee5..e7f0035 100644
> --- a/include/drm/drm_syncobj.h
> +++ b/include/drm/drm_syncobj.h
> @@ -136,5 +136,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
>   			   u32 handle,
>   			   struct dma_fence **fence);
>   void drm_syncobj_free(struct kref *kref);
> +int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
> +		       struct dma_fence *fence);
> +int drm_syncobj_get_handle(struct drm_file *file_private,
> +			   struct drm_syncobj *syncobj, u32 *handle);
>   
>   #endif

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file
       [not found]         ` <CAPM=9ty8YhG16QSE8OQTDaAPQxT7mXE=cRn1OJXXznUtcBaRSA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-29 12:35           ` Marek Olšák
  0 siblings, 0 replies; 36+ messages in thread
From: Marek Olšák @ 2017-09-29 12:35 UTC (permalink / raw)
  To: Dave Airlie; +Cc: amd-gfx mailing list, dri-devel

On Fri, Sep 29, 2017 at 1:42 AM, Dave Airlie <airlied@gmail.com> wrote:
> On 29 September 2017 at 06:41, Marek Olšák <maraeo@gmail.com> wrote:
>> Can I get Rb for this series?
>>
>
> For the series,
>
> Reviewed-by: Dave Airlie <airlied@redhat.com>
>
> Alex, please merge the two drm core precursor with patch 3.

Alex, this is for drm-next, where I can't push.

Thanks,
Marek
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
  2017-09-29  2:44   ` [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Chunming Zhou
@ 2017-09-29 14:13     ` Marek Olšák
       [not found]       ` <CAAxE2A42G4-eD4x6VunDe4rW2c_GuaChn0Df+HE7wbvQYK0QFA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-29 14:13 UTC (permalink / raw)
  To: Chunming Zhou; +Cc: amd-gfx mailing list, dri-devel

On Fri, Sep 29, 2017 at 4:44 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
>
>
> On 2017年09月13日 04:42, Marek Olšák wrote:
>>
>> From: Marek Olšák <marek.olsak@amd.com>
>>
>> For amdgpu.
>>
>> drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
>> helpers drm_syncobj_create and drm_syncobj_get_handle are added.
>>
>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>> ---
>>   drivers/gpu/drm/drm_syncobj.c | 49
>> +++++++++++++++++++++++++++++++++++++++----
>>   include/drm/drm_syncobj.h     |  4 ++++
>>   2 files changed, 49 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
>> index 0422b8c..0bb1741 100644
>> --- a/drivers/gpu/drm/drm_syncobj.c
>> +++ b/drivers/gpu/drm/drm_syncobj.c
>> @@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
>>   }
>>   EXPORT_SYMBOL(drm_syncobj_free);
>>   -static int drm_syncobj_create(struct drm_file *file_private,
>> -                             u32 *handle, uint32_t flags)
>
> You can add a new parameter for passing dma fence, then in patch3, you can
> directly use it for AMDGPU_FENCE_TO HANDLE_GET_SYNCOBJ.
>
> otherwise the set looks good to me.

Sorry I just pushed this.

Marek
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
       [not found]       ` <CAAxE2A42G4-eD4x6VunDe4rW2c_GuaChn0Df+HE7wbvQYK0QFA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-29 14:15         ` Marek Olšák
       [not found]           ` <CAAxE2A6ecZnArHSu8P_DuHPOGhz0+6TfXtQshKWvhU1=1V4Lyg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-29 14:15 UTC (permalink / raw)
  To: Chunming Zhou; +Cc: amd-gfx mailing list, dri-devel

On Fri, Sep 29, 2017 at 4:13 PM, Marek Olšák <maraeo@gmail.com> wrote:
> On Fri, Sep 29, 2017 at 4:44 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
>>
>>
>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>
>>> From: Marek Olšák <marek.olsak@amd.com>
>>>
>>> For amdgpu.
>>>
>>> drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
>>> helpers drm_syncobj_create and drm_syncobj_get_handle are added.
>>>
>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>> ---
>>>   drivers/gpu/drm/drm_syncobj.c | 49
>>> +++++++++++++++++++++++++++++++++++++++----
>>>   include/drm/drm_syncobj.h     |  4 ++++
>>>   2 files changed, 49 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
>>> index 0422b8c..0bb1741 100644
>>> --- a/drivers/gpu/drm/drm_syncobj.c
>>> +++ b/drivers/gpu/drm/drm_syncobj.c
>>> @@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
>>>   }
>>>   EXPORT_SYMBOL(drm_syncobj_free);
>>>   -static int drm_syncobj_create(struct drm_file *file_private,
>>> -                             u32 *handle, uint32_t flags)
>>
>> You can add a new parameter for passing dma fence, then in patch3, you can
>> directly use it for AMDGPU_FENCE_TO HANDLE_GET_SYNCOBJ.
>>
>> otherwise the set looks good to me.
>
> Sorry I just pushed this.

Actually, you commented on a deleted line. The function already has
dma_fence among the parameters.

Marek
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
       [not found]           ` <CAAxE2A6ecZnArHSu8P_DuHPOGhz0+6TfXtQshKWvhU1=1V4Lyg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-30  1:55             ` Chunming Zhou
  2017-09-30 15:55               ` Marek Olšák
  0 siblings, 1 reply; 36+ messages in thread
From: Chunming Zhou @ 2017-09-30  1:55 UTC (permalink / raw)
  To: Marek Olšák; +Cc: amd-gfx mailing list, dri-devel

[-- Attachment #1: Type: text/plain, Size: 1696 bytes --]

My mean is like the attached, I revert part of yours.

Regards,

David zhou


On 2017年09月29日 22:15, Marek Olšák wrote:
> On Fri, Sep 29, 2017 at 4:13 PM, Marek Olšák <maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>> On Fri, Sep 29, 2017 at 4:44 AM, Chunming Zhou <zhoucm1-5C7GfCeVMHo@public.gmane.org> wrote:
>>>
>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>> From: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org>
>>>>
>>>> For amdgpu.
>>>>
>>>> drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
>>>> helpers drm_syncobj_create and drm_syncobj_get_handle are added.
>>>>
>>>> Signed-off-by: Marek Olšák <marek.olsak-5C7GfCeVMHo@public.gmane.org>
>>>> ---
>>>>    drivers/gpu/drm/drm_syncobj.c | 49
>>>> +++++++++++++++++++++++++++++++++++++++----
>>>>    include/drm/drm_syncobj.h     |  4 ++++
>>>>    2 files changed, 49 insertions(+), 4 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
>>>> index 0422b8c..0bb1741 100644
>>>> --- a/drivers/gpu/drm/drm_syncobj.c
>>>> +++ b/drivers/gpu/drm/drm_syncobj.c
>>>> @@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
>>>>    }
>>>>    EXPORT_SYMBOL(drm_syncobj_free);
>>>>    -static int drm_syncobj_create(struct drm_file *file_private,
>>>> -                             u32 *handle, uint32_t flags)
>>> You can add a new parameter for passing dma fence, then in patch3, you can
>>> directly use it for AMDGPU_FENCE_TO HANDLE_GET_SYNCOBJ.
>>>
>>> otherwise the set looks good to me.
>> Sorry I just pushed this.
> Actually, you commented on a deleted line. The function already has
> dma_fence among the parameters.
>
> Marek


[-- Attachment #2: 0001-drm-Don-t-split-drm_syncobj_create.patch --]
[-- Type: text/x-patch, Size: 4344 bytes --]

>From a34c466f4a8617c18bf191d83bff3a3382166b00 Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou-5C7GfCeVMHo@public.gmane.org>
Date: Sat, 30 Sep 2017 09:53:53 +0800
Subject: [PATCH] drm: Don't split drm_syncobj_create

Change-Id: Icc6e4d8e94236675d6267d211e53698834d29869
Signed-off-by: Chunming Zhou <david1.zhou-5C7GfCeVMHo@public.gmane.org>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c |  6 +----
 drivers/gpu/drm/drm_syncobj.c          | 42 +++++-----------------------------
 include/drm/drm_syncobj.h              |  7 +++---
 3 files changed, 10 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ab83dfc..882becc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1351,14 +1351,10 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
 
 	switch (info->in.what) {
 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
-		r = drm_syncobj_create(&syncobj, 0, fence);
+		r = drm_syncobj_create(filp, fence, &info->out.handle, 0);
 		dma_fence_put(fence);
 		if (r)
 			return r;
-		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
-		drm_syncobj_put(syncobj);
-		return r;
-
 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
 		r = drm_syncobj_create(&syncobj, 0, fence);
 		dma_fence_put(fence);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 62adc7a..28e1463 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -268,8 +268,9 @@ EXPORT_SYMBOL(drm_syncobj_free);
  * @flags: DRM_SYNCOBJ_* flags
  * @fence: if non-NULL, the syncobj will represent this fence
  */
-int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
-		       struct dma_fence *fence)
+int drm_syncobj_create(struct drm_file *file_private,
+		       struct dma_fence *fence,
+		       u32 *handle, uint32_t flags)
 {
 	int ret;
 	struct drm_syncobj *syncobj;
@@ -293,22 +294,6 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 	if (fence)
 		drm_syncobj_replace_fence(syncobj, fence);
 
-	*out_syncobj = syncobj;
-	return 0;
-}
-EXPORT_SYMBOL(drm_syncobj_create);
-
-/**
- * drm_syncobj_get_handle - get a handle from a syncobj
- */
-int drm_syncobj_get_handle(struct drm_file *file_private,
-			   struct drm_syncobj *syncobj, u32 *handle)
-{
-	int ret;
-
-	/* take a reference to put in the idr */
-	drm_syncobj_get(syncobj);
-
 	idr_preload(GFP_KERNEL);
 	spin_lock(&file_private->syncobj_table_lock);
 	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
@@ -324,22 +309,7 @@ int drm_syncobj_get_handle(struct drm_file *file_private,
 	*handle = ret;
 	return 0;
 }
-EXPORT_SYMBOL(drm_syncobj_get_handle);
-
-static int drm_syncobj_create_as_handle(struct drm_file *file_private,
-					u32 *handle, uint32_t flags)
-{
-	int ret;
-	struct drm_syncobj *syncobj;
-
-	ret = drm_syncobj_create(&syncobj, flags, NULL);
-	if (ret)
-		return ret;
-
-	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
-	drm_syncobj_put(syncobj);
-	return ret;
-}
+EXPORT_SYMBOL(drm_syncobj_create);
 
 static int drm_syncobj_destroy(struct drm_file *file_private,
 			       u32 handle)
@@ -568,8 +538,8 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
 	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
 		return -EINVAL;
 
-	return drm_syncobj_create_as_handle(file_private,
-					    &args->handle, args->flags);
+	return drm_syncobj_create(file_private, NULL,
+				  &args->handle, args->flags);
 }
 
 int
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 43e2f38..4e3025e 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -136,10 +136,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 			   u32 handle,
 			   struct dma_fence **fence);
 void drm_syncobj_free(struct kref *kref);
-int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
-		       struct dma_fence *fence);
-int drm_syncobj_get_handle(struct drm_file *file_private,
-			   struct drm_syncobj *syncobj, u32 *handle);
+int drm_syncobj_create(struct drm_file *file_private,
+		       struct dma_fence *fence,
+		       u32 *handle, uint32_t flags);
 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd);
 
 #endif
-- 
2.7.4


[-- Attachment #3: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
  2017-09-30  1:55             ` Chunming Zhou
@ 2017-09-30 15:55               ` Marek Olšák
       [not found]                 ` <CAAxE2A6gfDHWU73ji+KVbtZH8pG1f8-1Ts2_FUh9MjyYa+b=oA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Marek Olšák @ 2017-09-30 15:55 UTC (permalink / raw)
  To: Chunming Zhou; +Cc: amd-gfx mailing list, dri-devel

The idea sounds good.

Marek

On Sat, Sep 30, 2017 at 3:55 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
> My mean is like the attached, I revert part of yours.
>
> Regards,
>
> David zhou
>
>
>
> On 2017年09月29日 22:15, Marek Olšák wrote:
>>
>> On Fri, Sep 29, 2017 at 4:13 PM, Marek Olšák <maraeo@gmail.com> wrote:
>>>
>>> On Fri, Sep 29, 2017 at 4:44 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
>>>>
>>>>
>>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>>>
>>>>> From: Marek Olšák <marek.olsak@amd.com>
>>>>>
>>>>> For amdgpu.
>>>>>
>>>>> drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
>>>>> helpers drm_syncobj_create and drm_syncobj_get_handle are added.
>>>>>
>>>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>>>> ---
>>>>>    drivers/gpu/drm/drm_syncobj.c | 49
>>>>> +++++++++++++++++++++++++++++++++++++++----
>>>>>    include/drm/drm_syncobj.h     |  4 ++++
>>>>>    2 files changed, 49 insertions(+), 4 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/drm_syncobj.c
>>>>> b/drivers/gpu/drm/drm_syncobj.c
>>>>> index 0422b8c..0bb1741 100644
>>>>> --- a/drivers/gpu/drm/drm_syncobj.c
>>>>> +++ b/drivers/gpu/drm/drm_syncobj.c
>>>>> @@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
>>>>>    }
>>>>>    EXPORT_SYMBOL(drm_syncobj_free);
>>>>>    -static int drm_syncobj_create(struct drm_file *file_private,
>>>>> -                             u32 *handle, uint32_t flags)
>>>>
>>>> You can add a new parameter for passing dma fence, then in patch3, you
>>>> can
>>>> directly use it for AMDGPU_FENCE_TO HANDLE_GET_SYNCOBJ.
>>>>
>>>> otherwise the set looks good to me.
>>>
>>> Sorry I just pushed this.
>>
>> Actually, you commented on a deleted line. The function already has
>> dma_fence among the parameters.
>>
>> Marek
>
>
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
       [not found]                 ` <CAAxE2A6gfDHWU73ji+KVbtZH8pG1f8-1Ts2_FUh9MjyYa+b=oA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-09-30 23:20                   ` Zhou, David(ChunMing)
       [not found]                     ` <MWHPR1201MB02061877B54A8E252C7D2196B47F0-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
  0 siblings, 1 reply; 36+ messages in thread
From: Zhou, David(ChunMing) @ 2017-09-30 23:20 UTC (permalink / raw)
  To: Marek Olšák
  Cc: Zhou, David(ChunMing),
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 2040 bytes --]

Could you test and review it? On hand, I have no env.

Regards,
David Zhou


发自坚果 Pro

Marek Olšák <maraeo@gmail.com> 于 2017年9月30日 下午11:56写道:

The idea sounds good.

Marek

On Sat, Sep 30, 2017 at 3:55 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
> My mean is like the attached, I revert part of yours.
>
> Regards,
>
> David zhou
>
>
>
> On 2017年09月29日 22:15, Marek Olšák wrote:
>>
>> On Fri, Sep 29, 2017 at 4:13 PM, Marek Olšák <maraeo@gmail.com> wrote:
>>>
>>> On Fri, Sep 29, 2017 at 4:44 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
>>>>
>>>>
>>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>>>
>>>>> From: Marek Olšák <marek.olsak@amd.com>
>>>>>
>>>>> For amdgpu.
>>>>>
>>>>> drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
>>>>> helpers drm_syncobj_create and drm_syncobj_get_handle are added.
>>>>>
>>>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>>>> ---
>>>>>    drivers/gpu/drm/drm_syncobj.c | 49
>>>>> +++++++++++++++++++++++++++++++++++++++----
>>>>>    include/drm/drm_syncobj.h     |  4 ++++
>>>>>    2 files changed, 49 insertions(+), 4 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/drm_syncobj.c
>>>>> b/drivers/gpu/drm/drm_syncobj.c
>>>>> index 0422b8c..0bb1741 100644
>>>>> --- a/drivers/gpu/drm/drm_syncobj.c
>>>>> +++ b/drivers/gpu/drm/drm_syncobj.c
>>>>> @@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
>>>>>    }
>>>>>    EXPORT_SYMBOL(drm_syncobj_free);
>>>>>    -static int drm_syncobj_create(struct drm_file *file_private,
>>>>> -                             u32 *handle, uint32_t flags)
>>>>
>>>> You can add a new parameter for passing dma fence, then in patch3, you
>>>> can
>>>> directly use it for AMDGPU_FENCE_TO HANDLE_GET_SYNCOBJ.
>>>>
>>>> otherwise the set looks good to me.
>>>
>>> Sorry I just pushed this.
>>
>> Actually, you commented on a deleted line. The function already has
>> dma_fence among the parameters.
>>
>> Marek
>
>

[-- Attachment #1.2: Type: text/html, Size: 4629 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create
       [not found]                     ` <MWHPR1201MB02061877B54A8E252C7D2196B47F0-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
@ 2017-10-12 20:33                       ` Marek Olšák
  0 siblings, 0 replies; 36+ messages in thread
From: Marek Olšák @ 2017-10-12 20:33 UTC (permalink / raw)
  To: Zhou, David(ChunMing)
  Cc: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, amd-gfx mailing list

Sorry, this codepath is not tested by radeonsi.

Marek

On Sun, Oct 1, 2017 at 1:20 AM, Zhou, David(ChunMing)
<David1.Zhou@amd.com> wrote:
> Could you test and review it? On hand, I have no env.
>
> Regards,
> David Zhou
>
> 发自坚果 Pro
>
> Marek Olšák <maraeo@gmail.com> 于 2017年9月30日 下午11:56写道:
>
> The idea sounds good.
>
> Marek
>
> On Sat, Sep 30, 2017 at 3:55 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
>> My mean is like the attached, I revert part of yours.
>>
>> Regards,
>>
>> David zhou
>>
>>
>>
>> On 2017年09月29日 22:15, Marek Olšák wrote:
>>>
>>> On Fri, Sep 29, 2017 at 4:13 PM, Marek Olšák <maraeo@gmail.com> wrote:
>>>>
>>>> On Fri, Sep 29, 2017 at 4:44 AM, Chunming Zhou <zhoucm1@amd.com> wrote:
>>>>>
>>>>>
>>>>> On 2017年09月13日 04:42, Marek Olšák wrote:
>>>>>>
>>>>>> From: Marek Olšák <marek.olsak@amd.com>
>>>>>>
>>>>>> For amdgpu.
>>>>>>
>>>>>> drm_syncobj_create is renamed to drm_syncobj_create_as_handle, and new
>>>>>> helpers drm_syncobj_create and drm_syncobj_get_handle are added.
>>>>>>
>>>>>> Signed-off-by: Marek Olšák <marek.olsak@amd.com>
>>>>>> ---
>>>>>>    drivers/gpu/drm/drm_syncobj.c | 49
>>>>>> +++++++++++++++++++++++++++++++++++++++----
>>>>>>    include/drm/drm_syncobj.h     |  4 ++++
>>>>>>    2 files changed, 49 insertions(+), 4 deletions(-)
>>>>>>
>>>>>> diff --git a/drivers/gpu/drm/drm_syncobj.c
>>>>>> b/drivers/gpu/drm/drm_syncobj.c
>>>>>> index 0422b8c..0bb1741 100644
>>>>>> --- a/drivers/gpu/drm/drm_syncobj.c
>>>>>> +++ b/drivers/gpu/drm/drm_syncobj.c
>>>>>> @@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
>>>>>>    }
>>>>>>    EXPORT_SYMBOL(drm_syncobj_free);
>>>>>>    -static int drm_syncobj_create(struct drm_file *file_private,
>>>>>> -                             u32 *handle, uint32_t flags)
>>>>>
>>>>> You can add a new parameter for passing dma fence, then in patch3, you
>>>>> can
>>>>> directly use it for AMDGPU_FENCE_TO HANDLE_GET_SYNCOBJ.
>>>>>
>>>>> otherwise the set looks good to me.
>>>>
>>>> Sorry I just pushed this.
>>>
>>> Actually, you commented on a deleted line. The function already has
>>> dma_fence among the parameters.
>>>
>>> Marek
>>
>>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

end of thread, other threads:[~2017-10-12 20:33 UTC | newest]

Thread overview: 36+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-12 20:42 [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Marek Olšák
2017-09-12 20:42 ` [PATCH 3/3] drm/amdgpu: add FENCE_TO_HANDLE ioctl that returns syncobj or sync_file Marek Olšák
     [not found]   ` <1505248934-1819-3-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-09-13  3:03     ` zhoucm1
2017-09-13  6:57       ` Christian König
     [not found]         ` <10bed18c-f41b-5570-c299-2be190767a20-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-13  7:03           ` zhoucm1
2017-09-13  7:09             ` Christian König
     [not found]               ` <a6c564e8-fe75-0e05-d267-e6dc7255b4af-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-13  7:39                 ` zhoucm1
2017-09-13  8:07                   ` Christian König
     [not found]                     ` <58914d9d-f140-9cf3-f91b-78db7d913d12-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-13  9:57                       ` zhoucm1
2017-09-13 10:11       ` Marek Olšák
     [not found]         ` <CAAxE2A7aR15Y9C2r_V_+6fTp-kEuFKwDApQRCnx_rxdsDOKtmw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-13 11:32           ` Zhou, David(ChunMing)
     [not found]             ` <MWHPR1201MB020689874960A24D32C4A39CB46E0-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-09-13 12:06               ` Marek Olšák
2017-09-13 12:25                 ` Zhou, David(ChunMing)
2017-09-13 13:04                   ` Christian König
     [not found]                     ` <8927d6b6-f87c-00d9-57af-fdf4fb6dfcb8-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-13 13:46                       ` Zhou, David(ChunMing)
2017-09-13 14:06                         ` Marek Olšák
     [not found]                           ` <CAAxE2A72RAC61KOF10GTuged-pM4kpKGnnMJh-nmtF-xM4f9AQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-13 14:16                             ` Christian König
     [not found]                               ` <be64ba03-3aa9-6eaf-9267-c3ec51882c12-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-14  1:52                                 ` Dave Airlie
     [not found]                                   ` <CAPM=9typSOcf3L5Y6acDChY6R5fKMQ44LdCvKp_YZbX7B1LWiw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-14  2:07                                     ` zhoucm1
     [not found]                                       ` <8a800613-07ce-b417-8a1e-0db5a2e31758-5C7GfCeVMHo@public.gmane.org>
2017-09-14  2:26                                         ` zhoucm1
2017-09-14  2:33                                           ` Dave Airlie
2017-09-14  2:34                                             ` zhoucm1
2017-09-28 20:41     ` Marek Olšák
2017-09-28 23:42       ` Dave Airlie
     [not found]         ` <CAPM=9ty8YhG16QSE8OQTDaAPQxT7mXE=cRn1OJXXznUtcBaRSA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-29 12:35           ` Marek Olšák
2017-09-14  7:56 ` [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Emil Velikov
     [not found]   ` <CACvgo50uprnZZ_+wXLH59Knq2SUCZwoFsyYH6ydcNJQ9Eiaw3A-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-14  8:01     ` Emil Velikov
     [not found]       ` <CACvgo51-pfQJgas_VhVv3gyp7DDAQfAtpb6pxNN7c54xDc46wQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-28 22:06         ` Marek Olšák
     [not found] ` <1505248934-1819-1-git-send-email-maraeo-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-09-12 20:42   ` [PATCH 2/3] drm/syncobj: add a new helper drm_syncobj_get_fd Marek Olšák
2017-09-29  2:44   ` [PATCH 1/3] drm/syncobj: extract two helpers from drm_syncobj_create Chunming Zhou
2017-09-29 14:13     ` Marek Olšák
     [not found]       ` <CAAxE2A42G4-eD4x6VunDe4rW2c_GuaChn0Df+HE7wbvQYK0QFA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-29 14:15         ` Marek Olšák
     [not found]           ` <CAAxE2A6ecZnArHSu8P_DuHPOGhz0+6TfXtQshKWvhU1=1V4Lyg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-30  1:55             ` Chunming Zhou
2017-09-30 15:55               ` Marek Olšák
     [not found]                 ` <CAAxE2A6gfDHWU73ji+KVbtZH8pG1f8-1Ts2_FUh9MjyYa+b=oA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-09-30 23:20                   ` Zhou, David(ChunMing)
     [not found]                     ` <MWHPR1201MB02061877B54A8E252C7D2196B47F0-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-10-12 20:33                       ` Marek Olšák

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.