* [PATCH] drm/amdgpu: add the interface of waiting multiple fences (v3)
@ 2016-09-12 16:40 Alex Deucher
       [not found] ` <1473698408-17817-1-git-send-email-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 5+ messages in thread
From: Alex Deucher @ 2016-09-12 16:40 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Junwei Zhang

From: Junwei Zhang <Jerry.Zhang@amd.com>

v2: agd: rebase and squash in all the previous optimizations and
changes so everything compiles.
v3: squash in Slava's 32bit build fix

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |   2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 171 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |   1 +
 include/uapi/drm/amdgpu_drm.h           |  27 +++++
 4 files changed, 201 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 39baabe..4d6c42f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1855,6 +1855,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp);
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
 
 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e29e7b9..384856c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1105,6 +1105,177 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
+ * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @user: drm_amdgpu_fence copied from user space
+ */
+static struct fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
+					 struct drm_file *filp,
+					 struct drm_amdgpu_fence *user)
+{
+	struct amdgpu_ring *ring;
+	struct amdgpu_ctx *ctx;
+	struct fence *fence;
+	int r;
+
+	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
+			       user->ring, &ring);
+	if (r)
+		return ERR_PTR(r);
+
+	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
+	if (ctx == NULL)
+		return ERR_PTR(-EINVAL);
+
+	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+	amdgpu_ctx_put(ctx);
+
+	return fence;
+}
+
+/**
+ * amdgpu_cs_wait_all_fences - wait on all fences to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+				     struct drm_file *filp,
+				     union drm_amdgpu_wait_fences *wait,
+				     struct drm_amdgpu_fence *fences)
+{
+	uint32_t fence_count = wait->in.fence_count;
+	unsigned i;
+	long r = 1;
+
+	for (i = 0; i < fence_count; i++) {
+		struct fence *fence;
+		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+
+		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
+		else if (!fence)
+			continue;
+
+		r = fence_wait_timeout(fence, true, timeout);
+		if (r < 0)
+			return r;
+
+		if (r == 0)
+			break;
+	}
+
+	memset(wait, 0, sizeof(*wait));
+	wait->out.status = (r > 0);
+
+	return 0;
+}
+
+/**
+ * amdgpu_cs_wait_any_fence - wait on any fence to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
+				    struct drm_file *filp,
+				    union drm_amdgpu_wait_fences *wait,
+				    struct drm_amdgpu_fence *fences)
+{
+	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+	uint32_t fence_count = wait->in.fence_count;
+	struct fence **array;
+	unsigned i;
+	long r;
+
+	/* Prepare the fence array */
+	array = (struct fence **)kcalloc(fence_count, sizeof(struct fence *),
+			GFP_KERNEL);
+	if (array == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < fence_count; i++) {
+		struct fence *fence;
+
+		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+		if (IS_ERR(fence)) {
+			r = PTR_ERR(fence);
+			goto err_free_fence_array;
+		} else if (fence) {
+			array[i] = fence;
+	} else { /* NULL, the fence has already been signaled */
+			r = 1;
+			goto out;
+		}
+	}
+
+	r = fence_wait_any_timeout(array, fence_count, true, timeout);
+	if (r < 0)
+		goto err_free_fence_array;
+
+out:
+	memset(wait, 0, sizeof(*wait));
+	wait->out.status = (r > 0);
+	/* set return value 0 to indicate success */
+	r = 0;
+
+err_free_fence_array:
+	for (i = 0; i < fence_count; i++)
+		fence_put(array[i]);
+	kfree(array);
+
+	return r;
+}
+
+/**
+ * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ */
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	union drm_amdgpu_wait_fences *wait = data;
+	uint32_t fence_count = wait->in.fence_count;
+	struct drm_amdgpu_fence *fences_user;
+	struct drm_amdgpu_fence *fences;
+	int r;
+
+	/* Get the fences from userspace */
+	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
+			GFP_KERNEL);
+	if (fences == NULL)
+		return -ENOMEM;
+
+	fences_user = (void __user *)(unsigned long)(wait->in.fences);
+	if (copy_from_user(fences, fences_user,
+		sizeof(struct drm_amdgpu_fence) * fence_count)) {
+		r = -EFAULT;
+		goto err_free_fences;
+	}
+
+	if (wait->in.wait_all)
+		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
+	else
+		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
+
+err_free_fences:
+	kfree(fences);
+
+	return r;
+}
+
+/**
  * amdgpu_cs_find_bo_va - find bo_va for VM address
  *
  * @parser: command submission parser context
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb1..88ca1dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -775,6 +775,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d6b5a21..dd9c99c 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -50,6 +50,7 @@ extern "C" {
 #define DRM_AMDGPU_WAIT_CS		0x09
 #define DRM_AMDGPU_GEM_OP		0x10
 #define DRM_AMDGPU_GEM_USERPTR		0x11
+#define DRM_AMDGPU_WAIT_FENCES		0x12
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_WAIT_CS	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
 #define DRM_IOCTL_AMDGPU_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
 
 #define AMDGPU_GEM_DOMAIN_CPU		0x1
 #define AMDGPU_GEM_DOMAIN_GTT		0x2
@@ -305,6 +307,31 @@ union drm_amdgpu_wait_cs {
 	struct drm_amdgpu_wait_cs_out out;
 };
 
+struct drm_amdgpu_fence {
+	__u32 ctx_id;
+	__u32 ip_type;
+	__u32 ip_instance;
+	__u32 ring;
+	__u64 seq_no;
+};
+
+struct drm_amdgpu_wait_fences_in {
+	/** Pointer in user space to an array of struct drm_amdgpu_fence */
+	__u64 fences;
+	__u32 fence_count;
+	__u32 wait_all;
+	__u64 timeout_ns;
+};
+
+struct drm_amdgpu_wait_fences_out {
+	__u64 status;
+};
+
+union drm_amdgpu_wait_fences {
+	struct drm_amdgpu_wait_fences_in in;
+	struct drm_amdgpu_wait_fences_out out;
+};
+
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
 #define AMDGPU_GEM_OP_SET_PLACEMENT		1
 
-- 
2.5.5
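
As a rough illustration (not part of the patch), this is how userspace could
drive the new ioctl through libdrm's generic drmCommandWriteRead() helper.
The helper name, context id and sequence numbers are placeholders; the
timeout is passed as all ones, which amdgpu_gem_timeout() maps to an
unbounded wait.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Hypothetical helper: wait until two earlier GFX submissions on one
 * context have both signaled. */
static int wait_for_two_submissions(int fd, uint32_t ctx_id,
				    uint64_t seq0, uint64_t seq1)
{
	struct drm_amdgpu_fence fences[2];
	union drm_amdgpu_wait_fences args;
	int r;

	memset(fences, 0, sizeof(fences));
	fences[0].ctx_id = ctx_id;
	fences[0].ip_type = AMDGPU_HW_IP_GFX;
	fences[0].seq_no = seq0;
	fences[1] = fences[0];
	fences[1].seq_no = seq1;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uintptr_t)fences;
	args.in.fence_count = 2;
	args.in.wait_all = 1;		/* all fences must signal */
	args.in.timeout_ns = ~0ULL;	/* negative value: no timeout */

	r = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_FENCES,
				&args, sizeof(args));
	if (r)
		return r;

	/* out.status is non-zero when the wait finished before the timeout */
	return args.out.status ? 0 : -ETIME;
}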


* Re: [PATCH] drm/amdgpu: add the interface of waiting multiple fences (v3)
       [not found] ` <1473698408-17817-1-git-send-email-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
@ 2016-09-12 18:34   ` Bas Nieuwenhuizen
       [not found]     ` <CAP+8YyGL9C9rM0c5r5R0GuRQhQptE=3baaeLB71PLGYMJuNS+A-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 5+ messages in thread
From: Bas Nieuwenhuizen @ 2016-09-12 18:34 UTC (permalink / raw)
  To: Alex Deucher; +Cc: Junwei Zhang, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

It seems like this is missing the update corresponding to the libdrm patch
"amdgpu: cs_wait_fences now can return the first signaled fence
index". Is the kernel patch for that update also available, or is it going
to be sent to the list?

Yours sincerely,
Bas Nieuwenhuizen
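
A rough sketch of what that follow-up could look like on the uapi side,
assuming the output union simply gains an index field (the actual layout
and field names in the eventual kernel patch may differ):

struct drm_amdgpu_wait_fences_out {
	__u32 status;		/* non-zero if the wait succeeded */
	__u32 first_signaled;	/* index into the fences array; only
				 * meaningful for wait_all == 0 */
};

amdgpu_cs_wait_any_fence() would then have to learn which array entry
signaled first, presumably from fence_wait_any_timeout() or an equivalent,
and copy that index into first_signaled before returning.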


* Re: [PATCH] drm/amdgpu: add the interface of waiting multiple fences (v3)
       [not found]     ` <CAP+8YyGL9C9rM0c5r5R0GuRQhQptE=3baaeLB71PLGYMJuNS+A-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2016-09-12 18:45       ` Christian König
       [not found]         ` <d1d8996e-88af-4ea1-4659-e0bc8e190898-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 5+ messages in thread
From: Christian König @ 2016-09-12 18:45 UTC (permalink / raw)
  To: Bas Nieuwenhuizen, Alex Deucher
  Cc: Junwei Zhang, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Yeah, we added that later on.

Looks like Alex (or somebody else) squashed the original and the follow-up
patch for libdrm together, but forgot to do so for the kernel side.

Just wait a moment; if Alex can't find it offhand, I'm going to take a
look through the branches tomorrow.

Regards,
Christian.

Am 12.09.2016 um 20:34 schrieb Bas Nieuwenhuizen:
> Seems like this is missing the update corresponding to libdrm patch
> "amdgpu: cs_wait_fences now can return the first signaled fence
> index".  Is the kernel patch for that update also available or going
> to be sent to the list?
>
> Yours sincerely,
> Bas Nieuwenhuizen
>
> On Mon, Sep 12, 2016 at 6:40 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>> From: Junwei Zhang <Jerry.Zhang@amd.com>
>>
>> v2: agd: rebase and squash in all the previous optimizations and
>> changes so everything compiles.
>> v3: squash in Slava's 32bit build fix
>>
>> Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
>> Reviewed-by: Monk Liu <monk.liu@amd.com>
>> Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |   2 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 171 ++++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |   1 +
>>   include/uapi/drm/amdgpu_drm.h           |  27 +++++
>>   4 files changed, 201 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index 39baabe..4d6c42f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -1855,6 +1855,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>>                          struct drm_file *filp);
>>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
>>   int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
>> +int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>> +                               struct drm_file *filp);
>>
>>   int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
>>                                  struct drm_file *filp);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> index e29e7b9..384856c 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> @@ -1105,6 +1105,177 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
>>   }
>>
>>   /**
>> + * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
>> + *
>> + * @adev: amdgpu device
>> + * @filp: file private
>> + * @user: drm_amdgpu_fence copied from user space
>> + */
>> +static struct fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
>> +                                        struct drm_file *filp,
>> +                                        struct drm_amdgpu_fence *user)
>> +{
>> +       struct amdgpu_ring *ring;
>> +       struct amdgpu_ctx *ctx;
>> +       struct fence *fence;
>> +       int r;
>> +
>> +       r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
>> +                              user->ring, &ring);
>> +       if (r)
>> +               return ERR_PTR(r);
>> +
>> +       ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
>> +       if (ctx == NULL)
>> +               return ERR_PTR(-EINVAL);
>> +
>> +       fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
>> +       amdgpu_ctx_put(ctx);
>> +
>> +       return fence;
>> +}
>> +
>> +/**
>> + * amdgpu_cs_wait_all_fence - wait on all fences to signal
>> + *
>> + * @adev: amdgpu device
>> + * @filp: file private
>> + * @wait: wait parameters
>> + * @fences: array of drm_amdgpu_fence
>> + */
>> +static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
>> +                                    struct drm_file *filp,
>> +                                    union drm_amdgpu_wait_fences *wait,
>> +                                    struct drm_amdgpu_fence *fences)
>> +{
>> +       uint32_t fence_count = wait->in.fence_count;
>> +       unsigned i;
>> +       long r = 1;
>> +
>> +       for (i = 0; i < fence_count; i++) {
>> +               struct fence *fence;
>> +               unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
>> +
>> +               fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
>> +               if (IS_ERR(fence))
>> +                       return PTR_ERR(fence);
>> +               else if (!fence)
>> +                       continue;
>> +
>> +               r = fence_wait_timeout(fence, true, timeout);
>> +               if (r < 0)
>> +                       return r;
>> +
>> +               if (r == 0)
>> +                       break;
>> +       }
>> +
>> +       memset(wait, 0, sizeof(*wait));
>> +       wait->out.status = (r > 0);
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_cs_wait_any_fence - wait on any fence to signal
>> + *
>> + * @adev: amdgpu device
>> + * @filp: file private
>> + * @wait: wait parameters
>> + * @fences: array of drm_amdgpu_fence
>> + */
>> +static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
>> +                                   struct drm_file *filp,
>> +                                   union drm_amdgpu_wait_fences *wait,
>> +                                   struct drm_amdgpu_fence *fences)
>> +{
>> +       unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
>> +       uint32_t fence_count = wait->in.fence_count;
>> +       struct fence **array;
>> +       unsigned i;
>> +       long r;
>> +
>> +       /* Prepare the fence array */
>> +       array = (struct fence **)kcalloc(fence_count, sizeof(struct fence *),
>> +                       GFP_KERNEL);
>> +       if (array == NULL)
>> +               return -ENOMEM;
>> +
>> +       for (i = 0; i < fence_count; i++) {
>> +               struct fence *fence;
>> +
>> +               fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
>> +               if (IS_ERR(fence)) {
>> +                       r = PTR_ERR(fence);
>> +                       goto err_free_fence_array;
>> +               } else if (fence) {
>> +                       array[i] = fence;
>> +               } else { /* NULL, the fence has been already signaled */
>> +                       r = 1;
>> +                       goto out;
>> +               }
>> +       }
>> +
>> +       r = fence_wait_any_timeout(array, fence_count, true, timeout);
>> +       if (r < 0)
>> +               goto err_free_fence_array;
>> +
>> +out:
>> +       memset(wait, 0, sizeof(*wait));
>> +       wait->out.status = (r > 0);
>> +       /* set return value 0 to indicate success */
>> +       r = 0;
>> +
>> +err_free_fence_array:
>> +       for (i = 0; i < fence_count; i++)
>> +               fence_put(array[i]);
>> +       kfree(array);
>> +
>> +       return r;
>> +}
>> +
>> +/**
>> + * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
>> + *
>> + * @dev: drm device
>> + * @data: data from userspace
>> + * @filp: file private
>> + */
>> +int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
>> +                               struct drm_file *filp)
>> +{
>> +       struct amdgpu_device *adev = dev->dev_private;
>> +       union drm_amdgpu_wait_fences *wait = data;
>> +       uint32_t fence_count = wait->in.fence_count;
>> +       struct drm_amdgpu_fence *fences_user;
>> +       struct drm_amdgpu_fence *fences;
>> +       int r;
>> +
>> +       /* Get the fences from userspace */
>> +       fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
>> +                       GFP_KERNEL);
>> +       if (fences == NULL)
>> +               return -ENOMEM;
>> +
>> +       fences_user = (void __user *)(unsigned long)(wait->in.fences);
>> +       if (copy_from_user(fences, fences_user,
>> +               sizeof(struct drm_amdgpu_fence) * fence_count)) {
>> +               r = -EFAULT;
>> +               goto err_free_fences;
>> +       }
>> +
>> +       if (wait->in.wait_all)
>> +               r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
>> +       else
>> +               r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
>> +
>> +err_free_fences:
>> +       kfree(fences);
>> +
>> +       return r;
>> +}
>> +
>> +/**
>>    * amdgpu_cs_find_bo_va - find bo_va for VM address
>>    *
>>    * @parser: command submission parser context
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> index c2c7fb1..88ca1dc 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> @@ -775,6 +775,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
>>          DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>          DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>          DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>> +       DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>          DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>          DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>>          DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
>> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
>> index d6b5a21..dd9c99c 100644
>> --- a/include/uapi/drm/amdgpu_drm.h
>> +++ b/include/uapi/drm/amdgpu_drm.h
>> @@ -50,6 +50,7 @@ extern "C" {
>>   #define DRM_AMDGPU_WAIT_CS             0x09
>>   #define DRM_AMDGPU_GEM_OP              0x10
>>   #define DRM_AMDGPU_GEM_USERPTR         0x11
>> +#define DRM_AMDGPU_WAIT_FENCES         0x12
>>
>>   #define DRM_IOCTL_AMDGPU_GEM_CREATE    DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
>>   #define DRM_IOCTL_AMDGPU_GEM_MMAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
>> @@ -63,6 +64,7 @@ extern "C" {
>>   #define DRM_IOCTL_AMDGPU_WAIT_CS       DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
>>   #define DRM_IOCTL_AMDGPU_GEM_OP                DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
>>   #define DRM_IOCTL_AMDGPU_GEM_USERPTR   DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
>> +#define DRM_IOCTL_AMDGPU_WAIT_FENCES   DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
>>
>>   #define AMDGPU_GEM_DOMAIN_CPU          0x1
>>   #define AMDGPU_GEM_DOMAIN_GTT          0x2
>> @@ -305,6 +307,31 @@ union drm_amdgpu_wait_cs {
>>          struct drm_amdgpu_wait_cs_out out;
>>   };
>>
>> +struct drm_amdgpu_fence {
>> +       __u32 ctx_id;
>> +       __u32 ip_type;
>> +       __u32 ip_instance;
>> +       __u32 ring;
>> +       __u64 seq_no;
>> +};
>> +
>> +struct drm_amdgpu_wait_fences_in {
>> +       /** This points to uint64_t * which points to fences */
>> +       __u64 fences;
>> +       __u32 fence_count;
>> +       __u32 wait_all;
>> +       __u64 timeout_ns;
>> +};
>> +
>> +struct drm_amdgpu_wait_fences_out {
>> +       __u64 status;
>> +};
>> +
>> +union drm_amdgpu_wait_fences {
>> +       struct drm_amdgpu_wait_fences_in in;
>> +       struct drm_amdgpu_wait_fences_out out;
>> +};
>> +
>>   #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO      0
>>   #define AMDGPU_GEM_OP_SET_PLACEMENT            1
>>
>> --
>> 2.5.5
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH] drm/amdgpu: add the interface of waiting multiple fences (v3)
       [not found]         ` <d1d8996e-88af-4ea1-4659-e0bc8e190898-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2016-09-12 18:48           ` Deucher, Alexander
       [not found]             ` <BN6PR12MB16839DB76854145B5D00E505F7FF0-/b2+HYfkarTZ9ihocuPUdQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 5+ messages in thread
From: Deucher, Alexander @ 2016-09-12 18:48 UTC (permalink / raw)
  To: 'Christian König', Bas Nieuwenhuizen, Alex Deucher
  Cc: Zhang, Jerry, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf
> Of Christian König
> Sent: Monday, September 12, 2016 2:46 PM
> To: Bas Nieuwenhuizen; Alex Deucher
> Cc: Zhang, Jerry; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amdgpu: add the interface of waiting multiple
> fences (v3)
> 
> Yeah, we added that later on.
> 
> Looks like Alex (or somebody else) squashed the original and follow up
> patch for libdrm together, but forgot to do so for the kernel side.
> 
> Just wait a moment, if Alex can't find that of hand I'm going to take a
> look through the branches tomorrow.

I found it.  Looks like the commit message got messed up.  I'll fix it up and send it out shortly.

Alex


* Re: [PATCH] drm/amdgpu: add the interface of waiting multiple fences (v3)
       [not found]             ` <BN6PR12MB16839DB76854145B5D00E505F7FF0-/b2+HYfkarTZ9ihocuPUdQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2016-09-26 19:54               ` Dave Airlie
  0 siblings, 0 replies; 5+ messages in thread
From: Dave Airlie @ 2016-09-26 19:54 UTC (permalink / raw)
  To: Deucher, Alexander
  Cc: Alex Deucher, Zhang, Jerry, Christian König,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Bas Nieuwenhuizen

On 13 September 2016 at 04:48, Deucher, Alexander
<Alexander.Deucher@amd.com> wrote:
>> -----Original Message-----
>> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf
>> Of Christian König
>> Sent: Monday, September 12, 2016 2:46 PM
>> To: Bas Nieuwenhuizen; Alex Deucher
>> Cc: Zhang, Jerry; amd-gfx@lists.freedesktop.org
>> Subject: Re: [PATCH] drm/amdgpu: add the interface of waiting multiple
>> fences (v3)
>>
>> Yeah, we added that later on.
>>
>> Looks like Alex (or somebody else) squashed the original and follow up
>> patch for libdrm together, but forgot to do so for the kernel side.
>>
>> Just wait a moment, if Alex can't find that of hand I'm going to take a
>> look through the branches tomorrow.
>
> I found it.  Looks like the commit message got messed up.  I'll fix it up and send it out shortly.
>

Is this also missing an interface version bump so we know we can use it?

Dave.
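
For reference, such a bump is normally a one-line change to the KMS driver
version in amdgpu_drv.c, which userspace can query via drmGetVersion() before
using the new ioctl. The minor number below is only a placeholder, not the
value that was eventually chosen:

/* amdgpu_drv.c -- sketch only, the exact minor number is an assumption */
#define KMS_DRIVER_MAJOR	3
#define KMS_DRIVER_MINOR	6	/* bumped for AMDGPU_WAIT_FENCES */
#define KMS_DRIVER_PATCHLEVEL	0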
