All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] High priority usermode contexts
@ 2017-01-03 22:54 Andres Rodriguez
       [not found] ` <1483484076-10424-1-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 14+ messages in thread
From: Andres Rodriguez @ 2017-01-03 22:54 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

This patch series provides the initial APIs for high priority contexts.

The current implementation is based on top of the SW scheduler, there
are no HW priorities set yet.

This doesn't provide the quality of service we need for VR. Further changes
to enable HW priorities will be required.

For some performance data collected with multiple apps see:
https://lists.freedesktop.org/archives/amd-gfx/2016-December/004257.html

Test apps available here:
https://github.com/lostgoat/Vulkan

Related libdrm/radv patches to follow
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found] ` <1483484076-10424-1-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2017-01-03 22:54   ` Andres Rodriguez
       [not found]     ` <1483484076-10424-2-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 14+ messages in thread
From: Andres Rodriguez @ 2017-01-03 22:54 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Andres Rodriguez

Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This flag
results in the allocated context receiving a higher scheduler priority
than other contexts system-wide.

Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24 ++++++++++++++++++------
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
 include/uapi/drm/amdgpu_drm.h                 |  3 ++-
 3 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 400c66b..ce470e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,11 +25,17 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority, struct amdgpu_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
+	if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
+		return -EINVAL;
+
+	if (priority == AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
@@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		struct amdgpu_ring *ring = adev->rings[i];
 		struct amd_sched_rq *rq;
 
-		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+		rq = &ring->sched.sched_rq[priority];
 		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
@@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
+			    int flags,
 			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
 	struct amdgpu_ctx *ctx;
-	int r;
+	int r, priority;
+
+	priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
+		AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		kfree(ctx);
 		return r;
 	}
+
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, ctx);
+	r = amdgpu_ctx_init(adev, priority, ctx);
 	if (r) {
 		idr_remove(&mgr->ctx_handles, *id);
 		*id = 0;
@@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
-	uint32_t id;
+	uint32_t id, flags;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
 	r = 0;
 	id = args->in.ctx_id;
+	flags = args->in.flags;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
-		r = amdgpu_ctx_alloc(adev, fpriv, &id);
+		r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index d8dc681..2e458de 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
 
 enum amd_sched_priority {
 	AMD_SCHED_PRIORITY_KERNEL = 0,
+	AMD_SCHED_PRIORITY_HIGH,
 	AMD_SCHED_PRIORITY_NORMAL,
 	AMD_SCHED_MAX_PRIORITY
 };
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 3961836..702332e 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
 /* unknown cause */
 #define AMDGPU_CTX_UNKNOWN_RESET	3
 
+#define AMDGPU_CTX_FLAG_HIGHPRIORITY	(1 << 0)
 struct drm_amdgpu_ctx_in {
 	/** AMDGPU_CTX_OP_* */
 	__u32	op;
-	/** For future use, no flags defined so far */
+	/** AMDGPU_CTX_FLAG_* */
 	__u32	flags;
 	__u32	ctx_id;
 	__u32	_pad;
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]     ` <1483484076-10424-2-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2017-01-03 22:59       ` Alex Deucher
       [not found]         ` <CADnq5_Mt_-61h6DG77kqepp5psS2JGHJOA-mek513s5HgvtTDA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2017-01-04 11:03       ` Sagalovitch, Serguei
  2017-01-04 11:54       ` Mao, David
  2 siblings, 1 reply; 14+ messages in thread
From: Alex Deucher @ 2017-01-03 22:59 UTC (permalink / raw)
  To: Andres Rodriguez; +Cc: amd-gfx list

On Tue, Jan 3, 2017 at 5:54 PM, Andres Rodriguez <andresx7@gmail.com> wrote:
> Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This flag
> results in the allocated context receiving a higher scheduler priority
> than other contexts system-wide.
>
> Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24 ++++++++++++++++++------
>  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
>  include/uapi/drm/amdgpu_drm.h                 |  3 ++-
>  3 files changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 400c66b..ce470e2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -25,11 +25,17 @@
>  #include <drm/drmP.h>
>  #include "amdgpu.h"
>
> -static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
> +static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority, struct amdgpu_ctx *ctx)
>  {
>         unsigned i, j;
>         int r;
>
> +       if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
> +               return -EINVAL;
> +
> +       if (priority == AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_ADMIN))
> +               return -EACCES;
> +
>         memset(ctx, 0, sizeof(*ctx));
>         ctx->adev = adev;
>         kref_init(&ctx->refcount);
> @@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
>                 struct amdgpu_ring *ring = adev->rings[i];
>                 struct amd_sched_rq *rq;
>
> -               rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
> +               rq = &ring->sched.sched_rq[priority];
>                 r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
>                                           rq, amdgpu_sched_jobs);
>                 if (r)
> @@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
>
>  static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>                             struct amdgpu_fpriv *fpriv,
> +                           int flags,
>                             uint32_t *id)
>  {
>         struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
>         struct amdgpu_ctx *ctx;
> -       int r;
> +       int r, priority;
> +
> +       priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
> +               AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
>
>         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>         if (!ctx)
> @@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>                 kfree(ctx);
>                 return r;
>         }
> +
>         *id = (uint32_t)r;
> -       r = amdgpu_ctx_init(adev, ctx);
> +       r = amdgpu_ctx_init(adev, priority, ctx);
>         if (r) {
>                 idr_remove(&mgr->ctx_handles, *id);
>                 *id = 0;
> @@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
>                      struct drm_file *filp)
>  {
>         int r;
> -       uint32_t id;
> +       uint32_t id, flags;
>
>         union drm_amdgpu_ctx *args = data;
>         struct amdgpu_device *adev = dev->dev_private;
> @@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
>
>         r = 0;
>         id = args->in.ctx_id;
> +       flags = args->in.flags;
>
>         switch (args->in.op) {
>         case AMDGPU_CTX_OP_ALLOC_CTX:
> -               r = amdgpu_ctx_alloc(adev, fpriv, &id);
> +               r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
>                 args->out.alloc.ctx_id = id;
>                 break;
>         case AMDGPU_CTX_OP_FREE_CTX:
> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> index d8dc681..2e458de 100644
> --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> @@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
>
>  enum amd_sched_priority {
>         AMD_SCHED_PRIORITY_KERNEL = 0,
> +       AMD_SCHED_PRIORITY_HIGH,
>         AMD_SCHED_PRIORITY_NORMAL,
>         AMD_SCHED_MAX_PRIORITY
>  };
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 3961836..702332e 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
>  /* unknown cause */
>  #define AMDGPU_CTX_UNKNOWN_RESET       3
>
> +#define AMDGPU_CTX_FLAG_HIGHPRIORITY   (1 << 0)

Would it be better to expose a priority level rather than a single
flag?  If we want to expose more than just normal/high in the future
for example.

Alex

>  struct drm_amdgpu_ctx_in {
>         /** AMDGPU_CTX_OP_* */
>         __u32   op;
> -       /** For future use, no flags defined so far */
> +       /** AMDGPU_CTX_FLAG_* */
>         __u32   flags;
>         __u32   ctx_id;
>         __u32   _pad;
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]         ` <CADnq5_Mt_-61h6DG77kqepp5psS2JGHJOA-mek513s5HgvtTDA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-01-03 23:00           ` Andres Rodriguez
  2017-01-03 23:02             ` Alex Deucher
  0 siblings, 1 reply; 14+ messages in thread
From: Andres Rodriguez @ 2017-01-03 23:00 UTC (permalink / raw)
  To: Alex Deucher; +Cc: amd-gfx list


[-- Attachment #1.1: Type: text/plain, Size: 5872 bytes --]

I was thinking of that originally, but the allocation context already has a
flags variable which allows us to preserve the IOCTL ABI.

We could reserve a few bits of the flags for a priority level instead if
that sounds good?

Regards,
Andres

On Tue, Jan 3, 2017 at 5:59 PM, Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> On Tue, Jan 3, 2017 at 5:54 PM, Andres Rodriguez <andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> wrote:
> > Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This flag
> > results in the allocated context receiving a higher scheduler priority
> > than other contexts system-wide.
> >
> > Signed-off-by: Andres Rodriguez <andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24
> ++++++++++++++++++------
> >  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
> >  include/uapi/drm/amdgpu_drm.h                 |  3 ++-
> >  3 files changed, 21 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > index 400c66b..ce470e2 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > @@ -25,11 +25,17 @@
> >  #include <drm/drmP.h>
> >  #include "amdgpu.h"
> >
> > -static int amdgpu_ctx_init(struct amdgpu_device *adev, struct
> amdgpu_ctx *ctx)
> > +static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority,
> struct amdgpu_ctx *ctx)
> >  {
> >         unsigned i, j;
> >         int r;
> >
> > +       if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
> > +               return -EINVAL;
> > +
> > +       if (priority == AMD_SCHED_PRIORITY_HIGH &&
> !capable(CAP_SYS_ADMIN))
> > +               return -EACCES;
> > +
> >         memset(ctx, 0, sizeof(*ctx));
> >         ctx->adev = adev;
> >         kref_init(&ctx->refcount);
> > @@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
> struct amdgpu_ctx *ctx)
> >                 struct amdgpu_ring *ring = adev->rings[i];
> >                 struct amd_sched_rq *rq;
> >
> > -               rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
> > +               rq = &ring->sched.sched_rq[priority];
> >                 r = amd_sched_entity_init(&ring->sched,
> &ctx->rings[i].entity,
> >                                           rq, amdgpu_sched_jobs);
> >                 if (r)
> > @@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
> >
> >  static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
> >                             struct amdgpu_fpriv *fpriv,
> > +                           int flags,
> >                             uint32_t *id)
> >  {
> >         struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
> >         struct amdgpu_ctx *ctx;
> > -       int r;
> > +       int r, priority;
> > +
> > +       priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
> > +               AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
> >
> >         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
> >         if (!ctx)
> > @@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device
> *adev,
> >                 kfree(ctx);
> >                 return r;
> >         }
> > +
> >         *id = (uint32_t)r;
> > -       r = amdgpu_ctx_init(adev, ctx);
> > +       r = amdgpu_ctx_init(adev, priority, ctx);
> >         if (r) {
> >                 idr_remove(&mgr->ctx_handles, *id);
> >                 *id = 0;
> > @@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void
> *data,
> >                      struct drm_file *filp)
> >  {
> >         int r;
> > -       uint32_t id;
> > +       uint32_t id, flags;
> >
> >         union drm_amdgpu_ctx *args = data;
> >         struct amdgpu_device *adev = dev->dev_private;
> > @@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void
> *data,
> >
> >         r = 0;
> >         id = args->in.ctx_id;
> > +       flags = args->in.flags;
> >
> >         switch (args->in.op) {
> >         case AMDGPU_CTX_OP_ALLOC_CTX:
> > -               r = amdgpu_ctx_alloc(adev, fpriv, &id);
> > +               r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
> >                 args->out.alloc.ctx_id = id;
> >                 break;
> >         case AMDGPU_CTX_OP_FREE_CTX:
> > diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> > index d8dc681..2e458de 100644
> > --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> > +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> > @@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
> >
> >  enum amd_sched_priority {
> >         AMD_SCHED_PRIORITY_KERNEL = 0,
> > +       AMD_SCHED_PRIORITY_HIGH,
> >         AMD_SCHED_PRIORITY_NORMAL,
> >         AMD_SCHED_MAX_PRIORITY
> >  };
> > diff --git a/include/uapi/drm/amdgpu_drm.h
> b/include/uapi/drm/amdgpu_drm.h
> > index 3961836..702332e 100644
> > --- a/include/uapi/drm/amdgpu_drm.h
> > +++ b/include/uapi/drm/amdgpu_drm.h
> > @@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
> >  /* unknown cause */
> >  #define AMDGPU_CTX_UNKNOWN_RESET       3
> >
> > +#define AMDGPU_CTX_FLAG_HIGHPRIORITY   (1 << 0)
>
> Would it be better to expose a priority level rather than a single
> flag?  If we want to expose more than just normal/high in the future
> for example.
>
> Alex
>
> >  struct drm_amdgpu_ctx_in {
> >         /** AMDGPU_CTX_OP_* */
> >         __u32   op;
> > -       /** For future use, no flags defined so far */
> > +       /** AMDGPU_CTX_FLAG_* */
> >         __u32   flags;
> >         __u32   ctx_id;
> >         __u32   _pad;
> > --
> > 2.7.4
> >
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

[-- Attachment #1.2: Type: text/html, Size: 8067 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
  2017-01-03 23:00           ` Andres Rodriguez
@ 2017-01-03 23:02             ` Alex Deucher
       [not found]               ` <CADnq5_Mu4t4i9prGhZakMdd3cFB5NRWE6zh0zFp05T1Z_WONmQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 14+ messages in thread
From: Alex Deucher @ 2017-01-03 23:02 UTC (permalink / raw)
  To: Andres Rodriguez; +Cc: amd-gfx list

On Tue, Jan 3, 2017 at 6:00 PM, Andres Rodriguez <andresx7@gmail.com> wrote:
> I was thinking of that originally, but the allocation context already has a
> flags variable which allows us to preserve the IOCTL ABI.
>
> We could reserve a few bits of the flags for a priority level instead if
> that sounds good?

We can also use _pad for something else.

Alex

>
> Regards,
> Andres
>
> On Tue, Jan 3, 2017 at 5:59 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>
>> On Tue, Jan 3, 2017 at 5:54 PM, Andres Rodriguez <andresx7@gmail.com>
>> wrote:
>> > Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This flag
>> > results in the allocated context receiving a higher scheduler priority
>> > than other contexts system-wide.
>> >
>> > Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
>> > ---
>> >  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24
>> > ++++++++++++++++++------
>> >  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
>> >  include/uapi/drm/amdgpu_drm.h                 |  3 ++-
>> >  3 files changed, 21 insertions(+), 7 deletions(-)
>> >
>> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> > index 400c66b..ce470e2 100644
>> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> > @@ -25,11 +25,17 @@
>> >  #include <drm/drmP.h>
>> >  #include "amdgpu.h"
>> >
>> > -static int amdgpu_ctx_init(struct amdgpu_device *adev, struct
>> > amdgpu_ctx *ctx)
>> > +static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority,
>> > struct amdgpu_ctx *ctx)
>> >  {
>> >         unsigned i, j;
>> >         int r;
>> >
>> > +       if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
>> > +               return -EINVAL;
>> > +
>> > +       if (priority == AMD_SCHED_PRIORITY_HIGH &&
>> > !capable(CAP_SYS_ADMIN))
>> > +               return -EACCES;
>> > +
>> >         memset(ctx, 0, sizeof(*ctx));
>> >         ctx->adev = adev;
>> >         kref_init(&ctx->refcount);
>> > @@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
>> > struct amdgpu_ctx *ctx)
>> >                 struct amdgpu_ring *ring = adev->rings[i];
>> >                 struct amd_sched_rq *rq;
>> >
>> > -               rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
>> > +               rq = &ring->sched.sched_rq[priority];
>> >                 r = amd_sched_entity_init(&ring->sched,
>> > &ctx->rings[i].entity,
>> >                                           rq, amdgpu_sched_jobs);
>> >                 if (r)
>> > @@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
>> >
>> >  static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>> >                             struct amdgpu_fpriv *fpriv,
>> > +                           int flags,
>> >                             uint32_t *id)
>> >  {
>> >         struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
>> >         struct amdgpu_ctx *ctx;
>> > -       int r;
>> > +       int r, priority;
>> > +
>> > +       priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
>> > +               AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
>> >
>> >         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>> >         if (!ctx)
>> > @@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device
>> > *adev,
>> >                 kfree(ctx);
>> >                 return r;
>> >         }
>> > +
>> >         *id = (uint32_t)r;
>> > -       r = amdgpu_ctx_init(adev, ctx);
>> > +       r = amdgpu_ctx_init(adev, priority, ctx);
>> >         if (r) {
>> >                 idr_remove(&mgr->ctx_handles, *id);
>> >                 *id = 0;
>> > @@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void
>> > *data,
>> >                      struct drm_file *filp)
>> >  {
>> >         int r;
>> > -       uint32_t id;
>> > +       uint32_t id, flags;
>> >
>> >         union drm_amdgpu_ctx *args = data;
>> >         struct amdgpu_device *adev = dev->dev_private;
>> > @@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void
>> > *data,
>> >
>> >         r = 0;
>> >         id = args->in.ctx_id;
>> > +       flags = args->in.flags;
>> >
>> >         switch (args->in.op) {
>> >         case AMDGPU_CTX_OP_ALLOC_CTX:
>> > -               r = amdgpu_ctx_alloc(adev, fpriv, &id);
>> > +               r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
>> >                 args->out.alloc.ctx_id = id;
>> >                 break;
>> >         case AMDGPU_CTX_OP_FREE_CTX:
>> > diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
>> > b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
>> > index d8dc681..2e458de 100644
>> > --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
>> > +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
>> > @@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
>> >
>> >  enum amd_sched_priority {
>> >         AMD_SCHED_PRIORITY_KERNEL = 0,
>> > +       AMD_SCHED_PRIORITY_HIGH,
>> >         AMD_SCHED_PRIORITY_NORMAL,
>> >         AMD_SCHED_MAX_PRIORITY
>> >  };
>> > diff --git a/include/uapi/drm/amdgpu_drm.h
>> > b/include/uapi/drm/amdgpu_drm.h
>> > index 3961836..702332e 100644
>> > --- a/include/uapi/drm/amdgpu_drm.h
>> > +++ b/include/uapi/drm/amdgpu_drm.h
>> > @@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
>> >  /* unknown cause */
>> >  #define AMDGPU_CTX_UNKNOWN_RESET       3
>> >
>> > +#define AMDGPU_CTX_FLAG_HIGHPRIORITY   (1 << 0)
>>
>> Would it be better to expose a priority level rather than a single
>> flag?  If we want to expose more than just normal/high in the future
>> for example.
>>
>> Alex
>>
>> >  struct drm_amdgpu_ctx_in {
>> >         /** AMDGPU_CTX_OP_* */
>> >         __u32   op;
>> > -       /** For future use, no flags defined so far */
>> > +       /** AMDGPU_CTX_FLAG_* */
>> >         __u32   flags;
>> >         __u32   ctx_id;
>> >         __u32   _pad;
>> > --
>> > 2.7.4
>> >
>> > _______________________________________________
>> > amd-gfx mailing list
>> > amd-gfx@lists.freedesktop.org
>> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]               ` <CADnq5_Mu4t4i9prGhZakMdd3cFB5NRWE6zh0zFp05T1Z_WONmQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2017-01-03 23:09                 ` Andres Rodriguez
  0 siblings, 0 replies; 14+ messages in thread
From: Andres Rodriguez @ 2017-01-03 23:09 UTC (permalink / raw)
  To: Alex Deucher; +Cc: amd-gfx list


[-- Attachment #1.1: Type: text/plain, Size: 7207 bytes --]

Cool. Follow up question then:

We can either:
1. Expose a priority number
2. Expose a priority enum, e.g. PRIORITY_HIGH, PRIORITY_NORMAL (to start)

The HW can do numerical priorities, but the SW scheduler would require a
bit of rework to consume arbitrary priorities.

At least for our use case, I don't really see a need for super fine-grained
priorities. Something like HIGH/NORMAL is sufficient, with possibility of
expanding to add REALTIME/LOW. So I think going for an enum is
simpler/better.

yay or nay on enum?


On Tue, Jan 3, 2017 at 6:02 PM, Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> On Tue, Jan 3, 2017 at 6:00 PM, Andres Rodriguez <andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> wrote:
> > I was thinking of that originally, but the allocation context already
> has a
> > flags variable which allows us to preserve the IOCTL ABI.
> >
> > We could reserve a few bits of the flags for a priority level instead if
> > that sounds good?
>
> We can also use _pad for something else.
>
> Alex
>
> >
> > Regards,
> > Andres
> >
> > On Tue, Jan 3, 2017 at 5:59 PM, Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> wrote:
> >>
> >> On Tue, Jan 3, 2017 at 5:54 PM, Andres Rodriguez <andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> >> wrote:
> >> > Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This
> flag
> >> > results in the allocated context receiving a higher scheduler priority
> >> > than other contexts system-wide.
> >> >
> >> > Signed-off-by: Andres Rodriguez <andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> >> > ---
> >> >  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24
> >> > ++++++++++++++++++------
> >> >  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
> >> >  include/uapi/drm/amdgpu_drm.h                 |  3 ++-
> >> >  3 files changed, 21 insertions(+), 7 deletions(-)
> >> >
> >> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> >> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> >> > index 400c66b..ce470e2 100644
> >> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> >> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> >> > @@ -25,11 +25,17 @@
> >> >  #include <drm/drmP.h>
> >> >  #include "amdgpu.h"
> >> >
> >> > -static int amdgpu_ctx_init(struct amdgpu_device *adev, struct
> >> > amdgpu_ctx *ctx)
> >> > +static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority,
> >> > struct amdgpu_ctx *ctx)
> >> >  {
> >> >         unsigned i, j;
> >> >         int r;
> >> >
> >> > +       if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
> >> > +               return -EINVAL;
> >> > +
> >> > +       if (priority == AMD_SCHED_PRIORITY_HIGH &&
> >> > !capable(CAP_SYS_ADMIN))
> >> > +               return -EACCES;
> >> > +
> >> >         memset(ctx, 0, sizeof(*ctx));
> >> >         ctx->adev = adev;
> >> >         kref_init(&ctx->refcount);
> >> > @@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device
> *adev,
> >> > struct amdgpu_ctx *ctx)
> >> >                 struct amdgpu_ring *ring = adev->rings[i];
> >> >                 struct amd_sched_rq *rq;
> >> >
> >> > -               rq = &ring->sched.sched_rq[AMD_
> SCHED_PRIORITY_NORMAL];
> >> > +               rq = &ring->sched.sched_rq[priority];
> >> >                 r = amd_sched_entity_init(&ring->sched,
> >> > &ctx->rings[i].entity,
> >> >                                           rq, amdgpu_sched_jobs);
> >> >                 if (r)
> >> > @@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx
> *ctx)
> >> >
> >> >  static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
> >> >                             struct amdgpu_fpriv *fpriv,
> >> > +                           int flags,
> >> >                             uint32_t *id)
> >> >  {
> >> >         struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
> >> >         struct amdgpu_ctx *ctx;
> >> > -       int r;
> >> > +       int r, priority;
> >> > +
> >> > +       priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
> >> > +               AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
> >> >
> >> >         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
> >> >         if (!ctx)
> >> > @@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device
> >> > *adev,
> >> >                 kfree(ctx);
> >> >                 return r;
> >> >         }
> >> > +
> >> >         *id = (uint32_t)r;
> >> > -       r = amdgpu_ctx_init(adev, ctx);
> >> > +       r = amdgpu_ctx_init(adev, priority, ctx);
> >> >         if (r) {
> >> >                 idr_remove(&mgr->ctx_handles, *id);
> >> >                 *id = 0;
> >> > @@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void
> >> > *data,
> >> >                      struct drm_file *filp)
> >> >  {
> >> >         int r;
> >> > -       uint32_t id;
> >> > +       uint32_t id, flags;
> >> >
> >> >         union drm_amdgpu_ctx *args = data;
> >> >         struct amdgpu_device *adev = dev->dev_private;
> >> > @@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev,
> void
> >> > *data,
> >> >
> >> >         r = 0;
> >> >         id = args->in.ctx_id;
> >> > +       flags = args->in.flags;
> >> >
> >> >         switch (args->in.op) {
> >> >         case AMDGPU_CTX_OP_ALLOC_CTX:
> >> > -               r = amdgpu_ctx_alloc(adev, fpriv, &id);
> >> > +               r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
> >> >                 args->out.alloc.ctx_id = id;
> >> >                 break;
> >> >         case AMDGPU_CTX_OP_FREE_CTX:
> >> > diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> >> > b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> >> > index d8dc681..2e458de 100644
> >> > --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> >> > +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> >> > @@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
> >> >
> >> >  enum amd_sched_priority {
> >> >         AMD_SCHED_PRIORITY_KERNEL = 0,
> >> > +       AMD_SCHED_PRIORITY_HIGH,
> >> >         AMD_SCHED_PRIORITY_NORMAL,
> >> >         AMD_SCHED_MAX_PRIORITY
> >> >  };
> >> > diff --git a/include/uapi/drm/amdgpu_drm.h
> >> > b/include/uapi/drm/amdgpu_drm.h
> >> > index 3961836..702332e 100644
> >> > --- a/include/uapi/drm/amdgpu_drm.h
> >> > +++ b/include/uapi/drm/amdgpu_drm.h
> >> > @@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
> >> >  /* unknown cause */
> >> >  #define AMDGPU_CTX_UNKNOWN_RESET       3
> >> >
> >> > +#define AMDGPU_CTX_FLAG_HIGHPRIORITY   (1 << 0)
> >>
> >> Would it be better to expose a priority level rather than a single
> >> flag?  If we want to expose more than just normal/high in the future
> >> for example.
> >>
> >> Alex
> >>
> >> >  struct drm_amdgpu_ctx_in {
> >> >         /** AMDGPU_CTX_OP_* */
> >> >         __u32   op;
> >> > -       /** For future use, no flags defined so far */
> >> > +       /** AMDGPU_CTX_FLAG_* */
> >> >         __u32   flags;
> >> >         __u32   ctx_id;
> >> >         __u32   _pad;
> >> > --
> >> > 2.7.4
> >> >
> >> > _______________________________________________
> >> > amd-gfx mailing list
> >> > amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> >> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> >
> >
>

[-- Attachment #1.2: Type: text/html, Size: 10718 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]     ` <1483484076-10424-2-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2017-01-03 22:59       ` Alex Deucher
@ 2017-01-04 11:03       ` Sagalovitch, Serguei
       [not found]         ` <SN1PR12MB07031776F52022AF4D659AA6FE610-z7L1TMIYDg6P/i5UxMCIqAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  2017-01-04 11:54       ` Mao, David
  2 siblings, 1 reply; 14+ messages in thread
From: Sagalovitch, Serguei @ 2017-01-04 11:03 UTC (permalink / raw)
  To: Andres Rodriguez, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Andres,

I have on rather generic design question:

Why we want to restrict it to CAP_SYS_ADMIN?

+       if (priority == AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_ADMIN))
+               return -EACCES;

Should we make it generic? My understanding is that If we follow "nice" semantic
then it will not require such privilege. 

Sincerely yours,
Serguei Sagalovitch

    
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]     ` <1483484076-10424-2-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2017-01-03 22:59       ` Alex Deucher
  2017-01-04 11:03       ` Sagalovitch, Serguei
@ 2017-01-04 11:54       ` Mao, David
       [not found]         ` <BN4PR12MB07876FAB038319F8E263B0CCEE610-aH9FTdWx9BancvD3hK8fMAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  2 siblings, 1 reply; 14+ messages in thread
From: Mao, David @ 2017-01-04 11:54 UTC (permalink / raw)
  To: Andres Rodriguez, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi Andres,
I did not follow the previous discussion, so please remind me if my questions have been covered already~
- The priority should be the queue properties, and do we really want to expose high priority on non-compute queue?
- Another question is we may need to do per submission priority tweak if we don't reserve one compute queue ahead of time. 
From the patch, it seems you only tweak the GPU scheduler's priority, but I think it is still insufficient given we don't have OS preemption supported,
 
Thanks. 
Best Regards,
David

-----Original Message-----
From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf Of Andres Rodriguez
Sent: Wednesday, January 4, 2017 6:55 AM
To: amd-gfx@lists.freedesktop.org
Cc: Andres Rodriguez <andresx7@gmail.com>
Subject: [PATCH] drm/amdgpu: add flag for high priority contexts

Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This flag results in the allocated context receiving a higher scheduler priority than other contexts system-wide.

Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24 ++++++++++++++++++------
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
 include/uapi/drm/amdgpu_drm.h                 |  3 ++-
 3 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 400c66b..ce470e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,11 +25,17 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority, 
+struct amdgpu_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
+	if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
+		return -EINVAL;
+
+	if (priority == AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
@@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		struct amdgpu_ring *ring = adev->rings[i];
 		struct amd_sched_rq *rq;
 
-		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+		rq = &ring->sched.sched_rq[priority];
 		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
@@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
+			    int flags,
 			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
 	struct amdgpu_ctx *ctx;
-	int r;
+	int r, priority;
+
+	priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
+		AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		kfree(ctx);
 		return r;
 	}
+
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, ctx);
+	r = amdgpu_ctx_init(adev, priority, ctx);
 	if (r) {
 		idr_remove(&mgr->ctx_handles, *id);
 		*id = 0;
@@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
-	uint32_t id;
+	uint32_t id, flags;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private; @@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
 	r = 0;
 	id = args->in.ctx_id;
+	flags = args->in.flags;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
-		r = amdgpu_ctx_alloc(adev, fpriv, &id);
+		r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index d8dc681..2e458de 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
 
 enum amd_sched_priority {
 	AMD_SCHED_PRIORITY_KERNEL = 0,
+	AMD_SCHED_PRIORITY_HIGH,
 	AMD_SCHED_PRIORITY_NORMAL,
 	AMD_SCHED_MAX_PRIORITY
 };
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 3961836..702332e 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
 /* unknown cause */
 #define AMDGPU_CTX_UNKNOWN_RESET	3
 
+#define AMDGPU_CTX_FLAG_HIGHPRIORITY	(1 << 0)
 struct drm_amdgpu_ctx_in {
 	/** AMDGPU_CTX_OP_* */
 	__u32	op;
-	/** For future use, no flags defined so far */
+	/** AMDGPU_CTX_FLAG_* */
 	__u32	flags;
 	__u32	ctx_id;
 	__u32	_pad;
--
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]         ` <SN1PR12MB07031776F52022AF4D659AA6FE610-z7L1TMIYDg6P/i5UxMCIqAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2017-01-04 12:56           ` Christian König
       [not found]             ` <19988121-36b8-00e4-afc9-bb9f8a0621e4-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 14+ messages in thread
From: Christian König @ 2017-01-04 12:56 UTC (permalink / raw)
  To: Sagalovitch, Serguei, Andres Rodriguez,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 04.01.2017 um 12:03 schrieb Sagalovitch, Serguei:
> Andres,
>
> I have on rather generic design question:
>
> Why we want to restrict it to CAP_SYS_ADMIN?
>
> +       if (priority == AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_ADMIN))
> +               return -EACCES;
>
> Should we make it generic? My understanding is that If we follow "nice" semantic
> then it will not require such privilege.

Well it follows the "nice" semantic, per the documentation of the nice 
system call:

        nice()  adds inc to the nice value for the calling process. (A 
higher nice value means a low priority.)  Only the superuser may specify 
a negative increment, or priority increase.  The range
        for nice values is described in getpriority(2).

Of course the nice limit is more fine grained these days. IIRC it was a 
soft resource limit now the last time I looked.

We would essentially need something similar for the GPU if we want to 
allow a regular process to get a higher priority.

Regards,
Christian.

>
> Sincerely yours,
> Serguei Sagalovitch
>
>      
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]         ` <BN4PR12MB07876FAB038319F8E263B0CCEE610-aH9FTdWx9BancvD3hK8fMAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2017-01-04 20:51           ` Andres Rodriguez
       [not found]             ` <edb8b730-35ce-1df2-4919-39c868224b8d-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 14+ messages in thread
From: Andres Rodriguez @ 2017-01-04 20:51 UTC (permalink / raw)
  To: Mao, David, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi David,


On 2017-01-04 06:54 AM, Mao, David wrote:
> Hi Andres,
> I did not follow the previous discussion, so please remind me if my questions have been covered already~
> - The priority should be the queue properties, and do we really want to expose high priority on none-compute queue?
I think exposing the concept across all queues is good for consistency. It 
will probably end up staying as a SW-scheduler feature for all queues 
except for compute.

However, if we do end up getting some HW features that enable priority 
support on other engines, then the API will be ready, and we'll only 
need a kernel side change to enable the support.

> - Another question is we may need to do per submission priority tweak if we don't reserve one compute queue ahead of time.
>  From the patch, it seems you only tweak the GPU scheduler's priority, but I think it is still insufficient given we don't have OS preemption supported,
Correct, the current patch is just to get an environment in place to 
start doing measurements. With this framework in place we can start 
adding HW support to increase the priorities.

Dynamic tweaking might be necessary depending on how CU reservation is 
going to work.

>   
> Thanks.
> Best Regards,
> David
>
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf Of Andres Rodriguez
> Sent: Wednesday, January 4, 2017 6:55 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Andres Rodriguez <andresx7@gmail.com>
> Subject: [PATCH] drm/amdgpu: add flag for high priority contexts
>
> Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This flag results in the allocated context receiving a higher scheduler priority that other contexts system-wide.
>
> Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24 ++++++++++++++++++------
>   drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
>   include/uapi/drm/amdgpu_drm.h                 |  3 ++-
>   3 files changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 400c66b..ce470e2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -25,11 +25,17 @@
>   #include <drm/drmP.h>
>   #include "amdgpu.h"
>   
> -static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
> +static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority,
> +struct amdgpu_ctx *ctx)
>   {
>   	unsigned i, j;
>   	int r;
>   
> +	if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
> +		return -EINVAL;
> +
> +	if (priority == AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_ADMIN))
> +		return -EACCES;
> +
>   	memset(ctx, 0, sizeof(*ctx));
>   	ctx->adev = adev;
>   	kref_init(&ctx->refcount);
> @@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
>   		struct amdgpu_ring *ring = adev->rings[i];
>   		struct amd_sched_rq *rq;
>   
> -		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
> +		rq = &ring->sched.sched_rq[priority];
>   		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
>   					  rq, amdgpu_sched_jobs);
>   		if (r)
> @@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
>   
>   static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>   			    struct amdgpu_fpriv *fpriv,
> +			    int flags,
>   			    uint32_t *id)
>   {
>   	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
>   	struct amdgpu_ctx *ctx;
> -	int r;
> +	int r, priority;
> +
> +	priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
> +		AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
>   
>   	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>   	if (!ctx)
> @@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>   		kfree(ctx);
>   		return r;
>   	}
> +
>   	*id = (uint32_t)r;
> -	r = amdgpu_ctx_init(adev, ctx);
> +	r = amdgpu_ctx_init(adev, priority, ctx);
>   	if (r) {
>   		idr_remove(&mgr->ctx_handles, *id);
>   		*id = 0;
> @@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
>   		     struct drm_file *filp)
>   {
>   	int r;
> -	uint32_t id;
> +	uint32_t id, flags;
>   
>   	union drm_amdgpu_ctx *args = data;
>   	struct amdgpu_device *adev = dev->dev_private; @@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
>   
>   	r = 0;
>   	id = args->in.ctx_id;
> +	flags = args->in.flags;
>   
>   	switch (args->in.op) {
>   	case AMDGPU_CTX_OP_ALLOC_CTX:
> -		r = amdgpu_ctx_alloc(adev, fpriv, &id);
> +		r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
>   		args->out.alloc.ctx_id = id;
>   		break;
>   	case AMDGPU_CTX_OP_FREE_CTX:
> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> index d8dc681..2e458de 100644
> --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> @@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
>   
>   enum amd_sched_priority {
>   	AMD_SCHED_PRIORITY_KERNEL = 0,
> +	AMD_SCHED_PRIORITY_HIGH,
>   	AMD_SCHED_PRIORITY_NORMAL,
>   	AMD_SCHED_MAX_PRIORITY
>   };
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 3961836..702332e 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
>   /* unknown cause */
>   #define AMDGPU_CTX_UNKNOWN_RESET	3
>   
> +#define AMDGPU_CTX_FLAG_HIGHPRIORITY	(1 << 0)
>   struct drm_amdgpu_ctx_in {
>   	/** AMDGPU_CTX_OP_* */
>   	__u32	op;
> -	/** For future use, no flags defined so far */
> +	/** AMDGPU_CTX_FLAG_* */
>   	__u32	flags;
>   	__u32	ctx_id;
>   	__u32	_pad;
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]             ` <19988121-36b8-00e4-afc9-bb9f8a0621e4-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-01-04 20:56               ` Andres Rodriguez
       [not found]                 ` <b4f3b2df-5b32-b7c4-9289-acf3111f6fdf-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 14+ messages in thread
From: Andres Rodriguez @ 2017-01-04 20:56 UTC (permalink / raw)
  To: Christian König, Sagalovitch, Serguei,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hey Serguei


On 2017-01-04 07:56 AM, Christian König wrote:
> Am 04.01.2017 um 12:03 schrieb Sagalovitch, Serguei:
>> Andres,
>>
>> I have on rather generic design question:
>>
>> Why we want to restrict it to CAP_SYS_ADMIN?
>>
>> +       if (priority == AMD_SCHED_PRIORITY_HIGH && 
>> !capable(CAP_SYS_ADMIN))
>> +               return -EACCES;
>>
>> Should we make it generic? My understanding is that If we follow 
>> "nice" semantic
>> then it will not require such privilege.
>
> Well it follows the "nice" semantic, the the documentation of the nice 
> system call:
>
>        nice()  adds inc to the nice value for the calling process. (A 
> higher nice value means a low priority.)  Only the superuser may 
> specify a negative increment, or priority increase.  The range
>        for nice values is described in getpriority(2).
>
> Of course the nice limit is more fine grained these days. IIRC it was 
> a soft resource limit now the last time I looked.
>
> We would essentially need something similar for the GPU if we want to 
> allow a regular process to get a higher priority.

As Christian mentioned this is based on the same restrictions as nice.

Also thinking along the lines of, it would be easier in the future to 
relax requirements than to increase them.

If we end up in a scenario where we want looser restrictions, that can 
be implemented as a new feature.

However, the opposite is not true. If we require to tighten restrictions 
that would result in a backwards compatibility breakage, and the patch 
would not be allowed. Apps that used to have access no longer have access.

Regards,
Andres

>
> Regards,
> Christian.
>
>>
>> Sincerely yours,
>> Serguei Sagalovitch
>>
>>      _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]             ` <edb8b730-35ce-1df2-4919-39c868224b8d-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2017-01-05  7:15               ` Michel Dänzer
       [not found]                 ` <296af036-da31-72c6-4b4e-718ce0cf27bc-otUistvHUpPR7s880joybQ@public.gmane.org>
  0 siblings, 1 reply; 14+ messages in thread
From: Michel Dänzer @ 2017-01-05  7:15 UTC (permalink / raw)
  To: Andres Rodriguez, Mao, David; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 05/01/17 05:51 AM, Andres Rodriguez wrote:
> On 2017-01-04 06:54 AM, Mao, David wrote:
>> Hi Andres,
>> I did not follow the previous discussion, so please remind me if my
>> questions have been covered already~
>> - The priority should be the queue properties, and do we really want
>> to expose high priority on none-compute queue?
> I exposing the concept across all queues is good for consistency. It
> will probably end up staying as a SW-scheduler feature for all queues
> except for compute.
> 
> However, if we do end up getting some HW features that enable priority
> support on other engines, then the API will be ready, and we'll only
> need a kernel side change to enable the support.
> 
>> - Another question is we may need to do per submission priority tweak
>> if we don't reserve one compute queue ahead of time.
>>  From the patch, it seems you only tweak the GPU scheduler's priority,
>> but I think it is still insufficient given we don't have OS preemption
>> supported,
> Correct, the current patch is just to get an environment in place to
> start doing measurements. With this framework in place we can start
> adding HW support to increase the priorities.
> 
> Dynamic tweaking might be necessary depending on how CU reservation is
> going to work.

It sounds like it's still too early to tell if this simple priority
mechanism will end up being useful. It's probably better to hold off on
merging this upstream until you have at least a proof of concept showing
that it's actually useful in practice.


-- 
Earthling Michel Dänzer               |               http://www.amd.com
Libre software enthusiast             |             Mesa and X developer
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]                 ` <b4f3b2df-5b32-b7c4-9289-acf3111f6fdf-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2017-01-05  9:08                   ` Christian König
  0 siblings, 0 replies; 14+ messages in thread
From: Christian König @ 2017-01-05  9:08 UTC (permalink / raw)
  To: Andres Rodriguez, Sagalovitch, Serguei,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 04.01.2017 um 21:56 schrieb Andres Rodriguez:
> Hey Serguei
>
>
> On 2017-01-04 07:56 AM, Christian König wrote:
>> Am 04.01.2017 um 12:03 schrieb Sagalovitch, Serguei:
>>> Andres,
>>>
>>> I have on rather generic design question:
>>>
>>> Why we want to restrict it to CAP_SYS_ADMIN?
>>>
>>> +       if (priority == AMD_SCHED_PRIORITY_HIGH && 
>>> !capable(CAP_SYS_ADMIN))
>>> +               return -EACCES;
>>>
>>> Should we make it generic? My understanding is that If we follow 
>>> "nice" semantic
>>> then it will not require such privilege.
>>
>> Well it follows the "nice" semantic, the the documentation of the 
>> nice system call:
>>
>>        nice()  adds inc to the nice value for the calling process. (A 
>> higher nice value means a low priority.)  Only the superuser may 
>> specify a negative increment, or priority increase.  The range
>>        for nice values is described in getpriority(2).
>>
>> Of course the nice limit is more fine grained these days. IIRC it was 
>> a soft resource limit now the last time I looked.
>>
>> We would essentially need something similar for the GPU if we want to 
>> allow a regular process to get a higher priority.
>
> As Christian mentioned this is based on the same restrictions as nice.
>
> Also thinking along the lines of, it would be easier in the future to 
> relax requirements than to increase them.

That is a rather interesting idea. Instead of increasing the priority 
for the compositor we could decrease it for everybody else. We could do 
this by setting an environment variable for the logged-in user, for example.

This would be similar to how nice() handles it and could avoid some of 
the security problems.

>
> If we end up in a scenario where we want looser restrictions, that can 
> be implemented as a new feature.
>
> However, the opposite is not true. If we require to tighten 
> restrictions that would result in an backwards compatibility breakage, 
> and the patch would not be allowed. Apps that used to have access no 
> longer have access.

Yes exactly. As soon as an interface gets upstream it is nailed in stone.

This means that we can't break existing userspace by adding more 
restrictions, but we can easily lower restrictions.

Regards,
Christian.

>
> Regards,
> Andres
>
>>
>> Regards,
>> Christian.
>>
>>>
>>> Sincerely yours,
>>> Serguei Sagalovitch
>>>
>>>      _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drm/amdgpu: add flag for high priority contexts
       [not found]                 ` <296af036-da31-72c6-4b4e-718ce0cf27bc-otUistvHUpPR7s880joybQ@public.gmane.org>
@ 2017-01-05 19:42                   ` Andres Rodriguez
  0 siblings, 0 replies; 14+ messages in thread
From: Andres Rodriguez @ 2017-01-05 19:42 UTC (permalink / raw)
  To: Michel Dänzer, Mao, David; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hey Michel,

Sounds fair. I don't think we are in a rush to get this merged until it 
actually can provide the guarantees that we need.

I've gotten some good feedback on code change improvements, and that 
should be enough to get me to the next stage with HW support.

I'll send an updated patchset when I get new data on that.

Regards,
Andres

On 2017-01-05 02:15 AM, Michel Dänzer wrote:
> On 05/01/17 05:51 AM, Andres Rodriguez wrote:
>> On 2017-01-04 06:54 AM, Mao, David wrote:
>>> Hi Andres,
>>> I did not follow the previous discussion, so please remind me if my
>>> questions have been covered already~
>>> - The priority should be the queue properties, and do we really want
>>> to expose high priority on none-compute queue?
>> I exposing the concept across all queues is good for consistency. It
>> will probably end up staying as a SW-scheduler feature for all queues
>> except for compute.
>>
>> However, if we do end up getting some HW features that enable priority
>> support on other engines, then the API will be ready, and we'll only
>> need a kernel side change to enable the support.
>>
>>> - Another question is we may need to do per submission priority tweak
>>> if we don't reserve one compute queue ahead of time.
>>>   From the patch, it seems you only tweak the GPU scheduler's priority,
>>> but I think it is still insufficient given we don't have OS preemption
>>> supported,
>> Correct, the current patch is just to get an environment in place to
>> start doing measurements. With this framework in place we can start
>> adding HW support to increase the priorities.
>>
>> Dynamic tweaking might be necessary depending on how CU reservation is
>> going to work.
> It sounds like it's still too early to tell if this simple priority
> mechanism will end up being useful. It's probably better to hold off on
> merging this upstream until you have at least a proof of concept showing
> that it's actually useful in practice.
>
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2017-01-05 19:42 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-01-03 22:54 [PATCH] High priority usermode contexts Andres Rodriguez
     [not found] ` <1483484076-10424-1-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-01-03 22:54   ` [PATCH] drm/amdgpu: add flag for high priority contexts Andres Rodriguez
     [not found]     ` <1483484076-10424-2-git-send-email-andresx7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-01-03 22:59       ` Alex Deucher
     [not found]         ` <CADnq5_Mt_-61h6DG77kqepp5psS2JGHJOA-mek513s5HgvtTDA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-01-03 23:00           ` Andres Rodriguez
2017-01-03 23:02             ` Alex Deucher
     [not found]               ` <CADnq5_Mu4t4i9prGhZakMdd3cFB5NRWE6zh0zFp05T1Z_WONmQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2017-01-03 23:09                 ` Andres Rodriguez
2017-01-04 11:03       ` Sagalovitch, Serguei
     [not found]         ` <SN1PR12MB07031776F52022AF4D659AA6FE610-z7L1TMIYDg6P/i5UxMCIqAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2017-01-04 12:56           ` Christian König
     [not found]             ` <19988121-36b8-00e4-afc9-bb9f8a0621e4-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-01-04 20:56               ` Andres Rodriguez
     [not found]                 ` <b4f3b2df-5b32-b7c4-9289-acf3111f6fdf-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-01-05  9:08                   ` Christian König
2017-01-04 11:54       ` Mao, David
     [not found]         ` <BN4PR12MB07876FAB038319F8E263B0CCEE610-aH9FTdWx9BancvD3hK8fMAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2017-01-04 20:51           ` Andres Rodriguez
     [not found]             ` <edb8b730-35ce-1df2-4919-39c868224b8d-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-01-05  7:15               ` Michel Dänzer
     [not found]                 ` <296af036-da31-72c6-4b4e-718ce0cf27bc-otUistvHUpPR7s880joybQ@public.gmane.org>
2017-01-05 19:42                   ` Andres Rodriguez

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.