From mboxrd@z Thu Jan 1 00:00:00 1970 From: John.C.Harrison@Intel.com Subject: [RFC 34/44] drm/i915: Added scheduler queue throttling by DRM file handle Date: Thu, 26 Jun 2014 18:24:25 +0100 Message-ID: <1403803475-16337-35-git-send-email-John.C.Harrison@Intel.com> References: <1403803475-16337-1-git-send-email-John.C.Harrison@Intel.com> Mime-Version: 1.0 Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Return-path: Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by gabe.freedesktop.org (Postfix) with ESMTP id 6316B6E24E for ; Thu, 26 Jun 2014 10:25:50 -0700 (PDT) In-Reply-To: <1403803475-16337-1-git-send-email-John.C.Harrison@Intel.com> List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" To: Intel-GFX@lists.freedesktop.org List-Id: intel-gfx@lists.freedesktop.org From: John Harrison The scheduler decouples the submission of batch buffers to the driver from their subsequent submission to the hardware. This means that an application which is continuously submitting buffers as fast as it can could potentially flood the driver. To prevent this, the driver now tracks how many buffers are in progress (queued in software or executing in hardware) and limits this to a given (tunable) number. If this number is exceeded then the queue to the driver will return EAGAIN and thus prevent the scheduler's queue becoming arbitrarily large. 
--- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12 +++++++++++ drivers/gpu/drm/i915/i915_scheduler.c | 32 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_scheduler.h | 5 +++++ 4 files changed, 51 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4d52c67..872e869 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1785,6 +1785,8 @@ struct drm_i915_file_private { atomic_t rps_wait_boost; struct intel_engine_cs *bsd_ring; + + u32 scheduler_queue_length; }; /* diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index bf19e02..3227a39 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1614,6 +1614,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } +#ifdef CONFIG_DRM_I915_SCHEDULER + /* Throttle batch requests per device file */ + if (i915_scheduler_file_queue_is_full(file)) + return -EAGAIN; +#endif + /* Copy in the exec list from userland */ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); @@ -1702,6 +1708,12 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -EINVAL; } +#ifdef CONFIG_DRM_I915_SCHEDULER + /* Throttle batch requests per device file */ + if (i915_scheduler_file_queue_is_full(file)) + return -EAGAIN; +#endif + exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); if (exec2_list == NULL) diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 6d0f4cb..6782249 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -61,6 +61,7 @@ int i915_scheduler_init(struct drm_device *dev) scheduler->priority_level_max = ~0U; scheduler->priority_level_preempt = 900; 
scheduler->min_flying = 2; + scheduler->file_queue_max = 64; dev_priv->scheduler = scheduler; @@ -211,6 +212,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe) list_add_tail(&node->link, &scheduler->node_queue[ring->id]); + i915_scheduler_file_queue_inc(node->params.file); + if (i915.scheduler_override & i915_so_submit_on_queue) not_flying = true; else @@ -530,6 +533,12 @@ int i915_scheduler_remove(struct intel_engine_cs *ring) /* Strip the dependency info while the mutex is still locked */ i915_scheduler_remove_dependent(scheduler, node); + /* Likewise clean up the file descriptor before it might disappear. */ + if (node->params.file) { + i915_scheduler_file_queue_dec(node->params.file); + node->params.file = NULL; + } + continue; } @@ -1079,6 +1088,29 @@ bool i915_scheduler_is_idle(struct intel_engine_cs *ring) return true; } +bool i915_scheduler_file_queue_is_full(struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *dev_priv = file_priv->dev_priv; + struct i915_scheduler *scheduler = dev_priv->scheduler; + + return (file_priv->scheduler_queue_length >= scheduler->file_queue_max); +} + +void i915_scheduler_file_queue_inc(struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + file_priv->scheduler_queue_length++; +} + +void i915_scheduler_file_queue_dec(struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + file_priv->scheduler_queue_length--; +} + #else /* CONFIG_DRM_I915_SCHEDULER */ int i915_scheduler_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index e824e700..78a92c9 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -112,6 +112,7 @@ struct i915_scheduler { uint32_t priority_level_max; uint32_t priority_level_preempt; uint32_t min_flying; + uint32_t file_queue_max; }; /* Flag 
bits for i915_scheduler::flags */ @@ -149,6 +150,10 @@ int i915_scheduler_priority_bump(struct i915_scheduler *scheduler, bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring, uint32_t seqno, bool *completed); +bool i915_scheduler_file_queue_is_full(struct drm_file *file); +void i915_scheduler_file_queue_inc(struct drm_file *file); +void i915_scheduler_file_queue_dec(struct drm_file *file); + #endif /* CONFIG_DRM_I915_SCHEDULER */ int i915_gem_do_execbuffer_final(struct i915_execbuffer_params *params); -- 1.7.9.5