intel-gfx.lists.freedesktop.org archive mirror
* [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
@ 2021-07-21 18:32 Daniel Vetter
  2021-07-21 20:17 ` Jason Ekstrand
                   ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: Daniel Vetter @ 2021-07-21 18:32 UTC (permalink / raw)
  To: DRI Development
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development, Daniel Vetter

This essentially reverts

commit 84a1074920523430f9dc30ff907f4801b4820072
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Jan 24 11:36:08 2018 +0000

    drm/i915: Shrink the GEM kmem_caches upon idling

mm/vmscan.c:do_shrink_slab() is a thing; if there's an issue with it,
then we need to fix it there, not hand-roll our own slab shrinking
code in i915.

Noticed while reviewing a patch set from Jason to fix up some issues
in our i915_init() and i915_exit() module load/cleanup code. Now that
i915_globals.c isn't any different than normal init/exit functions, we
should convert them over to one unified table and remove
i915_globals.[hc] entirely.
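
Roughly, the end state could look something like this (just a sketch
with made-up names, and assuming the per-file exit hooks get exported
next to the existing *_init() constructors in i915_globals.h):

	static const struct {
		int (*init)(void);
		void (*exit)(void);
	} init_funcs[] = {
		{ i915_global_active_init, i915_global_active_exit },
		{ i915_global_context_init, i915_global_context_exit },
		/* ... one entry per kmem_cache owner ... */
	};

	static int __init i915_init_subsystems(void)
	{
		int i, err;

		for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
			err = init_funcs[i].init();
			if (err) {
				/* unwind whatever already succeeded */
				while (i--)
					init_funcs[i].exit();
				return err;
			}
		}

		return 0;
	}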

Cc: David Airlie <airlied@linux.ie>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
 drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
 drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
 drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
 drivers/gpu/drm/i915/i915_active.c          |  6 --
 drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
 drivers/gpu/drm/i915/i915_globals.h         |  3 -
 drivers/gpu/drm/i915/i915_request.c         |  7 --
 drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
 drivers/gpu/drm/i915/i915_vma.c             |  6 --
 10 files changed, 146 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 7d6f52d8a801..bf2a2319353a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
 #include "selftests/i915_gem_context.c"
 #endif
 
-static void i915_global_gem_context_shrink(void)
-{
-	kmem_cache_shrink(global.slab_luts);
-}
-
 static void i915_global_gem_context_exit(void)
 {
 	kmem_cache_destroy(global.slab_luts);
 }
 
 static struct i915_global_gem_context global = { {
-	.shrink = i915_global_gem_context_shrink,
 	.exit = i915_global_gem_context_exit,
 } };
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 9da7b288b7ed..5c21cff33199 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
-static void i915_global_objects_shrink(void)
-{
-	kmem_cache_shrink(global.slab_objects);
-}
-
 static void i915_global_objects_exit(void)
 {
 	kmem_cache_destroy(global.slab_objects);
 }
 
 static struct i915_global_object global = { {
-	.shrink = i915_global_objects_shrink,
 	.exit = i915_global_objects_exit,
 } };
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index bd63813c8a80..c1338441cc1d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
 	i915_active_fini(&ce->active);
 }
 
-static void i915_global_context_shrink(void)
-{
-	kmem_cache_shrink(global.slab_ce);
-}
-
 static void i915_global_context_exit(void)
 {
 	kmem_cache_destroy(global.slab_ce);
 }
 
 static struct i915_global_context global = { {
-	.shrink = i915_global_context_shrink,
 	.exit = i915_global_context_exit,
 } };
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index aef3084e8b16..d86825437516 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
 
 	GT_TRACE(gt, "\n");
 
-	i915_globals_unpark();
-
 	/*
 	 * It seems that the DMC likes to transition between the DC states a lot
 	 * when there are no connected displays (no active power domains) during
@@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
 	GEM_BUG_ON(!wakeref);
 	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
-	i915_globals_park();
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index b1aa1c482c32..91723123ae9f 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
 #include "selftests/i915_active.c"
 #endif
 
-static void i915_global_active_shrink(void)
-{
-	kmem_cache_shrink(global.slab_cache);
-}
-
 static void i915_global_active_exit(void)
 {
 	kmem_cache_destroy(global.slab_cache);
 }
 
 static struct i915_global_active global = { {
-	.shrink = i915_global_active_shrink,
 	.exit = i915_global_active_exit,
 } };
 
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 77f1911c463b..7fe2e503897b 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -17,61 +17,8 @@
 
 static LIST_HEAD(globals);
 
-static atomic_t active;
-static atomic_t epoch;
-static struct park_work {
-	struct delayed_work work;
-	struct rcu_head rcu;
-	unsigned long flags;
-#define PENDING 0
-	int epoch;
-} park;
-
-static void i915_globals_shrink(void)
-{
-	struct i915_global *global;
-
-	/*
-	 * kmem_cache_shrink() discards empty slabs and reorders partially
-	 * filled slabs to prioritise allocating from the mostly full slabs,
-	 * with the aim of reducing fragmentation.
-	 */
-	list_for_each_entry(global, &globals, link)
-		global->shrink();
-}
-
-static void __i915_globals_grace(struct rcu_head *rcu)
-{
-	/* Ratelimit parking as shrinking is quite slow */
-	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
-}
-
-static void __i915_globals_queue_rcu(void)
-{
-	park.epoch = atomic_inc_return(&epoch);
-	if (!atomic_read(&active)) {
-		init_rcu_head(&park.rcu);
-		call_rcu(&park.rcu, __i915_globals_grace);
-	}
-}
-
-static void __i915_globals_park(struct work_struct *work)
-{
-	destroy_rcu_head(&park.rcu);
-
-	/* Confirm nothing woke up in the last grace period */
-	if (park.epoch != atomic_read(&epoch)) {
-		__i915_globals_queue_rcu();
-		return;
-	}
-
-	clear_bit(PENDING, &park.flags);
-	i915_globals_shrink();
-}
-
 void __init i915_global_register(struct i915_global *global)
 {
-	GEM_BUG_ON(!global->shrink);
 	GEM_BUG_ON(!global->exit);
 
 	list_add_tail(&global->link, &globals);
@@ -109,52 +56,10 @@ int __init i915_globals_init(void)
 		}
 	}
 
-	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
 	return 0;
 }
 
-void i915_globals_park(void)
-{
-	/*
-	 * Defer shrinking the global slab caches (and other work) until
-	 * after a RCU grace period has completed with no activity. This
-	 * is to try and reduce the latency impact on the consumers caused
-	 * by us shrinking the caches the same time as they are trying to
-	 * allocate, with the assumption being that if we idle long enough
-	 * for an RCU grace period to elapse since the last use, it is likely
-	 * to be longer until we need the caches again.
-	 */
-	if (!atomic_dec_and_test(&active))
-		return;
-
-	/* Queue cleanup after the next RCU grace period has freed slabs */
-	if (!test_and_set_bit(PENDING, &park.flags))
-		__i915_globals_queue_rcu();
-}
-
-void i915_globals_unpark(void)
-{
-	atomic_inc(&epoch);
-	atomic_inc(&active);
-}
-
-static void __exit __i915_globals_flush(void)
-{
-	atomic_inc(&active); /* skip shrinking */
-
-	rcu_barrier(); /* wait for the work to be queued */
-	flush_delayed_work(&park.work);
-
-	atomic_dec(&active);
-}
-
 void __exit i915_globals_exit(void)
 {
-	GEM_BUG_ON(atomic_read(&active));
-
-	__i915_globals_flush();
 	__i915_globals_cleanup();
-
-	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
-	rcu_barrier();
 }
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 2d199f411a4a..9e6b4fd07528 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
 struct i915_global {
 	struct list_head link;
 
-	i915_global_func_t shrink;
 	i915_global_func_t exit;
 };
 
 void i915_global_register(struct i915_global *global);
 
 int i915_globals_init(void);
-void i915_globals_park(void);
-void i915_globals_unpark(void);
 void i915_globals_exit(void);
 
 /* constructors */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 09ebea9a0090..d3de9f60e03a 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
 #include "selftests/i915_request.c"
 #endif
 
-static void i915_global_request_shrink(void)
-{
-	kmem_cache_shrink(global.slab_execute_cbs);
-	kmem_cache_shrink(global.slab_requests);
-}
-
 static void i915_global_request_exit(void)
 {
 	kmem_cache_destroy(global.slab_execute_cbs);
@@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
 }
 
 static struct i915_global_request global = { {
-	.shrink = i915_global_request_shrink,
 	.exit = i915_global_request_exit,
 } };
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 3a58a9130309..561c649e59f7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
 	return sched_engine;
 }
 
-static void i915_global_scheduler_shrink(void)
-{
-	kmem_cache_shrink(global.slab_dependencies);
-	kmem_cache_shrink(global.slab_priorities);
-}
-
 static void i915_global_scheduler_exit(void)
 {
 	kmem_cache_destroy(global.slab_dependencies);
@@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
 }
 
 static struct i915_global_scheduler global = { {
-	.shrink = i915_global_scheduler_shrink,
 	.exit = i915_global_scheduler_exit,
 } };
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 5b9dce0f443b..09a7c47926f7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
 #include "selftests/i915_vma.c"
 #endif
 
-static void i915_global_vma_shrink(void)
-{
-	kmem_cache_shrink(global.slab_vmas);
-}
-
 static void i915_global_vma_exit(void)
 {
 	kmem_cache_destroy(global.slab_vmas);
 }
 
 static struct i915_global_vma global = { {
-	.shrink = i915_global_vma_shrink,
 	.exit = i915_global_vma_exit,
 } };
 
-- 
2.32.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 18:32 [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure Daniel Vetter
@ 2021-07-21 20:17 ` Jason Ekstrand
  2021-07-21 20:24   ` Daniel Vetter
  2021-07-23 15:42   ` Tvrtko Ursulin
  2021-07-21 20:46 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
                   ` (3 subsequent siblings)
  4 siblings, 2 replies; 13+ messages in thread
From: Jason Ekstrand @ 2021-07-21 20:17 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development, DRI Development

On Wed, Jul 21, 2021 at 1:32 PM Daniel Vetter <daniel.vetter@ffwll.ch> wrote:
>
> This essentially reverts
>
> commit 84a1074920523430f9dc30ff907f4801b4820072
> Author: Chris Wilson <chris@chris-wilson.co.uk>
> Date:   Wed Jan 24 11:36:08 2018 +0000
>
>     drm/i915: Shrink the GEM kmem_caches upon idling
>
> mm/vmscan.c:do_shrink_slab() is a thing; if there's an issue with it,
> then we need to fix it there, not hand-roll our own slab shrinking
> code in i915.
>
> Noticed while reviewing a patch set from Jason to fix up some issues
> in our i915_init() and i915_exit() module load/cleanup code. Now that
> i915_globals.c isn't any different than normal init/exit functions, we
> should convert them over to one unified table and remove
> i915_globals.[hc] entirely.

Mind throwing in a comment somewhere about how i915 is one of only two
users of kmem_cache_shrink() in the entire kernel?  That also seems to
be pretty good evidence that it's not useful.

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

Feel free to land at will and I'll deal with merge conflicts on my end.

> Cc: David Airlie <airlied@linux.ie>
> Cc: Jason Ekstrand <jason@jlekstrand.net>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
>  drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
>  drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
>  drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
>  drivers/gpu/drm/i915/i915_active.c          |  6 --
>  drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
>  drivers/gpu/drm/i915/i915_globals.h         |  3 -
>  drivers/gpu/drm/i915/i915_request.c         |  7 --
>  drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
>  drivers/gpu/drm/i915/i915_vma.c             |  6 --
>  10 files changed, 146 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index 7d6f52d8a801..bf2a2319353a 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
>  #include "selftests/i915_gem_context.c"
>  #endif
>
> -static void i915_global_gem_context_shrink(void)
> -{
> -       kmem_cache_shrink(global.slab_luts);
> -}
> -
>  static void i915_global_gem_context_exit(void)
>  {
>         kmem_cache_destroy(global.slab_luts);
>  }
>
>  static struct i915_global_gem_context global = { {
> -       .shrink = i915_global_gem_context_shrink,
>         .exit = i915_global_gem_context_exit,
>  } };
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 9da7b288b7ed..5c21cff33199 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
>         INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
>  }
>
> -static void i915_global_objects_shrink(void)
> -{
> -       kmem_cache_shrink(global.slab_objects);
> -}
> -
>  static void i915_global_objects_exit(void)
>  {
>         kmem_cache_destroy(global.slab_objects);
>  }
>
>  static struct i915_global_object global = { {
> -       .shrink = i915_global_objects_shrink,
>         .exit = i915_global_objects_exit,
>  } };
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index bd63813c8a80..c1338441cc1d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
>         i915_active_fini(&ce->active);
>  }
>
> -static void i915_global_context_shrink(void)
> -{
> -       kmem_cache_shrink(global.slab_ce);
> -}
> -
>  static void i915_global_context_exit(void)
>  {
>         kmem_cache_destroy(global.slab_ce);
>  }
>
>  static struct i915_global_context global = { {
> -       .shrink = i915_global_context_shrink,
>         .exit = i915_global_context_exit,
>  } };
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> index aef3084e8b16..d86825437516 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
>
>         GT_TRACE(gt, "\n");
>
> -       i915_globals_unpark();
> -
>         /*
>          * It seems that the DMC likes to transition between the DC states a lot
>          * when there are no connected displays (no active power domains) during
> @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
>         GEM_BUG_ON(!wakeref);
>         intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
>
> -       i915_globals_park();
> -
>         return 0;
>  }
>
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index b1aa1c482c32..91723123ae9f 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
>  #include "selftests/i915_active.c"
>  #endif
>
> -static void i915_global_active_shrink(void)
> -{
> -       kmem_cache_shrink(global.slab_cache);
> -}
> -
>  static void i915_global_active_exit(void)
>  {
>         kmem_cache_destroy(global.slab_cache);
>  }
>
>  static struct i915_global_active global = { {
> -       .shrink = i915_global_active_shrink,
>         .exit = i915_global_active_exit,
>  } };
>
> diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
> index 77f1911c463b..7fe2e503897b 100644
> --- a/drivers/gpu/drm/i915/i915_globals.c
> +++ b/drivers/gpu/drm/i915/i915_globals.c
> @@ -17,61 +17,8 @@
>
>  static LIST_HEAD(globals);
>
> -static atomic_t active;
> -static atomic_t epoch;
> -static struct park_work {
> -       struct delayed_work work;
> -       struct rcu_head rcu;
> -       unsigned long flags;
> -#define PENDING 0
> -       int epoch;
> -} park;
> -
> -static void i915_globals_shrink(void)
> -{
> -       struct i915_global *global;
> -
> -       /*
> -        * kmem_cache_shrink() discards empty slabs and reorders partially
> -        * filled slabs to prioritise allocating from the mostly full slabs,
> -        * with the aim of reducing fragmentation.
> -        */
> -       list_for_each_entry(global, &globals, link)
> -               global->shrink();
> -}
> -
> -static void __i915_globals_grace(struct rcu_head *rcu)
> -{
> -       /* Ratelimit parking as shrinking is quite slow */
> -       schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
> -}
> -
> -static void __i915_globals_queue_rcu(void)
> -{
> -       park.epoch = atomic_inc_return(&epoch);
> -       if (!atomic_read(&active)) {
> -               init_rcu_head(&park.rcu);
> -               call_rcu(&park.rcu, __i915_globals_grace);
> -       }
> -}
> -
> -static void __i915_globals_park(struct work_struct *work)
> -{
> -       destroy_rcu_head(&park.rcu);
> -
> -       /* Confirm nothing woke up in the last grace period */
> -       if (park.epoch != atomic_read(&epoch)) {
> -               __i915_globals_queue_rcu();
> -               return;
> -       }
> -
> -       clear_bit(PENDING, &park.flags);
> -       i915_globals_shrink();
> -}
> -
>  void __init i915_global_register(struct i915_global *global)
>  {
> -       GEM_BUG_ON(!global->shrink);
>         GEM_BUG_ON(!global->exit);
>
>         list_add_tail(&global->link, &globals);
> @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
>                 }
>         }
>
> -       INIT_DELAYED_WORK(&park.work, __i915_globals_park);
>         return 0;
>  }
>
> -void i915_globals_park(void)
> -{
> -       /*
> -        * Defer shrinking the global slab caches (and other work) until
> -        * after a RCU grace period has completed with no activity. This
> -        * is to try and reduce the latency impact on the consumers caused
> -        * by us shrinking the caches the same time as they are trying to
> -        * allocate, with the assumption being that if we idle long enough
> -        * for an RCU grace period to elapse since the last use, it is likely
> -        * to be longer until we need the caches again.
> -        */
> -       if (!atomic_dec_and_test(&active))
> -               return;
> -
> -       /* Queue cleanup after the next RCU grace period has freed slabs */
> -       if (!test_and_set_bit(PENDING, &park.flags))
> -               __i915_globals_queue_rcu();
> -}
> -
> -void i915_globals_unpark(void)
> -{
> -       atomic_inc(&epoch);
> -       atomic_inc(&active);
> -}
> -
> -static void __exit __i915_globals_flush(void)
> -{
> -       atomic_inc(&active); /* skip shrinking */
> -
> -       rcu_barrier(); /* wait for the work to be queued */
> -       flush_delayed_work(&park.work);
> -
> -       atomic_dec(&active);
> -}
> -
>  void __exit i915_globals_exit(void)
>  {
> -       GEM_BUG_ON(atomic_read(&active));
> -
> -       __i915_globals_flush();
>         __i915_globals_cleanup();
> -
> -       /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
> -       rcu_barrier();
>  }
> diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
> index 2d199f411a4a..9e6b4fd07528 100644
> --- a/drivers/gpu/drm/i915/i915_globals.h
> +++ b/drivers/gpu/drm/i915/i915_globals.h
> @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
>  struct i915_global {
>         struct list_head link;
>
> -       i915_global_func_t shrink;
>         i915_global_func_t exit;
>  };
>
>  void i915_global_register(struct i915_global *global);
>
>  int i915_globals_init(void);
> -void i915_globals_park(void);
> -void i915_globals_unpark(void);
>  void i915_globals_exit(void);
>
>  /* constructors */
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 09ebea9a0090..d3de9f60e03a 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
>  #include "selftests/i915_request.c"
>  #endif
>
> -static void i915_global_request_shrink(void)
> -{
> -       kmem_cache_shrink(global.slab_execute_cbs);
> -       kmem_cache_shrink(global.slab_requests);
> -}
> -
>  static void i915_global_request_exit(void)
>  {
>         kmem_cache_destroy(global.slab_execute_cbs);
> @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
>  }
>
>  static struct i915_global_request global = { {
> -       .shrink = i915_global_request_shrink,
>         .exit = i915_global_request_exit,
>  } };
>
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 3a58a9130309..561c649e59f7 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
>         return sched_engine;
>  }
>
> -static void i915_global_scheduler_shrink(void)
> -{
> -       kmem_cache_shrink(global.slab_dependencies);
> -       kmem_cache_shrink(global.slab_priorities);
> -}
> -
>  static void i915_global_scheduler_exit(void)
>  {
>         kmem_cache_destroy(global.slab_dependencies);
> @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
>  }
>
>  static struct i915_global_scheduler global = { {
> -       .shrink = i915_global_scheduler_shrink,
>         .exit = i915_global_scheduler_exit,
>  } };
>
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 5b9dce0f443b..09a7c47926f7 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
>  #include "selftests/i915_vma.c"
>  #endif
>
> -static void i915_global_vma_shrink(void)
> -{
> -       kmem_cache_shrink(global.slab_vmas);
> -}
> -
>  static void i915_global_vma_exit(void)
>  {
>         kmem_cache_destroy(global.slab_vmas);
>  }
>
>  static struct i915_global_vma global = { {
> -       .shrink = i915_global_vma_shrink,
>         .exit = i915_global_vma_exit,
>  } };
>
> --
> 2.32.0
>

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 20:17 ` Jason Ekstrand
@ 2021-07-21 20:24   ` Daniel Vetter
  2021-07-22  9:15     ` Daniel Vetter
  2021-07-23 15:42   ` Tvrtko Ursulin
  1 sibling, 1 reply; 13+ messages in thread
From: Daniel Vetter @ 2021-07-21 20:24 UTC (permalink / raw)
  To: Jason Ekstrand
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development, DRI Development

On Wed, Jul 21, 2021 at 10:17 PM Jason Ekstrand <jason@jlekstrand.net> wrote:
>
> On Wed, Jul 21, 2021 at 1:32 PM Daniel Vetter <daniel.vetter@ffwll.ch> wrote:
> >
> > This essentially reverts
> >
> > commit 84a1074920523430f9dc30ff907f4801b4820072
> > Author: Chris Wilson <chris@chris-wilson.co.uk>
> > Date:   Wed Jan 24 11:36:08 2018 +0000
> >
> >     drm/i915: Shrink the GEM kmem_caches upon idling
> >
> > mm/vmscan.c:do_shrink_slab() is a thing; if there's an issue with it,
> > then we need to fix it there, not hand-roll our own slab shrinking
> > code in i915.
> >
> > Noticed while reviewing a patch set from Jason to fix up some issues
> > in our i915_init() and i915_exit() module load/cleanup code. Now that
> > i915_globals.c isn't any different than normal init/exit functions, we
> > should convert them over to one unified table and remove
> > i915_globals.[hc] entirely.
>
> Mind throwing in a comment somewhere about how i915 is one of only two
> users of kmem_cache_shrink() in the entire kernel?  That also seems to
> be pretty good evidence that it's not useful.

I missed one, there's also one in kunit (I think it just got in; it's
from this year at least, per the commit). That one actually seems
legit: it's a selftest for some statistics around slabs, I think, so it
has a legitimate reason to carefully control the state and trim
anything that just hangs around.
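
(Roughly the shape of a legit use, for illustration; a made-up sketch,
not the actual kunit code: shrink the cache first so stale empty slabs
don't skew whatever per-cache statistics the test is about to check.)

	#include <linux/slab.h>

	static void slab_stats_sketch(void)
	{
		struct kmem_cache *s = kmem_cache_create("sketch", 64, 0, 0, NULL);
		void *obj;

		if (!s)
			return;

		obj = kmem_cache_alloc(s, GFP_KERNEL);
		if (obj)
			kmem_cache_free(s, obj);

		/* discard the now-empty slabs before sampling statistics */
		kmem_cache_shrink(s);

		/* ... read and assert on the per-cache counters here ... */

		kmem_cache_destroy(s);
	}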

I'll add something and push when CI approves.

> Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
>
> Feel free to land at will and I'll deal with merge conflicts on my end.

I think if we can land this, then yours, then I'll type up the
conversion to explicit init/exit and we're done.
-Daniel

>
> > Cc: David Airlie <airlied@linux.ie>
> > Cc: Jason Ekstrand <jason@jlekstrand.net>
> > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > ---
> >  drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
> >  drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
> >  drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
> >  drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
> >  drivers/gpu/drm/i915/i915_active.c          |  6 --
> >  drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
> >  drivers/gpu/drm/i915/i915_globals.h         |  3 -
> >  drivers/gpu/drm/i915/i915_request.c         |  7 --
> >  drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
> >  drivers/gpu/drm/i915/i915_vma.c             |  6 --
> >  10 files changed, 146 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > index 7d6f52d8a801..bf2a2319353a 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
> >  #include "selftests/i915_gem_context.c"
> >  #endif
> >
> > -static void i915_global_gem_context_shrink(void)
> > -{
> > -       kmem_cache_shrink(global.slab_luts);
> > -}
> > -
> >  static void i915_global_gem_context_exit(void)
> >  {
> >         kmem_cache_destroy(global.slab_luts);
> >  }
> >
> >  static struct i915_global_gem_context global = { {
> > -       .shrink = i915_global_gem_context_shrink,
> >         .exit = i915_global_gem_context_exit,
> >  } };
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > index 9da7b288b7ed..5c21cff33199 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
> >         INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
> >  }
> >
> > -static void i915_global_objects_shrink(void)
> > -{
> > -       kmem_cache_shrink(global.slab_objects);
> > -}
> > -
> >  static void i915_global_objects_exit(void)
> >  {
> >         kmem_cache_destroy(global.slab_objects);
> >  }
> >
> >  static struct i915_global_object global = { {
> > -       .shrink = i915_global_objects_shrink,
> >         .exit = i915_global_objects_exit,
> >  } };
> >
> > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> > index bd63813c8a80..c1338441cc1d 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_context.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> > @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
> >         i915_active_fini(&ce->active);
> >  }
> >
> > -static void i915_global_context_shrink(void)
> > -{
> > -       kmem_cache_shrink(global.slab_ce);
> > -}
> > -
> >  static void i915_global_context_exit(void)
> >  {
> >         kmem_cache_destroy(global.slab_ce);
> >  }
> >
> >  static struct i915_global_context global = { {
> > -       .shrink = i915_global_context_shrink,
> >         .exit = i915_global_context_exit,
> >  } };
> >
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > index aef3084e8b16..d86825437516 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
> >
> >         GT_TRACE(gt, "\n");
> >
> > -       i915_globals_unpark();
> > -
> >         /*
> >          * It seems that the DMC likes to transition between the DC states a lot
> >          * when there are no connected displays (no active power domains) during
> > @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
> >         GEM_BUG_ON(!wakeref);
> >         intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
> >
> > -       i915_globals_park();
> > -
> >         return 0;
> >  }
> >
> > diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> > index b1aa1c482c32..91723123ae9f 100644
> > --- a/drivers/gpu/drm/i915/i915_active.c
> > +++ b/drivers/gpu/drm/i915/i915_active.c
> > @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
> >  #include "selftests/i915_active.c"
> >  #endif
> >
> > -static void i915_global_active_shrink(void)
> > -{
> > -       kmem_cache_shrink(global.slab_cache);
> > -}
> > -
> >  static void i915_global_active_exit(void)
> >  {
> >         kmem_cache_destroy(global.slab_cache);
> >  }
> >
> >  static struct i915_global_active global = { {
> > -       .shrink = i915_global_active_shrink,
> >         .exit = i915_global_active_exit,
> >  } };
> >
> > diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
> > index 77f1911c463b..7fe2e503897b 100644
> > --- a/drivers/gpu/drm/i915/i915_globals.c
> > +++ b/drivers/gpu/drm/i915/i915_globals.c
> > @@ -17,61 +17,8 @@
> >
> >  static LIST_HEAD(globals);
> >
> > -static atomic_t active;
> > -static atomic_t epoch;
> > -static struct park_work {
> > -       struct delayed_work work;
> > -       struct rcu_head rcu;
> > -       unsigned long flags;
> > -#define PENDING 0
> > -       int epoch;
> > -} park;
> > -
> > -static void i915_globals_shrink(void)
> > -{
> > -       struct i915_global *global;
> > -
> > -       /*
> > -        * kmem_cache_shrink() discards empty slabs and reorders partially
> > -        * filled slabs to prioritise allocating from the mostly full slabs,
> > -        * with the aim of reducing fragmentation.
> > -        */
> > -       list_for_each_entry(global, &globals, link)
> > -               global->shrink();
> > -}
> > -
> > -static void __i915_globals_grace(struct rcu_head *rcu)
> > -{
> > -       /* Ratelimit parking as shrinking is quite slow */
> > -       schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
> > -}
> > -
> > -static void __i915_globals_queue_rcu(void)
> > -{
> > -       park.epoch = atomic_inc_return(&epoch);
> > -       if (!atomic_read(&active)) {
> > -               init_rcu_head(&park.rcu);
> > -               call_rcu(&park.rcu, __i915_globals_grace);
> > -       }
> > -}
> > -
> > -static void __i915_globals_park(struct work_struct *work)
> > -{
> > -       destroy_rcu_head(&park.rcu);
> > -
> > -       /* Confirm nothing woke up in the last grace period */
> > -       if (park.epoch != atomic_read(&epoch)) {
> > -               __i915_globals_queue_rcu();
> > -               return;
> > -       }
> > -
> > -       clear_bit(PENDING, &park.flags);
> > -       i915_globals_shrink();
> > -}
> > -
> >  void __init i915_global_register(struct i915_global *global)
> >  {
> > -       GEM_BUG_ON(!global->shrink);
> >         GEM_BUG_ON(!global->exit);
> >
> >         list_add_tail(&global->link, &globals);
> > @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
> >                 }
> >         }
> >
> > -       INIT_DELAYED_WORK(&park.work, __i915_globals_park);
> >         return 0;
> >  }
> >
> > -void i915_globals_park(void)
> > -{
> > -       /*
> > -        * Defer shrinking the global slab caches (and other work) until
> > -        * after a RCU grace period has completed with no activity. This
> > -        * is to try and reduce the latency impact on the consumers caused
> > -        * by us shrinking the caches the same time as they are trying to
> > -        * allocate, with the assumption being that if we idle long enough
> > -        * for an RCU grace period to elapse since the last use, it is likely
> > -        * to be longer until we need the caches again.
> > -        */
> > -       if (!atomic_dec_and_test(&active))
> > -               return;
> > -
> > -       /* Queue cleanup after the next RCU grace period has freed slabs */
> > -       if (!test_and_set_bit(PENDING, &park.flags))
> > -               __i915_globals_queue_rcu();
> > -}
> > -
> > -void i915_globals_unpark(void)
> > -{
> > -       atomic_inc(&epoch);
> > -       atomic_inc(&active);
> > -}
> > -
> > -static void __exit __i915_globals_flush(void)
> > -{
> > -       atomic_inc(&active); /* skip shrinking */
> > -
> > -       rcu_barrier(); /* wait for the work to be queued */
> > -       flush_delayed_work(&park.work);
> > -
> > -       atomic_dec(&active);
> > -}
> > -
> >  void __exit i915_globals_exit(void)
> >  {
> > -       GEM_BUG_ON(atomic_read(&active));
> > -
> > -       __i915_globals_flush();
> >         __i915_globals_cleanup();
> > -
> > -       /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
> > -       rcu_barrier();
> >  }
> > diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
> > index 2d199f411a4a..9e6b4fd07528 100644
> > --- a/drivers/gpu/drm/i915/i915_globals.h
> > +++ b/drivers/gpu/drm/i915/i915_globals.h
> > @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
> >  struct i915_global {
> >         struct list_head link;
> >
> > -       i915_global_func_t shrink;
> >         i915_global_func_t exit;
> >  };
> >
> >  void i915_global_register(struct i915_global *global);
> >
> >  int i915_globals_init(void);
> > -void i915_globals_park(void);
> > -void i915_globals_unpark(void);
> >  void i915_globals_exit(void);
> >
> >  /* constructors */
> > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> > index 09ebea9a0090..d3de9f60e03a 100644
> > --- a/drivers/gpu/drm/i915/i915_request.c
> > +++ b/drivers/gpu/drm/i915/i915_request.c
> > @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
> >  #include "selftests/i915_request.c"
> >  #endif
> >
> > -static void i915_global_request_shrink(void)
> > -{
> > -       kmem_cache_shrink(global.slab_execute_cbs);
> > -       kmem_cache_shrink(global.slab_requests);
> > -}
> > -
> >  static void i915_global_request_exit(void)
> >  {
> >         kmem_cache_destroy(global.slab_execute_cbs);
> > @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
> >  }
> >
> >  static struct i915_global_request global = { {
> > -       .shrink = i915_global_request_shrink,
> >         .exit = i915_global_request_exit,
> >  } };
> >
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index 3a58a9130309..561c649e59f7 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
> >         return sched_engine;
> >  }
> >
> > -static void i915_global_scheduler_shrink(void)
> > -{
> > -       kmem_cache_shrink(global.slab_dependencies);
> > -       kmem_cache_shrink(global.slab_priorities);
> > -}
> > -
> >  static void i915_global_scheduler_exit(void)
> >  {
> >         kmem_cache_destroy(global.slab_dependencies);
> > @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
> >  }
> >
> >  static struct i915_global_scheduler global = { {
> > -       .shrink = i915_global_scheduler_shrink,
> >         .exit = i915_global_scheduler_exit,
> >  } };
> >
> > diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> > index 5b9dce0f443b..09a7c47926f7 100644
> > --- a/drivers/gpu/drm/i915/i915_vma.c
> > +++ b/drivers/gpu/drm/i915/i915_vma.c
> > @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
> >  #include "selftests/i915_vma.c"
> >  #endif
> >
> > -static void i915_global_vma_shrink(void)
> > -{
> > -       kmem_cache_shrink(global.slab_vmas);
> > -}
> > -
> >  static void i915_global_vma_exit(void)
> >  {
> >         kmem_cache_destroy(global.slab_vmas);
> >  }
> >
> >  static struct i915_global_vma global = { {
> > -       .shrink = i915_global_vma_shrink,
> >         .exit = i915_global_vma_exit,
> >  } };
> >
> > --
> > 2.32.0
> >



-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 18:32 [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure Daniel Vetter
  2021-07-21 20:17 ` Jason Ekstrand
@ 2021-07-21 20:46 ` Patchwork
  2021-07-21 21:15 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-07-21 20:46 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Ditch i915 globals shrink infrastructure
URL   : https://patchwork.freedesktop.org/series/92841/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
9fdbba219dd5 drm/i915: Ditch i915 globals shrink infrastructure
-:8: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 84a107492052 ("drm/i915: Shrink the GEM kmem_caches upon idling")'
#8: 
commit 84a1074920523430f9dc30ff907f4801b4820072

-:354: WARNING:FROM_SIGN_OFF_MISMATCH: From:/Signed-off-by: email address mismatch: 'From: Daniel Vetter <daniel.vetter@ffwll.ch>' != 'Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>'

total: 1 errors, 1 warnings, 0 checks, 272 lines checked



^ permalink raw reply	[flat|nested] 13+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 18:32 [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure Daniel Vetter
  2021-07-21 20:17 ` Jason Ekstrand
  2021-07-21 20:46 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
@ 2021-07-21 21:15 ` Patchwork
  2021-07-22  2:31 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
  2021-07-22 10:02 ` [Intel-gfx] [PATCH] " Tvrtko Ursulin
  4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-07-21 21:15 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx


== Series Details ==

Series: drm/i915: Ditch i915 globals shrink infrastructure
URL   : https://patchwork.freedesktop.org/series/92841/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_10367 -> Patchwork_20667
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/index.html

Known issues
------------

  Here are the changes found in Patchwork_20667 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@amdgpu/amd_basic@cs-gfx:
    - fi-kbl-soraka:      NOTRUN -> [SKIP][1] ([fdo#109271]) +17 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/fi-kbl-soraka/igt@amdgpu/amd_basic@cs-gfx.html

  
#### Possible fixes ####

  * igt@i915_selftest@live@execlists:
    - fi-kbl-soraka:      [INCOMPLETE][2] ([i915#2782] / [i915#794]) -> [PASS][3]
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/fi-kbl-soraka/igt@i915_selftest@live@execlists.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/fi-kbl-soraka/igt@i915_selftest@live@execlists.html

  * igt@kms_chamelium@common-hpd-after-suspend:
    - fi-kbl-7500u:       [DMESG-FAIL][4] ([i915#165]) -> [PASS][5]
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/fi-kbl-7500u/igt@kms_chamelium@common-hpd-after-suspend.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/fi-kbl-7500u/igt@kms_chamelium@common-hpd-after-suspend.html

  * igt@kms_chamelium@dp-crc-fast:
    - fi-kbl-7500u:       [FAIL][6] ([i915#1372]) -> [PASS][7]
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#1372]: https://gitlab.freedesktop.org/drm/intel/issues/1372
  [i915#165]: https://gitlab.freedesktop.org/drm/intel/issues/165
  [i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
  [i915#2782]: https://gitlab.freedesktop.org/drm/intel/issues/2782
  [i915#2867]: https://gitlab.freedesktop.org/drm/intel/issues/2867
  [i915#794]: https://gitlab.freedesktop.org/drm/intel/issues/794


Participating hosts (38 -> 35)
------------------------------

  Missing    (3): fi-ilk-m540 fi-bdw-samus fi-hsw-4200u 


Build changes
-------------

  * Linux: CI_DRM_10367 -> Patchwork_20667

  CI-20190529: 20190529
  CI_DRM_10367: 598494d0149b67545593dfb1b5fa60278907749e @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6146: 6caef22e4aafed275771f564d4ea4cab09896ebc @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_20667: 9fdbba219dd54ae98c21b0344687d8d3ac2860bf @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

9fdbba219dd5 drm/i915: Ditch i915 globals shrink infrastructure

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/index.html


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [Intel-gfx] ✗ Fi.CI.IGT: failure for drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 18:32 [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure Daniel Vetter
                   ` (2 preceding siblings ...)
  2021-07-21 21:15 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2021-07-22  2:31 ` Patchwork
  2021-07-22 10:02 ` [Intel-gfx] [PATCH] " Tvrtko Ursulin
  4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-07-22  2:31 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx


== Series Details ==

Series: drm/i915: Ditch i915 globals shrink infrastructure
URL   : https://patchwork.freedesktop.org/series/92841/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_10367_full -> Patchwork_20667_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_20667_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_20667_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_20667_full:

### IGT changes ###

#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@i915_pm_rpm@basic-rte:
    - {shard-rkl}:        NOTRUN -> [FAIL][1]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@i915_pm_rpm@basic-rte.html

  * igt@i915_pm_rpm@modeset-lpsp-stress-no-wait:
    - {shard-rkl}:        [SKIP][2] ([i915#1397]) -> [SKIP][3]
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@i915_pm_rpm@modeset-lpsp-stress-no-wait.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@i915_pm_rpm@modeset-lpsp-stress-no-wait.html

  * igt@i915_pm_rpm@modeset-pc8-residency-stress:
    - {shard-rkl}:        [SKIP][4] ([fdo#109506]) -> [SKIP][5]
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@i915_pm_rpm@modeset-pc8-residency-stress.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@i915_pm_rpm@modeset-pc8-residency-stress.html

  * igt@i915_pm_rpm@system-suspend-devices:
    - {shard-rkl}:        [PASS][6] -> [SKIP][7]
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@i915_pm_rpm@system-suspend-devices.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@i915_pm_rpm@system-suspend-devices.html

  * igt@kms_ccs@pipe-a-bad-rotation-90-yf_tiled_ccs:
    - {shard-rkl}:        NOTRUN -> [SKIP][8]
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_ccs@pipe-a-bad-rotation-90-yf_tiled_ccs.html

  * igt@kms_vblank@pipe-b-ts-continuation-dpms-rpm:
    - {shard-rkl}:        [SKIP][9] ([i915#1845]) -> [SKIP][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_vblank@pipe-b-ts-continuation-dpms-rpm.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_vblank@pipe-b-ts-continuation-dpms-rpm.html

  * igt@sysfs_heartbeat_interval@precise@bcs0:
    - {shard-rkl}:        [PASS][11] -> [FAIL][12] +1 similar issue
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-2/igt@sysfs_heartbeat_interval@precise@bcs0.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-1/igt@sysfs_heartbeat_interval@precise@bcs0.html

  

### Piglit changes ###

#### Possible regressions ####

  * spec@amd_shader_trinary_minmax@execution@built-in-functions@gs-min3-ivec3-ivec3-ivec3 (NEW):
    - pig-snb-2600:       NOTRUN -> [FAIL][13] +1 similar issue
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/pig-snb-2600/spec@amd_shader_trinary_minmax@execution@built-in-functions@gs-min3-ivec3-ivec3-ivec3.html

  
New tests
---------

  New tests have been introduced between CI_DRM_10367_full and Patchwork_20667_full:

### New Piglit tests (1) ###

  * spec@amd_shader_trinary_minmax@execution@built-in-functions@gs-min3-ivec3-ivec3-ivec3:
    - Statuses : 1 fail(s)
    - Exec time: [0.19] s

  

Known issues
------------

  Here are the changes found in Patchwork_20667_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@feature_discovery@display-3x:
    - shard-glk:          NOTRUN -> [SKIP][14] ([fdo#109271]) +69 similar issues
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk4/igt@feature_discovery@display-3x.html

  * igt@feature_discovery@display-4x:
    - shard-tglb:         NOTRUN -> [SKIP][15] ([i915#1839])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb5/igt@feature_discovery@display-4x.html

  * igt@gem_ctx_persistence@engines-hostile@vcs0:
    - shard-tglb:         [PASS][16] -> [FAIL][17] ([i915#2410])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-tglb5/igt@gem_ctx_persistence@engines-hostile@vcs0.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb3/igt@gem_ctx_persistence@engines-hostile@vcs0.html

  * igt@gem_ctx_persistence@smoketest:
    - shard-snb:          NOTRUN -> [SKIP][18] ([fdo#109271] / [i915#1099])
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-snb2/igt@gem_ctx_persistence@smoketest.html

  * igt@gem_exec_fair@basic-deadline:
    - shard-apl:          NOTRUN -> [FAIL][19] ([i915#2846])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl1/igt@gem_exec_fair@basic-deadline.html

  * igt@gem_exec_fair@basic-none-solo@rcs0:
    - shard-glk:          [PASS][20] -> [FAIL][21] ([i915#2842]) +2 similar issues
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-glk7/igt@gem_exec_fair@basic-none-solo@rcs0.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk6/igt@gem_exec_fair@basic-none-solo@rcs0.html

  * igt@gem_exec_fair@basic-pace-solo@rcs0:
    - shard-iclb:         [PASS][22] -> [FAIL][23] ([i915#2842])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-iclb2/igt@gem_exec_fair@basic-pace-solo@rcs0.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb3/igt@gem_exec_fair@basic-pace-solo@rcs0.html

  * igt@gem_exec_fair@basic-pace@bcs0:
    - shard-tglb:         [PASS][24] -> [FAIL][25] ([i915#2842]) +1 similar issue
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-tglb1/igt@gem_exec_fair@basic-pace@bcs0.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb6/igt@gem_exec_fair@basic-pace@bcs0.html

  * igt@gem_exec_fair@basic-pace@vcs1:
    - shard-kbl:          [PASS][26] -> [FAIL][27] ([i915#2842]) +1 similar issue
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-kbl7/igt@gem_exec_fair@basic-pace@vcs1.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-kbl1/igt@gem_exec_fair@basic-pace@vcs1.html

  * igt@gem_mmap_gtt@cpuset-big-copy-odd:
    - shard-iclb:         [PASS][28] -> [FAIL][29] ([i915#2428])
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-iclb4/igt@gem_mmap_gtt@cpuset-big-copy-odd.html
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb8/igt@gem_mmap_gtt@cpuset-big-copy-odd.html

  * igt@gem_pread@exhaustion:
    - shard-snb:          NOTRUN -> [WARN][30] ([i915#2658])
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-snb7/igt@gem_pread@exhaustion.html

  * igt@gem_render_copy@y-tiled-to-vebox-linear:
    - shard-iclb:         NOTRUN -> [SKIP][31] ([i915#768])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb4/igt@gem_render_copy@y-tiled-to-vebox-linear.html

  * igt@i915_pm_rpm@dpms-mode-unset-lpsp:
    - shard-tglb:         NOTRUN -> [SKIP][32] ([i915#579])
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb7/igt@i915_pm_rpm@dpms-mode-unset-lpsp.html
    - shard-iclb:         NOTRUN -> [SKIP][33] ([i915#579])
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb4/igt@i915_pm_rpm@dpms-mode-unset-lpsp.html

  * igt@i915_selftest@mock@uncore:
    - shard-glk:          NOTRUN -> [DMESG-WARN][34] ([i915#3746]) +17 similar issues
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk8/igt@i915_selftest@mock@uncore.html

  * igt@kms_big_fb@x-tiled-32bpp-rotate-0:
    - shard-glk:          [PASS][35] -> [DMESG-WARN][36] ([i915#118] / [i915#95])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-glk3/igt@kms_big_fb@x-tiled-32bpp-rotate-0.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk6/igt@kms_big_fb@x-tiled-32bpp-rotate-0.html

  * igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip:
    - shard-apl:          NOTRUN -> [SKIP][37] ([fdo#109271] / [i915#3777])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl8/igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip.html

  * igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip:
    - shard-glk:          NOTRUN -> [SKIP][38] ([fdo#109271] / [i915#3777])
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk4/igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip.html

  * igt@kms_ccs@pipe-d-ccs-on-another-bo-yf_tiled_ccs:
    - shard-iclb:         NOTRUN -> [SKIP][39] ([fdo#109278]) +4 similar issues
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb4/igt@kms_ccs@pipe-d-ccs-on-another-bo-yf_tiled_ccs.html

  * igt@kms_chamelium@dp-crc-multiple:
    - shard-glk:          NOTRUN -> [SKIP][40] ([fdo#109271] / [fdo#111827]) +3 similar issues
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk8/igt@kms_chamelium@dp-crc-multiple.html

  * igt@kms_chamelium@hdmi-crc-multiple:
    - shard-snb:          NOTRUN -> [SKIP][41] ([fdo#109271] / [fdo#111827]) +12 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-snb2/igt@kms_chamelium@hdmi-crc-multiple.html

  * igt@kms_color_chamelium@pipe-c-ctm-0-25:
    - shard-apl:          NOTRUN -> [SKIP][42] ([fdo#109271] / [fdo#111827]) +18 similar issues
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl1/igt@kms_color_chamelium@pipe-c-ctm-0-25.html

  * igt@kms_color_chamelium@pipe-d-ctm-0-25:
    - shard-tglb:         NOTRUN -> [SKIP][43] ([fdo#109284] / [fdo#111827])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb5/igt@kms_color_chamelium@pipe-d-ctm-0-25.html

  * igt@kms_content_protection@atomic-dpms:
    - shard-apl:          NOTRUN -> [TIMEOUT][44] ([i915#1319])
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl7/igt@kms_content_protection@atomic-dpms.html

  * igt@kms_cursor_crc@pipe-a-cursor-32x10-sliding:
    - shard-tglb:         NOTRUN -> [SKIP][45] ([i915#3359])
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb7/igt@kms_cursor_crc@pipe-a-cursor-32x10-sliding.html

  * igt@kms_cursor_crc@pipe-b-cursor-64x64-random:
    - shard-skl:          [PASS][46] -> [FAIL][47] ([i915#3444])
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl5/igt@kms_cursor_crc@pipe-b-cursor-64x64-random.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl5/igt@kms_cursor_crc@pipe-b-cursor-64x64-random.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
    - shard-skl:          [PASS][48] -> [DMESG-WARN][49] ([i915#1982]) +1 similar issue
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl5/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl5/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size:
    - shard-skl:          [PASS][50] -> [FAIL][51] ([i915#2346] / [i915#533])
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl10/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html

  * igt@kms_dither@fb-8bpc-vs-panel-8bpc@edp-1-pipe-a:
    - shard-iclb:         [PASS][52] -> [SKIP][53] ([i915#3788])
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-iclb6/igt@kms_dither@fb-8bpc-vs-panel-8bpc@edp-1-pipe-a.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb2/igt@kms_dither@fb-8bpc-vs-panel-8bpc@edp-1-pipe-a.html

  * igt@kms_flip@2x-absolute-wf_vblank:
    - shard-kbl:          NOTRUN -> [SKIP][54] ([fdo#109271]) +2 similar issues
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-kbl4/igt@kms_flip@2x-absolute-wf_vblank.html

  * igt@kms_flip@2x-flip-vs-modeset-vs-hang:
    - shard-iclb:         NOTRUN -> [SKIP][55] ([fdo#109274])
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb4/igt@kms_flip@2x-flip-vs-modeset-vs-hang.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1:
    - shard-skl:          [PASS][56] -> [FAIL][57] ([i915#79]) +1 similar issue
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl7/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl8/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1.html

  * igt@kms_flip@flip-vs-expired-vblank@a-hdmi-a1:
    - shard-glk:          [PASS][58] -> [FAIL][59] ([i915#79])
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-glk8/igt@kms_flip@flip-vs-expired-vblank@a-hdmi-a1.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk5/igt@kms_flip@flip-vs-expired-vblank@a-hdmi-a1.html

  * igt@kms_flip@plain-flip-fb-recreate@b-edp1:
    - shard-skl:          [PASS][60] -> [FAIL][61] ([i915#2122])
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl6/igt@kms_flip@plain-flip-fb-recreate@b-edp1.html
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl3/igt@kms_flip@plain-flip-fb-recreate@b-edp1.html

  * igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-wc:
    - shard-glk:          [PASS][62] -> [FAIL][63] ([i915#2546])
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-glk3/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-wc.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk6/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-wc.html

  * igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-move:
    - shard-iclb:         NOTRUN -> [SKIP][64] ([fdo#109280]) +2 similar issues
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb4/igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-move.html
    - shard-tglb:         NOTRUN -> [SKIP][65] ([fdo#111825]) +2 similar issues
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb7/igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-move.html

  * igt@kms_frontbuffer_tracking@fbcpsr-farfromfence-mmap-gtt:
    - shard-skl:          NOTRUN -> [SKIP][66] ([fdo#109271]) +11 similar issues
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl10/igt@kms_frontbuffer_tracking@fbcpsr-farfromfence-mmap-gtt.html

  * igt@kms_hdr@bpc-switch-suspend:
    - shard-apl:          [PASS][67] -> [DMESG-WARN][68] ([i915#180]) +1 similar issue
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-apl2/igt@kms_hdr@bpc-switch-suspend.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl6/igt@kms_hdr@bpc-switch-suspend.html

  * igt@kms_pipe_crc_basic@hang-read-crc-pipe-d:
    - shard-glk:          NOTRUN -> [SKIP][69] ([fdo#109271] / [i915#533])
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk4/igt@kms_pipe_crc_basic@hang-read-crc-pipe-d.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-d:
    - shard-apl:          NOTRUN -> [SKIP][70] ([fdo#109271] / [i915#533])
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl7/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-d.html

  * igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb:
    - shard-apl:          NOTRUN -> [FAIL][71] ([fdo#108145] / [i915#265]) +3 similar issues
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl2/igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb.html

  * igt@kms_plane_alpha_blend@pipe-b-constant-alpha-max:
    - shard-glk:          NOTRUN -> [FAIL][72] ([fdo#108145] / [i915#265])
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk4/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-max.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [PASS][73] -> [FAIL][74] ([fdo#108145] / [i915#265]) +1 similar issue
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl9/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl6/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_plane_scaling@scaler-with-clipping-clamping@pipe-c-scaler-with-clipping-clamping:
    - shard-glk:          NOTRUN -> [SKIP][75] ([fdo#109271] / [i915#2733])
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk4/igt@kms_plane_scaling@scaler-with-clipping-clamping@pipe-c-scaler-with-clipping-clamping.html

  * igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-2:
    - shard-glk:          NOTRUN -> [SKIP][76] ([fdo#109271] / [i915#658])
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk4/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-2.html

  * igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5:
    - shard-apl:          NOTRUN -> [SKIP][77] ([fdo#109271] / [i915#658]) +4 similar issues
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl7/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         [PASS][78] -> [SKIP][79] ([fdo#109441]) +1 similar issue
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-iclb2/igt@kms_psr@psr2_cursor_render.html
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb8/igt@kms_psr@psr2_cursor_render.html

  * igt@kms_setmode@basic:
    - shard-snb:          NOTRUN -> [FAIL][80] ([i915#31])
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-snb7/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-d-query-forked-hang:
    - shard-snb:          NOTRUN -> [SKIP][81] ([fdo#109271]) +216 similar issues
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-snb2/igt@kms_vblank@pipe-d-query-forked-hang.html

  * igt@kms_vblank@pipe-d-ts-continuation-idle:
    - shard-apl:          NOTRUN -> [SKIP][82] ([fdo#109271]) +233 similar issues
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl7/igt@kms_vblank@pipe-d-ts-continuation-idle.html

  * igt@kms_writeback@writeback-invalid-parameters:
    - shard-apl:          NOTRUN -> [SKIP][83] ([fdo#109271] / [i915#2437])
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl3/igt@kms_writeback@writeback-invalid-parameters.html

  * igt@perf@polling-parameterized:
    - shard-skl:          [PASS][84] -> [FAIL][85] ([i915#1542])
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl2/igt@perf@polling-parameterized.html
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl2/igt@perf@polling-parameterized.html

  * igt@perf_pmu@rc6-runtime-pm:
    - shard-tglb:         NOTRUN -> [SKIP][86] ([fdo#111719])
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb6/igt@perf_pmu@rc6-runtime-pm.html

  * igt@perf_pmu@rc6-suspend:
    - shard-apl:          NOTRUN -> [DMESG-WARN][87] ([i915#180])
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl3/igt@perf_pmu@rc6-suspend.html

  * igt@sysfs_clients@fair-7:
    - shard-apl:          NOTRUN -> [SKIP][88] ([fdo#109271] / [i915#2994]) +2 similar issues
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl1/igt@sysfs_clients@fair-7.html

  * igt@sysfs_clients@split-25:
    - shard-glk:          NOTRUN -> [SKIP][89] ([fdo#109271] / [i915#2994])
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk4/igt@sysfs_clients@split-25.html

  
#### Possible fixes ####

  * igt@feature_discovery@psr1:
    - {shard-rkl}:        [SKIP][90] ([i915#658]) -> [PASS][91]
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@feature_discovery@psr1.html
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@feature_discovery@psr1.html

  * igt@feature_discovery@psr2:
    - shard-iclb:         [SKIP][92] ([i915#658]) -> [PASS][93]
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-iclb8/igt@feature_discovery@psr2.html
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb2/igt@feature_discovery@psr2.html

  * igt@gem_ctx_persistence@many-contexts:
    - {shard-rkl}:        [FAIL][94] ([i915#2410]) -> [PASS][95]
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-2/igt@gem_ctx_persistence@many-contexts.html
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-5/igt@gem_ctx_persistence@many-contexts.html

  * igt@gem_exec_fair@basic-flow@rcs0:
    - shard-tglb:         [FAIL][96] ([i915#2842]) -> [PASS][97] +2 similar issues
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-tglb5/igt@gem_exec_fair@basic-flow@rcs0.html
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb2/igt@gem_exec_fair@basic-flow@rcs0.html

  * igt@gem_exec_fair@basic-none@vcs0:
    - shard-kbl:          [FAIL][98] ([i915#2842]) -> [PASS][99] +1 similar issue
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-kbl7/igt@gem_exec_fair@basic-none@vcs0.html
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-kbl7/igt@gem_exec_fair@basic-none@vcs0.html
    - shard-apl:          [FAIL][100] ([i915#2842]) -> [PASS][101]
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-apl7/igt@gem_exec_fair@basic-none@vcs0.html
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl3/igt@gem_exec_fair@basic-none@vcs0.html

  * igt@gem_exec_fair@basic-pace@vcs0:
    - shard-iclb:         [FAIL][102] ([i915#2842]) -> [PASS][103]
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-iclb1/igt@gem_exec_fair@basic-pace@vcs0.html
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-iclb2/igt@gem_exec_fair@basic-pace@vcs0.html

  * igt@gem_exec_fair@basic-throttle@rcs0:
    - shard-glk:          [FAIL][104] ([i915#2842]) -> [PASS][105]
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-glk1/igt@gem_exec_fair@basic-throttle@rcs0.html
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk8/igt@gem_exec_fair@basic-throttle@rcs0.html

  * igt@gem_huc_copy@huc-copy:
    - shard-tglb:         [SKIP][106] ([i915#2190]) -> [PASS][107]
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-tglb6/igt@gem_huc_copy@huc-copy.html
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-tglb1/igt@gem_huc_copy@huc-copy.html

  * igt@gem_mmap_offset@clear:
    - {shard-rkl}:        [FAIL][108] ([i915#3160]) -> [PASS][109]
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@gem_mmap_offset@clear.html
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-2/igt@gem_mmap_offset@clear.html

  * igt@gen9_exec_parse@allowed-all:
    - shard-glk:          [DMESG-WARN][110] ([i915#1436] / [i915#716]) -> [PASS][111]
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-glk3/igt@gen9_exec_parse@allowed-all.html
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-glk8/igt@gen9_exec_parse@allowed-all.html

  * igt@i915_pm_backlight@basic-brightness:
    - {shard-rkl}:        [SKIP][112] ([i915#3012]) -> [PASS][113]
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@i915_pm_backlight@basic-brightness.html
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@i915_pm_backlight@basic-brightness.html

  * igt@i915_suspend@fence-restore-untiled:
    - shard-apl:          [DMESG-WARN][114] ([i915#180]) -> [PASS][115] +1 similar issue
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-apl1/igt@i915_suspend@fence-restore-untiled.html
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-apl2/igt@i915_suspend@fence-restore-untiled.html

  * igt@kms_big_fb@linear-max-hw-stride-32bpp-rotate-180:
    - {shard-rkl}:        [SKIP][116] ([i915#3721]) -> [PASS][117] +1 similar issue
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_big_fb@linear-max-hw-stride-32bpp-rotate-180.html
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_big_fb@linear-max-hw-stride-32bpp-rotate-180.html

  * igt@kms_big_fb@x-tiled-8bpp-rotate-0:
    - {shard-rkl}:        [SKIP][118] ([i915#3638]) -> [PASS][119] +1 similar issue
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_big_fb@x-tiled-8bpp-rotate-0.html
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_big_fb@x-tiled-8bpp-rotate-0.html

  * igt@kms_big_fb@y-tiled-16bpp-rotate-270:
    - {shard-rkl}:        [SKIP][120] ([fdo#111614]) -> [PASS][121]
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_big_fb@y-tiled-16bpp-rotate-270.html
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_big_fb@y-tiled-16bpp-rotate-270.html

  * igt@kms_ccs@pipe-a-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc:
    - {shard-rkl}:        [FAIL][122] ([i915#3678]) -> [PASS][123] +2 similar issues
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_ccs@pipe-a-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc.html
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_ccs@pipe-a-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc.html

  * igt@kms_color@pipe-c-ctm-negative:
    - {shard-rkl}:        [SKIP][124] ([i915#1149] / [i915#1849]) -> [PASS][125] +5 similar issues
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_color@pipe-c-ctm-negative.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_color@pipe-c-ctm-negative.html

  * igt@kms_cursor_crc@pipe-c-cursor-64x64-random:
    - {shard-rkl}:        [SKIP][126] ([fdo#112022]) -> [PASS][127] +9 similar issues
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_cursor_crc@pipe-c-cursor-64x64-random.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_cursor_crc@pipe-c-cursor-64x64-random.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions:
    - shard-skl:          [FAIL][128] ([i915#2346]) -> [PASS][129]
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl2/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl2/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html

  * igt@kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size:
    - {shard-rkl}:        [SKIP][130] ([fdo#111825]) -> [PASS][131] +3 similar issues
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size.html
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size.html

  * igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-xtiled:
    - {shard-rkl}:        [SKIP][132] ([fdo#111314]) -> [PASS][133] +3 similar issues
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-xtiled.html
   [133]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-xtiled.html

  * igt@kms_draw_crc@draw-method-xrgb8888-render-xtiled:
    - shard-skl:          [FAIL][134] ([i915#3451]) -> [PASS][135]
   [134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl4/igt@kms_draw_crc@draw-method-xrgb8888-render-xtiled.html
   [135]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl7/igt@kms_draw_crc@draw-method-xrgb8888-render-xtiled.html

  * igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1:
    - shard-skl:          [FAIL][136] ([i915#2122]) -> [PASS][137]
   [136]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl8/igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1.html
   [137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl10/igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-gtt:
    - {shard-rkl}:        [SKIP][138] ([i915#1849]) -> [PASS][139] +25 similar issues
   [138]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-rkl-5/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-gtt.html
   [139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-rkl-6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-gtt.html

  * igt@kms_hdr@bpc-switch-suspend:
    - shard-skl:          [FAIL][140] ([i915#1188]) -> [PASS][141]
   [140]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10367/shard-skl8/igt@kms_hdr@bpc-switch-suspend.html
   [141]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/shard-skl10/igt@kms_hdr@bpc-switch-suspend.html

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20667/index.html

[-- Attachment #1.2: Type: text/html, Size: 33377 bytes --]

[-- Attachment #2: Type: text/plain, Size: 160 bytes --]

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 20:24   ` Daniel Vetter
@ 2021-07-22  9:15     ` Daniel Vetter
  0 siblings, 0 replies; 13+ messages in thread
From: Daniel Vetter @ 2021-07-22  9:15 UTC (permalink / raw)
  To: Jason Ekstrand
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development, DRI Development

On Wed, Jul 21, 2021 at 10:24:01PM +0200, Daniel Vetter wrote:
> On Wed, Jul 21, 2021 at 10:17 PM Jason Ekstrand <jason@jlekstrand.net> wrote:
> >
> > On Wed, Jul 21, 2021 at 1:32 PM Daniel Vetter <daniel.vetter@ffwll.ch> wrote:
> > >
> > > This essentially reverts
> > >
> > > commit 84a1074920523430f9dc30ff907f4801b4820072
> > > Author: Chris Wilson <chris@chris-wilson.co.uk>
> > > Date:   Wed Jan 24 11:36:08 2018 +0000
> > >
> > >     drm/i915: Shrink the GEM kmem_caches upon idling
> > >
> > > mm/vmscan.c:do_shrink_slab() is a thing, if there's an issue with it
> > > then we need to fix that there, not hand-roll our own slab shrinking
> > > code in i915.
> > >
> > > Noticed while reviewing a patch set from Jason to fix up some issues
> > > in our i915_init() and i915_exit() module load/cleanup code. Now that
> > > i915_globals.c isn't any different than normal init/exit functions, we
> > > should convert them over to one unified table and remove
> > > i915_globals.[hc] entirely.
> >
> > Mind throwing in a comment somewhere about how i915 is one of only two
> > users of kmem_cache_shrink() in the entire kernel?  That also seems to
> > be pretty good evidence that it's not useful.
> 
> I missed one, there's also one in kunit (I think it just got in, it's
> from this year at least per the commit). That one seems actually legit,
> it's a selftest for some statistics around slabs I think, so it has a
> legit reason to carefully control the state and trim anything that just
> hangs around.
> 
> I'll add something and push when CI approves.
> 
> > Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
> >
> > Feel free to land at will and I'll deal with merge conflicts on my end.
> 
> I think if we can land this, then yours, then I'll type the conversion to
> explicit init/exit and we're done.

CI approved, merged as discussed. I'll look at your series now.
-Daniel
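
For reference, a rough sketch of what the "one unified table" of explicit
init/exit functions mentioned above could look like. Placeholder names
only, not the actual code from the follow-up series:

/*
 * Rough sketch: walk an array of init/exit pairs on module load and
 * unwind it in reverse on failure or unload.  All names here are
 * placeholders, not the real i915 functions.
 */
#include <linux/kernel.h>
#include <linux/module.h>

static int  placeholder_foo_init(void) { return 0; }
static void placeholder_foo_exit(void) { }
static int  placeholder_bar_init(void) { return 0; }
static void placeholder_bar_exit(void) { }

static const struct {
	int (*init)(void);
	void (*exit)(void);
} init_funcs[] = {
	{ placeholder_foo_init, placeholder_foo_exit },
	{ placeholder_bar_init, placeholder_bar_exit },
};

static int __init example_module_init(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
		err = init_funcs[i].init();
		if (err) {
			/* Unwind whatever already succeeded. */
			while (i--)
				init_funcs[i].exit();
			return err;
		}
	}

	return 0;
}

static void __exit example_module_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(init_funcs); i--;)
		init_funcs[i].exit();
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");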

> > > Cc: David Airlie <airlied@linux.ie>
> > > Cc: Jason Ekstrand <jason@jlekstrand.net>
> > > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > > ---
> > >  drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
> > >  drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
> > >  drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
> > >  drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
> > >  drivers/gpu/drm/i915/i915_active.c          |  6 --
> > >  drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
> > >  drivers/gpu/drm/i915/i915_globals.h         |  3 -
> > >  drivers/gpu/drm/i915/i915_request.c         |  7 --
> > >  drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
> > >  drivers/gpu/drm/i915/i915_vma.c             |  6 --
> > >  10 files changed, 146 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > > index 7d6f52d8a801..bf2a2319353a 100644
> > > --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > > +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > > @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
> > >  #include "selftests/i915_gem_context.c"
> > >  #endif
> > >
> > > -static void i915_global_gem_context_shrink(void)
> > > -{
> > > -       kmem_cache_shrink(global.slab_luts);
> > > -}
> > > -
> > >  static void i915_global_gem_context_exit(void)
> > >  {
> > >         kmem_cache_destroy(global.slab_luts);
> > >  }
> > >
> > >  static struct i915_global_gem_context global = { {
> > > -       .shrink = i915_global_gem_context_shrink,
> > >         .exit = i915_global_gem_context_exit,
> > >  } };
> > >
> > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > > index 9da7b288b7ed..5c21cff33199 100644
> > > --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > > +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > > @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
> > >         INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
> > >  }
> > >
> > > -static void i915_global_objects_shrink(void)
> > > -{
> > > -       kmem_cache_shrink(global.slab_objects);
> > > -}
> > > -
> > >  static void i915_global_objects_exit(void)
> > >  {
> > >         kmem_cache_destroy(global.slab_objects);
> > >  }
> > >
> > >  static struct i915_global_object global = { {
> > > -       .shrink = i915_global_objects_shrink,
> > >         .exit = i915_global_objects_exit,
> > >  } };
> > >
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> > > index bd63813c8a80..c1338441cc1d 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_context.c
> > > +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> > > @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
> > >         i915_active_fini(&ce->active);
> > >  }
> > >
> > > -static void i915_global_context_shrink(void)
> > > -{
> > > -       kmem_cache_shrink(global.slab_ce);
> > > -}
> > > -
> > >  static void i915_global_context_exit(void)
> > >  {
> > >         kmem_cache_destroy(global.slab_ce);
> > >  }
> > >
> > >  static struct i915_global_context global = { {
> > > -       .shrink = i915_global_context_shrink,
> > >         .exit = i915_global_context_exit,
> > >  } };
> > >
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > > index aef3084e8b16..d86825437516 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > > +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > > @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
> > >
> > >         GT_TRACE(gt, "\n");
> > >
> > > -       i915_globals_unpark();
> > > -
> > >         /*
> > >          * It seems that the DMC likes to transition between the DC states a lot
> > >          * when there are no connected displays (no active power domains) during
> > > @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
> > >         GEM_BUG_ON(!wakeref);
> > >         intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
> > >
> > > -       i915_globals_park();
> > > -
> > >         return 0;
> > >  }
> > >
> > > diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> > > index b1aa1c482c32..91723123ae9f 100644
> > > --- a/drivers/gpu/drm/i915/i915_active.c
> > > +++ b/drivers/gpu/drm/i915/i915_active.c
> > > @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
> > >  #include "selftests/i915_active.c"
> > >  #endif
> > >
> > > -static void i915_global_active_shrink(void)
> > > -{
> > > -       kmem_cache_shrink(global.slab_cache);
> > > -}
> > > -
> > >  static void i915_global_active_exit(void)
> > >  {
> > >         kmem_cache_destroy(global.slab_cache);
> > >  }
> > >
> > >  static struct i915_global_active global = { {
> > > -       .shrink = i915_global_active_shrink,
> > >         .exit = i915_global_active_exit,
> > >  } };
> > >
> > > diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
> > > index 77f1911c463b..7fe2e503897b 100644
> > > --- a/drivers/gpu/drm/i915/i915_globals.c
> > > +++ b/drivers/gpu/drm/i915/i915_globals.c
> > > @@ -17,61 +17,8 @@
> > >
> > >  static LIST_HEAD(globals);
> > >
> > > -static atomic_t active;
> > > -static atomic_t epoch;
> > > -static struct park_work {
> > > -       struct delayed_work work;
> > > -       struct rcu_head rcu;
> > > -       unsigned long flags;
> > > -#define PENDING 0
> > > -       int epoch;
> > > -} park;
> > > -
> > > -static void i915_globals_shrink(void)
> > > -{
> > > -       struct i915_global *global;
> > > -
> > > -       /*
> > > -        * kmem_cache_shrink() discards empty slabs and reorders partially
> > > -        * filled slabs to prioritise allocating from the mostly full slabs,
> > > -        * with the aim of reducing fragmentation.
> > > -        */
> > > -       list_for_each_entry(global, &globals, link)
> > > -               global->shrink();
> > > -}
> > > -
> > > -static void __i915_globals_grace(struct rcu_head *rcu)
> > > -{
> > > -       /* Ratelimit parking as shrinking is quite slow */
> > > -       schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
> > > -}
> > > -
> > > -static void __i915_globals_queue_rcu(void)
> > > -{
> > > -       park.epoch = atomic_inc_return(&epoch);
> > > -       if (!atomic_read(&active)) {
> > > -               init_rcu_head(&park.rcu);
> > > -               call_rcu(&park.rcu, __i915_globals_grace);
> > > -       }
> > > -}
> > > -
> > > -static void __i915_globals_park(struct work_struct *work)
> > > -{
> > > -       destroy_rcu_head(&park.rcu);
> > > -
> > > -       /* Confirm nothing woke up in the last grace period */
> > > -       if (park.epoch != atomic_read(&epoch)) {
> > > -               __i915_globals_queue_rcu();
> > > -               return;
> > > -       }
> > > -
> > > -       clear_bit(PENDING, &park.flags);
> > > -       i915_globals_shrink();
> > > -}
> > > -
> > >  void __init i915_global_register(struct i915_global *global)
> > >  {
> > > -       GEM_BUG_ON(!global->shrink);
> > >         GEM_BUG_ON(!global->exit);
> > >
> > >         list_add_tail(&global->link, &globals);
> > > @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
> > >                 }
> > >         }
> > >
> > > -       INIT_DELAYED_WORK(&park.work, __i915_globals_park);
> > >         return 0;
> > >  }
> > >
> > > -void i915_globals_park(void)
> > > -{
> > > -       /*
> > > -        * Defer shrinking the global slab caches (and other work) until
> > > -        * after a RCU grace period has completed with no activity. This
> > > -        * is to try and reduce the latency impact on the consumers caused
> > > -        * by us shrinking the caches the same time as they are trying to
> > > -        * allocate, with the assumption being that if we idle long enough
> > > -        * for an RCU grace period to elapse since the last use, it is likely
> > > -        * to be longer until we need the caches again.
> > > -        */
> > > -       if (!atomic_dec_and_test(&active))
> > > -               return;
> > > -
> > > -       /* Queue cleanup after the next RCU grace period has freed slabs */
> > > -       if (!test_and_set_bit(PENDING, &park.flags))
> > > -               __i915_globals_queue_rcu();
> > > -}
> > > -
> > > -void i915_globals_unpark(void)
> > > -{
> > > -       atomic_inc(&epoch);
> > > -       atomic_inc(&active);
> > > -}
> > > -
> > > -static void __exit __i915_globals_flush(void)
> > > -{
> > > -       atomic_inc(&active); /* skip shrinking */
> > > -
> > > -       rcu_barrier(); /* wait for the work to be queued */
> > > -       flush_delayed_work(&park.work);
> > > -
> > > -       atomic_dec(&active);
> > > -}
> > > -
> > >  void __exit i915_globals_exit(void)
> > >  {
> > > -       GEM_BUG_ON(atomic_read(&active));
> > > -
> > > -       __i915_globals_flush();
> > >         __i915_globals_cleanup();
> > > -
> > > -       /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
> > > -       rcu_barrier();
> > >  }
> > > diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
> > > index 2d199f411a4a..9e6b4fd07528 100644
> > > --- a/drivers/gpu/drm/i915/i915_globals.h
> > > +++ b/drivers/gpu/drm/i915/i915_globals.h
> > > @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
> > >  struct i915_global {
> > >         struct list_head link;
> > >
> > > -       i915_global_func_t shrink;
> > >         i915_global_func_t exit;
> > >  };
> > >
> > >  void i915_global_register(struct i915_global *global);
> > >
> > >  int i915_globals_init(void);
> > > -void i915_globals_park(void);
> > > -void i915_globals_unpark(void);
> > >  void i915_globals_exit(void);
> > >
> > >  /* constructors */
> > > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> > > index 09ebea9a0090..d3de9f60e03a 100644
> > > --- a/drivers/gpu/drm/i915/i915_request.c
> > > +++ b/drivers/gpu/drm/i915/i915_request.c
> > > @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
> > >  #include "selftests/i915_request.c"
> > >  #endif
> > >
> > > -static void i915_global_request_shrink(void)
> > > -{
> > > -       kmem_cache_shrink(global.slab_execute_cbs);
> > > -       kmem_cache_shrink(global.slab_requests);
> > > -}
> > > -
> > >  static void i915_global_request_exit(void)
> > >  {
> > >         kmem_cache_destroy(global.slab_execute_cbs);
> > > @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
> > >  }
> > >
> > >  static struct i915_global_request global = { {
> > > -       .shrink = i915_global_request_shrink,
> > >         .exit = i915_global_request_exit,
> > >  } };
> > >
> > > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > > index 3a58a9130309..561c649e59f7 100644
> > > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > > @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
> > >         return sched_engine;
> > >  }
> > >
> > > -static void i915_global_scheduler_shrink(void)
> > > -{
> > > -       kmem_cache_shrink(global.slab_dependencies);
> > > -       kmem_cache_shrink(global.slab_priorities);
> > > -}
> > > -
> > >  static void i915_global_scheduler_exit(void)
> > >  {
> > >         kmem_cache_destroy(global.slab_dependencies);
> > > @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
> > >  }
> > >
> > >  static struct i915_global_scheduler global = { {
> > > -       .shrink = i915_global_scheduler_shrink,
> > >         .exit = i915_global_scheduler_exit,
> > >  } };
> > >
> > > diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> > > index 5b9dce0f443b..09a7c47926f7 100644
> > > --- a/drivers/gpu/drm/i915/i915_vma.c
> > > +++ b/drivers/gpu/drm/i915/i915_vma.c
> > > @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
> > >  #include "selftests/i915_vma.c"
> > >  #endif
> > >
> > > -static void i915_global_vma_shrink(void)
> > > -{
> > > -       kmem_cache_shrink(global.slab_vmas);
> > > -}
> > > -
> > >  static void i915_global_vma_exit(void)
> > >  {
> > >         kmem_cache_destroy(global.slab_vmas);
> > >  }
> > >
> > >  static struct i915_global_vma global = { {
> > > -       .shrink = i915_global_vma_shrink,
> > >         .exit = i915_global_vma_exit,
> > >  } };
> > >
> > > --
> > > 2.32.0
> > >
> 
> 
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 18:32 [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure Daniel Vetter
                   ` (3 preceding siblings ...)
  2021-07-22  2:31 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
@ 2021-07-22 10:02 ` Tvrtko Ursulin
  2021-07-22 10:16   ` Daniel Vetter
  4 siblings, 1 reply; 13+ messages in thread
From: Tvrtko Ursulin @ 2021-07-22 10:02 UTC (permalink / raw)
  To: Daniel Vetter, DRI Development
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development


On 21/07/2021 19:32, Daniel Vetter wrote:
> This essentially reverts
> 
> commit 84a1074920523430f9dc30ff907f4801b4820072
> Author: Chris Wilson <chris@chris-wilson.co.uk>
> Date:   Wed Jan 24 11:36:08 2018 +0000
> 
>      drm/i915: Shrink the GEM kmem_caches upon idling
> 
> mm/vmscan.c:do_shrink_slab() is a thing, if there's an issue with it
> then we need to fix that there, not hand-roll our own slab shrinking
> code in i915.

This is a somewhat incomplete statement which ignores a couple of angles,
so I wish there had been a bit more time to respond before steam-rolling
it in. :(

The removed code was not a hand-rolled shrinker, but was about managing
slab sizes in the face of bursty workloads. Core code does not know when
i915 is active and when it is idle, so calling kmem_cache_shrink() after
going idle was supposed to help with housekeeping by doing that work
outside of the latency-sensitive phase.

To "fix" (improve really) it in core as you suggest, would need some 
method of signaling when a slab user feels is an opportunte moment to do 
this house keeping. And kmem_cache_shrink is just that so I don't see 
the problem.
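
In pattern form, a simplified sketch of what that housekeeping hook looks
like (illustrative only, not the exact i915 code):

/*
 * Simplified sketch of the idle housekeeping pattern described above:
 * once the device parks, trim the driver's private slab caches outside
 * of the latency-sensitive submission path.  Everything here is a
 * placeholder, not the actual i915 code.
 */
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static struct kmem_cache *example_request_cache;	/* placeholder cache */

static void example_idle_housekeeping(struct work_struct *work)
{
	/* Discard empty slabs and repack partially filled ones. */
	kmem_cache_shrink(example_request_cache);
}

static DECLARE_DELAYED_WORK(example_idle_work, example_idle_housekeeping);

/* Called when the last request retires and the device goes idle. */
static void example_park(void)
{
	schedule_delayed_work(&example_idle_work,
			      round_jiffies_up_relative(2 * HZ));
}

/* Called when new work comes in again. */
static void example_unpark(void)
{
	cancel_delayed_work(&example_idle_work);
}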

Granted, the argument that kmem_cache_shrink() is not much used is a
valid one, so the discussion overall is definitely valid. Because at a
higher level we could definitely talk about which workloads actually
benefit from this code, and by how much, which probably no one knows at
this point.

But in general I think you needed to leave more time for discussion. 12 
hours is way too short.

Regards,

Tvrtko

> Noticed while reviewing a patch set from Jason to fix up some issues
> in our i915_init() and i915_exit() module load/cleanup code. Now that
> i915_globals.c isn't any different than normal init/exit functions, we
> should convert them over to one unified table and remove
> i915_globals.[hc] entirely.
> 
> Cc: David Airlie <airlied@linux.ie>
> Cc: Jason Ekstrand <jason@jlekstrand.net>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
>   drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
>   drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
>   drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
>   drivers/gpu/drm/i915/i915_active.c          |  6 --
>   drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
>   drivers/gpu/drm/i915/i915_globals.h         |  3 -
>   drivers/gpu/drm/i915/i915_request.c         |  7 --
>   drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
>   drivers/gpu/drm/i915/i915_vma.c             |  6 --
>   10 files changed, 146 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index 7d6f52d8a801..bf2a2319353a 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
>   #include "selftests/i915_gem_context.c"
>   #endif
>   
> -static void i915_global_gem_context_shrink(void)
> -{
> -	kmem_cache_shrink(global.slab_luts);
> -}
> -
>   static void i915_global_gem_context_exit(void)
>   {
>   	kmem_cache_destroy(global.slab_luts);
>   }
>   
>   static struct i915_global_gem_context global = { {
> -	.shrink = i915_global_gem_context_shrink,
>   	.exit = i915_global_gem_context_exit,
>   } };
>   
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 9da7b288b7ed..5c21cff33199 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
>   	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
>   }
>   
> -static void i915_global_objects_shrink(void)
> -{
> -	kmem_cache_shrink(global.slab_objects);
> -}
> -
>   static void i915_global_objects_exit(void)
>   {
>   	kmem_cache_destroy(global.slab_objects);
>   }
>   
>   static struct i915_global_object global = { {
> -	.shrink = i915_global_objects_shrink,
>   	.exit = i915_global_objects_exit,
>   } };
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index bd63813c8a80..c1338441cc1d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
>   	i915_active_fini(&ce->active);
>   }
>   
> -static void i915_global_context_shrink(void)
> -{
> -	kmem_cache_shrink(global.slab_ce);
> -}
> -
>   static void i915_global_context_exit(void)
>   {
>   	kmem_cache_destroy(global.slab_ce);
>   }
>   
>   static struct i915_global_context global = { {
> -	.shrink = i915_global_context_shrink,
>   	.exit = i915_global_context_exit,
>   } };
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> index aef3084e8b16..d86825437516 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
>   
>   	GT_TRACE(gt, "\n");
>   
> -	i915_globals_unpark();
> -
>   	/*
>   	 * It seems that the DMC likes to transition between the DC states a lot
>   	 * when there are no connected displays (no active power domains) during
> @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
>   	GEM_BUG_ON(!wakeref);
>   	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
>   
> -	i915_globals_park();
> -
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index b1aa1c482c32..91723123ae9f 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
>   #include "selftests/i915_active.c"
>   #endif
>   
> -static void i915_global_active_shrink(void)
> -{
> -	kmem_cache_shrink(global.slab_cache);
> -}
> -
>   static void i915_global_active_exit(void)
>   {
>   	kmem_cache_destroy(global.slab_cache);
>   }
>   
>   static struct i915_global_active global = { {
> -	.shrink = i915_global_active_shrink,
>   	.exit = i915_global_active_exit,
>   } };
>   
> diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
> index 77f1911c463b..7fe2e503897b 100644
> --- a/drivers/gpu/drm/i915/i915_globals.c
> +++ b/drivers/gpu/drm/i915/i915_globals.c
> @@ -17,61 +17,8 @@
>   
>   static LIST_HEAD(globals);
>   
> -static atomic_t active;
> -static atomic_t epoch;
> -static struct park_work {
> -	struct delayed_work work;
> -	struct rcu_head rcu;
> -	unsigned long flags;
> -#define PENDING 0
> -	int epoch;
> -} park;
> -
> -static void i915_globals_shrink(void)
> -{
> -	struct i915_global *global;
> -
> -	/*
> -	 * kmem_cache_shrink() discards empty slabs and reorders partially
> -	 * filled slabs to prioritise allocating from the mostly full slabs,
> -	 * with the aim of reducing fragmentation.
> -	 */
> -	list_for_each_entry(global, &globals, link)
> -		global->shrink();
> -}
> -
> -static void __i915_globals_grace(struct rcu_head *rcu)
> -{
> -	/* Ratelimit parking as shrinking is quite slow */
> -	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
> -}
> -
> -static void __i915_globals_queue_rcu(void)
> -{
> -	park.epoch = atomic_inc_return(&epoch);
> -	if (!atomic_read(&active)) {
> -		init_rcu_head(&park.rcu);
> -		call_rcu(&park.rcu, __i915_globals_grace);
> -	}
> -}
> -
> -static void __i915_globals_park(struct work_struct *work)
> -{
> -	destroy_rcu_head(&park.rcu);
> -
> -	/* Confirm nothing woke up in the last grace period */
> -	if (park.epoch != atomic_read(&epoch)) {
> -		__i915_globals_queue_rcu();
> -		return;
> -	}
> -
> -	clear_bit(PENDING, &park.flags);
> -	i915_globals_shrink();
> -}
> -
>   void __init i915_global_register(struct i915_global *global)
>   {
> -	GEM_BUG_ON(!global->shrink);
>   	GEM_BUG_ON(!global->exit);
>   
>   	list_add_tail(&global->link, &globals);
> @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
>   		}
>   	}
>   
> -	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
>   	return 0;
>   }
>   
> -void i915_globals_park(void)
> -{
> -	/*
> -	 * Defer shrinking the global slab caches (and other work) until
> -	 * after a RCU grace period has completed with no activity. This
> -	 * is to try and reduce the latency impact on the consumers caused
> -	 * by us shrinking the caches the same time as they are trying to
> -	 * allocate, with the assumption being that if we idle long enough
> -	 * for an RCU grace period to elapse since the last use, it is likely
> -	 * to be longer until we need the caches again.
> -	 */
> -	if (!atomic_dec_and_test(&active))
> -		return;
> -
> -	/* Queue cleanup after the next RCU grace period has freed slabs */
> -	if (!test_and_set_bit(PENDING, &park.flags))
> -		__i915_globals_queue_rcu();
> -}
> -
> -void i915_globals_unpark(void)
> -{
> -	atomic_inc(&epoch);
> -	atomic_inc(&active);
> -}
> -
> -static void __exit __i915_globals_flush(void)
> -{
> -	atomic_inc(&active); /* skip shrinking */
> -
> -	rcu_barrier(); /* wait for the work to be queued */
> -	flush_delayed_work(&park.work);
> -
> -	atomic_dec(&active);
> -}
> -
>   void __exit i915_globals_exit(void)
>   {
> -	GEM_BUG_ON(atomic_read(&active));
> -
> -	__i915_globals_flush();
>   	__i915_globals_cleanup();
> -
> -	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
> -	rcu_barrier();
>   }
> diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
> index 2d199f411a4a..9e6b4fd07528 100644
> --- a/drivers/gpu/drm/i915/i915_globals.h
> +++ b/drivers/gpu/drm/i915/i915_globals.h
> @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
>   struct i915_global {
>   	struct list_head link;
>   
> -	i915_global_func_t shrink;
>   	i915_global_func_t exit;
>   };
>   
>   void i915_global_register(struct i915_global *global);
>   
>   int i915_globals_init(void);
> -void i915_globals_park(void);
> -void i915_globals_unpark(void);
>   void i915_globals_exit(void);
>   
>   /* constructors */
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 09ebea9a0090..d3de9f60e03a 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
>   #include "selftests/i915_request.c"
>   #endif
>   
> -static void i915_global_request_shrink(void)
> -{
> -	kmem_cache_shrink(global.slab_execute_cbs);
> -	kmem_cache_shrink(global.slab_requests);
> -}
> -
>   static void i915_global_request_exit(void)
>   {
>   	kmem_cache_destroy(global.slab_execute_cbs);
> @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
>   }
>   
>   static struct i915_global_request global = { {
> -	.shrink = i915_global_request_shrink,
>   	.exit = i915_global_request_exit,
>   } };
>   
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 3a58a9130309..561c649e59f7 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
>   	return sched_engine;
>   }
>   
> -static void i915_global_scheduler_shrink(void)
> -{
> -	kmem_cache_shrink(global.slab_dependencies);
> -	kmem_cache_shrink(global.slab_priorities);
> -}
> -
>   static void i915_global_scheduler_exit(void)
>   {
>   	kmem_cache_destroy(global.slab_dependencies);
> @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
>   }
>   
>   static struct i915_global_scheduler global = { {
> -	.shrink = i915_global_scheduler_shrink,
>   	.exit = i915_global_scheduler_exit,
>   } };
>   
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 5b9dce0f443b..09a7c47926f7 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
>   #include "selftests/i915_vma.c"
>   #endif
>   
> -static void i915_global_vma_shrink(void)
> -{
> -	kmem_cache_shrink(global.slab_vmas);
> -}
> -
>   static void i915_global_vma_exit(void)
>   {
>   	kmem_cache_destroy(global.slab_vmas);
>   }
>   
>   static struct i915_global_vma global = { {
> -	.shrink = i915_global_vma_shrink,
>   	.exit = i915_global_vma_exit,
>   } };
>   
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-22 10:02 ` [Intel-gfx] [PATCH] " Tvrtko Ursulin
@ 2021-07-22 10:16   ` Daniel Vetter
  2021-07-22 10:33     ` Tvrtko Ursulin
  0 siblings, 1 reply; 13+ messages in thread
From: Daniel Vetter @ 2021-07-22 10:16 UTC (permalink / raw)
  To: Tvrtko Ursulin
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development,
	DRI Development, Daniel Vetter

On Thu, Jul 22, 2021 at 11:02:55AM +0100, Tvrtko Ursulin wrote:
> On 21/07/2021 19:32, Daniel Vetter wrote:
> > This essentially reverts
> > 
> > commit 84a1074920523430f9dc30ff907f4801b4820072
> > Author: Chris Wilson <chris@chris-wilson.co.uk>
> > Date:   Wed Jan 24 11:36:08 2018 +0000
> > 
> >      drm/i915: Shrink the GEM kmem_caches upon idling
> > 
> > mm/vmscan.c:do_shrink_slab() is a thing, if there's an issue with it
> > then we need to fix that there, not hand-roll our own slab shrinking
> > code in i915.
> 
> This is a somewhat incomplete statement which ignores a couple of angles,
> so I wish there had been a bit more time to respond before steam-rolling
> it in. :(
> 
> The removed code was not a hand-rolled shrinker, but was about managing
> slab sizes in the face of bursty workloads. Core code does not know when
> i915 is active and when it is idle, so calling kmem_cache_shrink() after
> going idle was supposed to help with housekeeping by doing that work
> outside of the latency-sensitive phase.
> 
> To "fix" (improve really) it in core as you suggest, would need some method
> of signaling when a slab user feels is an opportunte moment to do this house
> keeping. And kmem_cache_shrink is just that so I don't see the problem.
> 
> Granted, the argument that kmem_cache_shrink() is not much used is a
> valid one, so the discussion overall is definitely valid. Because at a
> higher level we could definitely talk about which workloads actually
> benefit from this code, and by how much, which probably no one knows at
> this point.
> 
> But in general I think you needed to leave more time for discussion. 12
> hours is way too short.

It's 500+ users of kmem_cache_create() vs i915 doing kmem_cache_shrink().
And I guarantee you there are slab users that churn through more
allocations than we do, and are more bursty.

An extraordinary claim like this needs extraordinary evidence. And then a
discussion with core mm/ folks so that we can figure out how to solve the
discovered problem best for the other 500+ users of slabs in-tree, so that
everyone benefits. Not just i915 gpu workloads.
-Daniel

> > Noticed while reviewing a patch set from Jason to fix up some issues
> > in our i915_init() and i915_exit() module load/cleanup code. Now that
> > i915_globals.c isn't any different than normal init/exit functions, we
> > should convert them over to one unified table and remove
> > i915_globals.[hc] entirely.
> > 
> > Cc: David Airlie <airlied@linux.ie>
> > Cc: Jason Ekstrand <jason@jlekstrand.net>
> > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
> >   drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
> >   drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
> >   drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
> >   drivers/gpu/drm/i915/i915_active.c          |  6 --
> >   drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
> >   drivers/gpu/drm/i915/i915_globals.h         |  3 -
> >   drivers/gpu/drm/i915/i915_request.c         |  7 --
> >   drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
> >   drivers/gpu/drm/i915/i915_vma.c             |  6 --
> >   10 files changed, 146 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > index 7d6f52d8a801..bf2a2319353a 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
> >   #include "selftests/i915_gem_context.c"
> >   #endif
> > -static void i915_global_gem_context_shrink(void)
> > -{
> > -	kmem_cache_shrink(global.slab_luts);
> > -}
> > -
> >   static void i915_global_gem_context_exit(void)
> >   {
> >   	kmem_cache_destroy(global.slab_luts);
> >   }
> >   static struct i915_global_gem_context global = { {
> > -	.shrink = i915_global_gem_context_shrink,
> >   	.exit = i915_global_gem_context_exit,
> >   } };
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > index 9da7b288b7ed..5c21cff33199 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> > @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
> >   	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
> >   }
> > -static void i915_global_objects_shrink(void)
> > -{
> > -	kmem_cache_shrink(global.slab_objects);
> > -}
> > -
> >   static void i915_global_objects_exit(void)
> >   {
> >   	kmem_cache_destroy(global.slab_objects);
> >   }
> >   static struct i915_global_object global = { {
> > -	.shrink = i915_global_objects_shrink,
> >   	.exit = i915_global_objects_exit,
> >   } };
> > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> > index bd63813c8a80..c1338441cc1d 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_context.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> > @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
> >   	i915_active_fini(&ce->active);
> >   }
> > -static void i915_global_context_shrink(void)
> > -{
> > -	kmem_cache_shrink(global.slab_ce);
> > -}
> > -
> >   static void i915_global_context_exit(void)
> >   {
> >   	kmem_cache_destroy(global.slab_ce);
> >   }
> >   static struct i915_global_context global = { {
> > -	.shrink = i915_global_context_shrink,
> >   	.exit = i915_global_context_exit,
> >   } };
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > index aef3084e8b16..d86825437516 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> > @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
> >   	GT_TRACE(gt, "\n");
> > -	i915_globals_unpark();
> > -
> >   	/*
> >   	 * It seems that the DMC likes to transition between the DC states a lot
> >   	 * when there are no connected displays (no active power domains) during
> > @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
> >   	GEM_BUG_ON(!wakeref);
> >   	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
> > -	i915_globals_park();
> > -
> >   	return 0;
> >   }
> > diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> > index b1aa1c482c32..91723123ae9f 100644
> > --- a/drivers/gpu/drm/i915/i915_active.c
> > +++ b/drivers/gpu/drm/i915/i915_active.c
> > @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
> >   #include "selftests/i915_active.c"
> >   #endif
> > -static void i915_global_active_shrink(void)
> > -{
> > -	kmem_cache_shrink(global.slab_cache);
> > -}
> > -
> >   static void i915_global_active_exit(void)
> >   {
> >   	kmem_cache_destroy(global.slab_cache);
> >   }
> >   static struct i915_global_active global = { {
> > -	.shrink = i915_global_active_shrink,
> >   	.exit = i915_global_active_exit,
> >   } };
> > diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
> > index 77f1911c463b..7fe2e503897b 100644
> > --- a/drivers/gpu/drm/i915/i915_globals.c
> > +++ b/drivers/gpu/drm/i915/i915_globals.c
> > @@ -17,61 +17,8 @@
> >   static LIST_HEAD(globals);
> > -static atomic_t active;
> > -static atomic_t epoch;
> > -static struct park_work {
> > -	struct delayed_work work;
> > -	struct rcu_head rcu;
> > -	unsigned long flags;
> > -#define PENDING 0
> > -	int epoch;
> > -} park;
> > -
> > -static void i915_globals_shrink(void)
> > -{
> > -	struct i915_global *global;
> > -
> > -	/*
> > -	 * kmem_cache_shrink() discards empty slabs and reorders partially
> > -	 * filled slabs to prioritise allocating from the mostly full slabs,
> > -	 * with the aim of reducing fragmentation.
> > -	 */
> > -	list_for_each_entry(global, &globals, link)
> > -		global->shrink();
> > -}
> > -
> > -static void __i915_globals_grace(struct rcu_head *rcu)
> > -{
> > -	/* Ratelimit parking as shrinking is quite slow */
> > -	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
> > -}
> > -
> > -static void __i915_globals_queue_rcu(void)
> > -{
> > -	park.epoch = atomic_inc_return(&epoch);
> > -	if (!atomic_read(&active)) {
> > -		init_rcu_head(&park.rcu);
> > -		call_rcu(&park.rcu, __i915_globals_grace);
> > -	}
> > -}
> > -
> > -static void __i915_globals_park(struct work_struct *work)
> > -{
> > -	destroy_rcu_head(&park.rcu);
> > -
> > -	/* Confirm nothing woke up in the last grace period */
> > -	if (park.epoch != atomic_read(&epoch)) {
> > -		__i915_globals_queue_rcu();
> > -		return;
> > -	}
> > -
> > -	clear_bit(PENDING, &park.flags);
> > -	i915_globals_shrink();
> > -}
> > -
> >   void __init i915_global_register(struct i915_global *global)
> >   {
> > -	GEM_BUG_ON(!global->shrink);
> >   	GEM_BUG_ON(!global->exit);
> >   	list_add_tail(&global->link, &globals);
> > @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
> >   		}
> >   	}
> > -	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
> >   	return 0;
> >   }
> > -void i915_globals_park(void)
> > -{
> > -	/*
> > -	 * Defer shrinking the global slab caches (and other work) until
> > -	 * after a RCU grace period has completed with no activity. This
> > -	 * is to try and reduce the latency impact on the consumers caused
> > -	 * by us shrinking the caches the same time as they are trying to
> > -	 * allocate, with the assumption being that if we idle long enough
> > -	 * for an RCU grace period to elapse since the last use, it is likely
> > -	 * to be longer until we need the caches again.
> > -	 */
> > -	if (!atomic_dec_and_test(&active))
> > -		return;
> > -
> > -	/* Queue cleanup after the next RCU grace period has freed slabs */
> > -	if (!test_and_set_bit(PENDING, &park.flags))
> > -		__i915_globals_queue_rcu();
> > -}
> > -
> > -void i915_globals_unpark(void)
> > -{
> > -	atomic_inc(&epoch);
> > -	atomic_inc(&active);
> > -}
> > -
> > -static void __exit __i915_globals_flush(void)
> > -{
> > -	atomic_inc(&active); /* skip shrinking */
> > -
> > -	rcu_barrier(); /* wait for the work to be queued */
> > -	flush_delayed_work(&park.work);
> > -
> > -	atomic_dec(&active);
> > -}
> > -
> >   void __exit i915_globals_exit(void)
> >   {
> > -	GEM_BUG_ON(atomic_read(&active));
> > -
> > -	__i915_globals_flush();
> >   	__i915_globals_cleanup();
> > -
> > -	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
> > -	rcu_barrier();
> >   }
> > diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
> > index 2d199f411a4a..9e6b4fd07528 100644
> > --- a/drivers/gpu/drm/i915/i915_globals.h
> > +++ b/drivers/gpu/drm/i915/i915_globals.h
> > @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
> >   struct i915_global {
> >   	struct list_head link;
> > -	i915_global_func_t shrink;
> >   	i915_global_func_t exit;
> >   };
> >   void i915_global_register(struct i915_global *global);
> >   int i915_globals_init(void);
> > -void i915_globals_park(void);
> > -void i915_globals_unpark(void);
> >   void i915_globals_exit(void);
> >   /* constructors */
> > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> > index 09ebea9a0090..d3de9f60e03a 100644
> > --- a/drivers/gpu/drm/i915/i915_request.c
> > +++ b/drivers/gpu/drm/i915/i915_request.c
> > @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
> >   #include "selftests/i915_request.c"
> >   #endif
> > -static void i915_global_request_shrink(void)
> > -{
> > -	kmem_cache_shrink(global.slab_execute_cbs);
> > -	kmem_cache_shrink(global.slab_requests);
> > -}
> > -
> >   static void i915_global_request_exit(void)
> >   {
> >   	kmem_cache_destroy(global.slab_execute_cbs);
> > @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
> >   }
> >   static struct i915_global_request global = { {
> > -	.shrink = i915_global_request_shrink,
> >   	.exit = i915_global_request_exit,
> >   } };
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index 3a58a9130309..561c649e59f7 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
> >   	return sched_engine;
> >   }
> > -static void i915_global_scheduler_shrink(void)
> > -{
> > -	kmem_cache_shrink(global.slab_dependencies);
> > -	kmem_cache_shrink(global.slab_priorities);
> > -}
> > -
> >   static void i915_global_scheduler_exit(void)
> >   {
> >   	kmem_cache_destroy(global.slab_dependencies);
> > @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
> >   }
> >   static struct i915_global_scheduler global = { {
> > -	.shrink = i915_global_scheduler_shrink,
> >   	.exit = i915_global_scheduler_exit,
> >   } };
> > diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> > index 5b9dce0f443b..09a7c47926f7 100644
> > --- a/drivers/gpu/drm/i915/i915_vma.c
> > +++ b/drivers/gpu/drm/i915/i915_vma.c
> > @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
> >   #include "selftests/i915_vma.c"
> >   #endif
> > -static void i915_global_vma_shrink(void)
> > -{
> > -	kmem_cache_shrink(global.slab_vmas);
> > -}
> > -
> >   static void i915_global_vma_exit(void)
> >   {
> >   	kmem_cache_destroy(global.slab_vmas);
> >   }
> >   static struct i915_global_vma global = { {
> > -	.shrink = i915_global_vma_shrink,
> >   	.exit = i915_global_vma_exit,
> >   } };
> > 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-22 10:16   ` Daniel Vetter
@ 2021-07-22 10:33     ` Tvrtko Ursulin
  2021-07-22 13:37       ` Jason Ekstrand
  0 siblings, 1 reply; 13+ messages in thread
From: Tvrtko Ursulin @ 2021-07-22 10:33 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development,
	DRI Development, Daniel Vetter


On 22/07/2021 11:16, Daniel Vetter wrote:
> On Thu, Jul 22, 2021 at 11:02:55AM +0100, Tvrtko Ursulin wrote:
>> On 21/07/2021 19:32, Daniel Vetter wrote:
>>> This essentially reverts
>>>
>>> commit 84a1074920523430f9dc30ff907f4801b4820072
>>> Author: Chris Wilson <chris@chris-wilson.co.uk>
>>> Date:   Wed Jan 24 11:36:08 2018 +0000
>>>
>>>       drm/i915: Shrink the GEM kmem_caches upon idling
>>>
>>> mm/vmscan.c:do_shrink_slab() is a thing, if there's an issue with it
>>> then we need to fix that there, not hand-roll our own slab shrinking
>>> code in i915.
>>
>> This is a somewhat incomplete statement which ignores a couple of angles, so I
>> wish there was a bit more time to respond before steamrolling it in. :(
>>
>> The removed code was not a hand-rolled shrinker, but about managing slab
>> sizes in the face of bursty workloads. Core code does not know when i915 is
>> active and when it is idle, so calling kmem_cache_shrink() after going idle
>> was supposed to help with housekeeping by doing housekeeping work outside
>> of the latency-sensitive phase.
>>
>> To "fix" (improve, really) it in core as you suggest would need some method
>> of signaling when a slab user feels it is an opportune moment to do this
>> housekeeping. And kmem_cache_shrink() is just that, so I don't see the problem.
>>
>> Granted, the argument that kmem_cache_shrink is not much used is a valid one,
>> so the discussion overall is definitely valid. Because on a higher level we
>> could definitely talk about which workloads actually benefit from this code
>> and by how much, which probably no one knows at this point.
>>
>> But in general I think you needed to leave more time for discussion. 12
>> hours is way too short.
> 
> It's 500+ users of kmem_cache_create vs i915 doing kmem_cache_shrink. And

There are two other callers for the record. ;)

> I guarantee you there's slab users that churn through more allocations
> than we do, and are more bursty.

I wasn't disputing that.

> An extraordinary claim like this needs extraordinary evidence. And then a
> discussion with core mm/ folks so that we can figure out how to solve the
> discovered problem best for the other 500+ users of slabs in-tree, so that
> everyone benefits. Not just i915 gpu workloads.

Yep, not disputing that either. Noticed I wrote it was a valid argument?

But the discussion with mm folks could also have happened before you
steamrolled the "revert" in. Perhaps they would have said
kmem_cache_shrink is the way. Or maybe it isn't. Or maybe they would
have said meh. I just don't see how the rush was justified given the
code in question.
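
For anyone skimming the thread, the pattern being argued about boils down
to roughly the sketch below. It is illustrative only: the example_* names
are made up and this is not the code the patch removes, but the kernel
APIs (kmem_cache_shrink(), delayed work, round_jiffies_up_relative()) are
the real ones the removed code built on.

/*
 * Illustrative sketch only: example_* names are made up and this is
 * not the removed i915 code. example_cache is assumed to have been
 * created with kmem_cache_create() during driver init.
 */
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static struct kmem_cache *example_cache;

static void example_shrink_worker(struct work_struct *work)
{
	/* Return empty slabs to the page allocator while we are idle. */
	kmem_cache_shrink(example_cache);
}

static DECLARE_DELAYED_WORK(example_shrink_work, example_shrink_worker);

/* Called from the driver's "device went idle" (park) path. */
static void example_park(void)
{
	schedule_delayed_work(&example_shrink_work,
			      round_jiffies_up_relative(2 * HZ));
}

/* Called from the "device is busy again" (unpark) path. */
static void example_unpark(void)
{
	cancel_delayed_work(&example_shrink_work);
}

The removed i915 version additionally rate-limited this behind an RCU
grace period and an epoch counter, which is where most of the complexity
in the quoted diff comes from.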

Regards,

Tvrtko

> -Daniel
> 
>>> Noticed while reviewing a patch set from Jason to fix up some issues
>>> in our i915_init() and i915_exit() module load/cleanup code. Now that
>>> i915_globals.c isn't any different than normal init/exit functions, we
>>> should convert them over to one unified table and remove
>>> i915_globals.[hc] entirely.
>>>
>>> Cc: David Airlie <airlied@linux.ie>
>>> Cc: Jason Ekstrand <jason@jlekstrand.net>
>>> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
>>> ---
>>>    drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
>>>    drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
>>>    drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
>>>    drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
>>>    drivers/gpu/drm/i915/i915_active.c          |  6 --
>>>    drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
>>>    drivers/gpu/drm/i915/i915_globals.h         |  3 -
>>>    drivers/gpu/drm/i915/i915_request.c         |  7 --
>>>    drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
>>>    drivers/gpu/drm/i915/i915_vma.c             |  6 --
>>>    10 files changed, 146 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
>>> index 7d6f52d8a801..bf2a2319353a 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
>>> @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
>>>    #include "selftests/i915_gem_context.c"
>>>    #endif
>>> -static void i915_global_gem_context_shrink(void)
>>> -{
>>> -	kmem_cache_shrink(global.slab_luts);
>>> -}
>>> -
>>>    static void i915_global_gem_context_exit(void)
>>>    {
>>>    	kmem_cache_destroy(global.slab_luts);
>>>    }
>>>    static struct i915_global_gem_context global = { {
>>> -	.shrink = i915_global_gem_context_shrink,
>>>    	.exit = i915_global_gem_context_exit,
>>>    } };
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>>> index 9da7b288b7ed..5c21cff33199 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>>> @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
>>>    	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
>>>    }
>>> -static void i915_global_objects_shrink(void)
>>> -{
>>> -	kmem_cache_shrink(global.slab_objects);
>>> -}
>>> -
>>>    static void i915_global_objects_exit(void)
>>>    {
>>>    	kmem_cache_destroy(global.slab_objects);
>>>    }
>>>    static struct i915_global_object global = { {
>>> -	.shrink = i915_global_objects_shrink,
>>>    	.exit = i915_global_objects_exit,
>>>    } };
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
>>> index bd63813c8a80..c1338441cc1d 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_context.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
>>> @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
>>>    	i915_active_fini(&ce->active);
>>>    }
>>> -static void i915_global_context_shrink(void)
>>> -{
>>> -	kmem_cache_shrink(global.slab_ce);
>>> -}
>>> -
>>>    static void i915_global_context_exit(void)
>>>    {
>>>    	kmem_cache_destroy(global.slab_ce);
>>>    }
>>>    static struct i915_global_context global = { {
>>> -	.shrink = i915_global_context_shrink,
>>>    	.exit = i915_global_context_exit,
>>>    } };
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>>> index aef3084e8b16..d86825437516 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>>> @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
>>>    	GT_TRACE(gt, "\n");
>>> -	i915_globals_unpark();
>>> -
>>>    	/*
>>>    	 * It seems that the DMC likes to transition between the DC states a lot
>>>    	 * when there are no connected displays (no active power domains) during
>>> @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
>>>    	GEM_BUG_ON(!wakeref);
>>>    	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
>>> -	i915_globals_park();
>>> -
>>>    	return 0;
>>>    }
>>> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
>>> index b1aa1c482c32..91723123ae9f 100644
>>> --- a/drivers/gpu/drm/i915/i915_active.c
>>> +++ b/drivers/gpu/drm/i915/i915_active.c
>>> @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
>>>    #include "selftests/i915_active.c"
>>>    #endif
>>> -static void i915_global_active_shrink(void)
>>> -{
>>> -	kmem_cache_shrink(global.slab_cache);
>>> -}
>>> -
>>>    static void i915_global_active_exit(void)
>>>    {
>>>    	kmem_cache_destroy(global.slab_cache);
>>>    }
>>>    static struct i915_global_active global = { {
>>> -	.shrink = i915_global_active_shrink,
>>>    	.exit = i915_global_active_exit,
>>>    } };
>>> diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
>>> index 77f1911c463b..7fe2e503897b 100644
>>> --- a/drivers/gpu/drm/i915/i915_globals.c
>>> +++ b/drivers/gpu/drm/i915/i915_globals.c
>>> @@ -17,61 +17,8 @@
>>>    static LIST_HEAD(globals);
>>> -static atomic_t active;
>>> -static atomic_t epoch;
>>> -static struct park_work {
>>> -	struct delayed_work work;
>>> -	struct rcu_head rcu;
>>> -	unsigned long flags;
>>> -#define PENDING 0
>>> -	int epoch;
>>> -} park;
>>> -
>>> -static void i915_globals_shrink(void)
>>> -{
>>> -	struct i915_global *global;
>>> -
>>> -	/*
>>> -	 * kmem_cache_shrink() discards empty slabs and reorders partially
>>> -	 * filled slabs to prioritise allocating from the mostly full slabs,
>>> -	 * with the aim of reducing fragmentation.
>>> -	 */
>>> -	list_for_each_entry(global, &globals, link)
>>> -		global->shrink();
>>> -}
>>> -
>>> -static void __i915_globals_grace(struct rcu_head *rcu)
>>> -{
>>> -	/* Ratelimit parking as shrinking is quite slow */
>>> -	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
>>> -}
>>> -
>>> -static void __i915_globals_queue_rcu(void)
>>> -{
>>> -	park.epoch = atomic_inc_return(&epoch);
>>> -	if (!atomic_read(&active)) {
>>> -		init_rcu_head(&park.rcu);
>>> -		call_rcu(&park.rcu, __i915_globals_grace);
>>> -	}
>>> -}
>>> -
>>> -static void __i915_globals_park(struct work_struct *work)
>>> -{
>>> -	destroy_rcu_head(&park.rcu);
>>> -
>>> -	/* Confirm nothing woke up in the last grace period */
>>> -	if (park.epoch != atomic_read(&epoch)) {
>>> -		__i915_globals_queue_rcu();
>>> -		return;
>>> -	}
>>> -
>>> -	clear_bit(PENDING, &park.flags);
>>> -	i915_globals_shrink();
>>> -}
>>> -
>>>    void __init i915_global_register(struct i915_global *global)
>>>    {
>>> -	GEM_BUG_ON(!global->shrink);
>>>    	GEM_BUG_ON(!global->exit);
>>>    	list_add_tail(&global->link, &globals);
>>> @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
>>>    		}
>>>    	}
>>> -	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
>>>    	return 0;
>>>    }
>>> -void i915_globals_park(void)
>>> -{
>>> -	/*
>>> -	 * Defer shrinking the global slab caches (and other work) until
>>> -	 * after a RCU grace period has completed with no activity. This
>>> -	 * is to try and reduce the latency impact on the consumers caused
>>> -	 * by us shrinking the caches the same time as they are trying to
>>> -	 * allocate, with the assumption being that if we idle long enough
>>> -	 * for an RCU grace period to elapse since the last use, it is likely
>>> -	 * to be longer until we need the caches again.
>>> -	 */
>>> -	if (!atomic_dec_and_test(&active))
>>> -		return;
>>> -
>>> -	/* Queue cleanup after the next RCU grace period has freed slabs */
>>> -	if (!test_and_set_bit(PENDING, &park.flags))
>>> -		__i915_globals_queue_rcu();
>>> -}
>>> -
>>> -void i915_globals_unpark(void)
>>> -{
>>> -	atomic_inc(&epoch);
>>> -	atomic_inc(&active);
>>> -}
>>> -
>>> -static void __exit __i915_globals_flush(void)
>>> -{
>>> -	atomic_inc(&active); /* skip shrinking */
>>> -
>>> -	rcu_barrier(); /* wait for the work to be queued */
>>> -	flush_delayed_work(&park.work);
>>> -
>>> -	atomic_dec(&active);
>>> -}
>>> -
>>>    void __exit i915_globals_exit(void)
>>>    {
>>> -	GEM_BUG_ON(atomic_read(&active));
>>> -
>>> -	__i915_globals_flush();
>>>    	__i915_globals_cleanup();
>>> -
>>> -	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
>>> -	rcu_barrier();
>>>    }
>>> diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
>>> index 2d199f411a4a..9e6b4fd07528 100644
>>> --- a/drivers/gpu/drm/i915/i915_globals.h
>>> +++ b/drivers/gpu/drm/i915/i915_globals.h
>>> @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
>>>    struct i915_global {
>>>    	struct list_head link;
>>> -	i915_global_func_t shrink;
>>>    	i915_global_func_t exit;
>>>    };
>>>    void i915_global_register(struct i915_global *global);
>>>    int i915_globals_init(void);
>>> -void i915_globals_park(void);
>>> -void i915_globals_unpark(void);
>>>    void i915_globals_exit(void);
>>>    /* constructors */
>>> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
>>> index 09ebea9a0090..d3de9f60e03a 100644
>>> --- a/drivers/gpu/drm/i915/i915_request.c
>>> +++ b/drivers/gpu/drm/i915/i915_request.c
>>> @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
>>>    #include "selftests/i915_request.c"
>>>    #endif
>>> -static void i915_global_request_shrink(void)
>>> -{
>>> -	kmem_cache_shrink(global.slab_execute_cbs);
>>> -	kmem_cache_shrink(global.slab_requests);
>>> -}
>>> -
>>>    static void i915_global_request_exit(void)
>>>    {
>>>    	kmem_cache_destroy(global.slab_execute_cbs);
>>> @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
>>>    }
>>>    static struct i915_global_request global = { {
>>> -	.shrink = i915_global_request_shrink,
>>>    	.exit = i915_global_request_exit,
>>>    } };
>>> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
>>> index 3a58a9130309..561c649e59f7 100644
>>> --- a/drivers/gpu/drm/i915/i915_scheduler.c
>>> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
>>> @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
>>>    	return sched_engine;
>>>    }
>>> -static void i915_global_scheduler_shrink(void)
>>> -{
>>> -	kmem_cache_shrink(global.slab_dependencies);
>>> -	kmem_cache_shrink(global.slab_priorities);
>>> -}
>>> -
>>>    static void i915_global_scheduler_exit(void)
>>>    {
>>>    	kmem_cache_destroy(global.slab_dependencies);
>>> @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
>>>    }
>>>    static struct i915_global_scheduler global = { {
>>> -	.shrink = i915_global_scheduler_shrink,
>>>    	.exit = i915_global_scheduler_exit,
>>>    } };
>>> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
>>> index 5b9dce0f443b..09a7c47926f7 100644
>>> --- a/drivers/gpu/drm/i915/i915_vma.c
>>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>>> @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
>>>    #include "selftests/i915_vma.c"
>>>    #endif
>>> -static void i915_global_vma_shrink(void)
>>> -{
>>> -	kmem_cache_shrink(global.slab_vmas);
>>> -}
>>> -
>>>    static void i915_global_vma_exit(void)
>>>    {
>>>    	kmem_cache_destroy(global.slab_vmas);
>>>    }
>>>    static struct i915_global_vma global = { {
>>> -	.shrink = i915_global_vma_shrink,
>>>    	.exit = i915_global_vma_exit,
>>>    } };
>>>
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-22 10:33     ` Tvrtko Ursulin
@ 2021-07-22 13:37       ` Jason Ekstrand
  2021-07-22 14:02         ` Tvrtko Ursulin
  0 siblings, 1 reply; 13+ messages in thread
From: Jason Ekstrand @ 2021-07-22 13:37 UTC (permalink / raw)
  To: Tvrtko Ursulin
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development,
	DRI Development, Daniel Vetter

On Thu, Jul 22, 2021 at 5:34 AM Tvrtko Ursulin
<tvrtko.ursulin@linux.intel.com> wrote:
> On 22/07/2021 11:16, Daniel Vetter wrote:
> > On Thu, Jul 22, 2021 at 11:02:55AM +0100, Tvrtko Ursulin wrote:
> >> On 21/07/2021 19:32, Daniel Vetter wrote:
> >>> This essentially reverts
> >>>
> >>> commit 84a1074920523430f9dc30ff907f4801b4820072
> >>> Author: Chris Wilson <chris@chris-wilson.co.uk>
> >>> Date:   Wed Jan 24 11:36:08 2018 +0000
> >>>
> >>>       drm/i915: Shrink the GEM kmem_caches upon idling
> >>>
> >>> mm/vmscan.c:do_shrink_slab() is a thing, if there's an issue with it
> >>> then we need to fix that there, not hand-roll our own slab shrinking
> >>> code in i915.
> >>
> >> This is a somewhat incomplete statement which ignores a couple of angles, so I
> >> wish there was a bit more time to respond before steamrolling it in. :(
> >>
> >> The removed code was not a hand-rolled shrinker, but about managing slab
> >> sizes in the face of bursty workloads. Core code does not know when i915 is
> >> active and when it is idle, so calling kmem_cache_shrink() after going idle
> >> was supposed to help with housekeeping by doing housekeeping work outside
> >> of the latency-sensitive phase.
> >>
> >> To "fix" (improve, really) it in core as you suggest would need some method
> >> of signaling when a slab user feels it is an opportune moment to do this
> >> housekeeping. And kmem_cache_shrink() is just that, so I don't see the problem.
> >>
> >> Granted, the argument that kmem_cache_shrink is not much used is a valid one,
> >> so the discussion overall is definitely valid. Because on a higher level we
> >> could definitely talk about which workloads actually benefit from this code
> >> and by how much, which probably no one knows at this point.

Pardon me for being a bit curt here, but that discussion should have
happened 3.5 years ago when this landed.  The entire justification we
have on record for this change is, "When we finally decide the gpu is
idle, that is a good time to shrink our kmem_caches."  We have no
record of any workloads which benefit from this and no recorded way to
reproduce any supposed benefits, even if it requires a microbenchmark.
But we added over 100 lines of code for it anyway, including a bunch
of hand-rolled RCU juggling.  Ripping out unjustified complexity is
almost always justified, IMO.  The burden of proof here isn't on
Daniel to show he isn't regressing anything but it was on you and
Chris to show that complexity was worth something back in 2018 when
this landed.
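
To be concrete, the sort of microbenchmark that could demonstrate (or fail
to demonstrate) such a benefit is something like the sketch below. It is
illustrative only, not code from this patch or thread: the cache name,
object size and NOBJ are arbitrary, and it simply times an allocation
burst from a warm cache against the first burst after kmem_cache_shrink().

/*
 * Illustrative sketch only, not code from this patch or thread: a
 * throwaway module that times an allocation burst from a warm cache
 * and again right after kmem_cache_shrink().
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ktime.h>

#define NOBJ 10000

static struct kmem_cache *bench_cache;
static void *objs[NOBJ];

static s64 time_alloc_burst(void)
{
	ktime_t t0 = ktime_get();
	int i;

	for (i = 0; i < NOBJ; i++)
		objs[i] = kmem_cache_alloc(bench_cache, GFP_KERNEL);
	for (i = 0; i < NOBJ; i++)
		if (objs[i])
			kmem_cache_free(bench_cache, objs[i]);

	return ktime_to_ns(ktime_sub(ktime_get(), t0));
}

static int __init bench_init(void)
{
	s64 warm, after_shrink;

	bench_cache = kmem_cache_create("bench_cache", 512, 0, 0, NULL);
	if (!bench_cache)
		return -ENOMEM;

	time_alloc_burst();			/* prime the cache */
	warm = time_alloc_burst();		/* slabs already populated */
	kmem_cache_shrink(bench_cache);		/* what the removed code did on idle */
	after_shrink = time_alloc_burst();	/* first burst after shrinking */

	pr_info("slab-shrink-bench: warm %lld ns, after shrink %lld ns\n",
		warm, after_shrink);
	return 0;
}

static void __exit bench_exit(void)
{
	kmem_cache_destroy(bench_cache);
}

module_init(bench_init);
module_exit(bench_exit);
MODULE_LICENSE("GPL");

Whether the delta is measurable on real park/unpark cycles is exactly the
open question.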

--Jason


> >> But in general I think you needed to leave more time for discussion. 12
> >> hours is way too short.
> >
> > It's 500+ users of kmem_cache_create vs i915 doing kmem_cache_shrink. And
>
> There are two other callers for the record. ;)
>
> > I guarantee you there's slab users that churn through more allocations
> > than we do, and are more bursty.
>
> I wasn't disputing that.
>
> > An extraordinary claim like this needs extraordinary evidence. And then a
> > discussion with core mm/ folks so that we can figure out how to solve the
> > discovered problem best for the other 500+ users of slabs in-tree, so that
> > everyone benefits. Not just i915 gpu workloads.
>
> Yep, not disputing that either. Noticed I wrote it was a valid argument?
>
> But the discussion with mm folks could also have happened before you
> steamrolled the "revert" in. Perhaps they would have said
> kmem_cache_shrink is the way. Or maybe it isn't. Or maybe they would
> have said meh. I just don't see how the rush was justified given the
> code in question.
>
> Regards,
>
> Tvrtko
>
> > -Daniel
> >
> >>> Noticed while reviewing a patch set from Jason to fix up some issues
> >>> in our i915_init() and i915_exit() module load/cleanup code. Now that
> >>> i915_globals.c isn't any different than normal init/exit functions, we
> >>> should convert them over to one unified table and remove
> >>> i915_globals.[hc] entirely.
> >>>
> >>> Cc: David Airlie <airlied@linux.ie>
> >>> Cc: Jason Ekstrand <jason@jlekstrand.net>
> >>> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> >>> ---
> >>>    drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
> >>>    drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
> >>>    drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
> >>>    drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
> >>>    drivers/gpu/drm/i915/i915_active.c          |  6 --
> >>>    drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
> >>>    drivers/gpu/drm/i915/i915_globals.h         |  3 -
> >>>    drivers/gpu/drm/i915/i915_request.c         |  7 --
> >>>    drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
> >>>    drivers/gpu/drm/i915/i915_vma.c             |  6 --
> >>>    10 files changed, 146 deletions(-)
> >>>
> >>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> >>> index 7d6f52d8a801..bf2a2319353a 100644
> >>> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> >>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> >>> @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
> >>>    #include "selftests/i915_gem_context.c"
> >>>    #endif
> >>> -static void i915_global_gem_context_shrink(void)
> >>> -{
> >>> -   kmem_cache_shrink(global.slab_luts);
> >>> -}
> >>> -
> >>>    static void i915_global_gem_context_exit(void)
> >>>    {
> >>>     kmem_cache_destroy(global.slab_luts);
> >>>    }
> >>>    static struct i915_global_gem_context global = { {
> >>> -   .shrink = i915_global_gem_context_shrink,
> >>>     .exit = i915_global_gem_context_exit,
> >>>    } };
> >>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> >>> index 9da7b288b7ed..5c21cff33199 100644
> >>> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> >>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> >>> @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
> >>>     INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
> >>>    }
> >>> -static void i915_global_objects_shrink(void)
> >>> -{
> >>> -   kmem_cache_shrink(global.slab_objects);
> >>> -}
> >>> -
> >>>    static void i915_global_objects_exit(void)
> >>>    {
> >>>     kmem_cache_destroy(global.slab_objects);
> >>>    }
> >>>    static struct i915_global_object global = { {
> >>> -   .shrink = i915_global_objects_shrink,
> >>>     .exit = i915_global_objects_exit,
> >>>    } };
> >>> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> >>> index bd63813c8a80..c1338441cc1d 100644
> >>> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> >>> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> >>> @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
> >>>     i915_active_fini(&ce->active);
> >>>    }
> >>> -static void i915_global_context_shrink(void)
> >>> -{
> >>> -   kmem_cache_shrink(global.slab_ce);
> >>> -}
> >>> -
> >>>    static void i915_global_context_exit(void)
> >>>    {
> >>>     kmem_cache_destroy(global.slab_ce);
> >>>    }
> >>>    static struct i915_global_context global = { {
> >>> -   .shrink = i915_global_context_shrink,
> >>>     .exit = i915_global_context_exit,
> >>>    } };
> >>> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> >>> index aef3084e8b16..d86825437516 100644
> >>> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> >>> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> >>> @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
> >>>     GT_TRACE(gt, "\n");
> >>> -   i915_globals_unpark();
> >>> -
> >>>     /*
> >>>      * It seems that the DMC likes to transition between the DC states a lot
> >>>      * when there are no connected displays (no active power domains) during
> >>> @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
> >>>     GEM_BUG_ON(!wakeref);
> >>>     intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
> >>> -   i915_globals_park();
> >>> -
> >>>     return 0;
> >>>    }
> >>> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> >>> index b1aa1c482c32..91723123ae9f 100644
> >>> --- a/drivers/gpu/drm/i915/i915_active.c
> >>> +++ b/drivers/gpu/drm/i915/i915_active.c
> >>> @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
> >>>    #include "selftests/i915_active.c"
> >>>    #endif
> >>> -static void i915_global_active_shrink(void)
> >>> -{
> >>> -   kmem_cache_shrink(global.slab_cache);
> >>> -}
> >>> -
> >>>    static void i915_global_active_exit(void)
> >>>    {
> >>>     kmem_cache_destroy(global.slab_cache);
> >>>    }
> >>>    static struct i915_global_active global = { {
> >>> -   .shrink = i915_global_active_shrink,
> >>>     .exit = i915_global_active_exit,
> >>>    } };
> >>> diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
> >>> index 77f1911c463b..7fe2e503897b 100644
> >>> --- a/drivers/gpu/drm/i915/i915_globals.c
> >>> +++ b/drivers/gpu/drm/i915/i915_globals.c
> >>> @@ -17,61 +17,8 @@
> >>>    static LIST_HEAD(globals);
> >>> -static atomic_t active;
> >>> -static atomic_t epoch;
> >>> -static struct park_work {
> >>> -   struct delayed_work work;
> >>> -   struct rcu_head rcu;
> >>> -   unsigned long flags;
> >>> -#define PENDING 0
> >>> -   int epoch;
> >>> -} park;
> >>> -
> >>> -static void i915_globals_shrink(void)
> >>> -{
> >>> -   struct i915_global *global;
> >>> -
> >>> -   /*
> >>> -    * kmem_cache_shrink() discards empty slabs and reorders partially
> >>> -    * filled slabs to prioritise allocating from the mostly full slabs,
> >>> -    * with the aim of reducing fragmentation.
> >>> -    */
> >>> -   list_for_each_entry(global, &globals, link)
> >>> -           global->shrink();
> >>> -}
> >>> -
> >>> -static void __i915_globals_grace(struct rcu_head *rcu)
> >>> -{
> >>> -   /* Ratelimit parking as shrinking is quite slow */
> >>> -   schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
> >>> -}
> >>> -
> >>> -static void __i915_globals_queue_rcu(void)
> >>> -{
> >>> -   park.epoch = atomic_inc_return(&epoch);
> >>> -   if (!atomic_read(&active)) {
> >>> -           init_rcu_head(&park.rcu);
> >>> -           call_rcu(&park.rcu, __i915_globals_grace);
> >>> -   }
> >>> -}
> >>> -
> >>> -static void __i915_globals_park(struct work_struct *work)
> >>> -{
> >>> -   destroy_rcu_head(&park.rcu);
> >>> -
> >>> -   /* Confirm nothing woke up in the last grace period */
> >>> -   if (park.epoch != atomic_read(&epoch)) {
> >>> -           __i915_globals_queue_rcu();
> >>> -           return;
> >>> -   }
> >>> -
> >>> -   clear_bit(PENDING, &park.flags);
> >>> -   i915_globals_shrink();
> >>> -}
> >>> -
> >>>    void __init i915_global_register(struct i915_global *global)
> >>>    {
> >>> -   GEM_BUG_ON(!global->shrink);
> >>>     GEM_BUG_ON(!global->exit);
> >>>     list_add_tail(&global->link, &globals);
> >>> @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
> >>>             }
> >>>     }
> >>> -   INIT_DELAYED_WORK(&park.work, __i915_globals_park);
> >>>     return 0;
> >>>    }
> >>> -void i915_globals_park(void)
> >>> -{
> >>> -   /*
> >>> -    * Defer shrinking the global slab caches (and other work) until
> >>> -    * after a RCU grace period has completed with no activity. This
> >>> -    * is to try and reduce the latency impact on the consumers caused
> >>> -    * by us shrinking the caches the same time as they are trying to
> >>> -    * allocate, with the assumption being that if we idle long enough
> >>> -    * for an RCU grace period to elapse since the last use, it is likely
> >>> -    * to be longer until we need the caches again.
> >>> -    */
> >>> -   if (!atomic_dec_and_test(&active))
> >>> -           return;
> >>> -
> >>> -   /* Queue cleanup after the next RCU grace period has freed slabs */
> >>> -   if (!test_and_set_bit(PENDING, &park.flags))
> >>> -           __i915_globals_queue_rcu();
> >>> -}
> >>> -
> >>> -void i915_globals_unpark(void)
> >>> -{
> >>> -   atomic_inc(&epoch);
> >>> -   atomic_inc(&active);
> >>> -}
> >>> -
> >>> -static void __exit __i915_globals_flush(void)
> >>> -{
> >>> -   atomic_inc(&active); /* skip shrinking */
> >>> -
> >>> -   rcu_barrier(); /* wait for the work to be queued */
> >>> -   flush_delayed_work(&park.work);
> >>> -
> >>> -   atomic_dec(&active);
> >>> -}
> >>> -
> >>>    void __exit i915_globals_exit(void)
> >>>    {
> >>> -   GEM_BUG_ON(atomic_read(&active));
> >>> -
> >>> -   __i915_globals_flush();
> >>>     __i915_globals_cleanup();
> >>> -
> >>> -   /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
> >>> -   rcu_barrier();
> >>>    }
> >>> diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
> >>> index 2d199f411a4a..9e6b4fd07528 100644
> >>> --- a/drivers/gpu/drm/i915/i915_globals.h
> >>> +++ b/drivers/gpu/drm/i915/i915_globals.h
> >>> @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
> >>>    struct i915_global {
> >>>     struct list_head link;
> >>> -   i915_global_func_t shrink;
> >>>     i915_global_func_t exit;
> >>>    };
> >>>    void i915_global_register(struct i915_global *global);
> >>>    int i915_globals_init(void);
> >>> -void i915_globals_park(void);
> >>> -void i915_globals_unpark(void);
> >>>    void i915_globals_exit(void);
> >>>    /* constructors */
> >>> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> >>> index 09ebea9a0090..d3de9f60e03a 100644
> >>> --- a/drivers/gpu/drm/i915/i915_request.c
> >>> +++ b/drivers/gpu/drm/i915/i915_request.c
> >>> @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
> >>>    #include "selftests/i915_request.c"
> >>>    #endif
> >>> -static void i915_global_request_shrink(void)
> >>> -{
> >>> -   kmem_cache_shrink(global.slab_execute_cbs);
> >>> -   kmem_cache_shrink(global.slab_requests);
> >>> -}
> >>> -
> >>>    static void i915_global_request_exit(void)
> >>>    {
> >>>     kmem_cache_destroy(global.slab_execute_cbs);
> >>> @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
> >>>    }
> >>>    static struct i915_global_request global = { {
> >>> -   .shrink = i915_global_request_shrink,
> >>>     .exit = i915_global_request_exit,
> >>>    } };
> >>> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> >>> index 3a58a9130309..561c649e59f7 100644
> >>> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> >>> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> >>> @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
> >>>     return sched_engine;
> >>>    }
> >>> -static void i915_global_scheduler_shrink(void)
> >>> -{
> >>> -   kmem_cache_shrink(global.slab_dependencies);
> >>> -   kmem_cache_shrink(global.slab_priorities);
> >>> -}
> >>> -
> >>>    static void i915_global_scheduler_exit(void)
> >>>    {
> >>>     kmem_cache_destroy(global.slab_dependencies);
> >>> @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
> >>>    }
> >>>    static struct i915_global_scheduler global = { {
> >>> -   .shrink = i915_global_scheduler_shrink,
> >>>     .exit = i915_global_scheduler_exit,
> >>>    } };
> >>> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> >>> index 5b9dce0f443b..09a7c47926f7 100644
> >>> --- a/drivers/gpu/drm/i915/i915_vma.c
> >>> +++ b/drivers/gpu/drm/i915/i915_vma.c
> >>> @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
> >>>    #include "selftests/i915_vma.c"
> >>>    #endif
> >>> -static void i915_global_vma_shrink(void)
> >>> -{
> >>> -   kmem_cache_shrink(global.slab_vmas);
> >>> -}
> >>> -
> >>>    static void i915_global_vma_exit(void)
> >>>    {
> >>>     kmem_cache_destroy(global.slab_vmas);
> >>>    }
> >>>    static struct i915_global_vma global = { {
> >>> -   .shrink = i915_global_vma_shrink,
> >>>     .exit = i915_global_vma_exit,
> >>>    } };
> >>>
> >
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-22 13:37       ` Jason Ekstrand
@ 2021-07-22 14:02         ` Tvrtko Ursulin
  0 siblings, 0 replies; 13+ messages in thread
From: Tvrtko Ursulin @ 2021-07-22 14:02 UTC (permalink / raw)
  To: Jason Ekstrand
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development,
	DRI Development, Daniel Vetter


On 22/07/2021 14:37, Jason Ekstrand wrote:
> On Thu, Jul 22, 2021 at 5:34 AM Tvrtko Ursulin
> <tvrtko.ursulin@linux.intel.com> wrote:
>> On 22/07/2021 11:16, Daniel Vetter wrote:
>>> On Thu, Jul 22, 2021 at 11:02:55AM +0100, Tvrtko Ursulin wrote:
>>>> On 21/07/2021 19:32, Daniel Vetter wrote:
>>>>> This essentially reverts
>>>>>
>>>>> commit 84a1074920523430f9dc30ff907f4801b4820072
>>>>> Author: Chris Wilson <chris@chris-wilson.co.uk>
>>>>> Date:   Wed Jan 24 11:36:08 2018 +0000
>>>>>
>>>>>        drm/i915: Shrink the GEM kmem_caches upon idling
>>>>>
>>>>> mm/vmscan.c:do_shrink_slab() is a thing, if there's an issue with it
>>>>> then we need to fix that there, not hand-roll our own slab shrinking
>>>>> code in i915.
>>>>
>>>> This is a somewhat incomplete statement which ignores a couple of angles, so I
>>>> wish there was a bit more time to respond before steamrolling it in. :(
>>>>
>>>> The removed code was not a hand-rolled shrinker, but about managing slab
>>>> sizes in the face of bursty workloads. Core code does not know when i915 is
>>>> active and when it is idle, so calling kmem_cache_shrink() after going idle
>>>> was supposed to help with housekeeping by doing housekeeping work outside
>>>> of the latency-sensitive phase.
>>>>
>>>> To "fix" (improve, really) it in core as you suggest would need some method
>>>> of signaling when a slab user feels it is an opportune moment to do this
>>>> housekeeping. And kmem_cache_shrink() is just that, so I don't see the problem.
>>>>
>>>> Granted, the argument that kmem_cache_shrink is not much used is a valid one,
>>>> so the discussion overall is definitely valid. Because on a higher level we
>>>> could definitely talk about which workloads actually benefit from this code
>>>> and by how much, which probably no one knows at this point.
> 
> Pardon me for being a bit curt here, but that discussion should have
> happened 3.5 years ago when this landed.  The entire justification we
> have on record for this change is, "When we finally decide the gpu is
> idle, that is a good time to shrink our kmem_caches."  We have no
> record of any workloads which benefit from this and no recorded way to
> reproduce any supposed benefits, even if it requires a microbenchmark.
> But we added over 100 lines of code for it anyway, including a bunch
> of hand-rolled RCU juggling.  Ripping out unjustified complexity is
> almost always justified, IMO.  The burden of proof here isn't on
> Daniel to show he isn't regressing anything but it was on you and
> Chris to show that complexity was worth something back in 2018 when
> this landed.

It feels like there is a lot of knee-jerk reaction when looking at code
added by Chris, which often results in not properly reading what I wrote.

For instance, I did not ask for any proof of no regressions, nor did I
claim any regressions. In fact I said clearly that at this point it is
not known what benefited from it. The statement at the time wasn't clear,
so you would need to ask Chris whether he remembers any better than what
I can find in the mailing list archives. Heck, I even said the argument
to remove it is completely valid.

The point is, the process used to be more inclusive and IMO there is no
technical justification to fast-track this type of change. Compared to
other work in progress, there was approaching zero maintenance cost with
this.

Besides, mm folks may still say that it is good hygiene to tidy one's own
slabs at opportune moments. Maybe it is a stretch, but we don't know if
we don't ask. There are certainly online references to slab reclaim
being problematic in the past. There was nothing urgent in this "revert"
that couldn't have waited a bit longer, or at least until _some_ of the
involved people were copied.

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure
  2021-07-21 20:17 ` Jason Ekstrand
  2021-07-21 20:24   ` Daniel Vetter
@ 2021-07-23 15:42   ` Tvrtko Ursulin
  1 sibling, 0 replies; 13+ messages in thread
From: Tvrtko Ursulin @ 2021-07-23 15:42 UTC (permalink / raw)
  To: Jason Ekstrand, Daniel Vetter
  Cc: David Airlie, Daniel Vetter, Intel Graphics Development, DRI Development


On 21/07/2021 21:17, Jason Ekstrand wrote:
> On Wed, Jul 21, 2021 at 1:32 PM Daniel Vetter <daniel.vetter@ffwll.ch> wrote:
>>
>> This essentially reverts
>>
>> commit 84a1074920523430f9dc30ff907f4801b4820072
>> Author: Chris Wilson <chris@chris-wilson.co.uk>
>> Date:   Wed Jan 24 11:36:08 2018 +0000
>>
>>      drm/i915: Shrink the GEM kmem_caches upon idling
>>
>> mm/vmscan.c:do_shrink_slab() is a thing, if there's an issue with it
>> then we need to fix that there, not hand-roll our own slab shrinking
>> code in i915.
>>
>> Noticed while reviewing a patch set from Jason to fix up some issues
>> in our i915_init() and i915_exit() module load/cleanup code. Now that
>> i915_globals.c isn't any different than normal init/exit functions, we
>> should convert them over to one unified table and remove
>> i915_globals.[hc] entirely.
> 
> Mind throwing in a comment somewhere about how i915 is one of only two
> users of kmem_cache_shrink() in the entire kernel?  That also seems to
> be pretty good evidence that it's not useful.
> 
> Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

FYI guys, I've copied you on a thread with an mm expert who is
suggesting that calling kmem_cache_shrink() is in fact probably a good
idea if it can be reasonably done.

Since that means the general principle is not invalid, as the commit
message and the discussion here suggest it is, we could have had the
discussion purely in the realm of how much code complexity this has
added to i915 and gone from there.

Anyway, at this point I can only repeat my plea not to rush things and
not to avoid copying people who were involved in the discussions.

Regards,

Tvrtko
> Feel free to land at-will and I'll deal with merge conflicts on my end.
> 
>> Cc: David Airlie <airlied@linux.ie>
>> Cc: Jason Ekstrand <jason@jlekstrand.net>
>> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
>> ---
>>   drivers/gpu/drm/i915/gem/i915_gem_context.c |  6 --
>>   drivers/gpu/drm/i915/gem/i915_gem_object.c  |  6 --
>>   drivers/gpu/drm/i915/gt/intel_context.c     |  6 --
>>   drivers/gpu/drm/i915/gt/intel_gt_pm.c       |  4 -
>>   drivers/gpu/drm/i915/i915_active.c          |  6 --
>>   drivers/gpu/drm/i915/i915_globals.c         | 95 ---------------------
>>   drivers/gpu/drm/i915/i915_globals.h         |  3 -
>>   drivers/gpu/drm/i915/i915_request.c         |  7 --
>>   drivers/gpu/drm/i915/i915_scheduler.c       |  7 --
>>   drivers/gpu/drm/i915/i915_vma.c             |  6 --
>>   10 files changed, 146 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
>> index 7d6f52d8a801..bf2a2319353a 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
>> @@ -2280,18 +2280,12 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
>>   #include "selftests/i915_gem_context.c"
>>   #endif
>>
>> -static void i915_global_gem_context_shrink(void)
>> -{
>> -       kmem_cache_shrink(global.slab_luts);
>> -}
>> -
>>   static void i915_global_gem_context_exit(void)
>>   {
>>          kmem_cache_destroy(global.slab_luts);
>>   }
>>
>>   static struct i915_global_gem_context global = { {
>> -       .shrink = i915_global_gem_context_shrink,
>>          .exit = i915_global_gem_context_exit,
>>   } };
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>> index 9da7b288b7ed..5c21cff33199 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
>> @@ -664,18 +664,12 @@ void i915_gem_init__objects(struct drm_i915_private *i915)
>>          INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
>>   }
>>
>> -static void i915_global_objects_shrink(void)
>> -{
>> -       kmem_cache_shrink(global.slab_objects);
>> -}
>> -
>>   static void i915_global_objects_exit(void)
>>   {
>>          kmem_cache_destroy(global.slab_objects);
>>   }
>>
>>   static struct i915_global_object global = { {
>> -       .shrink = i915_global_objects_shrink,
>>          .exit = i915_global_objects_exit,
>>   } };
>>
>> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
>> index bd63813c8a80..c1338441cc1d 100644
>> --- a/drivers/gpu/drm/i915/gt/intel_context.c
>> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
>> @@ -398,18 +398,12 @@ void intel_context_fini(struct intel_context *ce)
>>          i915_active_fini(&ce->active);
>>   }
>>
>> -static void i915_global_context_shrink(void)
>> -{
>> -       kmem_cache_shrink(global.slab_ce);
>> -}
>> -
>>   static void i915_global_context_exit(void)
>>   {
>>          kmem_cache_destroy(global.slab_ce);
>>   }
>>
>>   static struct i915_global_context global = { {
>> -       .shrink = i915_global_context_shrink,
>>          .exit = i915_global_context_exit,
>>   } };
>>
>> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>> index aef3084e8b16..d86825437516 100644
>> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>> @@ -67,8 +67,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
>>
>>          GT_TRACE(gt, "\n");
>>
>> -       i915_globals_unpark();
>> -
>>          /*
>>           * It seems that the DMC likes to transition between the DC states a lot
>>           * when there are no connected displays (no active power domains) during
>> @@ -116,8 +114,6 @@ static int __gt_park(struct intel_wakeref *wf)
>>          GEM_BUG_ON(!wakeref);
>>          intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
>>
>> -       i915_globals_park();
>> -
>>          return 0;
>>   }
>>
>> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
>> index b1aa1c482c32..91723123ae9f 100644
>> --- a/drivers/gpu/drm/i915/i915_active.c
>> +++ b/drivers/gpu/drm/i915/i915_active.c
>> @@ -1176,18 +1176,12 @@ struct i915_active *i915_active_create(void)
>>   #include "selftests/i915_active.c"
>>   #endif
>>
>> -static void i915_global_active_shrink(void)
>> -{
>> -       kmem_cache_shrink(global.slab_cache);
>> -}
>> -
>>   static void i915_global_active_exit(void)
>>   {
>>          kmem_cache_destroy(global.slab_cache);
>>   }
>>
>>   static struct i915_global_active global = { {
>> -       .shrink = i915_global_active_shrink,
>>          .exit = i915_global_active_exit,
>>   } };
>>
>> diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
>> index 77f1911c463b..7fe2e503897b 100644
>> --- a/drivers/gpu/drm/i915/i915_globals.c
>> +++ b/drivers/gpu/drm/i915/i915_globals.c
>> @@ -17,61 +17,8 @@
>>
>>   static LIST_HEAD(globals);
>>
>> -static atomic_t active;
>> -static atomic_t epoch;
>> -static struct park_work {
>> -       struct delayed_work work;
>> -       struct rcu_head rcu;
>> -       unsigned long flags;
>> -#define PENDING 0
>> -       int epoch;
>> -} park;
>> -
>> -static void i915_globals_shrink(void)
>> -{
>> -       struct i915_global *global;
>> -
>> -       /*
>> -        * kmem_cache_shrink() discards empty slabs and reorders partially
>> -        * filled slabs to prioritise allocating from the mostly full slabs,
>> -        * with the aim of reducing fragmentation.
>> -        */
>> -       list_for_each_entry(global, &globals, link)
>> -               global->shrink();
>> -}
>> -
>> -static void __i915_globals_grace(struct rcu_head *rcu)
>> -{
>> -       /* Ratelimit parking as shrinking is quite slow */
>> -       schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
>> -}
>> -
>> -static void __i915_globals_queue_rcu(void)
>> -{
>> -       park.epoch = atomic_inc_return(&epoch);
>> -       if (!atomic_read(&active)) {
>> -               init_rcu_head(&park.rcu);
>> -               call_rcu(&park.rcu, __i915_globals_grace);
>> -       }
>> -}
>> -
>> -static void __i915_globals_park(struct work_struct *work)
>> -{
>> -       destroy_rcu_head(&park.rcu);
>> -
>> -       /* Confirm nothing woke up in the last grace period */
>> -       if (park.epoch != atomic_read(&epoch)) {
>> -               __i915_globals_queue_rcu();
>> -               return;
>> -       }
>> -
>> -       clear_bit(PENDING, &park.flags);
>> -       i915_globals_shrink();
>> -}
>> -
>>   void __init i915_global_register(struct i915_global *global)
>>   {
>> -       GEM_BUG_ON(!global->shrink);
>>          GEM_BUG_ON(!global->exit);
>>
>>          list_add_tail(&global->link, &globals);
>> @@ -109,52 +56,10 @@ int __init i915_globals_init(void)
>>                  }
>>          }
>>
>> -       INIT_DELAYED_WORK(&park.work, __i915_globals_park);
>>          return 0;
>>   }
>>
>> -void i915_globals_park(void)
>> -{
>> -       /*
>> -        * Defer shrinking the global slab caches (and other work) until
>> -        * after a RCU grace period has completed with no activity. This
>> -        * is to try and reduce the latency impact on the consumers caused
>> -        * by us shrinking the caches the same time as they are trying to
>> -        * allocate, with the assumption being that if we idle long enough
>> -        * for an RCU grace period to elapse since the last use, it is likely
>> -        * to be longer until we need the caches again.
>> -        */
>> -       if (!atomic_dec_and_test(&active))
>> -               return;
>> -
>> -       /* Queue cleanup after the next RCU grace period has freed slabs */
>> -       if (!test_and_set_bit(PENDING, &park.flags))
>> -               __i915_globals_queue_rcu();
>> -}
>> -
>> -void i915_globals_unpark(void)
>> -{
>> -       atomic_inc(&epoch);
>> -       atomic_inc(&active);
>> -}
>> -
>> -static void __exit __i915_globals_flush(void)
>> -{
>> -       atomic_inc(&active); /* skip shrinking */
>> -
>> -       rcu_barrier(); /* wait for the work to be queued */
>> -       flush_delayed_work(&park.work);
>> -
>> -       atomic_dec(&active);
>> -}
>> -
>>   void __exit i915_globals_exit(void)
>>   {
>> -       GEM_BUG_ON(atomic_read(&active));
>> -
>> -       __i915_globals_flush();
>>          __i915_globals_cleanup();
>> -
>> -       /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
>> -       rcu_barrier();
>>   }
>> diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
>> index 2d199f411a4a..9e6b4fd07528 100644
>> --- a/drivers/gpu/drm/i915/i915_globals.h
>> +++ b/drivers/gpu/drm/i915/i915_globals.h
>> @@ -14,15 +14,12 @@ typedef void (*i915_global_func_t)(void);
>>   struct i915_global {
>>          struct list_head link;
>>
>> -       i915_global_func_t shrink;
>>          i915_global_func_t exit;
>>   };
>>
>>   void i915_global_register(struct i915_global *global);
>>
>>   int i915_globals_init(void);
>> -void i915_globals_park(void);
>> -void i915_globals_unpark(void);
>>   void i915_globals_exit(void);
>>
>>   /* constructors */
>> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
>> index 09ebea9a0090..d3de9f60e03a 100644
>> --- a/drivers/gpu/drm/i915/i915_request.c
>> +++ b/drivers/gpu/drm/i915/i915_request.c
>> @@ -2077,12 +2077,6 @@ void i915_request_show(struct drm_printer *m,
>>   #include "selftests/i915_request.c"
>>   #endif
>>
>> -static void i915_global_request_shrink(void)
>> -{
>> -       kmem_cache_shrink(global.slab_execute_cbs);
>> -       kmem_cache_shrink(global.slab_requests);
>> -}
>> -
>>   static void i915_global_request_exit(void)
>>   {
>>          kmem_cache_destroy(global.slab_execute_cbs);
>> @@ -2090,7 +2084,6 @@ static void i915_global_request_exit(void)
>>   }
>>
>>   static struct i915_global_request global = { {
>> -       .shrink = i915_global_request_shrink,
>>          .exit = i915_global_request_exit,
>>   } };
>>
>> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
>> index 3a58a9130309..561c649e59f7 100644
>> --- a/drivers/gpu/drm/i915/i915_scheduler.c
>> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
>> @@ -475,12 +475,6 @@ i915_sched_engine_create(unsigned int subclass)
>>          return sched_engine;
>>   }
>>
>> -static void i915_global_scheduler_shrink(void)
>> -{
>> -       kmem_cache_shrink(global.slab_dependencies);
>> -       kmem_cache_shrink(global.slab_priorities);
>> -}
>> -
>>   static void i915_global_scheduler_exit(void)
>>   {
>>          kmem_cache_destroy(global.slab_dependencies);
>> @@ -488,7 +482,6 @@ static void i915_global_scheduler_exit(void)
>>   }
>>
>>   static struct i915_global_scheduler global = { {
>> -       .shrink = i915_global_scheduler_shrink,
>>          .exit = i915_global_scheduler_exit,
>>   } };
>>
>> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
>> index 5b9dce0f443b..09a7c47926f7 100644
>> --- a/drivers/gpu/drm/i915/i915_vma.c
>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>> @@ -1414,18 +1414,12 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
>>   #include "selftests/i915_vma.c"
>>   #endif
>>
>> -static void i915_global_vma_shrink(void)
>> -{
>> -       kmem_cache_shrink(global.slab_vmas);
>> -}
>> -
>>   static void i915_global_vma_exit(void)
>>   {
>>          kmem_cache_destroy(global.slab_vmas);
>>   }
>>
>>   static struct i915_global_vma global = { {
>> -       .shrink = i915_global_vma_shrink,
>>          .exit = i915_global_vma_exit,
>>   } };
>>
>> --
>> 2.32.0
>>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2021-07-23 15:42 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
2021-07-21 18:32 [Intel-gfx] [PATCH] drm/i915: Ditch i915 globals shrink infrastructure Daniel Vetter
2021-07-21 20:17 ` Jason Ekstrand
2021-07-21 20:24   ` Daniel Vetter
2021-07-22  9:15     ` Daniel Vetter
2021-07-23 15:42   ` Tvrtko Ursulin
2021-07-21 20:46 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2021-07-21 21:15 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-07-22  2:31 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
2021-07-22 10:02 ` [Intel-gfx] [PATCH] " Tvrtko Ursulin
2021-07-22 10:16   ` Daniel Vetter
2021-07-22 10:33     ` Tvrtko Ursulin
2021-07-22 13:37       ` Jason Ekstrand
2021-07-22 14:02         ` Tvrtko Ursulin
