From: Daniel Vetter <daniel.vetter@ffwll.ch>
To: DRI Development <dri-devel@lists.freedesktop.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>,
	Intel Graphics Development <intel-gfx@lists.freedesktop.org>,
	Daniel Vetter <daniel.vetter@intel.com>
Subject: [Intel-gfx] [PATCH v2 08/11] drm/i915: move scheduler slabs to direct module init/exit
Date: Tue, 27 Jul 2021 14:10:34 +0200	[thread overview]
Message-ID: <20210727121037.2041102-8-daniel.vetter@ffwll.ch> (raw)
In-Reply-To: <20210727121037.2041102-1-daniel.vetter@ffwll.ch>

With the global kmem_cache shrink infrastructure gone there's nothing
special left here, and we can convert these slabs over to direct module
init/exit.

I'm doing this as a separate patch for each slab because there's quite a
bit of noise from renaming the static global.slab_dependencies|priorities
to just slab_dependencies|priorities; the resulting pattern is sketched
below.
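
For reference, the target shape is a plain static kmem_cache plus a pair
of module init/exit functions. This is a minimal sketch only; the
i915_example/slab_example names are placeholders for illustration, not
symbols added by this patch, and KMEM_CACHE() assumes a corresponding
struct i915_example exists:

  /* One static slab per file instead of a field in a global struct. */
  static struct kmem_cache *slab_example;

  int __init i915_example_module_init(void)
  {
          /* Create the cache once at module load. */
          slab_example = KMEM_CACHE(i915_example, 0);
          if (!slab_example)
                  return -ENOMEM;

          return 0;
  }

  void i915_example_module_exit(void)
  {
          /* kmem_cache_destroy() copes fine with a NULL cache. */
          kmem_cache_destroy(slab_example);
  }

The matching { i915_example_module_init, i915_example_module_exit } entry
then goes into the init/exit table in i915_pci.c, which is what this patch
does for the scheduler slabs.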

v2: Make slab static (Jason, 0day)

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 drivers/gpu/drm/i915/i915_globals.c   |  2 --
 drivers/gpu/drm/i915/i915_globals.h   |  2 --
 drivers/gpu/drm/i915/i915_pci.c       |  2 ++
 drivers/gpu/drm/i915/i915_scheduler.c | 39 +++++++++++----------------
 drivers/gpu/drm/i915/i915_scheduler.h |  3 +++
 5 files changed, 20 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 8fffa8d93bc5..8923589057ab 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -8,7 +8,6 @@
 #include <linux/workqueue.h>
 
 #include "i915_globals.h"
-#include "i915_scheduler.h"
 #include "i915_vma.h"
 
 static LIST_HEAD(globals);
@@ -29,7 +28,6 @@ static void __i915_globals_cleanup(void)
 }
 
 static __initconst int (* const initfn[])(void) = {
-	i915_global_scheduler_init,
 	i915_global_vma_init,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 9734740708f4..7a57bce1da05 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -23,8 +23,6 @@ int i915_globals_init(void);
 void i915_globals_exit(void);
 
 /* constructors */
-int i915_global_request_init(void);
-int i915_global_scheduler_init(void);
 int i915_global_vma_init(void);
 
 #endif /* _I915_GLOBALS_H_ */
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 3f1716a3d74f..500897d0d4b5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -39,6 +39,7 @@
 #include "i915_perf.h"
 #include "i915_globals.h"
 #include "i915_selftest.h"
+#include "i915_scheduler.h"
 
 #define PLATFORM(x) .platform = (x)
 #define GEN(x) \
@@ -1305,6 +1306,7 @@ static const struct {
 	{ i915_gem_context_module_init, i915_gem_context_module_exit },
 	{ i915_objects_module_init, i915_objects_module_exit },
 	{ i915_request_module_init, i915_request_module_exit },
+	{ i915_scheduler_module_init, i915_scheduler_module_exit },
 	{ i915_globals_init, i915_globals_exit },
 	{ i915_mock_selftests, NULL },
 	{ i915_pmu_init, i915_pmu_exit },
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 561c649e59f7..c7ea5a1f3b94 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -7,15 +7,11 @@
 #include <linux/mutex.h>
 
 #include "i915_drv.h"
-#include "i915_globals.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
 
-static struct i915_global_scheduler {
-	struct i915_global base;
-	struct kmem_cache *slab_dependencies;
-	struct kmem_cache *slab_priorities;
-} global;
+static struct kmem_cache *slab_dependencies;
+static struct kmem_cache *slab_priorities;
 
 static DEFINE_SPINLOCK(schedule_lock);
 
@@ -93,7 +89,7 @@ i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)
 	if (prio == I915_PRIORITY_NORMAL) {
 		p = &sched_engine->default_priolist;
 	} else {
-		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
+		p = kmem_cache_alloc(slab_priorities, GFP_ATOMIC);
 		/* Convert an allocation failure to a priority bump */
 		if (unlikely(!p)) {
 			prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -122,7 +118,7 @@ i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)
 
 void __i915_priolist_free(struct i915_priolist *p)
 {
-	kmem_cache_free(global.slab_priorities, p);
+	kmem_cache_free(slab_priorities, p);
 }
 
 struct sched_cache {
@@ -313,13 +309,13 @@ void i915_sched_node_reinit(struct i915_sched_node *node)
 static struct i915_dependency *
 i915_dependency_alloc(void)
 {
-	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
+	return kmem_cache_alloc(slab_dependencies, GFP_KERNEL);
 }
 
 static void
 i915_dependency_free(struct i915_dependency *dep)
 {
-	kmem_cache_free(global.slab_dependencies, dep);
+	kmem_cache_free(slab_dependencies, dep);
 }
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -475,32 +471,27 @@ i915_sched_engine_create(unsigned int subclass)
 	return sched_engine;
 }
 
-static void i915_global_scheduler_exit(void)
+void i915_scheduler_module_exit(void)
 {
-	kmem_cache_destroy(global.slab_dependencies);
-	kmem_cache_destroy(global.slab_priorities);
+	kmem_cache_destroy(slab_dependencies);
+	kmem_cache_destroy(slab_priorities);
 }
 
-static struct i915_global_scheduler global = { {
-	.exit = i915_global_scheduler_exit,
-} };
-
-int __init i915_global_scheduler_init(void)
+int __init i915_scheduler_module_init(void)
 {
-	global.slab_dependencies = KMEM_CACHE(i915_dependency,
+	slab_dependencies = KMEM_CACHE(i915_dependency,
 					      SLAB_HWCACHE_ALIGN |
 					      SLAB_TYPESAFE_BY_RCU);
-	if (!global.slab_dependencies)
+	if (!slab_dependencies)
 		return -ENOMEM;
 
-	global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
-	if (!global.slab_priorities)
+	slab_priorities = KMEM_CACHE(i915_priolist, 0);
+	if (!slab_priorities)
 		goto err_priorities;
 
-	i915_global_register(&global.base);
 	return 0;
 
 err_priorities:
-	kmem_cache_destroy(global.slab_priorities);
+	kmem_cache_destroy(slab_priorities);
 	return -ENOMEM;
 }
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 650ab8e0db9f..0a4f61fd0be0 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -98,4 +98,7 @@ void i915_request_show_with_schedule(struct drm_printer *m,
 				     const char *prefix,
 				     int indent);
 
+void i915_scheduler_module_exit(void);
+int i915_scheduler_module_init(void);
+
 #endif /* _I915_SCHEDULER_H_ */
-- 
2.32.0

Thread overview: 20+ messages
2021-07-27 12:10 [Intel-gfx] [PATCH v2 01/11] drm/i915: Check for nomodeset in i915_init() first Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 02/11] drm/i915: move i915_active slab to direct module init/exit Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 03/11] drm/i915: move i915_buddy " Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 04/11] drm/i915: move intel_context " Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 05/11] drm/i915: move gem_context " Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 06/11] drm/i915: move gem_objects " Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 07/11] drm/i915: move request slabs " Daniel Vetter
2021-07-27 12:10 ` Daniel Vetter [this message]
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 09/11] drm/i915: move vma slab " Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 10/11] drm/i915: Remove i915_globals Daniel Vetter
2021-07-27 12:10 ` [Intel-gfx] [PATCH v2 11/11] drm/i915: Extract i915_module.c Daniel Vetter
2021-07-27 14:44   ` Tvrtko Ursulin
2021-07-27 18:25     ` Jason Ekstrand
2021-08-23 15:22   ` Guenter Roeck
2021-08-23 16:15     ` Jani Nikula
2021-08-25 14:51   ` Jani Nikula
2021-07-27 14:09 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [v2,01/11] drm/i915: Check for nomodeset in i915_init() first Patchwork
2021-07-27 14:11 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2021-07-27 14:37 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-07-27 22:02 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
