* [RFC/PATCH] slab: clean up allocation
From: Pekka J Enberg @ 2006-09-29 10:56 UTC
  To: manfred, christoph, pj; +Cc: linux-kernel, linux-mm

Hi!

This patch cleans up the slab allocation path by making the UMA case look like
a NUMA system that always allocates from the current node.  In addition, I merged
the two NUMA allocation paths (kmem_cache_alloc_node and __cache_alloc) into a
single function so that we always use alternate_node_alloc() when PF_SPREAD_SLAB
or PF_MEMPOLICY is set.
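
For orientation, a rough sketch of the resulting call graph (derived from
the diff below, not additional code):

	kmem_cache_alloc() / kmem_cache_zalloc() / __do_kmalloc()
	        -> cache_alloc(cachep, flags, SLAB_CURRENT_NODE, caller)
	kmem_cache_alloc_node(cachep, flags, nodeid)
	        -> cache_alloc(cachep, flags, nodeid, caller)

	cache_alloc()
	        -> __cache_alloc()   (NUMA: alternate_node_alloc(),
	                              cache_alloc_local() or
	                              __cache_alloc_node() as needed;
	                              UMA: always cache_alloc_local())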

Note: this increases kernel text by 70 bytes on x86 NUMA builds due to inlining
of the cache_alloc function at multiple call sites.

Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <christoph@lameter.com>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |  116 +++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 66 insertions(+), 50 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -210,6 +210,11 @@ typedef unsigned int kmem_bufctl_t;
 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
 /*
+ * When allocating from current node.
+ */
+#define SLAB_CURRENT_NODE (-1)
+
+/*
  * struct slab
  *
  * Manages the objs in a slab. Placed either at the beginning of mem allocated
@@ -3041,7 +3046,7 @@ static void *cache_alloc_debugcheck_afte
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static inline void *cache_alloc_local(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
 	struct array_cache *ac;
@@ -3059,35 +3064,6 @@ static inline void *____cache_alloc(stru
 	return objp;
 }
 
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
-						gfp_t flags, void *caller)
-{
-	unsigned long save_flags;
-	void *objp = NULL;
-
-	cache_alloc_debugcheck_before(cachep, flags);
-
-	local_irq_save(save_flags);
-
-	if (unlikely(NUMA_BUILD &&
-			current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
-		objp = alternate_node_alloc(cachep, flags);
-
-	if (!objp)
-		objp = ____cache_alloc(cachep, flags);
-	/*
-	 * We may just have run out of memory on the local node.
-	 * __cache_alloc_node() knows how to locate memory on other nodes
-	 */
- 	if (NUMA_BUILD && !objp)
- 		objp = __cache_alloc_node(cachep, flags, numa_node_id());
-	local_irq_restore(save_flags);
-	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-					    caller);
-	prefetchw(objp);
-	return objp;
-}
-
 #ifdef CONFIG_NUMA
 /*
  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3198,8 +3174,62 @@ must_grow:
 done:
 	return obj;
 }
+
+static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
+					   gfp_t flags, int nodeid)
+{
+	void *objp = NULL;
+
+	if (nodeid == SLAB_CURRENT_NODE || nodeid == numa_node_id() ||
+			!cachep->nodelists[nodeid]) {
+		if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
+			objp = alternate_node_alloc(cachep, flags);
+
+		if (!objp)
+			objp = cache_alloc_local(cachep, flags);
+		/*
+		 * We may just have run out of memory on the local node.
+		 * __cache_alloc_node() knows how to locate memory on other nodes
+		 */
+	 	if (!objp)
+	 		objp = __cache_alloc_node(cachep, flags, numa_node_id());
+	} else
+		objp = __cache_alloc_node(cachep, flags, nodeid);
+
+	return objp;
+}
+
+#else
+/*
+ *	On UMA, we always allocate from the local node.
+ */
+static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
+					   gfp_t flags, int nodeid)
+{
+	return cache_alloc_local(cachep, flags);
+}
 #endif
 
+static __always_inline void *cache_alloc(struct kmem_cache *cachep,
+					 gfp_t flags, int nodeid,
+					 void *caller)
+{
+	unsigned long save_flags;
+	void *objp;
+
+	cache_alloc_debugcheck_before(cachep, flags);
+
+	local_irq_save(save_flags);
+	objp = __cache_alloc(cachep, flags, nodeid);
+	local_irq_restore(save_flags);
+
+	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+	prefetchw(objp);
+
+	return objp;
+}
+
+
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
@@ -3333,7 +3363,8 @@ static inline void __cache_free(struct k
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	return cache_alloc(cachep, flags, SLAB_CURRENT_NODE,
+			   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
@@ -3347,7 +3378,8 @@ EXPORT_SYMBOL(kmem_cache_alloc);
  */
 void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
 {
-	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
+	void *ret = cache_alloc(cache, flags, SLAB_CURRENT_NODE,
+				__builtin_return_address(0));
 	if (ret)
 		memset(ret, 0, obj_size(cache));
 	return ret;
@@ -3411,23 +3443,7 @@ out:
  */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	unsigned long save_flags;
-	void *ptr;
-
-	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
-
-	if (nodeid == -1 || nodeid == numa_node_id() ||
-			!cachep->nodelists[nodeid])
-		ptr = ____cache_alloc(cachep, flags);
-	else
-		ptr = __cache_alloc_node(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
-
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
-					   __builtin_return_address(0));
-
-	return ptr;
+	return cache_alloc(cachep, flags, nodeid, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
@@ -3462,7 +3478,7 @@ static __always_inline void *__do_kmallo
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
-	return __cache_alloc(cachep, flags, caller);
+	return cache_alloc(cachep, flags, SLAB_CURRENT_NODE, caller);
 }
 
 

* Re: [RFC/PATCH] slab: clean up allocation
From: Christoph Lameter @ 2006-09-29 16:10 UTC
  To: Pekka J Enberg; +Cc: manfred, christoph, pj, linux-kernel, linux-mm

On Fri, 29 Sep 2006, Pekka J Enberg wrote:

> + * When allocating from current node.
> + */
> +#define SLAB_CURRENT_NODE (-1)
> +

If we want a constant here, then we had better define a global one and
use it throughout the kernel.

Something like

#define LOCAL_NODE (-1)

Maybe in include/*/topology.h ?
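
For illustration, a minimal sketch of what that could look like (the
LOCAL_NODE name and the topology.h placement are only suggestions here,
not existing definitions):

	/* e.g. in include/linux/topology.h (hypothetical) */
	#define LOCAL_NODE	(-1)	/* no specific node: allocate locally */

	/* slab could then drop its private macro and do: */
	void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
	{
		return cache_alloc(cachep, flags, LOCAL_NODE,
				   __builtin_return_address(0));
	}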


>  #endif
>  
> -static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> +static inline void *cache_alloc_local(struct kmem_cache *cachep, gfp_t flags)
>  {
>  	void *objp;
>  	struct array_cache *ac;
> @@ -3059,35 +3064,6 @@ static inline void *____cache_alloc(stru
>  	return objp;
>  }

This is not really local in the sense of node local, but it's processor 
local. The speciality here is that we allocate from the per-processor
list of objects. cache_alloc_cpu?
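
That is, keep the body as is and only rename, something like (sketch of
the suggested signature only):

	static inline void *cache_alloc_cpu(struct kmem_cache *cachep, gfp_t flags);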

The rest looks fine at first glance.


