* [PATCH 1/5] mm/slab_common: move kmem_cache definition to internal header
From: Joonsoo Kim @ 2014-08-21  8:09 UTC
  To: Andrew Morton
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, linux-mm,
	linux-kernel, Joonsoo Kim

We don't need to keep the kmem_cache definition in include/linux/slab.h
if we don't need to inline kmem_cache_size(). According to my
code inspection, this function is only called at lc_create() in
lib/lru_cache.c, which may be called during some initialization phase,
so we don't need to inline it. Therefore, move it to slab_common.c and
move the kmem_cache definition to the internal header.

After this change, we can change the kmem_cache definition easily without
a full kernel build. For instance, we can turn CONFIG_SLUB_STATS on/off
without a full kernel build.
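
For reference, the only caller looks roughly like this (a simplified,
hypothetical sketch of the lc_create() call site in lib/lru_cache.c,
not the exact code or signature):

	struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
				    unsigned e_count, size_t e_size, size_t e_off)
	{
		/* runs once, when the LRU cache is set up */
		unsigned cache_obj_size = kmem_cache_size(cache);
		/* ... allocate and initialize the lru_cache ... */
	}

Since this runs once per cache at setup time, the extra function call
from un-inlining kmem_cache_size() costs nothing measurable.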

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 include/linux/slab.h |   42 +-----------------------------------------
 mm/slab.h            |   33 +++++++++++++++++++++++++++++++++
 mm/slab_common.c     |    8 ++++++++
 3 files changed, 42 insertions(+), 41 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1d9abb7..9062e4a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,31 +158,6 @@ size_t ksize(const void *);
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
 
-#ifdef CONFIG_SLOB
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-struct kmem_cache {
-	unsigned int object_size;/* The original size of the object */
-	unsigned int size;	/* The aligned/padded/added on size  */
-	unsigned int align;	/* Alignment as calculated */
-	unsigned long flags;	/* Active flags on the slab */
-	const char *name;	/* Slab name for sysfs */
-	int refcount;		/* Use counter */
-	void (*ctor)(void *);	/* Called on object slot creation */
-	struct list_head list;	/* List of all slab caches on the system */
-};
-
-#endif /* CONFIG_SLOB */
-
 /*
  * Kmalloc array related definitions
  */
@@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-#ifdef CONFIG_SLAB
-#include <linux/slab_def.h>
-#endif
-
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#endif
-
 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
 
 #ifdef CONFIG_TRACING
@@ -650,14 +617,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 	return kmalloc_node(size, flags | __GFP_ZERO, node);
 }
 
-/*
- * Determine the size of a slab object
- */
-static inline unsigned int kmem_cache_size(struct kmem_cache *s)
-{
-	return s->object_size;
-}
-
+unsigned int kmem_cache_size(struct kmem_cache *s);
 void __init kmem_cache_init_late(void);
 
 #endif	/* _LINUX_SLAB_H */
diff --git a/mm/slab.h b/mm/slab.h
index 0e0fdd3..bd1c54a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -4,6 +4,39 @@
  * Internal slab definitions
  */
 
+#ifdef CONFIG_SLOB
+/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put a
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size  */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+
+#endif /* CONFIG_SLOB */
+
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#endif
+
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#endif
+
 /*
  * State of the slab allocator.
  *
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d319502..2088904 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -30,6 +30,14 @@ LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
+/*
+ * Determine the size of a slab object
+ */
+unsigned int kmem_cache_size(struct kmem_cache *s)
+{
+	return s->object_size;
+}
+
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, size_t size)
 {
-- 
1.7.9.5


* [PATCH 2/5] mm/sl[ao]b: always track caller in kmalloc_(node_)track_caller()
From: Joonsoo Kim @ 2014-08-21  8:09 UTC
  To: Andrew Morton
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, linux-mm,
	linux-kernel, Joonsoo Kim

Now, we track the caller only if tracing or slab debugging is enabled. If they
are disabled, we could save one argument-passing overhead by calling
__kmalloc(_node)(), but I think that saving would be marginal. Furthermore, the
default slab allocator, SLUB, doesn't use this technique, so I think
it's okay to change this situation.

From this change, we can turn CONFIG_DEBUG_SLAB on/off without a full
kernel build and remove some complicated '#if' definitions. It looks
more beneficial to me.
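
For context, the track_caller variants exist for small wrappers like
kstrdup(), where the interesting caller is the wrapper's caller. A
simplified sketch of the pattern (not the exact mm/util.c code):

	char *kstrdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		/* passes _RET_IP_, i.e. the address of kstrdup()'s caller */
		char *buf = kmalloc_track_caller(len, gfp);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}

After this patch, the macro expands to __kmalloc_track_caller(size,
flags, _RET_IP_) unconditionally, independent of the debug and tracing
config options.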

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 include/linux/slab.h |   22 ----------------------
 mm/slab.c            |   18 ------------------
 mm/slob.c            |    2 --
 3 files changed, 42 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9062e4a..c265bec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -549,37 +549,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
-	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
-#else
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc(size, flags)
-#endif /* DEBUG_SLAB */
 
 #ifdef CONFIG_NUMA
-/*
- * kmalloc_node_track_caller is a special version of kmalloc_node that
- * records the calling function of the routine calling it for slab leak
- * tracking instead of just the calling function (confusing, eh?).
- * It's useful when the call to kmalloc_node comes from a widely-used
- * standard allocator where we care about the real place the memory
- * allocation request comes from.
- */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
-	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
 			_RET_IP_)
-#else
-#define kmalloc_node_track_caller(size, flags, node) \
-	__kmalloc_node(size, flags, node)
-#endif
 
 #else /* CONFIG_NUMA */
 
diff --git a/mm/slab.c b/mm/slab.c
index a467b30..d80b654 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3503,7 +3503,6 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
@@ -3516,13 +3515,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#else
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __do_kmalloc_node(size, flags, node, 0);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
 #endif /* CONFIG_NUMA */
 
 /**
@@ -3548,8 +3540,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	return ret;
 }
 
-
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, _RET_IP_);
@@ -3562,14 +3552,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#else
-void *__kmalloc(size_t size, gfp_t flags)
-{
-	return __do_kmalloc(size, flags, 0);
-}
-EXPORT_SYMBOL(__kmalloc);
-#endif
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
diff --git a/mm/slob.c b/mm/slob.c
index 21980e0..96a8620 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -468,7 +468,6 @@ void *__kmalloc(size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#ifdef CONFIG_TRACING
 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
 {
 	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
@@ -481,7 +480,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
 	return __do_kmalloc_node(size, gfp, node, caller);
 }
 #endif
-#endif
 
 void kfree(const void *block)
 {
-- 
1.7.9.5


* [PATCH 3/5] mm/slab: move cache_flusharray() out of unlikely.text section
From: Joonsoo Kim @ 2014-08-21  8:09 UTC
  To: Andrew Morton
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, linux-mm,
	linux-kernel, Joonsoo Kim

Now, due to the likely keyword, the compiled code of cache_flusharray()
ends up in the .text.unlikely section. Although freeing through it is less
common than freeing to the cpu cache, it is more common than free_block(),
yet free_block() sits in the normal text section. This patch removes the
likely keyword to fix this odd situation.
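
For context, the branch in question in __cache_free() looks roughly
like this (the cache_flusharray() call sits just below the hunk shown
in the diff):

	if (likely(ac->avail < ac->limit)) {
		STATS_INC_FREEHIT(cachep);	/* common: free into cpu cache */
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);	/* hinted cold by likely() above */
	}

Because cache_flusharray() is static and its only call site is hinted
cold, the compiler may mark the whole function cold and place it in
.text.unlikely; dropping the hint leaves placement to its normal
heuristics.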

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 mm/slab.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slab.c b/mm/slab.c
index d80b654..d364e3f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3406,7 +3406,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
 		return;
 
-	if (likely(ac->avail < ac->limit)) {
+	if (ac->avail < ac->limit) {
 		STATS_INC_FREEHIT(cachep);
 	} else {
 		STATS_INC_FREEMISS(cachep);
-- 
1.7.9.5


* [PATCH 4/5] mm/slab: noinline __ac_put_obj()
From: Joonsoo Kim @ 2014-08-21  8:09 UTC
  To: Andrew Morton
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, linux-mm,
	linux-kernel, Joonsoo Kim

Our intention for __ac_put_obj() is that it should not affect anything when
sk_memalloc_socks() is disabled. But because __ac_put_obj() is so small, the
compiler inlines it into ac_put_obj() and bloats the code size of the free
path. This patch adds the noinline keyword to __ac_put_obj() so that it does
not disturb the normal free path at all.
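
For context, the caller is a small inline wrapper, roughly (simplified
from mm/slab.c of this period):

	static inline void ac_put_obj(struct kmem_cache *cachep,
				      struct array_cache *ac, void *objp)
	{
		if (unlikely(sk_memalloc_socks()))
			objp = __ac_put_obj(cachep, ac, objp);

		ac->entry[ac->avail++] = objp;
	}

With __ac_put_obj() marked noinline, only the cheap sk_memalloc_socks()
test remains inlined in the free path; the pfmemalloc handling stays
out of line.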

<Before>
nm -S slab-orig.o |
	grep -e "t cache_alloc_refill" -e "T kfree" -e "T kmem_cache_free"

0000000000001e80 00000000000002f5 t cache_alloc_refill
0000000000001230 0000000000000258 T kfree
0000000000000690 000000000000024c T kmem_cache_free

<After>
nm -S slab-patched.o |
	grep -e "t cache_alloc_refill" -e "T kfree" -e "T kmem_cache_free"

0000000000001e00 00000000000002e5 t cache_alloc_refill
00000000000011e0 0000000000000228 T kfree
0000000000000670 0000000000000216 T kmem_cache_free

cache_alloc_refill: 0x2f5->0x2e5
kfree: 0x258->0x228
kmem_cache_free: 0x24c->0x216

The code size of each function is slightly reduced.
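(In decimal, the hex sizes from nm above: cache_alloc_refill 757 -> 741
bytes, kfree 600 -> 552, kmem_cache_free 588 -> 534.)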

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 mm/slab.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index d364e3f..c9f137f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -785,8 +785,8 @@ static inline void *ac_get_obj(struct kmem_cache *cachep,
 	return objp;
 }
 
-static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
-								void *objp)
+static noinline void *__ac_put_obj(struct kmem_cache *cachep,
+			struct array_cache *ac, void *objp)
 {
 	if (unlikely(pfmemalloc_active)) {
 		/* Some pfmemalloc slabs exist, check if this is one */
-- 
1.7.9.5


* [PATCH 5/5] mm/slab: factor out unlikely part of cache_free_alien()
From: Joonsoo Kim @ 2014-08-21  8:09 UTC
  To: Andrew Morton
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, linux-mm,
	linux-kernel, Joonsoo Kim

cache_free_alien() is a rarely used function, taken only when there is a node
mismatch. But it is defined with the inline attribute, so it is inlined into
__cache_free(), which is the core free function of the slab allocator. That
uselessly makes the kmem_cache_free()/kfree() functions large. What we really
need to inline is just the node-match check, so this patch factors the other
parts of cache_free_alien() out to reduce the code size of kmem_cache_free()/
kfree().

<Before>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
00000000000011e0 0000000000000228 T kfree
0000000000000670 0000000000000216 T kmem_cache_free

<After>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
0000000000001110 00000000000001b5 T kfree
0000000000000750 0000000000000181 T kmem_cache_free

You can see the slightly reduced text sizes: 0x228->0x1b5 and 0x216->0x181.
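(In decimal: kfree 552 -> 437 bytes and kmem_cache_free 534 -> 385, i.e.
115 and 149 bytes saved, respectively.)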

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 mm/slab.c |   38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index c9f137f..5927a17 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+				int node, int page_node)
 {
-	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
 	struct alien_cache *alien = NULL;
 	struct array_cache *ac;
-	int node;
 	LIST_HEAD(list);
 
-	node = numa_mem_id();
-
-	/*
-	 * Make sure we are not freeing a object from another node to the array
-	 * cache on this cpu.
-	 */
-	if (likely(nodeid == node))
-		return 0;
-
 	n = get_node(cachep, node);
 	STATS_INC_NODEFREES(cachep);
-	if (n->alien && n->alien[nodeid]) {
-		alien = n->alien[nodeid];
+	if (n->alien && n->alien[page_node]) {
+		alien = n->alien[page_node];
 		ac = &alien->ac;
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid, &list);
+			__drain_alien_cache(cachep, ac, page_node, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
 		slabs_destroy(cachep, &list);
 	} else {
-		n = get_node(cachep, nodeid);
+		n = get_node(cachep, page_node);
 		spin_lock(&n->list_lock);
-		free_block(cachep, &objp, 1, nodeid, &list);
+		free_block(cachep, &objp, 1, page_node, &list);
 		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	int page_node = page_to_nid(virt_to_page(objp));
+	int node = numa_mem_id();
+	/*
+	 * Make sure we are not freeing a object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(node == page_node))
+		return 0;
+
+	return __cache_free_alien(cachep, objp, node, page_node);
+}
 #endif
 
 /*
-- 
1.7.9.5


* Re: [PATCH 1/5] mm/slab_common: move kmem_cache definition to internal header
From: Zhang Yanfei @ 2014-08-21  8:27 UTC
  To: Joonsoo Kim
  Cc: Andrew Morton, Christoph Lameter, Pekka Enberg, David Rientjes,
	linux-mm, linux-kernel

Hello Joonsoo,

Seems like this is a cleanup patchset. I want to mention another
tiny cleanup here.

You removed "struct slab" before, but it seems there is still
a slab_page field left in the page descriptor, with no user now, right?
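
For reference, the leftover looks roughly like this union member in
include/linux/mm_types.h (a from-memory sketch, not an exact quote):

	struct page {
		/* ... */
		union {
			struct list_head lru;
			/* ... */
			struct slab *slab_page;	/* slab fields; struct slab is gone */
		};
		/* ... */
	};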

Thanks
Zhang

On 08/21/2014 04:09 PM, Joonsoo Kim wrote:
> We don't need to keep the kmem_cache definition in include/linux/slab.h
> if we don't need to inline kmem_cache_size(). According to my
> code inspection, this function is only called at lc_create() in
> lib/lru_cache.c, which may be called during some initialization phase,
> so we don't need to inline it. Therefore, move it to slab_common.c and
> move the kmem_cache definition to the internal header.
> 
> After this change, we can change the kmem_cache definition easily without
> a full kernel build. For instance, we can turn CONFIG_SLUB_STATS on/off
> without a full kernel build.
> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> ---
>  include/linux/slab.h |   42 +-----------------------------------------
>  mm/slab.h            |   33 +++++++++++++++++++++++++++++++++
>  mm/slab_common.c     |    8 ++++++++
>  3 files changed, 42 insertions(+), 41 deletions(-)
> 
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index 1d9abb7..9062e4a 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -158,31 +158,6 @@ size_t ksize(const void *);
>  #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
>  #endif
>  
> -#ifdef CONFIG_SLOB
> -/*
> - * Common fields provided in kmem_cache by all slab allocators
> - * This struct is either used directly by the allocator (SLOB)
> - * or the allocator must include definitions for all fields
> - * provided in kmem_cache_common in their definition of kmem_cache.
> - *
> - * Once we can do anonymous structs (C11 standard) we could put a
> - * anonymous struct definition in these allocators so that the
> - * separate allocations in the kmem_cache structure of SLAB and
> - * SLUB is no longer needed.
> - */
> -struct kmem_cache {
> -	unsigned int object_size;/* The original size of the object */
> -	unsigned int size;	/* The aligned/padded/added on size  */
> -	unsigned int align;	/* Alignment as calculated */
> -	unsigned long flags;	/* Active flags on the slab */
> -	const char *name;	/* Slab name for sysfs */
> -	int refcount;		/* Use counter */
> -	void (*ctor)(void *);	/* Called on object slot creation */
> -	struct list_head list;	/* List of all slab caches on the system */
> -};
> -
> -#endif /* CONFIG_SLOB */
> -
>  /*
>   * Kmalloc array related definitions
>   */
> @@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
>  }
>  #endif /* CONFIG_TRACING */
>  
> -#ifdef CONFIG_SLAB
> -#include <linux/slab_def.h>
> -#endif
> -
> -#ifdef CONFIG_SLUB
> -#include <linux/slub_def.h>
> -#endif
> -
>  extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
>  
>  #ifdef CONFIG_TRACING
> @@ -650,14 +617,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
>  	return kmalloc_node(size, flags | __GFP_ZERO, node);
>  }
>  
> -/*
> - * Determine the size of a slab object
> - */
> -static inline unsigned int kmem_cache_size(struct kmem_cache *s)
> -{
> -	return s->object_size;
> -}
> -
> +unsigned int kmem_cache_size(struct kmem_cache *s);
>  void __init kmem_cache_init_late(void);
>  
>  #endif	/* _LINUX_SLAB_H */
> diff --git a/mm/slab.h b/mm/slab.h
> index 0e0fdd3..bd1c54a 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -4,6 +4,39 @@
>   * Internal slab definitions
>   */
>  
> +#ifdef CONFIG_SLOB
> +/*
> + * Common fields provided in kmem_cache by all slab allocators
> + * This struct is either used directly by the allocator (SLOB)
> + * or the allocator must include definitions for all fields
> + * provided in kmem_cache_common in their definition of kmem_cache.
> + *
> + * Once we can do anonymous structs (C11 standard) we could put a
> + * anonymous struct definition in these allocators so that the
> + * separate allocations in the kmem_cache structure of SLAB and
> + * SLUB is no longer needed.
> + */
> +struct kmem_cache {
> +	unsigned int object_size;/* The original size of the object */
> +	unsigned int size;	/* The aligned/padded/added on size  */
> +	unsigned int align;	/* Alignment as calculated */
> +	unsigned long flags;	/* Active flags on the slab */
> +	const char *name;	/* Slab name for sysfs */
> +	int refcount;		/* Use counter */
> +	void (*ctor)(void *);	/* Called on object slot creation */
> +	struct list_head list;	/* List of all slab caches on the system */
> +};
> +
> +#endif /* CONFIG_SLOB */
> +
> +#ifdef CONFIG_SLAB
> +#include <linux/slab_def.h>
> +#endif
> +
> +#ifdef CONFIG_SLUB
> +#include <linux/slub_def.h>
> +#endif
> +
>  /*
>   * State of the slab allocator.
>   *
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index d319502..2088904 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -30,6 +30,14 @@ LIST_HEAD(slab_caches);
>  DEFINE_MUTEX(slab_mutex);
>  struct kmem_cache *kmem_cache;
>  
> +/*
> + * Determine the size of a slab object
> + */
> +unsigned int kmem_cache_size(struct kmem_cache *s)
> +{
> +	return s->object_size;
> +}
> +
>  #ifdef CONFIG_DEBUG_VM
>  static int kmem_cache_sanity_check(const char *name, size_t size)
>  {
> 


-- 
Thanks.
Zhang Yanfei

* Re: [PATCH 1/5] mm/slab_common: move kmem_cache definition to internal header
From: Christoph Lameter @ 2014-08-21 14:14 UTC
  To: Joonsoo Kim
  Cc: Andrew Morton, Pekka Enberg, David Rientjes, linux-mm, linux-kernel

On Thu, 21 Aug 2014, Joonsoo Kim wrote:

> We don't need to keep the kmem_cache definition in include/linux/slab.h
> if we don't need to inline kmem_cache_size(). According to my
> code inspection, this function is only called at lc_create() in
> lib/lru_cache.c, which may be called during some initialization phase,
> so we don't need to inline it. Therefore, move it to slab_common.c and
> move the kmem_cache definition to the internal header.
>
> After this change, we can change the kmem_cache definition easily without
> a full kernel build. For instance, we can turn CONFIG_SLUB_STATS on/off
> without a full kernel build.

Wow. I did not realize that we were already at that point.

Acked-by: Christoph Lameter <cl@linux.com>

* Re: [PATCH 2/5] mm/sl[ao]b: always track caller in kmalloc_(node_)track_caller()
From: Christoph Lameter @ 2014-08-21 14:15 UTC
  To: Joonsoo Kim
  Cc: Andrew Morton, Pekka Enberg, David Rientjes, linux-mm, linux-kernel

On Thu, 21 Aug 2014, Joonsoo Kim wrote:

> From this change, we can turn CONFIG_DEBUG_SLAB on/off without a full
> kernel build and remove some complicated '#if' definitions. It looks
> more beneficial to me.

I agree.

Acked-by: Christoph Lameter <cl@linux.com>


* Re: [PATCH 1/5] mm/slab_common: move kmem_cache definition to internal header
From: Joonsoo Kim @ 2014-08-25  8:29 UTC
  To: Zhang Yanfei
  Cc: Andrew Morton, Christoph Lameter, Pekka Enberg, David Rientjes,
	linux-mm, linux-kernel

On Thu, Aug 21, 2014 at 04:27:52PM +0800, Zhang Yanfei wrote:
> Hello Joonsoo,

Hello. :)

> 
> Seems like this is a cleanup patchset. I want to mention another
> tiny cleanup here.

I think these are not only cleanups but also a build improvement.

> You removed "struct slab" before, but it seems there is still
> a slab_page field left in the page descriptor, with no user now, right?

Yes, you are right. I will do it in the next spin. :)

Thanks.
