All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/1 v2] mm: Use BIT macro in SLAB bitmaps
@ 2017-05-09 11:04 Igor Stoppa
  2017-05-09 11:04 ` [PATCH 1/1 v2] Rework slab bitmasks Igor Stoppa
  0 siblings, 1 reply; 2+ messages in thread
From: Igor Stoppa @ 2017-05-09 11:04 UTC (permalink / raw)
  To: mhocko; +Cc: linux-mm, Igor Stoppa

The file include/linux/slab.h can be simplified by moving to use the
macro BIT() and making other bitmaps depend on their corresponding
master-toggle configuration option.

checkpatch.pl will generate some warnings about line length, but I didn't
want to alter the initial layout, which already suffered from this
problem.

The previous version was redefining a macro that is used by the Exynos BSP.

Igor Stoppa (1):
  Rework slab bitmasks

 include/linux/slab.h | 71 +++++++++++++++++++++++-----------------------------
 1 file changed, 31 insertions(+), 40 deletions(-)

-- 
2.9.3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 2+ messages in thread

* [PATCH 1/1 v2] Rework slab bitmasks
  2017-05-09 11:04 [PATCH 0/1 v2] mm: Use BIT macro in SLAB bitmaps Igor Stoppa
@ 2017-05-09 11:04 ` Igor Stoppa
  0 siblings, 0 replies; 2+ messages in thread
From: Igor Stoppa @ 2017-05-09 11:04 UTC (permalink / raw)
  To: mhocko; +Cc: linux-mm, Igor Stoppa

The bitmasks defined in the slab header can be made more readable by
using the BIT() macro.

Furthermore, several conditional definitions can be collapsed, by
expressing their value as a function of the configuration parameter that
controls them, using the macro IS_ENABLED().

Signed-off-by: Igor Stoppa <igor.stoppa@huawei.com>
---
 include/linux/slab.h | 71 +++++++++++++++++++++++-----------------------------
 1 file changed, 31 insertions(+), 40 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3c37a8c..6103bc4 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -15,18 +15,24 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
+#define __BIT_VL(vl, shift)  (((unsigned long)(vl != 0)) << (shift))
+#define BIT_VL(vl, shift)    __BIT_VL(vl, shift)
+#define BIT_CFG(cfg, shift)  __BIT_VL(IS_ENABLED(CONFIG_##cfg), shift)
+#define BIT_DBG(dbg, shift)  BIT_CFG(DEBUG_##dbg, shift)
+#define BIT_DBG_SLAB(shift)  BIT_DBG(SLAB, shift)
 
 /*
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
-#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+
+#define SLAB_CONSISTENCY_CHECKS	BIT_DBG_SLAB(8)   /* Perform (expensive) checks on alloc/free */
+#define SLAB_RED_ZONE		BIT_DBG_SLAB(10)  /* Red zone objs in a cache */
+#define SLAB_POISON		BIT_DBG_SLAB(11)  /* Poison objects */
+#define SLAB_HWCACHE_ALIGN	BIT(13)           /* Align objs on cache lines */
+#define SLAB_CACHE_DMA		BIT(14)           /* Use GFP_DMA memory */
+#define SLAB_STORE_USER		BIT_DBG_SLAB(16)  /* Store the last owner for bug hunting */
+#define SLAB_PANIC		BIT(18)           /* Panic if kmem_cache_create() fails */
 /*
  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
  *
@@ -62,44 +68,29 @@
  * rcu_read_lock before reading the address, then rcu_read_unlock after
  * taking the spinlock within the structure expected at that address.
  */
-#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
-#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
+#define SLAB_DESTROY_BY_RCU	BIT(19)         /* Defer freeing slabs to RCU */
+#define SLAB_MEM_SPREAD		BIT(20)         /* Spread some memory over cpuset */
+#define SLAB_TRACE		BIT(21)         /* Trace allocations and frees */
 
 /* Flag to prevent checks on free */
-#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	0x00400000UL
-#else
-# define SLAB_DEBUG_OBJECTS	0x00000000UL
-#endif
+# define SLAB_DEBUG_OBJECTS	BIT_DBG(OBJECTS, 22)
 
-#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE	BIT(23)         /* Avoid kmemleak tracing */
 
 /* Don't track use of uninitialized memory */
-#ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK		0x01000000UL
-#else
-# define SLAB_NOTRACK		0x00000000UL
-#endif
-#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
-#else
-# define SLAB_FAILSLAB		0x00000000UL
-#endif
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
-#else
-# define SLAB_ACCOUNT		0x00000000UL
-#endif
+# define SLAB_NOTRACK		BIT_CFG(KMEMCHECK, 24)
 
-#ifdef CONFIG_KASAN
-#define SLAB_KASAN		0x08000000UL
-#else
-#define SLAB_KASAN		0x00000000UL
-#endif
+/* Fault injection mark */
+# define SLAB_FAILSLAB		BIT_CFG(FAILSLAB, 25)
+
+/* Account to memcg */
+# define SLAB_ACCOUNT		BIT_VL(IS_ENABLED(CONFIG_MEMCG) && \
+				       !IS_ENABLED(CONFIG_SLOB), 26)
+
+#define SLAB_KASAN		BIT_CFG(KASAN, 27)
 
 /* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT	BIT(17)                 /* Objects are reclaimable */
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
@@ -246,9 +237,9 @@ static inline const char *__check_heap_object(const void *ptr,
 #endif
 
 /* Maximum allocatable size */
-#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
+#define KMALLOC_MAX_SIZE	BIT(KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
-#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_CACHE_SIZE	BIT(KMALLOC_SHIFT_HIGH)
 /* Maximum order allocatable via the slab allocagtor */
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
 
@@ -256,7 +247,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * Kmalloc subsystem.
  */
 #ifndef KMALLOC_MIN_SIZE
-#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
+#define KMALLOC_MIN_SIZE        BIT(KMALLOC_SHIFT_LOW)
 #endif
 
 /*
-- 
2.9.3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2017-05-09 11:07 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-09 11:04 [PATCH 0/1 v2] mm: Use BIT macro in SLAB bitmaps Igor Stoppa
2017-05-09 11:04 ` [PATCH 1/1 v2] Rework slab bitmasks Igor Stoppa

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.