* [PATCH] mm/slab: make calculate_alignment() function static
@ 2017-12-10 8:01 ` Byongho Lee
0 siblings, 0 replies; 4+ messages in thread
From: Byongho Lee @ 2017-12-10 8:01 UTC (permalink / raw)
Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
Andrew Morton, linux-mm, linux-kernel
The calculate_alignment() function is only used inside 'slab_common.c'.
So make it static and let the compiler do more optimizations.
After this patch there are small improvements in 'text' and 'data' size.
$ gcc --version
gcc (GCC) 7.2.1 20171128
Before:
text data bss dec hex filename
9890457 3828702 1212364 14931523 e3d643 vmlinux
After:
text data bss dec hex filename
9890437 3828670 1212364 14931471 e3d60f vmlinux
Also I fixed a 'style problem' reported by 'scripts/checkpatch.pl'.
WARNING: Missing a blank line after declarations
#53: FILE: mm/slab_common.c:286:
+ unsigned long ralign = cache_line_size();
+ while (size <= ralign / 2)
Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com>
---
mm/slab.h | 3 ---
mm/slab_common.c | 56 +++++++++++++++++++++++++++++---------------------------
2 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 028cdc7df67e..e894889dc24a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -79,9 +79,6 @@ extern const struct kmalloc_info_struct {
unsigned long size;
} kmalloc_info[];
-unsigned long calculate_alignment(unsigned long flags,
- unsigned long align, unsigned long size);
-
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0d7fe71ff5e4..d25e7b56e20b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -267,6 +267,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+static unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size)
+{
+ /*
+ * If the user wants hardware cache aligned objects then follow that
+ * suggestion if the object is sufficiently large.
+ *
+ * The hardware cache alignment cannot override the specified
+ * alignment though. If that is greater then use it.
+ */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ unsigned long ralign;
+
+ ralign = cache_line_size();
+ while (size <= ralign / 2)
+ ralign /= 2;
+ align = max(align, ralign);
+ }
+
+ if (align < ARCH_SLAB_MINALIGN)
+ align = ARCH_SLAB_MINALIGN;
+
+ return ALIGN(align, sizeof(void *));
+}
+
/*
* Find a mergeable slab cache
*/
@@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
return NULL;
}
-/*
- * Figure out what the alignment of the objects will be given a set of
- * flags, a user specified alignment and the size of the objects.
- */
-unsigned long calculate_alignment(unsigned long flags,
- unsigned long align, unsigned long size)
-{
- /*
- * If the user wants hardware cache aligned objects then follow that
- * suggestion if the object is sufficiently large.
- *
- * The hardware cache alignment cannot override the specified
- * alignment though. If that is greater then use it.
- */
- if (flags & SLAB_HWCACHE_ALIGN) {
- unsigned long ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- align = max(align, ralign);
- }
-
- if (align < ARCH_SLAB_MINALIGN)
- align = ARCH_SLAB_MINALIGN;
-
- return ALIGN(align, sizeof(void *));
-}
-
static struct kmem_cache *create_cache(const char *name,
size_t object_size, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *),
--
2.15.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH] mm/slab: make calculate_alignment() function static
@ 2017-12-10 8:01 ` Byongho Lee
0 siblings, 0 replies; 4+ messages in thread
From: Byongho Lee @ 2017-12-10 8:01 UTC (permalink / raw)
Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
Andrew Morton, linux-mm, linux-kernel
The calculate_alignment() function is only used inside 'slab_common.c'.
So make it static and let the compiler do more optimizations.
After this patch there are small improvements in 'text' and 'data' size.
$ gcc --version
gcc (GCC) 7.2.1 20171128
Before:
text data bss dec hex filename
9890457 3828702 1212364 14931523 e3d643 vmlinux
After:
text data bss dec hex filename
9890437 3828670 1212364 14931471 e3d60f vmlinux
Also I fixed a 'style problem' reported by 'scripts/checkpatch.pl'.
WARNING: Missing a blank line after declarations
#53: FILE: mm/slab_common.c:286:
+ unsigned long ralign = cache_line_size();
+ while (size <= ralign / 2)
Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com>
---
mm/slab.h | 3 ---
mm/slab_common.c | 56 +++++++++++++++++++++++++++++---------------------------
2 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 028cdc7df67e..e894889dc24a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -79,9 +79,6 @@ extern const struct kmalloc_info_struct {
unsigned long size;
} kmalloc_info[];
-unsigned long calculate_alignment(unsigned long flags,
- unsigned long align, unsigned long size);
-
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0d7fe71ff5e4..d25e7b56e20b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -267,6 +267,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+static unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size)
+{
+ /*
+ * If the user wants hardware cache aligned objects then follow that
+ * suggestion if the object is sufficiently large.
+ *
+ * The hardware cache alignment cannot override the specified
+ * alignment though. If that is greater then use it.
+ */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ unsigned long ralign;
+
+ ralign = cache_line_size();
+ while (size <= ralign / 2)
+ ralign /= 2;
+ align = max(align, ralign);
+ }
+
+ if (align < ARCH_SLAB_MINALIGN)
+ align = ARCH_SLAB_MINALIGN;
+
+ return ALIGN(align, sizeof(void *));
+}
+
/*
* Find a mergeable slab cache
*/
@@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
return NULL;
}
-/*
- * Figure out what the alignment of the objects will be given a set of
- * flags, a user specified alignment and the size of the objects.
- */
-unsigned long calculate_alignment(unsigned long flags,
- unsigned long align, unsigned long size)
-{
- /*
- * If the user wants hardware cache aligned objects then follow that
- * suggestion if the object is sufficiently large.
- *
- * The hardware cache alignment cannot override the specified
- * alignment though. If that is greater then use it.
- */
- if (flags & SLAB_HWCACHE_ALIGN) {
- unsigned long ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- align = max(align, ralign);
- }
-
- if (align < ARCH_SLAB_MINALIGN)
- align = ARCH_SLAB_MINALIGN;
-
- return ALIGN(align, sizeof(void *));
-}
-
static struct kmem_cache *create_cache(const char *name,
size_t object_size, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *),
--
2.15.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] mm/slab: make calculate_alignment() function static
2017-12-10 8:01 ` Byongho Lee
@ 2017-12-10 10:40 ` Michal Hocko
-1 siblings, 0 replies; 4+ messages in thread
From: Michal Hocko @ 2017-12-10 10:40 UTC (permalink / raw)
To: Byongho Lee
Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
Andrew Morton, linux-mm, linux-kernel
On Sun 10-12-17 17:01:32, Byongho Lee wrote:
> calculate_alignment() function is only used inside 'slab_common.c'.
> So make it static and let compiler do more optimizations.
>
> After this patch there's small improvements in 'text' and 'data' size.
>
> $ gcc --version
> gcc (GCC) 7.2.1 20171128
>
> Before:
> text data bss dec hex filename
> 9890457 3828702 1212364 14931523 e3d643 vmlinux
>
> After:
> text data bss dec hex filename
> 9890437 3828670 1212364 14931471 e3d60f vmlinux
>
> Also I fixed a 'style problem' reported by 'scripts/checkpatch.pl'.
>
> WARNING: Missing a blank line after declarations
> #53: FILE: mm/slab_common.c:286:
> + unsigned long ralign = cache_line_size();
> + while (size <= ralign / 2)
>
> Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
> ---
> mm/slab.h | 3 ---
> mm/slab_common.c | 56 +++++++++++++++++++++++++++++---------------------------
> 2 files changed, 29 insertions(+), 30 deletions(-)
>
> diff --git a/mm/slab.h b/mm/slab.h
> index 028cdc7df67e..e894889dc24a 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -79,9 +79,6 @@ extern const struct kmalloc_info_struct {
> unsigned long size;
> } kmalloc_info[];
>
> -unsigned long calculate_alignment(unsigned long flags,
> - unsigned long align, unsigned long size);
> -
> #ifndef CONFIG_SLOB
> /* Kmalloc array related functions */
> void setup_kmalloc_cache_index_table(void);
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 0d7fe71ff5e4..d25e7b56e20b 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -267,6 +267,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
> }
> #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
>
> +/*
> + * Figure out what the alignment of the objects will be given a set of
> + * flags, a user specified alignment and the size of the objects.
> + */
> +static unsigned long calculate_alignment(unsigned long flags,
> + unsigned long align, unsigned long size)
> +{
> + /*
> + * If the user wants hardware cache aligned objects then follow that
> + * suggestion if the object is sufficiently large.
> + *
> + * The hardware cache alignment cannot override the specified
> + * alignment though. If that is greater then use it.
> + */
> + if (flags & SLAB_HWCACHE_ALIGN) {
> + unsigned long ralign;
> +
> + ralign = cache_line_size();
> + while (size <= ralign / 2)
> + ralign /= 2;
> + align = max(align, ralign);
> + }
> +
> + if (align < ARCH_SLAB_MINALIGN)
> + align = ARCH_SLAB_MINALIGN;
> +
> + return ALIGN(align, sizeof(void *));
> +}
> +
> /*
> * Find a mergeable slab cache
> */
> @@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
> return NULL;
> }
>
> -/*
> - * Figure out what the alignment of the objects will be given a set of
> - * flags, a user specified alignment and the size of the objects.
> - */
> -unsigned long calculate_alignment(unsigned long flags,
> - unsigned long align, unsigned long size)
> -{
> - /*
> - * If the user wants hardware cache aligned objects then follow that
> - * suggestion if the object is sufficiently large.
> - *
> - * The hardware cache alignment cannot override the specified
> - * alignment though. If that is greater then use it.
> - */
> - if (flags & SLAB_HWCACHE_ALIGN) {
> - unsigned long ralign = cache_line_size();
> - while (size <= ralign / 2)
> - ralign /= 2;
> - align = max(align, ralign);
> - }
> -
> - if (align < ARCH_SLAB_MINALIGN)
> - align = ARCH_SLAB_MINALIGN;
> -
> - return ALIGN(align, sizeof(void *));
> -}
> -
> static struct kmem_cache *create_cache(const char *name,
> size_t object_size, size_t size, size_t align,
> unsigned long flags, void (*ctor)(void *),
> --
> 2.15.1
>
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majordomo@kvack.org. For more info on Linux MM,
> see: http://www.linux-mm.org/ .
> Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
--
Michal Hocko
SUSE Labs
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] mm/slab: make calculate_alignment() function static
@ 2017-12-10 10:40 ` Michal Hocko
0 siblings, 0 replies; 4+ messages in thread
From: Michal Hocko @ 2017-12-10 10:40 UTC (permalink / raw)
To: Byongho Lee
Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
Andrew Morton, linux-mm, linux-kernel
On Sun 10-12-17 17:01:32, Byongho Lee wrote:
> calculate_alignment() function is only used inside 'slab_common.c'.
> So make it static and let compiler do more optimizations.
>
> After this patch there's small improvements in 'text' and 'data' size.
>
> $ gcc --version
> gcc (GCC) 7.2.1 20171128
>
> Before:
> text data bss dec hex filename
> 9890457 3828702 1212364 14931523 e3d643 vmlinux
>
> After:
> text data bss dec hex filename
> 9890437 3828670 1212364 14931471 e3d60f vmlinux
>
> Also I fixed a 'style problem' reported by 'scripts/checkpatch.pl'.
>
> WARNING: Missing a blank line after declarations
> #53: FILE: mm/slab_common.c:286:
> + unsigned long ralign = cache_line_size();
> + while (size <= ralign / 2)
>
> Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
> ---
> mm/slab.h | 3 ---
> mm/slab_common.c | 56 +++++++++++++++++++++++++++++---------------------------
> 2 files changed, 29 insertions(+), 30 deletions(-)
>
> diff --git a/mm/slab.h b/mm/slab.h
> index 028cdc7df67e..e894889dc24a 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -79,9 +79,6 @@ extern const struct kmalloc_info_struct {
> unsigned long size;
> } kmalloc_info[];
>
> -unsigned long calculate_alignment(unsigned long flags,
> - unsigned long align, unsigned long size);
> -
> #ifndef CONFIG_SLOB
> /* Kmalloc array related functions */
> void setup_kmalloc_cache_index_table(void);
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 0d7fe71ff5e4..d25e7b56e20b 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -267,6 +267,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
> }
> #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
>
> +/*
> + * Figure out what the alignment of the objects will be given a set of
> + * flags, a user specified alignment and the size of the objects.
> + */
> +static unsigned long calculate_alignment(unsigned long flags,
> + unsigned long align, unsigned long size)
> +{
> + /*
> + * If the user wants hardware cache aligned objects then follow that
> + * suggestion if the object is sufficiently large.
> + *
> + * The hardware cache alignment cannot override the specified
> + * alignment though. If that is greater then use it.
> + */
> + if (flags & SLAB_HWCACHE_ALIGN) {
> + unsigned long ralign;
> +
> + ralign = cache_line_size();
> + while (size <= ralign / 2)
> + ralign /= 2;
> + align = max(align, ralign);
> + }
> +
> + if (align < ARCH_SLAB_MINALIGN)
> + align = ARCH_SLAB_MINALIGN;
> +
> + return ALIGN(align, sizeof(void *));
> +}
> +
> /*
> * Find a mergeable slab cache
> */
> @@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
> return NULL;
> }
>
> -/*
> - * Figure out what the alignment of the objects will be given a set of
> - * flags, a user specified alignment and the size of the objects.
> - */
> -unsigned long calculate_alignment(unsigned long flags,
> - unsigned long align, unsigned long size)
> -{
> - /*
> - * If the user wants hardware cache aligned objects then follow that
> - * suggestion if the object is sufficiently large.
> - *
> - * The hardware cache alignment cannot override the specified
> - * alignment though. If that is greater then use it.
> - */
> - if (flags & SLAB_HWCACHE_ALIGN) {
> - unsigned long ralign = cache_line_size();
> - while (size <= ralign / 2)
> - ralign /= 2;
> - align = max(align, ralign);
> - }
> -
> - if (align < ARCH_SLAB_MINALIGN)
> - align = ARCH_SLAB_MINALIGN;
> -
> - return ALIGN(align, sizeof(void *));
> -}
> -
> static struct kmem_cache *create_cache(const char *name,
> size_t object_size, size_t size, size_t align,
> unsigned long flags, void (*ctor)(void *),
> --
> 2.15.1
>
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majordomo@kvack.org. For more info on Linux MM,
> see: http://www.linux-mm.org/ .
> Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
--
Michal Hocko
SUSE Labs
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2017-12-10 10:40 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-12-10 8:01 [PATCH] mm/slab: make calculate_alignment() function static Byongho Lee
2017-12-10 8:01 ` Byongho Lee
2017-12-10 10:40 ` Michal Hocko
2017-12-10 10:40 ` Michal Hocko
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.