* [patch] mm/slub: Move slab initialization into irq enabled region
@ 2015-07-10 12:07 ` Thomas Gleixner
From: Thomas Gleixner @ 2015-07-10 12:07 UTC
  To: LKML
  Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, linux-mm, Sebastian Andrzej Siewior,
	Steven Rostedt, Peter Zijlstra

[-- Attachment #1: slub-move-slab-init-into-irq-enabled-region.patch --]
[-- Type: text/plain, Size: 3839 bytes --]

Initializing a new slab can introduce rather large latencies because
most of the initialization always runs with interrupts disabled.

There is no point in doing so. The newly allocated slab is not visible
yet, so there is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab(),
so that interrupts stay enabled for all allocations with __GFP_WAIT set.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/slub.c |   85 +++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 43 insertions(+), 42 deletions(-)
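
For readers who want the shape of the change before reading the diff, the
pattern the patch establishes can be sketched as below. This is an
illustrative sketch only: the function name slab_irq_window_sketch() is
made up, and the real code is allocate_slab()/new_slab() in the diff that
follows; local_irq_enable()/local_irq_disable(), alloc_pages_node() and the
SLUB helper oo_order() are existing kernel interfaces.

	/*
	 * Hypothetical sketch: with __GFP_WAIT the allocation may sleep,
	 * so interrupts can stay enabled across the page allocation and
	 * across the expensive setup of the new slab. The slab is not
	 * visible to anyone else yet, so no protection is needed, and
	 * interrupts are only re-disabled before returning to the
	 * irq-off allocation path.
	 */
	static struct page *slab_irq_window_sketch(struct kmem_cache *s,
						   gfp_t flags, int node)
	{
		struct page *page;

		if (flags & __GFP_WAIT)
			local_irq_enable();

		page = alloc_pages_node(node, flags, oo_order(s->oo));

		/*
		 * Expensive initialization (debug setup, constructors,
		 * freelist linking) would run here, with interrupts
		 * still enabled, because the page is not published yet.
		 */

		if (flags & __GFP_WAIT)
			local_irq_disable();	/* restore the caller's irq-off state */

		return page;
	}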

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct
 	kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
+		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	void *start, *p;
+	int idx, order;
 
 	flags &= gfp_allowed_mask;
 
@@ -1364,8 +1377,11 @@ static struct page *allocate_slab(struct
 			stat(s, ORDER_FALLBACK);
 	}
 
-	if (kmemcheck_enabled && page
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+	if (!page)
+		goto out;
+
+	if (kmemcheck_enabled &&
+	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1396,12 @@ static struct page *allocate_slab(struct
 			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
 	if (!page)
-		return NULL;
+		goto out;
 
 	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor)) {
-		kasan_unpoison_object_data(s, object);
-		s->ctor(object);
-		kasan_poison_object_data(s, object);
-	}
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *p;
-	int order;
-	int idx;
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-		BUG();
-	}
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
 
 	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
@@ -1448,10 +1425,34 @@ static struct page *new_slab(struct kmem
 	page->freelist = start;
 	page->inuse = page->objects;
 	page->frozen = 1;
+
 out:
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
 	return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int order = compound_order(page);




* Re: [patch] mm/slub: Move slab initialization into irq enabled region
  2015-07-10 12:07 ` Thomas Gleixner
@ 2015-07-10 14:19   ` Christoph Lameter
From: Christoph Lameter @ 2015-07-10 14:19 UTC
  To: Thomas Gleixner
  Cc: LKML, Pekka Enberg, David Rientjes, Joonsoo Kim, Andrew Morton,
	linux-mm, Sebastian Andrzej Siewior, Steven Rostedt,
	Peter Zijlstra

There is a duplicate check for page == NULL now.


Subject: slub: allocate_slab: no need to check twice for page == NULL

Remove the second check.

Signed-off-by: Christoph Lameter <cl@linux.com>

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1396,9 +1396,6 @@ static struct page *allocate_slab(struct
 			kmemcheck_mark_unallocated_pages(page, pages);
 	}

-	if (!page)
-		goto out;
-
 	page->objects = oo_objects(oo);

 	order = compound_order(page);


* Re: [patch] mm/slub: Move slab initialization into irq enabled region
  2015-07-10 12:07 ` Thomas Gleixner
@ 2015-07-10 15:02   ` Steven Rostedt
From: Steven Rostedt @ 2015-07-10 15:02 UTC
  To: Thomas Gleixner
  Cc: LKML, Christoph Lameter, Pekka Enberg, David Rientjes,
	Joonsoo Kim, Andrew Morton, linux-mm, Sebastian Andrzej Siewior,
	Peter Zijlstra

On Fri, 10 Jul 2015 12:07:13 -0000
Thomas Gleixner <tglx@linutronix.de> wrote:


>  /*
>   * Slab allocation and freeing
>   */
> @@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
>  	struct page *page;
>  	struct kmem_cache_order_objects oo = s->oo;
>  	gfp_t alloc_gfp;
> +	void *start, *p;
> +	int idx, order;
>  
>  	flags &= gfp_allowed_mask;
>  
> @@ -1364,8 +1377,11 @@ static struct page *allocate_slab(struct
>  			stat(s, ORDER_FALLBACK);
>  	}
>  
> -	if (kmemcheck_enabled && page
> -		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
> +	if (!page)
> +		goto out;

Since the above now looks like this:

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);

		if (page)
			stat(s, ORDER_FALLBACK);
	}

	if (!page)
		goto out;

Why not have it do this:

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (unlikely(!page))
			goto out;

		stat(s, ORDER_FALLBACK);
	}

And get rid of the double check for !page in the fast path.

-- Steve


> +
> +	if (kmemcheck_enabled &&
> +	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
>  		int pages = 1 << oo_order(oo);
>  
>  		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
> @@ -1380,51 +1396,12 @@ static struct page *allocate_slab(struct
>  			kmemcheck_mark_unallocated_pages(page, pages);
>  	}
>  
> -	if (flags & __GFP_WAIT)
> -		local_irq_disable();
>  	if (!page)
> -		return NULL;
> +		goto out;
>  
>  	page->objects = oo_objects(oo);



* Re: [patch] mm/slub: Move slab initialization into irq enabled region
  2015-07-10 15:02   ` Steven Rostedt
@ 2015-07-10 19:22     ` Christoph Lameter
From: Christoph Lameter @ 2015-07-10 19:22 UTC
  To: Steven Rostedt
  Cc: Thomas Gleixner, LKML, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, linux-mm, Sebastian Andrzej Siewior,
	Peter Zijlstra

On Fri, 10 Jul 2015, Steven Rostedt wrote:

> And get rid of the double check for !page in the fast path.

This is the 2nd of 3 checks for !page, by the way. Both our approaches
together get it down to 1.



* [patch V2] mm/slub: Move slab initialization into irq enabled region
  2015-07-10 19:22     ` Christoph Lameter
@ 2015-07-10 21:08       ` Thomas Gleixner
From: Thomas Gleixner @ 2015-07-10 21:08 UTC
  To: Christoph Lameter
  Cc: Steven Rostedt, LKML, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, linux-mm, Sebastian Andrzej Siewior,
	Peter Zijlstra

Initializing a new slab can introduce rather large latencies because
most of the initialization always runs with interrupts disabled.

There is no point in doing so. The newly allocated slab is not visible
yet, so there is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab(),
so that interrupts stay enabled for all allocations with __GFP_WAIT set.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---

V2: Removed the extra NULL checks as suggested by Steven and Christoph

 mm/slub.c |   89 +++++++++++++++++++++++++++++---------------------------------
 1 file changed, 42 insertions(+), 47 deletions(-)
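
As a reading aid, the resulting control flow can be sketched roughly as
below. This is a simplified, hypothetical sketch (the name
sketch_allocate_slab() is made up, and the lower-order fallback and the
kmemcheck shadow handling are omitted); the real code is allocate_slab()
in the hunks that follow.

	/*
	 * Hypothetical, simplified sketch of the V2 flow: the failure
	 * path jumps to "out", where the caller's interrupt state is
	 * restored before NULL is returned, and the success path passes
	 * through the same label after the slab has been set up with
	 * interrupts still enabled.
	 */
	static struct page *sketch_allocate_slab(struct kmem_cache *s,
						 gfp_t flags, int node)
	{
		struct page *page;

		if (flags & __GFP_WAIT)
			local_irq_enable();

		page = alloc_pages_node(node, flags, oo_order(s->oo));
		if (unlikely(!page))
			goto out;

		/* debug setup, constructors and freelist linking go here */

	out:
		if (flags & __GFP_WAIT)
			local_irq_disable();
		if (!page)
			return NULL;

		/* zone state and per-node slab accounting happen here */
		return page;
	}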

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct
 	kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
+		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	void *start, *p;
+	int idx, order;
 
 	flags &= gfp_allowed_mask;
 
@@ -1359,13 +1372,13 @@ static struct page *allocate_slab(struct
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(s, alloc_gfp, node, oo);
-
-		if (page)
-			stat(s, ORDER_FALLBACK);
+		if (unlikely(!page))
+			goto out;
+		stat(s, ORDER_FALLBACK);
 	}
 
-	if (kmemcheck_enabled && page
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+	if (kmemcheck_enabled &&
+	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1393,9 @@ static struct page *allocate_slab(struct
 			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
-	if (!page)
-		return NULL;
-
 	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor)) {
-		kasan_unpoison_object_data(s, object);
-		s->ctor(object);
-		kasan_poison_object_data(s, object);
-	}
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *p;
-	int order;
-	int idx;
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-		BUG();
-	}
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
 
 	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
@@ -1448,10 +1419,34 @@ static struct page *new_slab(struct kmem
 	page->freelist = start;
 	page->inuse = page->objects;
 	page->frozen = 1;
+
 out:
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
 	return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int order = compound_order(page);


* Re: [patch V2] mm/slub: Move slab initialization into irq enabled region
  2015-07-10 21:08       ` Thomas Gleixner
@ 2015-07-10 21:27         ` Christoph Lameter
From: Christoph Lameter @ 2015-07-10 21:27 UTC
  To: Thomas Gleixner
  Cc: Steven Rostedt, LKML, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, linux-mm, Sebastian Andrzej Siewior,
	Peter Zijlstra


Acked-by: Christoph Lameter <cl@linux.com>




* Re: [patch V2] mm/slub: Move slab initialization into irq enabled region
  2015-07-10 21:08       ` Thomas Gleixner
@ 2015-07-15 23:43         ` David Rientjes
From: David Rientjes @ 2015-07-15 23:43 UTC
  To: Thomas Gleixner
  Cc: Christoph Lameter, Steven Rostedt, LKML, Pekka Enberg,
	Joonsoo Kim, Andrew Morton, linux-mm, Sebastian Andrzej Siewior,
	Peter Zijlstra

On Fri, 10 Jul 2015, Thomas Gleixner wrote:

> Initializing a new slab can introduce rather large latencies because
> most of the initialization always runs with interrupts disabled.
> 
> There is no point in doing so. The newly allocated slab is not visible
> yet, so there is no reason to protect it against concurrent alloc/free.
> 
> Move the expensive parts of the initialization into allocate_slab(),
> so that interrupts stay enabled for all allocations with __GFP_WAIT set.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: Christoph Lameter <cl@linux.com>
> Cc: Pekka Enberg <penberg@kernel.org>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: linux-mm@kvack.org
> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Cc: Steven Rostedt <rostedt@goodmis.org>
> Cc: Peter Zijlstra <peterz@infradead.org>

Acked-by: David Rientjes <rientjes@google.com>


end of thread

Thread overview: 7 messages
2015-07-10 12:07 [patch] mm/slub: Move slab initialization into irq enabled region Thomas Gleixner
2015-07-10 14:19 ` Christoph Lameter
2015-07-10 15:02 ` Steven Rostedt
2015-07-10 19:22   ` Christoph Lameter
2015-07-10 21:08     ` [patch V2] mm/slub: Move slab initialization into irq enabled region Thomas Gleixner
2015-07-10 21:27       ` Christoph Lameter
2015-07-15 23:43       ` David Rientjes
