From: Christoph Lameter <cl@linux.com>
To: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: David Rientjes <rientjes@google.com>,
	Eric Dumazet <eric.dumazet@gmail.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	linux-kernel@vger.kernel.org,
	Thomas Gleixner <tglx@linutronix.de>
Subject: [slubllv7 07/17] slub: explicit list_lock taking
Date: Wed, 01 Jun 2011 12:25:50 -0500
Message-ID: <20110601172615.848578051@linux.com>
In-Reply-To: <20110601172543.437240675@linux.com>

[-- Attachment #1: unlock_list_ops --]
[-- Type: text/plain, Size: 6845 bytes --]

The allocator fastpath rework changes the way the list_lock is used.
Remove the list_lock handling from the helper functions, which currently
hide it from the critical sections, and take the lock directly in those
critical sections instead.

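For illustration, a minimal sketch of the resulting convention, mirroring
the __slab_free() hunk below (the caller takes list_lock; the list helper
itself never touches it):

	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	add_partial(n, page, 1);	/* caller holds n->list_lock */
	spin_unlock(&n->list_lock);
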
This in turn simplifies the support functions (no __ variant is needed
anymore) and simplifies the lock handling on bootstrap:
early_kmem_cache_node_alloc() no longer has to disable interrupts to keep
lockdep happy, since add_partial() no longer takes the lock itself.

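As a sketch of that simplification (taken from the hunk below), the former
__remove_partial()/remove_partial() pair collapses into a single helper
that assumes the lock is held:

	/* list_lock must be held by the caller. */
	static inline void remove_partial(struct kmem_cache_node *n,
						struct page *page)
	{
		list_del(&page->lru);
		n->nr_partial--;
	}
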
Inline add_partial since it becomes pretty simple.

Signed-off-by: Christoph Lameter <cl@linux.com>

---
 mm/slub.c |   89 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 49 insertions(+), 40 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2011-05-31 10:14:00.812977367 -0500
+++ linux-2.6/mm/slub.c	2011-05-31 10:14:03.852977349 -0500
@@ -916,26 +916,27 @@ static inline void slab_free_hook(struct
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
+ *
+ * list_lock must be held.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+	struct kmem_cache_node *n, struct page *page)
 {
-	spin_lock(&n->list_lock);
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
 	list_add(&page->lru, &n->full);
-	spin_unlock(&n->list_lock);
 }
 
+/*
+ * list_lock must be held.
+ */
 static void remove_full(struct kmem_cache *s, struct page *page)
 {
-	struct kmem_cache_node *n;
-
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	n = get_node(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
 	list_del(&page->lru);
-	spin_unlock(&n->list_lock);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1060,8 +1061,13 @@ static noinline int free_debug_processin
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!page->frozen && !page->freelist)
+	if (!page->frozen && !page->freelist) {
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
 		remove_full(s, page);
+		spin_unlock(&n->list_lock);
+	}
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
@@ -1170,7 +1176,8 @@ static inline int slab_pad_check(struct
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1420,38 +1427,33 @@ static __always_inline int slab_trylock(
 }
 
 /*
- * Management of partially allocated slabs
+ * Management of partially allocated slabs.
+ *
+ * list_lock must be held.
  */
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
-	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
 }
 
-static inline void __remove_partial(struct kmem_cache_node *n,
+/*
+ * list_lock must be held.
+ */
+static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
-static void remove_partial(struct kmem_cache *s, struct page *page)
-{
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
-	__remove_partial(n, page);
-	spin_unlock(&n->list_lock);
-}
-
 /*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
  *
  * Must hold list_lock.
  */
@@ -1459,7 +1461,7 @@ static inline int lock_and_freeze_slab(s
 							struct page *page)
 {
 	if (slab_trylock(page)) {
-		__remove_partial(n, page);
+		remove_partial(n, page);
 		return 1;
 	}
 	return 0;
@@ -1576,12 +1578,17 @@ static void unfreeze_slab(struct kmem_ca
 	if (page->inuse) {
 
 		if (page->freelist) {
+			spin_lock(&n->list_lock);
 			add_partial(n, page, tail);
+			spin_unlock(&n->list_lock);
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
 			stat(s, DEACTIVATE_FULL);
-			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
-				add_full(n, page);
+			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) {
+				spin_lock(&n->list_lock);
+				add_full(s, n, page);
+				spin_unlock(&n->list_lock);
+			}
 		}
 		slab_unlock(page);
 	} else {
@@ -1597,7 +1604,9 @@ static void unfreeze_slab(struct kmem_ca
 			 * kmem_cache_shrink can reclaim any empty slabs from
 			 * the partial list.
 			 */
+			spin_lock(&n->list_lock);
 			add_partial(n, page, 1);
+			spin_unlock(&n->list_lock);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
@@ -2099,7 +2108,11 @@ static void __slab_free(struct kmem_cach
 	 * then add it.
 	 */
 	if (unlikely(!prior)) {
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
+		spin_unlock(&n->list_lock);
 		stat(s, FREE_ADD_PARTIAL);
 	}
 
@@ -2113,7 +2126,11 @@ slab_empty:
 		/*
 		 * Slab still on the partial list.
 		 */
-		remove_partial(s, page);
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
+		remove_partial(n, page);
+		spin_unlock(&n->list_lock);
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
@@ -2395,7 +2412,6 @@ static void early_kmem_cache_node_alloc(
 {
 	struct page *page;
 	struct kmem_cache_node *n;
-	unsigned long flags;
 
 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
 
@@ -2422,14 +2438,7 @@ static void early_kmem_cache_node_alloc(
 	init_kmem_cache_node(n, kmem_cache_node);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
-	/*
-	 * lockdep requires consistent irq usage for each lock
-	 * so even though there cannot be a race this early in
-	 * the boot sequence, we still disable irqs.
-	 */
-	local_irq_save(flags);
 	add_partial(n, page, 0);
-	local_irq_restore(flags);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2713,7 +2722,7 @@ static void free_partial(struct kmem_cac
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
@@ -3051,7 +3060,7 @@ int kmem_cache_shrink(struct kmem_cache
 				 * may have freed the last object and be
 				 * waiting to release the slab.
 				 */
-				__remove_partial(n, page);
+				remove_partial(n, page);
 				slab_unlock(page);
 				discard_slab(s, page);
 			} else {

