* [patch 00/10] slab updates
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

Hi Andrew,

This patchset contains slab updates from various people. Please apply.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
--

 slab.c |  601 +++++++++++++++++++++++++++++++++++------------------------------
  1 file changed, 331 insertions(+), 270 deletions(-)



* [patch 01/10] slab: distinguish between object and buffer size
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Manfred Spraul <manfred@colorfullife.com>

An object cache has two different object lengths:

  - the amount of memory available for the user (object size)
  - the amount of memory allocated internally (buffer size)

This patch does some renames to make the code reflect that better.
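
As a rough illustration (a minimal user-space sketch, not the kernel
code; the fake_cache type, the setup_debug_layout() helper and the
100-byte object size are made up for the example), this is how the two
lengths relate once debugging adds a leading red zone, a trailing red
zone and a last-caller word:

#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD	sizeof(void *)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct fake_cache {
	size_t obj_size;	/* usable object size (the old "reallen") */
	size_t obj_offset;	/* offset of the user object (the old "dbghead") */
	size_t buffer_size;	/* total per-object footprint (the old "objsize") */
};

/* Mimic the debug layout: [red zone][object][red zone][last caller]. */
static void setup_debug_layout(struct fake_cache *c, size_t size)
{
	c->obj_size = size;			/* what the caller asked for */
	c->obj_offset = BYTES_PER_WORD;		/* skip the leading red zone */
	size = ALIGN(size, BYTES_PER_WORD);	/* keep the trailing words aligned */
	c->buffer_size = size + 3 * BYTES_PER_WORD; /* two red zones + caller */
}

int main(void)
{
	struct fake_cache c;

	setup_debug_layout(&c, 100);
	printf("obj_size=%zu obj_offset=%zu buffer_size=%zu\n",
	       c.obj_size, c.obj_offset, c.buffer_size);
	return 0;
}

On a 64-bit machine this prints obj_size=100 obj_offset=8
buffer_size=128: the user still sees a 100-byte object, while the
allocator reserves 128 bytes per object.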

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |  154 ++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 80 insertions(+), 74 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -373,7 +373,7 @@ struct kmem_cache {
 	unsigned int batchcount;
 	unsigned int limit;
 	unsigned int shared;
-	unsigned int objsize;
+	unsigned int buffer_size;
 /* 2) touched by every alloc & free from the backend */
 	struct kmem_list3 *nodelists[MAX_NUMNODES];
 	unsigned int flags;	/* constant flags */
@@ -421,8 +421,14 @@ struct kmem_cache {
 	atomic_t freemiss;
 #endif
 #if DEBUG
-	int dbghead;
-	int reallen;
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
 #endif
 };
 
@@ -493,50 +499,50 @@ struct kmem_cache {
 
 /* memory layout of objects:
  * 0		: objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
  * 		the end of an object is aligned with the end of the real
  * 		allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  * 		redzone word.
- * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->obj_offset: The real object.
+ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static int obj_dbghead(kmem_cache_t *cachep)
+static int obj_offset(kmem_cache_t *cachep)
 {
-	return cachep->dbghead;
+	return cachep->obj_offset;
 }
 
-static int obj_reallen(kmem_cache_t *cachep)
+static int obj_size(kmem_cache_t *cachep)
 {
-	return cachep->reallen;
+	return cachep->obj_size;
 }
 
 static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-	return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
 }
 
 static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long *)(objp + cachep->objsize -
+		return (unsigned long *)(objp + cachep->buffer_size -
 					 2 * BYTES_PER_WORD);
-	return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
+	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
 static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-	return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
+	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
 #else
 
-#define obj_dbghead(x)			0
-#define obj_reallen(cachep)		(cachep->objsize)
+#define obj_offset(x)			0
+#define obj_size(cachep)		(cachep->buffer_size)
 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
@@ -621,12 +627,12 @@ static kmem_cache_t cache_cache = {
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
-	.objsize = sizeof(kmem_cache_t),
+	.buffer_size = sizeof(kmem_cache_t),
 	.flags = SLAB_NO_REAP,
 	.spinlock = SPIN_LOCK_UNLOCKED,
 	.name = "kmem_cache",
 #if DEBUG
-	.reallen = sizeof(kmem_cache_t),
+	.obj_size = sizeof(kmem_cache_t),
 #endif
 };
 
@@ -1054,9 +1060,9 @@ void __init kmem_cache_init(void)
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
 
-	cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
+	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
 
-	cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
+	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
 		       &left_over, &cache_cache.num);
 	if (!cache_cache.num)
 		BUG();
@@ -1271,9 +1277,9 @@ static void kmem_rcu_free(struct rcu_hea
 static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
 			    unsigned long caller)
 {
-	int size = obj_reallen(cachep);
+	int size = obj_size(cachep);
 
-	addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
+	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
 
 	if (size < 5 * sizeof(unsigned long))
 		return;
@@ -1303,8 +1309,8 @@ static void store_stackinfo(kmem_cache_t
 
 static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
 {
-	int size = obj_reallen(cachep);
-	addr = &((char *)addr)[obj_dbghead(cachep)];
+	int size = obj_size(cachep);
+	addr = &((char *)addr)[obj_offset(cachep)];
 
 	memset(addr, val, size);
 	*(unsigned char *)(addr + size - 1) = POISON_END;
@@ -1341,8 +1347,8 @@ static void print_objinfo(kmem_cache_t *
 			     (unsigned long)*dbg_userword(cachep, objp));
 		printk("\n");
 	}
-	realobj = (char *)objp + obj_dbghead(cachep);
-	size = obj_reallen(cachep);
+	realobj = (char *)objp + obj_offset(cachep);
+	size = obj_size(cachep);
 	for (i = 0; i < size && lines; i += 16, lines--) {
 		int limit;
 		limit = 16;
@@ -1358,8 +1364,8 @@ static void check_poison_obj(kmem_cache_
 	int size, i;
 	int lines = 0;
 
-	realobj = (char *)objp + obj_dbghead(cachep);
-	size = obj_reallen(cachep);
+	realobj = (char *)objp + obj_offset(cachep);
+	size = obj_size(cachep);
 
 	for (i = 0; i < size; i++) {
 		char exp = POISON_FREE;
@@ -1395,17 +1401,17 @@ static void check_poison_obj(kmem_cache_
 		struct slab *slabp = page_get_slab(virt_to_page(objp));
 		int objnr;
 
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (objp - slabp->s_mem) / cachep->buffer_size;
 		if (objnr) {
-			objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
-			realobj = (char *)objp + obj_dbghead(cachep);
+			objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
+			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
 			       realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 		if (objnr + 1 < cachep->num) {
-			objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
-			realobj = (char *)objp + obj_dbghead(cachep);
+			objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
+			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
 			       realobj, size);
 			print_objinfo(cachep, objp, 2);
@@ -1425,14 +1431,14 @@ static void slab_destroy(kmem_cache_t *c
 #if DEBUG
 	int i;
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->objsize * i;
+		void *objp = slabp->s_mem + cachep->buffer_size * i;
 
 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-			if ((cachep->objsize % PAGE_SIZE) == 0
+			if ((cachep->buffer_size % PAGE_SIZE) == 0
 			    && OFF_SLAB(cachep))
 				kernel_map_pages(virt_to_page(objp),
-						 cachep->objsize / PAGE_SIZE,
+						 cachep->buffer_size / PAGE_SIZE,
 						 1);
 			else
 				check_poison_obj(cachep, objp);
@@ -1449,13 +1455,13 @@ static void slab_destroy(kmem_cache_t *c
 					   "was overwritten");
 		}
 		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
+			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
 #else
 	if (cachep->dtor) {
 		int i;
 		for (i = 0; i < cachep->num; i++) {
-			void *objp = slabp->s_mem + cachep->objsize * i;
+			void *objp = slabp->s_mem + cachep->buffer_size * i;
 			(cachep->dtor) (objp, cachep, 0);
 		}
 	}
@@ -1475,7 +1481,7 @@ static void slab_destroy(kmem_cache_t *c
 	}
 }
 
-/* For setting up all the kmem_list3s for cache whose objsize is same
+/* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
 static inline void set_up_list3s(kmem_cache_t *cachep, int index)
 {
@@ -1608,7 +1614,7 @@ kmem_cache_create (const char *name, siz
 		set_fs(old_fs);
 		if (res) {
 			printk("SLAB: cache with size %d has lost its name\n",
-			       pc->objsize);
+			       pc->buffer_size);
 			continue;
 		}
 
@@ -1699,14 +1705,14 @@ kmem_cache_create (const char *name, siz
 	memset(cachep, 0, sizeof(kmem_cache_t));
 
 #if DEBUG
-	cachep->reallen = size;
+	cachep->obj_size = size;
 
 	if (flags & SLAB_RED_ZONE) {
 		/* redzoning only works with word aligned caches */
 		align = BYTES_PER_WORD;
 
 		/* add space for red zone words */
-		cachep->dbghead += BYTES_PER_WORD;
+		cachep->obj_offset += BYTES_PER_WORD;
 		size += 2 * BYTES_PER_WORD;
 	}
 	if (flags & SLAB_STORE_USER) {
@@ -1719,8 +1725,8 @@ kmem_cache_create (const char *name, siz
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
-		cachep->dbghead += PAGE_SIZE - size;
+	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - size;
 		size = PAGE_SIZE;
 	}
 #endif
@@ -1783,7 +1789,7 @@ kmem_cache_create (const char *name, siz
 	if (flags & SLAB_CACHE_DMA)
 		cachep->gfpflags |= GFP_DMA;
 	spin_lock_init(&cachep->spinlock);
-	cachep->objsize = size;
+	cachep->buffer_size = size;
 
 	if (flags & CFLGS_OFF_SLAB)
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -2115,7 +2121,7 @@ static void cache_init_objs(kmem_cache_t
 	int i;
 
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->objsize * i;
+		void *objp = slabp->s_mem + cachep->buffer_size * i;
 #if DEBUG
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
@@ -2133,7 +2139,7 @@ static void cache_init_objs(kmem_cache_t
 		 * Otherwise, deadlock. They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp + obj_dbghead(cachep), cachep,
+			cachep->ctor(objp + obj_offset(cachep), cachep,
 				     ctor_flags);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
@@ -2144,10 +2150,10 @@ static void cache_init_objs(kmem_cache_t
 				slab_error(cachep, "constructor overwrote the"
 					   " start of an object");
 		}
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
 		    && cachep->flags & SLAB_POISON)
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 0);
+					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
 			cachep->ctor(objp, cachep, ctor_flags);
@@ -2306,7 +2312,7 @@ static void *cache_free_debugcheck(kmem_
 	unsigned int objnr;
 	struct slab *slabp;
 
-	objp -= obj_dbghead(cachep);
+	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
 
@@ -2338,31 +2344,31 @@ static void *cache_free_debugcheck(kmem_
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = caller;
 
-	objnr = (objp - slabp->s_mem) / cachep->objsize;
+	objnr = (objp - slabp->s_mem) / cachep->buffer_size;
 
 	BUG_ON(objnr >= cachep->num);
-	BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
+	BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
 
 	if (cachep->flags & SLAB_DEBUG_INITIAL) {
 		/* Need to call the slab's constructor so the
 		 * caller can perform a verify of its state (debugging).
 		 * Called without the cache-lock held.
 		 */
-		cachep->ctor(objp + obj_dbghead(cachep),
+		cachep->ctor(objp + obj_offset(cachep),
 			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
 	}
 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
 		/* we want to cache poison the object,
 		 * call the destruction callback
 		 */
-		cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
+		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
 	}
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
 			store_stackinfo(cachep, objp, (unsigned long)caller);
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 0);
+					 cachep->buffer_size / PAGE_SIZE, 0);
 		} else {
 			poison_obj(cachep, objp, POISON_FREE);
 		}
@@ -2465,7 +2471,7 @@ static void *cache_alloc_refill(kmem_cac
 
 			/* get obj pointer */
 			ac->entry[ac->avail++] = slabp->s_mem +
-			    slabp->free * cachep->objsize;
+			    slabp->free * cachep->buffer_size;
 
 			slabp->inuse++;
 			next = slab_bufctl(slabp)[slabp->free];
@@ -2523,9 +2529,9 @@ static void *cache_alloc_debugcheck_afte
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 1);
+					 cachep->buffer_size / PAGE_SIZE, 1);
 		else
 			check_poison_obj(cachep, objp);
 #else
@@ -2550,7 +2556,7 @@ static void *cache_alloc_debugcheck_afte
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
-	objp += obj_dbghead(cachep);
+	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 
@@ -2636,7 +2642,7 @@ static void *__cache_alloc_node(kmem_cac
 	BUG_ON(slabp->inuse == cachep->num);
 
 	/* get obj pointer */
-	obj = slabp->s_mem + slabp->free * cachep->objsize;
+	obj = slabp->s_mem + slabp->free * cachep->buffer_size;
 	slabp->inuse++;
 	next = slab_bufctl(slabp)[slabp->free];
 #if DEBUG
@@ -2687,7 +2693,7 @@ static void free_block(kmem_cache_t *cac
 		slabp = page_get_slab(virt_to_page(objp));
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (objp - slabp->s_mem) / cachep->buffer_size;
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
 
@@ -2869,7 +2875,7 @@ int fastcall kmem_ptr_validate(kmem_cach
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long min_addr = PAGE_OFFSET;
 	unsigned long align_mask = BYTES_PER_WORD - 1;
-	unsigned long size = cachep->objsize;
+	unsigned long size = cachep->buffer_size;
 	struct page *page;
 
 	if (unlikely(addr < min_addr))
@@ -3071,7 +3077,7 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = page_get_cache(virt_to_page(objp));
-	mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
+	mutex_debug_check_no_locks_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
@@ -3102,7 +3108,7 @@ EXPORT_SYMBOL(free_percpu);
 
 unsigned int kmem_cache_size(kmem_cache_t *cachep)
 {
-	return obj_reallen(cachep);
+	return obj_size(cachep);
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
@@ -3246,13 +3252,13 @@ static void enable_cpucache(kmem_cache_t
 	 * The numbers are guessed, we should auto-tune as described by
 	 * Bonwick.
 	 */
-	if (cachep->objsize > 131072)
+	if (cachep->buffer_size > 131072)
 		limit = 1;
-	else if (cachep->objsize > PAGE_SIZE)
+	else if (cachep->buffer_size > PAGE_SIZE)
 		limit = 8;
-	else if (cachep->objsize > 1024)
+	else if (cachep->buffer_size > 1024)
 		limit = 24;
-	else if (cachep->objsize > 256)
+	else if (cachep->buffer_size > 256)
 		limit = 54;
 	else
 		limit = 120;
@@ -3267,7 +3273,7 @@ static void enable_cpucache(kmem_cache_t
 	 */
 	shared = 0;
 #ifdef CONFIG_SMP
-	if (cachep->objsize <= PAGE_SIZE)
+	if (cachep->buffer_size <= PAGE_SIZE)
 		shared = 8;
 #endif
 
@@ -3516,7 +3522,7 @@ static int s_show(struct seq_file *m, vo
 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   name, active_objs, num_objs, cachep->objsize,
+		   name, active_objs, num_objs, cachep->buffer_size,
 		   cachep->num, (1 << cachep->gfporder));
 	seq_printf(m, " : tunables %4u %4u %4u",
 		   cachep->limit, cachep->batchcount, cachep->shared);
@@ -3644,5 +3650,5 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_reallen(page_get_cache(virt_to_page(objp)));
+	return obj_size(page_get_cache(virt_to_page(objp)));
 }

--



* [patch 02/10] slab: minor cleanup to kmem_cache_alloc_node
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Christoph Lameter <clameter@engr.sgi.com>

This patch cleans up kmem_cache_alloc_node a bit.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -2916,27 +2916,18 @@ void *kmem_cache_alloc_node(kmem_cache_t
 	unsigned long save_flags;
 	void *ptr;
 
-	if (nodeid == -1)
-		return __cache_alloc(cachep, flags);
-
-	if (unlikely(!cachep->nodelists[nodeid])) {
-		/* Fall back to __cache_alloc if we run into trouble */
-		printk(KERN_WARNING
-		       "slab: not allocating in inactive node %d for cache %s\n",
-		       nodeid, cachep->name);
-		return __cache_alloc(cachep, flags);
-	}
-
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-	if (nodeid == numa_node_id())
+
+	if (nodeid == -1 || nodeid == numa_node_id() ||
+	    !cachep->nodelists[nodeid])
 		ptr = ____cache_alloc(cachep, flags);
 	else
 		ptr = __cache_alloc_node(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
-	ptr =
-	    cache_alloc_debugcheck_after(cachep, flags, ptr,
-					 __builtin_return_address(0));
+
+	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
+					   __builtin_return_address(0));
 
 	return ptr;
 }

--



* [patch 05/10] slab: extract slab_destroy_objs()
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Matthew Dobson <colpatch@us.ibm.com>

This patch creates a helper function, slab_destroy_objs(), which is
called from slab_destroy(). This makes slab_destroy() smaller and more
readable, and moves the ifdefs outside the function body.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -1456,15 +1456,13 @@ static void check_poison_obj(kmem_cache_
 }
 #endif
 
-/* Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache.
- * The cache-lock is not held/needed.
+#if DEBUG
+/**
+ * slab_destroy_objs - call the registered destructor for each object in
+ *      a slab that is to be destroyed.
  */
-static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
 {
-	void *addr = slabp->s_mem - slabp->colouroff;
-
-#if DEBUG
 	int i;
 	for (i = 0; i < cachep->num; i++) {
 		void *objp = slabp->s_mem + cachep->buffer_size * i;
@@ -1493,7 +1491,10 @@ static void slab_destroy(kmem_cache_t *c
 		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
 			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
+}
 #else
+static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
+{
 	if (cachep->dtor) {
 		int i;
 		for (i = 0; i < cachep->num; i++) {
@@ -1501,8 +1502,19 @@ static void slab_destroy(kmem_cache_t *c
 			(cachep->dtor) (objp, cachep, 0);
 		}
 	}
+}
 #endif
 
+/**
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache.
+ * The cache-lock is not held/needed.
+ */
+static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+{
+	void *addr = slabp->s_mem - slabp->colouroff;
+
+	slab_destroy_objs(cachep, slabp);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slab_rcu *slab_rcu;
 

--



* [patch 06/10] slab: extract slab_{put|get}_obj
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Matthew Dobson <colpatch@us.ibm.com>

This patch creates two helper functions, slab_get_obj() and
slab_put_obj(), to replace duplicated code in mm/slab.c.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   77 ++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 40 insertions(+), 37 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -2223,6 +2223,42 @@ static void kmem_flagcheck(kmem_cache_t 
 	}
 }
 
+static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid)
+{
+	void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
+	kmem_bufctl_t next;
+
+	slabp->inuse++;
+	next = slab_bufctl(slabp)[slabp->free];
+#if DEBUG
+	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+	WARN_ON(slabp->nodeid != nodeid);
+#endif
+	slabp->free = next;
+
+	return objp;
+}
+
+static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp,
+			  int nodeid)
+{
+	unsigned int objnr = (objp - slabp->s_mem) / cachep->buffer_size;
+
+#if DEBUG
+	/* Verify that the slab belongs to the intended node */
+	WARN_ON(slabp->nodeid != nodeid);
+
+	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+		printk(KERN_ERR "slab: double free detected in cache "
+		       "'%s', objp %p\n", cachep->name, objp);
+		BUG();
+	}
+#endif
+	slab_bufctl(slabp)[objnr] = slabp->free;
+	slabp->free = objnr;
+	slabp->inuse--;
+}
+
 static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
 {
 	int i;
@@ -2512,22 +2548,12 @@ static void *cache_alloc_refill(kmem_cac
 		check_slabp(cachep, slabp);
 		check_spinlock_acquired(cachep);
 		while (slabp->inuse < cachep->num && batchcount--) {
-			kmem_bufctl_t next;
 			STATS_INC_ALLOCED(cachep);
 			STATS_INC_ACTIVE(cachep);
 			STATS_SET_HIGH(cachep);
 
-			/* get obj pointer */
-			ac->entry[ac->avail++] = slabp->s_mem +
-			    slabp->free * cachep->buffer_size;
-
-			slabp->inuse++;
-			next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-			slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-			WARN_ON(numa_node_id() != slabp->nodeid);
-#endif
-			slabp->free = next;
+			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
+							    numa_node_id());
 		}
 		check_slabp(cachep, slabp);
 
@@ -2663,7 +2689,6 @@ static void *__cache_alloc_node(kmem_cac
 	struct slab *slabp;
 	struct kmem_list3 *l3;
 	void *obj;
-	kmem_bufctl_t next;
 	int x;
 
 	l3 = cachep->nodelists[nodeid];
@@ -2689,14 +2714,7 @@ static void *__cache_alloc_node(kmem_cac
 
 	BUG_ON(slabp->inuse == cachep->num);
 
-	/* get obj pointer */
-	obj = slabp->s_mem + slabp->free * cachep->buffer_size;
-	slabp->inuse++;
-	next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
-	slabp->free = next;
+	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
 	l3->free_objects--;
 	/* move slabp to correct slabp list: */
@@ -2736,29 +2754,14 @@ static void free_block(kmem_cache_t *cac
 	for (i = 0; i < nr_objects; i++) {
 		void *objp = objpp[i];
 		struct slab *slabp;
-		unsigned int objnr;
 
 		slabp = page_get_slab(virt_to_page(objp));
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
-		objnr = (objp - slabp->s_mem) / cachep->buffer_size;
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
-
-#if DEBUG
-		/* Verify that the slab belongs to the intended node */
-		WARN_ON(slabp->nodeid != node);
-
-		if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
-			printk(KERN_ERR "slab: double free detected in cache "
-			       "'%s', objp %p\n", cachep->name, objp);
-			BUG();
-		}
-#endif
-		slab_bufctl(slabp)[objnr] = slabp->free;
-		slabp->free = objnr;
+		slab_put_obj(cachep, slabp, objp, node);
 		STATS_DEC_ACTIVE(cachep);
-		slabp->inuse--;
 		l3->free_objects++;
 		check_slabp(cachep, slabp);
 

--



* [patch 03/10] slab: have index_of bug at compile time
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Steven Rostedt <rostedt@goodmis.org>

I noticed the code for index_of is a creative way of finding the cache
index using the compiler to optimize to a single hard coded number.  But
I couldn't help noticing that it uses two methods to let you know that
someone used it wrong.  One is at compile time (the correct way), and
the other is at run time (not good).
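
For reference, the same trick works stand-alone (hypothetical cache
sizes and a hypothetical index_of(); build with optimization enabled,
e.g. gcc -O2, so dead branches are dropped). Calling an extern
function that is never defined turns misuse into a link error instead
of a run-time BUG():

#include <stdio.h>

extern void __bad_size(void);	/* deliberately never defined anywhere */

static inline int index_of(const unsigned long size)
{
	if (__builtin_constant_p(size)) {
		if (size == 32)
			return 0;
		if (size == 64)
			return 1;
		if (size == 128)
			return 2;
		__bad_size();	/* unknown constant size: call survives, link fails */
	} else
		__bad_size();	/* non-constant size: call survives, link fails */
	return 0;
}

int main(void)
{
	/*
	 * 64 is a known constant, so the call to __bad_size() is optimized
	 * away and the program links and prints 1. Passing a variable or
	 * an unlisted size would fail at link time instead of at run time.
	 */
	printf("index_of(64) = %d\n", index_of(64));
	return 0;
}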

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |    9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -314,6 +314,8 @@ struct kmem_list3 __initdata initkmem_li
  */
 static __always_inline int index_of(const size_t size)
 {
+	extern void __bad_size(void);
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -324,12 +326,9 @@ static __always_inline int index_of(cons
 		i++;
 #include "linux/kmalloc_sizes.h"
 #undef CACHE
-		{
-			extern void __bad_size(void);
-			__bad_size();
-		}
+		__bad_size();
 	} else
-		BUG();
+		__bad_size();
 	return 0;
 }
 

--



* [patch 04/10] slab: cache_estimate cleanup
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Steven Rostedt <rostedt@goodmis.org>

This patch cleans up cache_estimate() in mm/slab.c and improves the
algorithm from O(n) to O(1). We first calculate the maximum number of
objects a slab can hold after struct slab and one kmem_bufctl_t per
object have been given enough space. After that, to respect alignment
rules, we decrease the number of objects if necessary. As the required
padding is at most align-1 bytes and the buffer size is at least align,
decreasing the number of objects by one is always enough.

The optimization was originally made by Balbir Singh, with further
improvements from Steven Rostedt. Manfred Spraul provided additional
modifications: he removed the loop entirely for the off-slab case and
added comments to explain the background.
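
To sanity-check that reasoning, here is a small user-space sketch (a
made-up 4096-byte slab, 64-byte alignment and stand-in slab_hdr /
bufctl_t types, not the kernel code) that compares the old linear
search with the new closed-form estimate and asserts that they agree:

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Stand-ins for struct slab and kmem_bufctl_t; the sizes are made up. */
struct slab_hdr { void *list[2]; size_t colouroff, free, inuse; };
typedef unsigned short bufctl_t;

static size_t mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab_hdr) + nr_objs * sizeof(bufctl_t), align);
}

int main(void)
{
	const size_t slab_size = 4096, align = 64;
	size_t buffer_size;

	for (buffer_size = align; buffer_size <= slab_size; buffer_size += align) {
		/* Old way: linear search, O(n) in the number of objects. */
		size_t i = 0;
		while (i * buffer_size + mgmt_size(i, align) <= slab_size)
			i++;
		if (i > 0)
			i--;

		/*
		 * New way: O(1) first guess that ignores the padding, then
		 * correct downwards by at most one object.
		 */
		size_t nr = (slab_size - sizeof(struct slab_hdr)) /
			    (buffer_size + sizeof(bufctl_t));
		if (mgmt_size(nr, align) + nr * buffer_size > slab_size)
			nr--;

		assert(nr == i);
	}
	printf("closed-form estimate matches the linear search\n");
	return 0;
}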

Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   87 ++++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 62 insertions(+), 25 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -700,32 +700,69 @@ kmem_cache_t *kmem_find_general_cachep(s
 }
 EXPORT_SYMBOL(kmem_find_general_cachep);
 
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
-			   int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-	int i;
-	size_t wastage = PAGE_SIZE << gfporder;
-	size_t extra = 0;
-	size_t base = 0;
-
-	if (!(flags & CFLGS_OFF_SLAB)) {
-		base = sizeof(struct slab);
-		extra = sizeof(kmem_bufctl_t);
-	}
-	i = 0;
-	while (i * size + ALIGN(base + i * extra, align) <= wastage)
-		i++;
-	if (i > 0)
-		i--;
-
-	if (i > SLAB_LIMIT)
-		i = SLAB_LIMIT;
-
-	*num = i;
-	wastage -= i * size;
-	wastage -= ALIGN(base + i * extra, align);
-	*left_over = wastage;
+	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
+
+/* Calculate the number of objects and left-over bytes for a given
+   buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+			   size_t align, int flags, size_t *left_over,
+			   unsigned int *num)
+{
+	int nr_objs;
+	size_t mgmt_size;
+	size_t slab_size = PAGE_SIZE << gfporder;
+
+	/*
+	 * The slab management structure can be either off the slab or
+	 * on it. For the latter case, the memory allocated for a
+	 * slab is used for:
+	 *
+	 * - The struct slab
+	 * - One kmem_bufctl_t for each object
+	 * - Padding to respect alignment of @align
+	 * - @buffer_size bytes for each object
+	 *
+	 * If the slab management structure is off the slab, then the
+	 * alignment will already be calculated into the size. Because
+	 * the slabs are all pages aligned, the objects will be at the
+	 * correct alignment when allocated.
+	 */
+	if (flags & CFLGS_OFF_SLAB) {
+		mgmt_size = 0;
+		nr_objs = slab_size / buffer_size;
+
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+	} else {
+		/*
+		 * Ignore padding for the initial guess. The padding
+		 * is at most @align-1 bytes, and @buffer_size is at
+		 * least @align. In the worst case, this result will
+		 * be one greater than the number of objects that fit
+		 * into the memory allocation when taking the padding
+		 * into account.
+		 */
+		nr_objs = (slab_size - sizeof(struct slab)) /
+			  (buffer_size + sizeof(kmem_bufctl_t));
+
+		/*
+		 * This calculated number will be either the right
+		 * amount, or one greater than what we want.
+		 */
+		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+		       > slab_size)
+			nr_objs--;
+
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+
+		mgmt_size = slab_mgmt_size(nr_objs, align);
+	}
+	*num = nr_objs;
+	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

--



* [patch 07/10] slab: reduce inlining
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Manfred Spraul <manfred@colorfullife.com>

Reduce the number of inline functions in slab to the functions that
are used in the hot path:

  - no inline for debug functions
  - no __always_inline, inline is already __always_inline
  - remove inline from a few numa support functions.

Before:

   text    data     bss     dec     hex filename
  13588     752      48   14388    3834 mm/slab.o (defconfig)
  16671    2492      48   19211    4b0b mm/slab.o (numa)

After:

   text    data     bss     dec     hex filename
  13366     752      48   14166    3756 mm/slab.o (defconfig)
  16230    2492      48   18770    4952 mm/slab.o (numa)

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -335,7 +335,7 @@ static __always_inline int index_of(cons
 #define INDEX_AC index_of(sizeof(struct arraycache_init))
 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
 
-static inline void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_list3_init(struct kmem_list3 *parent)
 {
 	INIT_LIST_HEAD(&parent->slabs_full);
 	INIT_LIST_HEAD(&parent->slabs_partial);
@@ -814,7 +814,7 @@ static struct array_cache *alloc_arrayca
 }
 
 #ifdef CONFIG_NUMA
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * MAX_NUMNODES;
@@ -841,7 +841,7 @@ static inline struct array_cache **alloc
 	return ac_ptr;
 }
 
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct array_cache **ac_ptr)
 {
 	int i;
 
@@ -854,8 +854,8 @@ static inline void free_alien_cache(stru
 	kfree(ac_ptr);
 }
 
-static inline void __drain_alien_cache(kmem_cache_t *cachep,
-				       struct array_cache *ac, int node)
+static void __drain_alien_cache(kmem_cache_t *cachep,
+				struct array_cache *ac, int node)
 {
 	struct kmem_list3 *rl3 = cachep->nodelists[node];
 
@@ -1531,7 +1531,7 @@ static void slab_destroy(kmem_cache_t *c
 
 /* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
-static inline void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(kmem_cache_t *cachep, int index)
 {
 	int node;
 
@@ -1934,7 +1934,7 @@ static void check_spinlock_acquired(kmem
 #endif
 }
 
-static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();

--



* [patch 08/10] slab: extract virt_to_{cache|slab}
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Pekka Enberg <penberg@cs.helsinki.fi>

This patch introduces the virt_to_cache() and virt_to_slab() functions
to reduce duplicate code and to introduce a proper abstraction should
we want to support other kinds of mappings from address to slab and
cache (e.g. for vmalloc() or I/O memory).

Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -594,6 +594,18 @@ static inline struct slab *page_get_slab
 	return (struct slab *)page->lru.prev;
 }
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_slab(page);
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
@@ -1434,7 +1446,7 @@ static void check_poison_obj(kmem_cache_
 		/* Print some data about the neighboring objects, if they
 		 * exist:
 		 */
-		struct slab *slabp = page_get_slab(virt_to_page(objp));
+		struct slab *slabp = virt_to_slab(objp);
 		int objnr;
 
 		objnr = (objp - slabp->s_mem) / cachep->buffer_size;
@@ -2755,7 +2767,7 @@ static void free_block(kmem_cache_t *cac
 		void *objp = objpp[i];
 		struct slab *slabp;
 
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		check_spinlock_acquired_node(cachep, node);
@@ -2855,7 +2867,7 @@ static inline void __cache_free(kmem_cac
 #ifdef CONFIG_NUMA
 	{
 		struct slab *slabp;
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
@@ -3118,7 +3130,7 @@ void kfree(const void *objp)
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
-	c = page_get_cache(virt_to_page(objp));
+	c = virt_to_cache(objp);
 	mutex_debug_check_no_locks_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
@@ -3692,5 +3704,5 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_size(page_get_cache(virt_to_page(objp)));
+	return obj_size(virt_to_cache(objp));
 }

--



* [patch 10/10] slab: replace kmem_cache_t with struct kmem_cache
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Pekka Enberg <penberg@cs.helsinki.fi>

This patch replaces uses of kmem_cache_t with proper struct kmem_cache in
mm/slab.c.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |  193 +++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 97 insertions(+), 96 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -55,7 +55,7 @@
  *
  * SMP synchronization:
  *  constructors and destructors are called without any locking.
- *  Several members in kmem_cache_t and struct slab never change, they
+ *  Several members in struct kmem_cache and struct slab never change, they
  *	are accessed without any locking.
  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  *  	and local interrupts are disabled so slab code is preempt-safe.
@@ -242,7 +242,7 @@ struct slab {
  */
 struct slab_rcu {
 	struct rcu_head head;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	void *addr;
 };
 
@@ -361,7 +361,7 @@ static void kmem_list3_init(struct kmem_
 	} while (0)
 
 /*
- * kmem_cache_t
+ * struct kmem_cache
  *
  * manages a cache.
  */
@@ -389,15 +389,15 @@ struct kmem_cache {
 	size_t colour;		/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
 	unsigned int colour_next;	/* cache colouring */
-	kmem_cache_t *slabp_cache;
+	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
 	unsigned int dflags;	/* dynamic flags */
 
 	/* constructor func */
-	void (*ctor) (void *, kmem_cache_t *, unsigned long);
+	void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
 	/* de-constructor func */
-	void (*dtor) (void *, kmem_cache_t *, unsigned long);
+	void (*dtor) (void *, struct kmem_cache *, unsigned long);
 
 /* 4) cache creation/removal */
 	const char *name;
@@ -507,23 +507,23 @@ struct kmem_cache {
  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static int obj_offset(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
 {
 	return cachep->obj_offset;
 }
 
-static int obj_size(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
 {
 	return cachep->obj_size;
 }
 
-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
 }
 
-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
@@ -532,7 +532,7 @@ static unsigned long *dbg_redzone2(kmem_
 	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
@@ -634,16 +634,16 @@ static struct arraycache_init initarray_
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
+static struct kmem_cache cache_cache = {
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
-	.buffer_size = sizeof(kmem_cache_t),
+	.buffer_size = sizeof(struct kmem_cache),
 	.flags = SLAB_NO_REAP,
 	.spinlock = SPIN_LOCK_UNLOCKED,
 	.name = "kmem_cache",
 #if DEBUG
-	.obj_size = sizeof(kmem_cache_t),
+	.obj_size = sizeof(struct kmem_cache),
 #endif
 };
 
@@ -672,17 +672,17 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
-static void enable_cpucache(kmem_cache_t *cachep);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(void *unused);
-static int __node_shrink(kmem_cache_t *cachep, int node);
+static int __node_shrink(struct kmem_cache *cachep, int node);
 
-static inline struct array_cache *cpu_cache_get(kmem_cache_t *cachep)
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -706,7 +706,7 @@ static inline kmem_cache_t *__find_gener
 	return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
@@ -779,7 +779,7 @@ static void cache_estimate(unsigned long
 
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
 
-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
 {
 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 	       function, cachep->name, msg);
@@ -866,7 +866,7 @@ static void free_alien_cache(struct arra
 	kfree(ac_ptr);
 }
 
-static void __drain_alien_cache(kmem_cache_t *cachep,
+static void __drain_alien_cache(struct kmem_cache *cachep,
 				struct array_cache *ac, int node)
 {
 	struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -879,7 +879,7 @@ static void __drain_alien_cache(kmem_cac
 	}
 }
 
-static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
 	int i = 0;
 	struct array_cache *ac;
@@ -904,7 +904,7 @@ static int __devinit cpuup_callback(stru
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
 	int memsize = sizeof(struct kmem_list3);
@@ -1042,7 +1042,7 @@ static struct notifier_block cpucache_no
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
 {
 	struct kmem_list3 *ptr;
 
@@ -1082,14 +1082,14 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the kmem_cache_t
+	 * 1) initialize the cache_cache cache: it contains the struct kmem_cache
 	 *    structures of all caches, except cache_cache itself: cache_cache
 	 *    is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
 	 * 2) Create the first kmalloc cache.
-	 *    The kmem_cache_t for the new cache is allocated normally.
+	 *    The struct kmem_cache for the new cache is allocated normally.
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
@@ -1221,7 +1221,7 @@ void __init kmem_cache_init(void)
 
 	/* 6) resize the head arrays to their final sizes */
 	{
-		kmem_cache_t *cachep;
+		struct kmem_cache *cachep;
 		down(&cache_chain_sem);
 		list_for_each_entry(cachep, &cache_chain, next)
 		    enable_cpucache(cachep);
@@ -1264,7 +1264,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct page *page;
 	void *addr;
@@ -1290,7 +1290,7 @@ static void *kmem_getpages(kmem_cache_t 
 /*
  * Interface to system's page release.
  */
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 {
 	unsigned long i = (1 << cachep->gfporder);
 	struct page *page = virt_to_page(addr);
@@ -1312,7 +1312,7 @@ static void kmem_freepages(kmem_cache_t 
 static void kmem_rcu_free(struct rcu_head *head)
 {
 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
-	kmem_cache_t *cachep = slab_rcu->cachep;
+	struct kmem_cache *cachep = slab_rcu->cachep;
 
 	kmem_freepages(cachep, slab_rcu->addr);
 	if (OFF_SLAB(cachep))
@@ -1322,7 +1322,7 @@ static void kmem_rcu_free(struct rcu_hea
 #if DEBUG
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 			    unsigned long caller)
 {
 	int size = obj_size(cachep);
@@ -1355,7 +1355,7 @@ static void store_stackinfo(kmem_cache_t
 }
 #endif
 
-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
 	int size = obj_size(cachep);
 	addr = &((char *)addr)[obj_offset(cachep)];
@@ -1377,7 +1377,7 @@ static void dump_line(char *data, int of
 
 #if DEBUG
 
-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 {
 	int i, size;
 	char *realobj;
@@ -1406,7 +1406,7 @@ static void print_objinfo(kmem_cache_t *
 	}
 }
 
-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 {
 	char *realobj;
 	int size, i;
@@ -1473,7 +1473,7 @@ static void check_poison_obj(kmem_cache_
  * slab_destroy_objs - call the registered destructor for each object in
  *      a slab that is to be destroyed.
  */
-static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
 	int i;
 	for (i = 0; i < cachep->num; i++) {
@@ -1505,7 +1505,7 @@ static void slab_destroy_objs(kmem_cache
 	}
 }
 #else
-static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
 	if (cachep->dtor) {
 		int i;
@@ -1522,7 +1522,7 @@ static void slab_destroy_objs(kmem_cache
  * Before calling the slab must have been unlinked from the cache.
  * The cache-lock is not held/needed.
  */
-static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
 	void *addr = slabp->s_mem - slabp->colouroff;
 
@@ -1543,7 +1543,7 @@ static void slab_destroy(kmem_cache_t *c
 
 /* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
-static void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(struct kmem_cache *cachep, int index)
 {
 	int node;
 
@@ -1563,7 +1563,7 @@ static void set_up_list3s(kmem_cache_t *
  * high order pages for slabs.  When the gfp() functions are more friendly
  * towards high-order requests, this should be changed.
  */
-static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
+static inline size_t calculate_slab_order(struct kmem_cache *cachep, size_t size,
 					  size_t align, gfp_t flags)
 {
 	size_t left_over = 0;
@@ -1635,13 +1635,13 @@ static inline size_t calculate_slab_orde
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-kmem_cache_t *
+struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
-	void (*dtor)(void*, kmem_cache_t *, unsigned long))
+	unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
+	void (*dtor)(void*, struct kmem_cache *, unsigned long))
 {
 	size_t left_over, slab_size, ralign;
-	kmem_cache_t *cachep = NULL;
+	struct kmem_cache *cachep = NULL;
 	struct list_head *p;
 
 	/*
@@ -1659,7 +1659,7 @@ kmem_cache_create (const char *name, siz
 	down(&cache_chain_sem);
 
 	list_for_each(p, &cache_chain) {
-		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+		struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
 		mm_segment_t old_fs = get_fs();
 		char tmp;
 		int res;
@@ -1759,10 +1759,10 @@ kmem_cache_create (const char *name, siz
 	align = ralign;
 
 	/* Get cache's description obj. */
-	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+	cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
 	if (!cachep)
 		goto oops;
-	memset(cachep, 0, sizeof(kmem_cache_t));
+	memset(cachep, 0, sizeof(struct kmem_cache));
 
 #if DEBUG
 	cachep->obj_size = size;
@@ -1938,7 +1938,7 @@ static void check_irq_on(void)
 	BUG_ON(irqs_disabled());
 }
 
-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
@@ -1946,7 +1946,7 @@ static void check_spinlock_acquired(kmem
 #endif
 }
 
-static void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
@@ -1979,12 +1979,12 @@ static void smp_call_function_all_cpus(v
 	preempt_enable();
 }
 
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 				int force, int node);
 
 static void do_drain(void *arg)
 {
-	kmem_cache_t *cachep = (kmem_cache_t *) arg;
+	struct kmem_cache *cachep = (struct kmem_cache *) arg;
 	struct array_cache *ac;
 	int node = numa_node_id();
 
@@ -1996,7 +1996,7 @@ static void do_drain(void *arg)
 	ac->avail = 0;
 }
 
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
 {
 	struct kmem_list3 *l3;
 	int node;
@@ -2017,7 +2017,7 @@ static void drain_cpu_caches(kmem_cache_
 	spin_unlock_irq(&cachep->spinlock);
 }
 
-static int __node_shrink(kmem_cache_t *cachep, int node)
+static int __node_shrink(struct kmem_cache *cachep, int node)
 {
 	struct slab *slabp;
 	struct kmem_list3 *l3 = cachep->nodelists[node];
@@ -2046,7 +2046,7 @@ static int __node_shrink(kmem_cache_t *c
 	return ret;
 }
 
-static int __cache_shrink(kmem_cache_t *cachep)
+static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
 	struct kmem_list3 *l3;
@@ -2072,7 +2072,7 @@ static int __cache_shrink(kmem_cache_t *
  * Releases as many slabs as possible for a cache.
  * To help debugging, a zero exit status indicates all slabs were released.
  */
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	if (!cachep || in_interrupt())
 		BUG();
@@ -2085,7 +2085,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * kmem_cache_destroy - delete a cache
  * @cachep: the cache to destroy
  *
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a struct kmem_cache object from the slab cache.
  * Returns 0 on success.
  *
  * It is expected this function will be called by a module when it is
@@ -2098,7 +2098,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * The caller must guarantee that noone will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
-int kmem_cache_destroy(kmem_cache_t *cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2149,7 +2149,7 @@ int kmem_cache_destroy(kmem_cache_t *cac
 EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
-static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 				   int colour_off, gfp_t local_flags)
 {
 	struct slab *slabp;
@@ -2175,7 +2175,7 @@ static inline kmem_bufctl_t *slab_bufctl
 	return (kmem_bufctl_t *) (slabp + 1);
 }
 
-static void cache_init_objs(kmem_cache_t *cachep,
+static void cache_init_objs(struct kmem_cache *cachep,
 			    struct slab *slabp, unsigned long ctor_flags)
 {
 	int i;
@@ -2224,7 +2224,7 @@ static void cache_init_objs(kmem_cache_t
 	slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (flags & SLAB_DMA) {
 		if (!(cachep->gfpflags & GFP_DMA))
@@ -2235,7 +2235,7 @@ static void kmem_flagcheck(kmem_cache_t 
 	}
 }
 
-static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
 {
 	void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
 	kmem_bufctl_t next;
@@ -2251,7 +2251,7 @@ static void *slab_get_obj(kmem_cache_t *
 	return objp;
 }
 
-static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp,
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
 			  int nodeid)
 {
 	unsigned int objnr = (objp - slabp->s_mem) / cachep->buffer_size;
@@ -2271,7 +2271,7 @@ static void slab_put_obj(kmem_cache_t *c
 	slabp->inuse--;
 }
 
-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
 {
 	int i;
 	struct page *page;
@@ -2290,7 +2290,7 @@ static void set_slab_attr(kmem_cache_t *
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
@@ -2401,7 +2401,7 @@ static void kfree_debugcheck(const void 
 	}
 }
 
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 				   void *caller)
 {
 	struct page *page;
@@ -2475,7 +2475,7 @@ static void *cache_free_debugcheck(kmem_
 	return objp;
 }
 
-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 {
 	kmem_bufctl_t i;
 	int entries = 0;
@@ -2508,7 +2508,7 @@ static void check_slabp(kmem_cache_t *ca
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2599,7 +2599,7 @@ static void *cache_alloc_refill(kmem_cac
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
 {
 	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2608,7 +2608,7 @@ cache_alloc_debugcheck_before(kmem_cache
 }
 
 #if DEBUG
-static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
 					void *objp, void *caller)
 {
 	if (!objp)
@@ -2657,7 +2657,7 @@ static void *cache_alloc_debugcheck_afte
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
 	struct array_cache *ac;
@@ -2675,7 +2675,7 @@ static inline void *____cache_alloc(kmem
 	return objp;
 }
 
-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -2695,7 +2695,7 @@ static inline void *__cache_alloc(kmem_c
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct list_head *entry;
 	struct slab *slabp;
@@ -2757,7 +2757,7 @@ static void *__cache_alloc_node(kmem_cac
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		       int node)
 {
 	int i;
@@ -2795,7 +2795,7 @@ static void free_block(kmem_cache_t *cac
 	}
 }
 
-static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2854,7 +2854,7 @@ static void cache_flusharray(kmem_cache_
  *
  * Called with disabled ints.
  */
-static inline void __cache_free(kmem_cache_t *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
@@ -2913,7 +2913,7 @@ static inline void __cache_free(kmem_cac
  * Allocate an object from this cache.  The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags);
 }
@@ -2933,7 +2933,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
  *
  * Currently only used for dentry validation.
  */
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long min_addr = PAGE_OFFSET;
@@ -2974,7 +2974,7 @@ int fastcall kmem_ptr_validate(kmem_cach
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -2998,7 +2998,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
@@ -3031,7 +3031,7 @@ EXPORT_SYMBOL(kmalloc_node);
  */
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3102,7 +3102,7 @@ EXPORT_SYMBOL(__alloc_percpu);
  * Free an object which was previously allocated from this
  * cache.
  */
-void kmem_cache_free(kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
 
@@ -3123,7 +3123,7 @@ EXPORT_SYMBOL(kmem_cache_free);
  */
 void kfree(const void *objp)
 {
-	kmem_cache_t *c;
+	struct kmem_cache *c;
 	unsigned long flags;
 
 	if (unlikely(!objp))
@@ -3160,13 +3160,13 @@ void free_percpu(const void *objp)
 EXPORT_SYMBOL(free_percpu);
 #endif
 
-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
 	return obj_size(cachep);
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
-const char *kmem_cache_name(kmem_cache_t *cachep)
+const char *kmem_cache_name(struct kmem_cache *cachep)
 {
 	return cachep->name;
 }
@@ -3175,7 +3175,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 for all nodes.
  */
-static int alloc_kmemlist(kmem_cache_t *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3231,7 +3231,7 @@ static int alloc_kmemlist(kmem_cache_t *
 }
 
 struct ccupdate_struct {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	struct array_cache *new[NR_CPUS];
 };
 
@@ -3247,7 +3247,7 @@ static void do_ccupdate_local(void *info
 	new->new[smp_processor_id()] = old;
 }
 
-static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
 			    int shared)
 {
 	struct ccupdate_struct new;
@@ -3293,7 +3293,7 @@ static int do_tune_cpucache(kmem_cache_t
 	return 0;
 }
 
-static void enable_cpucache(kmem_cache_t *cachep)
+static void enable_cpucache(struct kmem_cache *cachep)
 {
 	int err;
 	int limit, shared;
@@ -3345,7 +3345,7 @@ static void enable_cpucache(kmem_cache_t
 		       cachep->name, -err);
 }
 
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 				int force, int node)
 {
 	int tofree;
@@ -3390,12 +3390,12 @@ static void cache_reap(void *unused)
 	}
 
 	list_for_each(walk, &cache_chain) {
-		kmem_cache_t *searchp;
+		struct kmem_cache *searchp;
 		struct list_head *p;
 		int tofree;
 		struct slab *slabp;
 
-		searchp = list_entry(walk, kmem_cache_t, next);
+		searchp = list_entry(walk, struct kmem_cache, next);
 
 		if (searchp->flags & SLAB_NO_REAP)
 			goto next;
@@ -3498,15 +3498,15 @@ static void *s_start(struct seq_file *m,
 		if (p == &cache_chain)
 			return NULL;
 	}
-	return list_entry(p, kmem_cache_t, next);
+	return list_entry(p, struct kmem_cache, next);
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	kmem_cache_t *cachep = p;
+	struct kmem_cache *cachep = p;
 	++*pos;
 	return cachep->next.next == &cache_chain ? NULL
-	    : list_entry(cachep->next.next, kmem_cache_t, next);
+	    : list_entry(cachep->next.next, struct kmem_cache, next);
 }
 
 static void s_stop(struct seq_file *m, void *p)
@@ -3516,7 +3516,7 @@ static void s_stop(struct seq_file *m, v
 
 static int s_show(struct seq_file *m, void *p)
 {
-	kmem_cache_t *cachep = p;
+	struct kmem_cache *cachep = p;
 	struct list_head *q;
 	struct slab *slabp;
 	unsigned long active_objs;
@@ -3666,7 +3666,8 @@ ssize_t slabinfo_write(struct file *file
 	down(&cache_chain_sem);
 	res = -EINVAL;
 	list_for_each(p, &cache_chain) {
-		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+		struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+						       next);
 
 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 ||

--


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [patch 09/10] slab: rename ac_data to cpu_cache_get
  2006-01-14 12:46 [patch 00/10] slab updates Pekka Enberg
                   ` (8 preceding siblings ...)
  2006-01-14 12:46 ` [patch 10/10] slab: replace kmem_cache_t with struct kmem_cache Pekka Enberg
@ 2006-01-14 12:46 ` Pekka Enberg
  9 siblings, 0 replies; 16+ messages in thread
From: Pekka Enberg @ 2006-01-14 12:46 UTC (permalink / raw)
  To: akpm; +Cc: linux-kernel, manfred

From: Pekka Enberg <penberg@cs.helsinki.fi>

This patch renames the ac_data() function to the more descriptive cpu_cache_get().

Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -677,7 +677,7 @@ static void enable_cpucache(kmem_cache_t
 static void cache_reap(void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
 
-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+static inline struct array_cache *cpu_cache_get(kmem_cache_t *cachep)
 {
 	return cachep->array[smp_processor_id()];
 }
@@ -1183,8 +1183,8 @@ void __init kmem_cache_init(void)
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
 		local_irq_disable();
-		BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
-		memcpy(ptr, ac_data(&cache_cache),
+		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
+		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
 		cache_cache.array[smp_processor_id()] = ptr;
 		local_irq_enable();
@@ -1192,9 +1192,9 @@ void __init kmem_cache_init(void)
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
 		local_irq_disable();
-		BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
+		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
-		memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
+		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
 		       sizeof(struct arraycache_init));
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
@@ -1232,7 +1232,7 @@ void __init kmem_cache_init(void)
 	g_cpucache_up = FULL;
 
 	/* Register a cpu startup notifier callback
-	 * that initializes ac_data for all new cpus
+	 * that initializes cpu_cache_get for all new cpus
 	 */
 	register_cpu_notifier(&cpucache_notifier);
 
@@ -1906,11 +1906,11 @@ kmem_cache_create (const char *name, siz
 		    jiffies + REAPTIMEOUT_LIST3 +
 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
-		BUG_ON(!ac_data(cachep));
-		ac_data(cachep)->avail = 0;
-		ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
-		ac_data(cachep)->batchcount = 1;
-		ac_data(cachep)->touched = 0;
+		BUG_ON(!cpu_cache_get(cachep));
+		cpu_cache_get(cachep)->avail = 0;
+		cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+		cpu_cache_get(cachep)->batchcount = 1;
+		cpu_cache_get(cachep)->touched = 0;
 		cachep->batchcount = 1;
 		cachep->limit = BOOT_CPUCACHE_ENTRIES;
 	}
@@ -1989,7 +1989,7 @@ static void do_drain(void *arg)
 	int node = numa_node_id();
 
 	check_irq_off();
-	ac = ac_data(cachep);
+	ac = cpu_cache_get(cachep);
 	spin_lock(&cachep->nodelists[node]->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node);
 	spin_unlock(&cachep->nodelists[node]->list_lock);
@@ -2515,7 +2515,7 @@ static void *cache_alloc_refill(kmem_cac
 	struct array_cache *ac;
 
 	check_irq_off();
-	ac = ac_data(cachep);
+	ac = cpu_cache_get(cachep);
       retry:
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -2587,7 +2587,7 @@ static void *cache_alloc_refill(kmem_cac
 		x = cache_grow(cachep, flags, numa_node_id());
 
 		// cache_grow can reenable interrupts, then ac could change.
-		ac = ac_data(cachep);
+		ac = cpu_cache_get(cachep);
 		if (!x && ac->avail == 0)	// no objects in sight? abort
 			return NULL;
 
@@ -2663,7 +2663,7 @@ static inline void *____cache_alloc(kmem
 	struct array_cache *ac;
 
 	check_irq_off();
-	ac = ac_data(cachep);
+	ac = cpu_cache_get(cachep);
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
 		ac->touched = 1;
@@ -2856,7 +2856,7 @@ static void cache_flusharray(kmem_cache_
  */
 static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 {
-	struct array_cache *ac = ac_data(cachep);
+	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
@@ -3241,7 +3241,7 @@ static void do_ccupdate_local(void *info
 	struct array_cache *old;
 
 	check_irq_off();
-	old = ac_data(new->cachep);
+	old = cpu_cache_get(new->cachep);
 
 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
 	new->new[smp_processor_id()] = old;
@@ -3407,7 +3407,7 @@ static void cache_reap(void *unused)
 			drain_alien_cache(searchp, l3);
 		spin_lock_irq(&l3->list_lock);
 
-		drain_array_locked(searchp, ac_data(searchp), 0,
+		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
 				   numa_node_id());
 
 		if (time_after(l3->next_reap, jiffies))

--


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [patch 04/10] slab: cache_estimate cleanup
  2006-01-14 12:46 ` [patch 04/10] slab: cache_estimate cleanup Pekka Enberg
@ 2006-01-16  2:38   ` Andrew Morton
  2006-01-16  7:00     ` Pekka J Enberg
  0 siblings, 1 reply; 16+ messages in thread
From: Andrew Morton @ 2006-01-16  2:38 UTC (permalink / raw)
  To: Pekka Enberg; +Cc: linux-kernel, manfred

"Pekka Enberg" <penberg@cs.helsinki.fi> wrote:
>
> From: Steven Rostedt <rostedt@goodmis.org>
> 
> This patch cleans up cache_estimate() in mm/slab.c and improves the
> algorithm from O(n) to O(1). We first calculate the maximum number of
> objects a slab can hold after struct slab and the kmem_bufctl_t for each
> object have been given enough space. After that, to respect alignment
> rules, we decrease the number of objects if necessary. As the required
> padding is at most align-1 and each object occupies at least align bytes,
> it is always enough to decrease the number of objects by one.
> 
> The optimization was originally made by Balbir Singh, with further
> improvements from Steven Rostedt. Manfred Spraul provided additional
> modifications: no loop at all for the off-slab case, plus comments
> explaining the background.
> 
> ...
> -	size_t wastage = PAGE_SIZE << gfporder;
> -	size_t extra = 0;
> -	size_t base = 0;
> ...
> +	size_t mgmt_size;
> +	size_t slab_size = PAGE_SIZE << gfporder;

Can anyone think of a reason for using size_t in there instead of plain old
unsigned int?

^ permalink raw reply	[flat|nested] 16+ messages in thread
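
For readers following the quoted changelog above, here is a minimal,
self-contained sketch of the described O(1) estimate. The helper names
(slab_mgmt_size, estimate_objs), the stand-in struct slab_mgmt and the
constants are illustrative assumptions, not the exact mm/slab.c code:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define BYTES_PER_WORD	sizeof(void *)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* stand-ins for the kernel's on-slab management structures */
struct slab_mgmt { unsigned long placeholder[4]; };
typedef unsigned int kmem_bufctl_t;

/* management area: struct slab_mgmt plus one bufctl per object, aligned */
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN_UP(sizeof(struct slab_mgmt) +
			nr_objs * sizeof(kmem_bufctl_t), align);
}

/*
 * O(1) estimate: first ignore the alignment of the management area,
 * then correct by at most one object, as the changelog argues.
 */
static void estimate_objs(unsigned long slab_size, size_t buffer_size,
			  size_t align, unsigned int *num, size_t *left_over)
{
	size_t nr_objs;

	nr_objs = (slab_size - sizeof(struct slab_mgmt)) /
		  (buffer_size + sizeof(kmem_bufctl_t));

	/* padding is at most align-1 and buffer_size >= align,
	 * so dropping one object is always enough */
	if (slab_mgmt_size(nr_objs, align) + nr_objs * buffer_size > slab_size)
		nr_objs--;

	*num = (unsigned int)nr_objs;
	*left_over = slab_size - nr_objs * buffer_size -
		     slab_mgmt_size(nr_objs, align);
}

int main(void)
{
	unsigned int num;
	size_t left_over;

	estimate_objs(PAGE_SIZE, 128, BYTES_PER_WORD, &num, &left_over);
	printf("objects per page: %u, wasted bytes: %zu\n", num, left_over);
	return 0;
}

The off-slab branch mentioned in the changelog, where the management
data lives outside the slab and the estimate is a plain division with
no correction step, is omitted from this sketch.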

* Re: [patch 04/10] slab: cache_estimate cleanup
  2006-01-16  2:38   ` Andrew Morton
@ 2006-01-16  7:00     ` Pekka J Enberg
  0 siblings, 0 replies; 16+ messages in thread
From: Pekka J Enberg @ 2006-01-16  7:00 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel, manfred

"Pekka Enberg" <penberg@cs.helsinki.fi> wrote:
> > -	size_t wastage = PAGE_SIZE << gfporder;
> > -	size_t extra = 0;
> > -	size_t base = 0;
> > ...
> > +	size_t mgmt_size;
> > +	size_t slab_size = PAGE_SIZE << gfporder;

On Sun, 15 Jan 2006, Andrew Morton wrote:
> Can anyone think of a reason for using size_t in there instead of plain old
> unsigned int?

Not really, but unsigned long would probably be safer.

			Pekka

^ permalink raw reply	[flat|nested] 16+ messages in thread
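
As a small illustration of the type widths behind that answer: on an
LP64 build (x86-64, arm64, ...) unsigned int is 32 bits while unsigned
long and size_t are both 64 bits, so the wider types cannot truncate a
large byte count. A trivial sketch, assuming an LP64 target:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* typical LP64 output: 4, 8 and 8 bytes */
	printf("unsigned int:  %zu bytes\n", sizeof(unsigned int));
	printf("unsigned long: %zu bytes\n", sizeof(unsigned long));
	printf("size_t:        %zu bytes\n", sizeof(size_t));
	return 0;
}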

* Re: [patch 05/10] slab: extract slab_destroy_objs()
  2006-01-14 12:46 ` [patch 05/10] slab: extract slab_destroy_objs() Pekka Enberg
@ 2006-01-18 18:31   ` Christoph Lameter
  2006-01-19  9:07     ` Pekka J Enberg
  2006-01-20  5:18     ` Horst von Brand
  0 siblings, 2 replies; 16+ messages in thread
From: Christoph Lameter @ 2006-01-18 18:31 UTC (permalink / raw)
  To: Pekka Enberg; +Cc: akpm, linux-kernel, manfred

On Sat, 14 Jan 2006, Pekka Enberg wrote:

> +static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)

This is only called once, right? Make this inline?

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [patch 05/10] slab: extract slab_destroy_objs()
  2006-01-18 18:31   ` Christoph Lameter
@ 2006-01-19  9:07     ` Pekka J Enberg
  2006-01-20  5:18     ` Horst von Brand
  1 sibling, 0 replies; 16+ messages in thread
From: Pekka J Enberg @ 2006-01-19  9:07 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: akpm, linux-kernel, manfred

On Sat, 14 Jan 2006, Pekka Enberg wrote:
> > +static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
 
On Wed, 18 Jan 2006, Christoph Lameter wrote:
> This is only called once, right? Make this inline?

We get better stack traces when it's not inlined, so I don't see the point.

			Pekka

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [patch 05/10] slab: extract slab_destroy_objs()
  2006-01-18 18:31   ` Christoph Lameter
  2006-01-19  9:07     ` Pekka J Enberg
@ 2006-01-20  5:18     ` Horst von Brand
  1 sibling, 0 replies; 16+ messages in thread
From: Horst von Brand @ 2006-01-20  5:18 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, akpm, linux-kernel, manfred

Christoph Lameter <clameter@engr.sgi.com> wrote:
> On Sat, 14 Jan 2006, Pekka Enberg wrote:
> 
> > +static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
> 
> This is only called once, right? Make this inline?

Leave it to gcc. It might be called once today, and all over the place
tomorrow.

In any case, it can't be performance-critical.
-- 
Dr. Horst H. von Brand                   User #22616 counter.li.org
Departamento de Informatica                     Fono: +56 32 654431
Universidad Tecnica Federico Santa Maria              +56 32 654239
Casilla 110-V, Valparaiso, Chile                Fax:  +56 32 797513


^ permalink raw reply	[flat|nested] 16+ messages in thread
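
To illustrate the "leave it to gcc" point: for a static function with a
single call site, gcc typically enables -finline-functions-called-once
from -O1 upward, so the call is usually folded away without any
source-level inline keyword. A stand-alone sketch (behaviour can vary
with compiler version and flags):

/* Compile with "gcc -O2 -S demo.c" and inspect the assembly: the body
 * of destroy_objs() is normally merged into destroy_all(). */
#include <stdio.h>

static void destroy_objs(int *objs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		objs[i] = 0;		/* pretend to tear the objects down */
}

static void destroy_all(int *objs, int n)
{
	destroy_objs(objs, n);		/* single call site */
	printf("destroyed %d objects\n", n);
}

int main(void)
{
	int objs[4] = { 1, 2, 3, 4 };

	destroy_all(objs, 4);
	return 0;
}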

end of thread, other threads:[~2006-01-20 17:09 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-01-14 12:46 [patch 00/10] slab updates Pekka Enberg
2006-01-14 12:46 ` [patch 01/10] slab: distinguish between object and buffer size Pekka Enberg
2006-01-14 12:46 ` [patch 02/10] slab: minor cleanup to kmem_cache_alloc_node Pekka Enberg
2006-01-14 12:46 ` [patch 05/10] slab: extract slab_destroy_objs() Pekka Enberg
2006-01-18 18:31   ` Christoph Lameter
2006-01-19  9:07     ` Pekka J Enberg
2006-01-20  5:18     ` Horst von Brand
2006-01-14 12:46 ` [patch 03/10] slab: have index_of bug at compile time Pekka Enberg
2006-01-14 12:46 ` [patch 06/10] slab: extract slab_{put|get}_obj Pekka Enberg
2006-01-14 12:46 ` [patch 04/10] slab: cache_estimate cleanup Pekka Enberg
2006-01-16  2:38   ` Andrew Morton
2006-01-16  7:00     ` Pekka J Enberg
2006-01-14 12:46 ` [patch 08/10] slab: extract virt_to_{cache|slab} Pekka Enberg
2006-01-14 12:46 ` [patch 07/10] slab: reduce inlining Pekka Enberg
2006-01-14 12:46 ` [patch 10/10] slab: replace kmem_cache_t with struct kmem_cache Pekka Enberg
2006-01-14 12:46 ` [patch 09/10] slab: rename ac_data to cpu_cache_get Pekka Enberg

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).